update: move [3] retry to outer loop

hinto-janaiyo 2022-10-28 15:45:13 -04:00
parent 8780b0684d
commit 52070ae00c
5 changed files with 340 additions and 172 deletions
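
The headline change (visible in the update.rs hunks further down) is that the retry-on-failure logic no longer lives inside Pkg::get_metadata()/Pkg::get_bytes(); instead, Update::start() loops up to three times over whichever packages are still missing data. A minimal, std-only sketch of that pattern — the names and the fetch stub are illustrative, not Gupax's actual API:

fn fetch(name: &str) -> Result<String, String> {
    // stand-in for the real async Pkg::get_metadata() / Pkg::get_bytes() calls
    Err(format!("{} is unreachable", name))
}

fn main() {
    let mut pending = vec!["Gupax", "P2Pool", "XMRig"];
    for attempt in 1..=3 {
        let mut failed = vec![];
        for name in pending {
            match fetch(name) {
                Ok(version) => println!("{} {} ... OK", name, version),
                Err(e) => {
                    println!("{} failed, attempt [{}/3]: {}", name, attempt, e);
                    failed.push(name);
                }
            }
        }
        // Only the failures are carried into the next attempt.
        pending = failed;
        if pending.is_empty() { break }
    }
    if !pending.is_empty() {
        eprintln!("still failing after 3 attempts: {:?}", pending);
    }
}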


@@ -16,7 +16,7 @@
// along with this program. If not, see <https://www.gnu.org/licenses/>. // along with this program. If not, see <https://www.gnu.org/licenses/>.
pub const GUPAX_VERSION: &'static str = concat!("v", env!("CARGO_PKG_VERSION")); pub const GUPAX_VERSION: &'static str = concat!("v", env!("CARGO_PKG_VERSION"));
pub const P2POOL_VERSION: &'static str = "v2.4"; pub const P2POOL_VERSION: &'static str = "v2.3";
pub const XMRIG_VERSION: &'static str = "v6.18.0"; pub const XMRIG_VERSION: &'static str = "v6.18.0";
pub const COMMIT: &'static str = include_str!("../.git/refs/heads/main"); pub const COMMIT: &'static str = include_str!("../.git/refs/heads/main");


@@ -51,37 +51,36 @@ impl Gupax {
match Update::start(u, version) { match Update::start(u, version) {
Err(e) => { Err(e) => {
info!("Update | {} ... FAIL", e); info!("Update | {} ... FAIL", e);
*u2.lock().unwrap().msg.lock().unwrap() = MSG_FAILED.to_string(); *u2.lock().unwrap().msg.lock().unwrap() = format!("{} | {}", MSG_FAILED, e);
*u2.lock().unwrap().updating.lock().unwrap() = false;
},
_ => {
info!("Update ... OK");
*u2.lock().unwrap().msg.lock().unwrap() = MSG_SUCCESS.to_string();
*u2.lock().unwrap().prog.lock().unwrap() = 100;
*u2.lock().unwrap().updating.lock().unwrap() = false; *u2.lock().unwrap().updating.lock().unwrap() = false;
}, },
_ => (),
} }
}); });
} }
}); });
ui.vertical(|ui| { ui.vertical(|ui| {
ui.set_enabled(updating); ui.set_enabled(updating);
let msg = format!("{}\n{}{}", *update.msg.lock().unwrap(), *update.prog.lock().unwrap(), "%");
ui.add_sized([width, height*1.4], egui::Label::new(msg));
let height = height/2.0; let height = height/2.0;
let msg = format!("{}{}{}{}", *update.msg.lock().unwrap(), " ... ", *update.prog.lock().unwrap(), "%"); if updating {
ui.add_sized([width, height], egui::Label::new(msg)); ui.add_sized([width, height], egui::Spinner::new().size(height));
if updating { ui.add_sized([width, height], egui::Spinner::new().size(height)); } } else {
ui.add_sized([width, height], egui::ProgressBar::new(*update.prog.lock().unwrap() as f32 / 100.0)); ui.add_sized([width, height], egui::Label::new("..."));
}
ui.add_sized([width, height], egui::ProgressBar::new((update.prog.lock().unwrap().round() / 100.0)));
}); });
}); });
ui.horizontal(|ui| { ui.horizontal(|ui| {
ui.group(|ui| { ui.group(|ui| {
let width = (width - SPACE*10.0)/5.0; let width = (width - SPACE*9.8)/5.0;
let height = height/2.0; let height = height/2.0;
let mut style = (*ctx.style()).clone(); let mut style = (*ctx.style()).clone();
style.spacing.icon_width_inner = height / 6.0; style.spacing.icon_width_inner = width / 6.0;
style.spacing.icon_width = height / 4.0; style.spacing.icon_width = width / 4.0;
style.spacing.icon_spacing = width / 20.0; style.spacing.icon_spacing = 20.0;
ctx.set_style(style); ctx.set_style(style);
let height = height/2.0; let height = height/2.0;
ui.add_sized([width, height], egui::Checkbox::new(&mut state.auto_update, "Auto-update")).on_hover_text(GUPAX_AUTO_UPDATE); ui.add_sized([width, height], egui::Checkbox::new(&mut state.auto_update, "Auto-update")).on_hover_text(GUPAX_AUTO_UPDATE);
@@ -99,12 +98,12 @@ impl Gupax {
ui.horizontal(|ui| { ui.horizontal(|ui| {
ui.label("P2Pool binary path:"); ui.label("P2Pool binary path:");
ui.spacing_mut().text_edit_width = ui.available_width() - 35.0; ui.spacing_mut().text_edit_width = ui.available_width() - SPACE;
ui.text_edit_singleline(&mut state.p2pool_path).on_hover_text(GUPAX_PATH_P2POOL); ui.text_edit_singleline(&mut state.p2pool_path).on_hover_text(GUPAX_PATH_P2POOL);
}); });
ui.horizontal(|ui| { ui.horizontal(|ui| {
ui.label("XMRig binary path: "); ui.label("XMRig binary path: ");
ui.spacing_mut().text_edit_width = ui.available_width() - 35.0; ui.spacing_mut().text_edit_width = ui.available_width() - SPACE;
ui.text_edit_singleline(&mut state.xmrig_path).on_hover_text(GUPAX_PATH_XMRIG); ui.text_edit_singleline(&mut state.xmrig_path).on_hover_text(GUPAX_PATH_XMRIG);
}); });
} }
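
For reference, the new Gupax-tab status area above boils down to something like the helper below — a sketch only, with the Arc<Mutex<..>> plumbing replaced by plain parameters and an assumed function name:

use egui::{Label, ProgressBar, Spinner, Ui};

fn update_status(ui: &mut Ui, updating: bool, msg: &str, prog: f32, width: f32, height: f32) {
    ui.set_enabled(updating);
    // One label holds the status message plus the current percentage.
    ui.add_sized([width, height * 1.4], Label::new(format!("{}\n{}%", msg, prog)));
    let height = height / 2.0;
    if updating {
        // Spinner while the updater thread is running...
        ui.add_sized([width, height], Spinner::new().size(height));
    } else {
        // ...otherwise a placeholder so the layout doesn't jump around.
        ui.add_sized([width, height], Label::new("..."));
    }
    // ProgressBar expects 0.0..=1.0; [prog] is stored as 0.0..=100.0.
    ui.add_sized([width, height], ProgressBar::new(prog.round() / 100.0));
}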


@@ -457,7 +457,7 @@ impl eframe::App for App {
// Top: Tabs // Top: Tabs
egui::TopBottomPanel::top("top").show(ctx, |ui| { egui::TopBottomPanel::top("top").show(ctx, |ui| {
let width = (self.width - 95.0)/5.0; let width = (self.width - (SPACE*10.0))/5.0;
let height = self.height/10.0; let height = self.height/10.0;
ui.group(|ui| { ui.group(|ui| {
ui.add_space(4.0); ui.add_space(4.0);
@@ -582,7 +582,6 @@ impl eframe::App for App {
ui.style_mut().override_text_style = Some(egui::TextStyle::Body); ui.style_mut().override_text_style = Some(egui::TextStyle::Body);
match self.tab { match self.tab {
Tab::About => { Tab::About => {
info!("");
ui.add_space(10.0); ui.add_space(10.0);
ui.vertical_centered(|ui| { ui.vertical_centered(|ui| {
// Display [Gupax] banner at max, 1/4 the available length // Display [Gupax] banner at max, 1/4 the available length


@@ -263,18 +263,19 @@ const DIRECTORY: &'static str = "Gupax";
const DIRECTORY: &'static str = "Gupax"; const DIRECTORY: &'static str = "Gupax";
#[cfg(target_os = "linux")] #[cfg(target_os = "linux")]
const DIRECTORY: &'static str = "gupax"; const DIRECTORY: &'static str = "gupax";
#[cfg(target_os = "windows")] #[cfg(target_os = "windows")]
const DEFAULT_P2POOL_PATH: &'static str = r"P2Pool\p2pool.exe"; pub const DEFAULT_P2POOL_PATH: &'static str = r"P2Pool\p2pool.exe";
#[cfg(target_os = "macos")] #[cfg(target_os = "macos")]
const DEFAULT_P2POOL_PATH: &'static str = "P2Pool/p2pool"; pub const DEFAULT_P2POOL_PATH: &'static str = "P2Pool/p2pool";
#[cfg(target_os = "linux")] #[cfg(target_os = "linux")]
const DEFAULT_P2POOL_PATH: &'static str = "p2pool/p2pool"; pub const DEFAULT_P2POOL_PATH: &'static str = "p2pool/p2pool";
#[cfg(target_os = "windows")] #[cfg(target_os = "windows")]
const DEFAULT_XMRIG_PATH: &'static str = r"XMRig\xmrig.exe"; pub const DEFAULT_XMRIG_PATH: &'static str = r"XMRig\xmrig.exe";
#[cfg(target_os = "macos")] #[cfg(target_os = "macos")]
const DEFAULT_XMRIG_PATH: &'static str = "XMRig/xmrig"; pub const DEFAULT_XMRIG_PATH: &'static str = "XMRig/xmrig";
#[cfg(target_os = "linux")] #[cfg(target_os = "linux")]
const DEFAULT_XMRIG_PATH: &'static str = "xmrig/xmrig"; pub const DEFAULT_XMRIG_PATH: &'static str = "xmrig/xmrig";
//---------------------------------------------------------------------------------------------------- Error Enum //---------------------------------------------------------------------------------------------------- Error Enum
#[derive(Debug)] #[derive(Debug)]


@@ -45,8 +45,9 @@ use arti_hyper::*;
use log::*; use log::*;
use crate::update::Name::*; use crate::update::Name::*;
use std::path::PathBuf; use std::path::PathBuf;
use crate::state::Version; use crate::state::*;
use crate::constants::GUPAX_VERSION; use crate::constants::GUPAX_VERSION;
use walkdir::WalkDir;
// use tls_api_native_tls::{TlsConnector,TlsConnectorBuilder}; // use tls_api_native_tls::{TlsConnector,TlsConnectorBuilder};
@@ -163,11 +164,23 @@ const MSG_TMP: &'static str = "Creating temporary directory";
const MSG_TOR: &'static str = "Creating Tor+HTTPS client"; const MSG_TOR: &'static str = "Creating Tor+HTTPS client";
const MSG_HTTPS: &'static str = "Creating HTTPS client"; const MSG_HTTPS: &'static str = "Creating HTTPS client";
const MSG_METADATA: &'static str = "Fetching package metadata"; const MSG_METADATA: &'static str = "Fetching package metadata";
const MSG_METADATA_RETRY: &'static str = "Fetching package metadata failed, attempt";
const MSG_COMPARE: &'static str = "Compare package versions"; const MSG_COMPARE: &'static str = "Compare package versions";
const MSG_UP_TO_DATE: &'static str = "All packages already up-to-date";
const MSG_DOWNLOAD: &'static str = "Downloading packages"; const MSG_DOWNLOAD: &'static str = "Downloading packages";
const MSG_DOWNLOAD_RETRY: &'static str = "Downloading packages failed, attempt";
const MSG_EXTRACT: &'static str = "Extracting packages";
const MSG_UPGRADE: &'static str = "Upgrading packages";
pub const MSG_SUCCESS: &'static str = "Update successful"; pub const MSG_SUCCESS: &'static str = "Update successful";
pub const MSG_FAILED: &'static str = "Update failed"; pub const MSG_FAILED: &'static str = "Update failed";
const INIT: &'static str = "------------------- Init -------------------";
const METADATA: &'static str = "----------------- Metadata -----------------";
const COMPARE: &'static str = "----------------- Compare ------------------";
const DOWNLOAD: &'static str = "----------------- Download -----------------";
const EXTRACT: &'static str = "----------------- Extract ------------------";
const UPGRADE: &'static str = "----------------- Upgrade ------------------";
// These two are sequential and not async so no need for a constant message. // These two are sequential and not async so no need for a constant message.
// The package in question will be known at runtime, so that will be printed. // The package in question will be known at runtime, so that will be printed.
//const MSG_EXTRACT: &'static str = "Extracting packages"; //const MSG_EXTRACT: &'static str = "Extracting packages";
@@ -176,13 +189,14 @@ pub const MSG_FAILED: &'static str = "Update failed";
//---------------------------------------------------------------------------------------------------- Update struct/impl //---------------------------------------------------------------------------------------------------- Update struct/impl
// Contains values needed during update // Contains values needed during update
// Progress bar structure: // Progress bar structure:
// 0% | Create tmp directory and pkg list // 0% | Start
// 10% | Create Tor/HTTPS client // 5% | Create tmp directory, pkg list, fake user-agent
// 15% | Download Metadata (x3) // 5% | Create Tor/HTTPS client
// 15% | Compare Versions (x3) // 30% | Download Metadata (x3)
// 5% | Compare Versions (x3)
// 30% | Download Archive (x3) // 30% | Download Archive (x3)
// 15% | Extract (x3) // 5% | Extract (x3)
// 15% | Upgrade (x3) // 5% | Upgrade (x3)
#[derive(Clone)] #[derive(Clone)]
pub struct Update { pub struct Update {
@@ -191,7 +205,7 @@ pub struct Update {
pub path_xmrig: String, // Full path to current xmrig pub path_xmrig: String, // Full path to current xmrig
pub tmp_dir: String, // Full path to temporary directory pub tmp_dir: String, // Full path to temporary directory
pub updating: Arc<Mutex<bool>>, // Is an update in progress? pub updating: Arc<Mutex<bool>>, // Is an update in progress?
pub prog: Arc<Mutex<u8>>, // Holds the 0-100% progress bar number pub prog: Arc<Mutex<f32>>, // Holds the 0-100% progress bar number
pub msg: Arc<Mutex<String>>, // Message to display on [Gupax] tab while updating pub msg: Arc<Mutex<String>>, // Message to display on [Gupax] tab while updating
pub tor: bool, // Is Tor enabled or not? pub tor: bool, // Is Tor enabled or not?
} }
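
A quick sanity check of the new breakdown above, and of why [prog] had to become an f32: the download/extract/upgrade slices are split across a package count only known at runtime, so the per-package increments are fractional before rounding. A small sketch of the arithmetic:

fn main() {
    // Per-package increments when all three packages need an update:
    let pkg_amount: f32 = 3.0;
    let metadata = 10.0;                        // fixed 10% per package (3 x 10 = 30%)
    let download = (30.0 / pkg_amount).round(); // 10.0
    let extract  = (5.0  / pkg_amount).round(); // 2.0
    let upgrade  = (5.0  / pkg_amount).round(); // 2.0
    // 5 (tmp/pkg list/user-agent) + 5 (client) + 3*10 (metadata) + 5 (compare)
    // + 3*(download + extract + upgrade) = 87, so start() snaps the bar to 100.0
    // once the upgrade step finishes.
    let total = 5.0 + 5.0 + 3.0 * metadata + 5.0 + pkg_amount * (download + extract + upgrade);
    assert_eq!(total, 87.0);
}
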
@@ -205,24 +219,26 @@ impl Update {
path_xmrig: path_xmrig.display().to_string(), path_xmrig: path_xmrig.display().to_string(),
tmp_dir: "".to_string(), tmp_dir: "".to_string(),
updating: Arc::new(Mutex::new(false)), updating: Arc::new(Mutex::new(false)),
prog: Arc::new(Mutex::new(0)), prog: Arc::new(Mutex::new(0.0)),
msg: Arc::new(Mutex::new(MSG_NONE.to_string())), msg: Arc::new(Mutex::new(MSG_NONE.to_string())),
tor, tor,
} }
} }
// Get a temporary random folder // Get a temporary random folder for package download contents
// for package download contents // This used to use [std::env::temp_dir()] but there were issues
// Will look like [/tmp/gupax_A1m98FN3fa/] on Unix // using [std::fs::rename()] on tmpfs -> disk (Invalid cross-device link (os error 18)).
pub fn get_tmp_dir() -> String { // So, uses the [Gupax] binary directory as a base, something like [/home/hinto/gupax/gupax_SG4xsDdVmr]
pub fn get_tmp_dir() -> Result<String, anyhow::Error> {
let rand_string: String = thread_rng() let rand_string: String = thread_rng()
.sample_iter(&Alphanumeric) .sample_iter(&Alphanumeric)
.take(10) .take(10)
.map(char::from) .map(char::from)
.collect(); .collect();
let tmp = std::env::temp_dir(); let base = crate::get_exe_dir()?;
let tmp = format!("{}{}{}{}", tmp.display(), "/gupax_", rand_string, "/"); let tmp_dir = format!("{}{}{}{}", base, "/gupax_", rand_string, "/");
tmp info!("Update | Temporary directory ... {}", tmp_dir);
Ok(tmp_dir)
} }
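
Isolated from the diff, the new temp-directory logic looks roughly like this. [get_exe_dir] here is a stand-in for crate::get_exe_dir(), whose definition is not part of this commit — the point is only that the random gupax_XXXXXXXXXX folder now lives next to the binary, so the later std::fs::rename() never crosses a filesystem boundary:

use rand::{distributions::Alphanumeric, thread_rng, Rng};

fn get_exe_dir() -> Result<String, std::io::Error> {
    let mut path = std::env::current_exe()?;
    path.pop(); // drop the file name, keep the directory the binary lives in
    Ok(path.display().to_string())
}

fn get_tmp_dir() -> Result<String, std::io::Error> {
    let rand_string: String = thread_rng()
        .sample_iter(&Alphanumeric)
        .take(10)
        .map(char::from)
        .collect();
    // e.g. /home/hinto/gupax/gupax_SG4xsDdVmr/
    Ok(format!("{}/gupax_{}/", get_exe_dir()?, rand_string))
}
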
// Get a HTTPS client. Uses [Arti] if Tor is enabled. // Get a HTTPS client. Uses [Arti] if Tor is enabled.
@@ -240,6 +256,8 @@ impl Update {
// //
pub async fn get_client(tor: bool) -> Result<ClientEnum, anyhow::Error> { pub async fn get_client(tor: bool) -> Result<ClientEnum, anyhow::Error> {
if tor { if tor {
// This one below is non-async, but it doesn't bootstrap immediately.
// let tor = TorClient::builder().bootstrap_behavior(arti_client::BootstrapBehavior::OnDemand).create_unbootstrapped()?;
let tor = TorClient::create_bootstrapped(TorClientConfig::default()).await?; let tor = TorClient::create_bootstrapped(TorClientConfig::default()).await?;
let tls = tls_api_native_tls::TlsConnector::builder()?.build()?; let tls = tls_api_native_tls::TlsConnector::builder()?.build()?;
let connector = ArtiHttpConnector::new(tor, tls); let connector = ArtiHttpConnector::new(tor, tls);
@@ -262,31 +280,37 @@ impl Update {
// 5. extract, upgrade // 5. extract, upgrade
#[tokio::main] #[tokio::main]
pub async fn start(update: Arc<Mutex<Self>>, version: Version) -> Result<(), anyhow::Error> { pub async fn start(update: Arc<Mutex<Self>>, mut version: Version) -> Result<(), anyhow::Error> {
// Start //---------------------------------------------------------------------------------------------------- Init
*update.lock().unwrap().updating.lock().unwrap() = true; *update.lock().unwrap().updating.lock().unwrap() = true;
// Set timer
let now = std::time::Instant::now();
// Set progress bar // Set progress bar
*update.lock().unwrap().msg.lock().unwrap() = MSG_START.to_string(); *update.lock().unwrap().msg.lock().unwrap() = MSG_START.to_string();
*update.lock().unwrap().prog.lock().unwrap() = 0; *update.lock().unwrap().prog.lock().unwrap() = 0.0;
info!("Update | Init | {}...", *update.lock().unwrap().msg.lock().unwrap()); info!("Update | {}", INIT);
// Get temporary directory // Get temporary directory
*update.lock().unwrap().msg.lock().unwrap() = MSG_TMP.to_string(); *update.lock().unwrap().msg.lock().unwrap() = MSG_TMP.to_string();
// Cannot lock Arc<Mutex> twice in same line // Cannot lock Arc<Mutex> twice in same line
// so there will be some intermediate variables. // so there will be some intermediate variables.
info!("Update | Init | {} ... {}%", MSG_TMP.to_string(), *update.lock().unwrap().prog.lock().unwrap()); info!("Update | {}", MSG_TMP.to_string());
let tmp_dir = Self::get_tmp_dir(); let tmp_dir = Self::get_tmp_dir()?;
std::fs::create_dir(&tmp_dir)?;
// Make Pkg vector // Make Pkg vector
let prog = update.lock().unwrap().prog.clone(); let prog = update.lock().unwrap().prog.clone();
let msg = update.lock().unwrap().msg.clone(); let msg = update.lock().unwrap().msg.clone();
let vec = vec![ let mut vec = vec![
Pkg::new(Gupax, &tmp_dir, prog.clone(), msg.clone()), Pkg::new(Gupax, &tmp_dir, prog.clone(), msg.clone()),
Pkg::new(P2pool, &tmp_dir, prog.clone(), msg.clone()), Pkg::new(P2pool, &tmp_dir, prog.clone(), msg.clone()),
Pkg::new(Xmrig, &tmp_dir, prog.clone(), msg.clone()), Pkg::new(Xmrig, &tmp_dir, prog.clone(), msg.clone()),
]; ];
let mut handles: Vec<JoinHandle<()>> = vec![];
// Generate fake user-agent
let user_agent = Pkg::get_user_agent();
*update.lock().unwrap().prog.lock().unwrap() = 5.0;
// Create Tor/HTTPS client // Create Tor/HTTPS client
if update.lock().unwrap().tor { if update.lock().unwrap().tor {
@@ -295,141 +319,287 @@ impl Update {
*update.lock().unwrap().msg.lock().unwrap() = MSG_HTTPS.to_string() *update.lock().unwrap().msg.lock().unwrap() = MSG_HTTPS.to_string()
} }
let prog = *update.lock().unwrap().prog.lock().unwrap(); let prog = *update.lock().unwrap().prog.lock().unwrap();
info!("Update | Init | {} ... {}%", *update.lock().unwrap().msg.lock().unwrap(), prog); info!("Update | {}", update.lock().unwrap().msg.lock().unwrap());
let client = Self::get_client(update.lock().unwrap().tor).await?; let client = Self::get_client(update.lock().unwrap().tor).await?;
*update.lock().unwrap().prog.lock().unwrap() += 10; *update.lock().unwrap().prog.lock().unwrap() += 5.0;
info!("Update | Init ... OK"); info!("Update | Init ... OK ... {}%", prog);
// Loop for metadata //---------------------------------------------------------------------------------------------------- Metadata
*update.lock().unwrap().msg.lock().unwrap() = MSG_METADATA.to_string(); *update.lock().unwrap().msg.lock().unwrap() = MSG_METADATA.to_string();
info!("Update | Metadata | Starting metadata fetch..."); info!("Update | {}", METADATA);
for pkg in vec.iter() { let mut vec2 = vec![];
// Clone data before sending to async // Loop process:
let name = pkg.name.clone(); // 1. Start all async metadata fetches
let new_ver = Arc::clone(&pkg.new_ver); // 2. Wait for all to finish
let prog = Arc::clone(&pkg.prog); // 3. Iterate over all [pkg.new_ver], check if empty
let client = client.clone(); // 4. If not empty, move [pkg] to different vec
let link = pkg.link_metadata.to_string(); // 5. At end, if original vec isn't empty, that means something failed
// Send to async // 6. Redo loop [3] times, with the original vec (that now only has the failed pkgs)
let handle: JoinHandle<()> = tokio::spawn(async move { //
match client { // This logic was originally in the [Pkg::get_metadata()]
ClientEnum::Tor(t) => Pkg::get_metadata(name, new_ver, prog, t, link).await, // function itself but for some reason, it was getting skipped over,
ClientEnum::Https(h) => Pkg::get_metadata(name, new_ver, prog, h, link).await, // so the [new_ver] check is now here, in the outer scope.
}; for i in 1..=3 {
}); if i > 1 { *update.lock().unwrap().msg.lock().unwrap() = format!("{} [{}/3]", MSG_METADATA_RETRY.to_string(), i); }
handles.push(handle); let mut handles: Vec<JoinHandle<Result<(), anyhow::Error>>> = vec![];
for pkg in vec.iter() {
// Clone data before sending to async
let name = pkg.name.clone();
let new_ver = Arc::clone(&pkg.new_ver);
let client = client.clone();
let link = pkg.link_metadata.to_string();
// Send to async
let handle: JoinHandle<Result<(), anyhow::Error>> = tokio::spawn(async move {
match client {
ClientEnum::Tor(t) => Pkg::get_metadata(name, new_ver, t, link, user_agent).await,
ClientEnum::Https(h) => Pkg::get_metadata(name, new_ver, h, link, user_agent).await,
}
});
handles.push(handle);
}
// Handle await
for handle in handles {
// Two [??] will send the error.
// We don't actually want to return the error here since we
// prefer looping and retrying over immediately erroring out.
match handle.await? {
Err(e) => warn!("Update | {}", e),
_ => (),
}
}
// Check for empty version
let mut indexes = vec![];
for (index, pkg) in vec.iter().enumerate() {
if pkg.new_ver.lock().unwrap().is_empty() {
warn!("Update | {} failed, attempt [{}/3]...", pkg.name, i);
} else {
indexes.push(index);
vec2.push(pkg.clone());
*update.lock().unwrap().prog.lock().unwrap() += 10.0;
info!("Update | {} {} ... OK", pkg.name, pkg.new_ver.lock().unwrap());
}
}
// Order indexes from biggest to smallest
// This prevents shifting the whole vector and causing panics.
indexes.sort();
indexes.reverse();
for index in indexes {
vec.remove(index);
}
if vec.is_empty() { break }
} }
// Unwrap async if vec.is_empty() {
for handle in handles { info!("Update | Metadata ... OK ... {}%", update.lock().unwrap().prog.lock().unwrap());
handle.await?; } else {
error!("Update | Metadata ... FAIL");
return Err(anyhow!("Metadata fetch failed", ))
} }
info!("Update | Metadata ... OK");
// Loop for version comparison //---------------------------------------------------------------------------------------------------- Compare
*update.lock().unwrap().msg.lock().unwrap() = MSG_COMPARE.to_string(); *update.lock().unwrap().msg.lock().unwrap() = MSG_COMPARE.to_string();
info!("Update | Compare | Starting version comparison..."); info!("Update | {}", COMPARE);
let prog = update.lock().unwrap().prog.clone(); let prog = update.lock().unwrap().prog.clone();
let msg = update.lock().unwrap().msg.clone(); let msg = update.lock().unwrap().msg.clone();
let mut vec2 = vec![]; let mut vec3 = vec![];
for pkg in vec.iter() { let mut new_pkgs = vec![];
for pkg in vec2.iter() {
let new_ver = pkg.new_ver.lock().unwrap().to_owned(); let new_ver = pkg.new_ver.lock().unwrap().to_owned();
match pkg.name { match pkg.name {
Gupax => { Gupax => {
if new_ver == GUPAX_VERSION { if new_ver == GUPAX_VERSION {
info!("Update | Compare | {} {} == {} ... SKIPPING", pkg.name, pkg.new_ver.lock().unwrap(), GUPAX_VERSION); info!("Update | {} {} == {} ... SKIPPING", pkg.name, pkg.new_ver.lock().unwrap(), GUPAX_VERSION);
} else { } else {
info!("Update | Compare | {} {} != {} ... ADDING", pkg.name, pkg.new_ver.lock().unwrap(), GUPAX_VERSION); info!("Update | {} {} != {} ... ADDING", pkg.name, pkg.new_ver.lock().unwrap(), GUPAX_VERSION);
vec2.push(pkg); new_pkgs.push(format!("\nGupax {}{}", GUPAX_VERSION, pkg.new_ver.lock().unwrap()));
vec3.push(pkg);
} }
} }
P2pool => { P2pool => {
if new_ver == version.p2pool { if new_ver == version.p2pool {
info!("Update | Compare | {} {} == {} ... SKIPPING", pkg.name, pkg.new_ver.lock().unwrap(), version.p2pool); info!("Update | {} {} == {} ... SKIPPING", pkg.name, pkg.new_ver.lock().unwrap(), version.p2pool);
} else { } else {
info!("Update | Compare | {} {} != {} ... ADDING", pkg.name, pkg.new_ver.lock().unwrap(), version.p2pool); info!("Update | {} {} != {} ... ADDING", pkg.name, pkg.new_ver.lock().unwrap(), version.p2pool);
vec2.push(pkg); new_pkgs.push(format!("\nP2Pool {}{}", version.p2pool, pkg.new_ver.lock().unwrap()));
vec3.push(pkg);
} }
} }
Xmrig => { Xmrig => {
if new_ver == GUPAX_VERSION { if new_ver == GUPAX_VERSION {
info!("Update | Compare | {} {} == {} ... SKIPPING", pkg.name, pkg.new_ver.lock().unwrap(), version.xmrig); info!("Update | {} {} == {} ... SKIPPING", pkg.name, pkg.new_ver.lock().unwrap(), version.xmrig);
} else { } else {
info!("Update | Compare | {} {} != {} ... ADDING", pkg.name, pkg.new_ver.lock().unwrap(), version.xmrig); info!("Update | {} {} != {} ... ADDING", pkg.name, pkg.new_ver.lock().unwrap(), version.xmrig);
vec2.push(pkg); new_pkgs.push(format!("\nXMRig {}{}", version.xmrig, pkg.new_ver.lock().unwrap()));
vec3.push(pkg);
} }
} }
} }
*update.lock().unwrap().prog.lock().unwrap() += 5;
} }
info!("Update | Compare ... OK"); *update.lock().unwrap().prog.lock().unwrap() += 5.0;
info!("Update | Compare ... OK ... {}%", update.lock().unwrap().prog.lock().unwrap());
// Return if 0 (all packages up-to-date)
// Get amount of packages to divide up the percentage increases
let pkg_amount = vec3.len() as f32;
if pkg_amount == 0.0 {
info!("Update | All packages up-to-date ... RETURNING");
*update.lock().unwrap().prog.lock().unwrap() = 100.0;
*update.lock().unwrap().msg.lock().unwrap() = MSG_UP_TO_DATE.to_string();
return Ok(())
}
let new_pkgs: String = new_pkgs.concat();
// Loop for download //---------------------------------------------------------------------------------------------------- Download
let mut handles: Vec<JoinHandle<()>> = vec![]; *update.lock().unwrap().msg.lock().unwrap() = format!("{}{}", MSG_DOWNLOAD, new_pkgs);
*update.lock().unwrap().msg.lock().unwrap() = MSG_DOWNLOAD.to_string(); info!("Update | {}", DOWNLOAD);
info!("Update | Download | Starting download..."); let mut vec4 = vec![];
for pkg in vec2.iter() { for i in 1..=3 {
// Clone data before async if i > 1 { *update.lock().unwrap().msg.lock().unwrap() = format!("{} [{}/3]{}", MSG_DOWNLOAD_RETRY.to_string(), i, new_pkgs); }
let name = pkg.name.clone(); let mut handles: Vec<JoinHandle<Result<(), anyhow::Error>>> = vec![];
let bytes = Arc::clone(&pkg.bytes); for pkg in vec3.iter() {
let prog = Arc::clone(&pkg.prog); // Clone data before async
let client = client.clone(); let name = pkg.name.clone();
let version = pkg.new_ver.lock().unwrap(); let bytes = Arc::clone(&pkg.bytes);
let link; let client = client.clone();
// Download link = PREFIX + Version (found at runtime) + SUFFIX + Version + EXT let version = pkg.new_ver.lock().unwrap();
// Example: https://github.com/hinto-janaiyo/gupax/releases/download/v0.0.1/gupax-v0.0.1-linux-standalone-x64 let link;
// XMRig doesn't have a [v], so slice it out // Download link = PREFIX + Version (found at runtime) + SUFFIX + Version + EXT
if pkg.name == Name::Xmrig { // Example: https://github.com/hinto-janaiyo/gupax/releases/download/v0.0.1/gupax-v0.0.1-linux-standalone-x64
link = pkg.link_prefix.to_string() + &version + &pkg.link_suffix + &version[1..] + &pkg.link_extension; // XMRig doesn't have a [v], so slice it out
// TODO FIX ME if pkg.name == Name::Xmrig {
// This is temp link for v0.0.1 of [Gupax] link = pkg.link_prefix.to_string() + &version + &pkg.link_suffix + &version[1..] + &pkg.link_extension;
} else if pkg.name == Name::Gupax { // TODO FIX ME
link = "https://github.com/hinto-janaiyo/gupax/releases/download/v0.0.1/gupax-v0.0.1-linux-x64".to_string() // This is temp link for v0.0.1 of [Gupax]
} else { } else if pkg.name == Name::Gupax {
link = pkg.link_prefix.to_string() + &version + &pkg.link_suffix + &version + &pkg.link_extension; link = "https://github.com/hinto-janaiyo/gupax/releases/download/v0.0.1/gupax-v0.0.1-linux-x64".to_string()
} else {
link = pkg.link_prefix.to_string() + &version + &pkg.link_suffix + &version + &pkg.link_extension;
}
info!("Update | {} ... {}", pkg.name, link);
let handle: JoinHandle<Result<(), anyhow::Error>> = tokio::spawn(async move {
match client {
ClientEnum::Tor(t) => Pkg::get_bytes(name, bytes, t, link, user_agent).await,
ClientEnum::Https(h) => Pkg::get_bytes(name, bytes, h, link, user_agent).await,
}
});
handles.push(handle);
} }
info!("Update | Download | {} ... {}", pkg.name, link); // Handle await
let handle: JoinHandle<()> = tokio::spawn(async move { for handle in handles {
match client { match handle.await? {
ClientEnum::Tor(t) => Pkg::get_bytes(name, bytes, prog, t, link).await, Err(e) => warn!("Update | {}", e),
ClientEnum::Https(h) => Pkg::get_bytes(name, bytes, prog, h, link).await, _ => (),
}; }
}); }
handles.push(handle); // Check for empty bytes
let mut indexes = vec![];
for (index, pkg) in vec3.iter().enumerate() {
if pkg.bytes.lock().unwrap().is_empty() {
warn!("Update | {} failed, attempt [{}/3]...", pkg.name, i);
} else {
indexes.push(index);
vec4.push(pkg.clone());
*update.lock().unwrap().prog.lock().unwrap() += (30.0 / pkg_amount).round();
info!("Update | {} ... OK", pkg.name);
}
}
// Order indexes from biggest to smallest
// This prevents shifting the whole vector and causing panics.
indexes.sort();
indexes.reverse();
for index in indexes {
vec3.remove(index);
}
if vec3.is_empty() { break }
} }
for handle in handles { if vec3.is_empty() {
handle.await?; info!("Update | Download ... OK ... {}%", *update.lock().unwrap().prog.lock().unwrap());
} else {
error!("Update | Download ... FAIL");
return Err(anyhow!("Download failed", ))
} }
info!("Update | Download ... OK");
// Write to disk, extract //---------------------------------------------------------------------------------------------------- Extract
let tmp = Self::get_tmp_dir(); *update.lock().unwrap().msg.lock().unwrap() = format!("{}{}", MSG_EXTRACT, new_pkgs);
// std::fs::OpenOptions::new().mode(0o700).create(true).write(true).open(&tmp); info!("Update | {}", EXTRACT);
std::fs::create_dir(&tmp)?; for pkg in vec4.iter() {
info!("Update | Extract | Starting extraction..."); let tmp = tmp_dir.to_owned() + &pkg.name.to_string();
for pkg in vec2.iter() {
let tmp = tmp.to_string() + &pkg.name.to_string();
if pkg.name == Name::Gupax { if pkg.name == Name::Gupax {
#[cfg(target_family = "unix")]
std::fs::OpenOptions::new().create(true).write(true).mode(0o770).open(&tmp)?;
std::fs::write(tmp, pkg.bytes.lock().unwrap().as_ref())?; std::fs::write(tmp, pkg.bytes.lock().unwrap().as_ref())?;
} else { } else {
#[cfg(target_family = "unix")]
tar::Archive::new(flate2::read::GzDecoder::new(pkg.bytes.lock().unwrap().as_ref())).unpack(tmp)?; tar::Archive::new(flate2::read::GzDecoder::new(pkg.bytes.lock().unwrap().as_ref())).unpack(tmp)?;
} }
*pkg.prog.lock().unwrap() += 5; *update.lock().unwrap().prog.lock().unwrap() += (5.0 / pkg_amount).round();
info!("Update | Extract | {} ... {}%", pkg.name, pkg.prog.lock().unwrap()); info!("Update | {} ... OK", pkg.name);
} }
info!("Update | Extract ... OK"); info!("Update | Extract ... OK ... {}%", *update.lock().unwrap().prog.lock().unwrap());
//---------------------------------------------------------------------------------------------------- Upgrade
// 0. Walk directories
// 1. If basename matches known binary name, start
// 2. Rename tmp path into current path
// 3a. Update [State/Version]
// 3b. Gupax version is builtin to binary, so no state change needed
*update.lock().unwrap().msg.lock().unwrap() = format!("{}{}", MSG_UPGRADE, new_pkgs);
info!("Update | {}", UPGRADE);
// Just in case, create all folders
for entry in WalkDir::new(tmp_dir.clone()) {
let entry = entry?.clone();
let basename = entry.file_name().to_str().ok_or(anyhow::Error::msg("WalkDir basename failed"))?;
match basename {
"Gupax" => {
let path = update.lock().unwrap().path_gupax.clone();
info!("Update | Moving [{}] -> [{}]", entry.path().display(), path);
std::fs::rename(entry.path(), path)?;
*update.lock().unwrap().prog.lock().unwrap() += (5.0 / pkg_amount).round();
},
"p2pool" => {
let path = update.lock().unwrap().path_p2pool.clone();
let path = std::path::Path::new(&path);
info!("Update | Moving [{}] -> [{}]", entry.path().display(), path.display());
std::fs::create_dir_all(path.parent().ok_or(anyhow::Error::msg("P2Pool path failed"))?)?;
std::fs::rename(entry.path(), path)?;
version.p2pool = Pkg::get_new_pkg_version(P2pool, &vec4)?;
*update.lock().unwrap().prog.lock().unwrap() += (5.0 / pkg_amount).round();
},
"xmrig" => {
let path = update.lock().unwrap().path_xmrig.clone();
let path = std::path::Path::new(&path);
info!("Update | Moving [{}] -> [{}]", entry.path().display(), path.display());
std::fs::create_dir_all(path.parent().ok_or(anyhow::Error::msg("XMRig path failed"))?)?;
std::fs::rename(entry.path(), path)?;
version.xmrig = Pkg::get_new_pkg_version(Xmrig, &vec4)?;
*update.lock().unwrap().prog.lock().unwrap() += (5.0 / pkg_amount).round();
},
_ => (),
}
}
info!("Update | Removing temporary directory ... {}", tmp_dir);
std::fs::remove_dir_all(&tmp_dir)?;
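
The upgrade walk above, with the Gupax-specific plumbing stripped out. [installed_path_for] is a stand-in for the paths the Update struct carries; the essential moves are WalkDir over the temp directory, create_dir_all on the destination's parent, then a rename into place:

use std::path::PathBuf;
use walkdir::WalkDir;

fn upgrade(tmp_dir: &str, installed_path_for: impl Fn(&str) -> Option<PathBuf>) -> Result<(), anyhow::Error> {
    for entry in WalkDir::new(tmp_dir) {
        let entry = entry?;
        let basename = entry
            .file_name()
            .to_str()
            .ok_or_else(|| anyhow::Error::msg("non-UTF-8 file name"))?;
        // Only files whose basename matches a known binary are touched.
        if let Some(dest) = installed_path_for(basename) {
            if let Some(parent) = dest.parent() {
                std::fs::create_dir_all(parent)?;
            }
            std::fs::rename(entry.path(), &dest)?;
        }
    }
    Ok(())
}
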
let seconds = now.elapsed().as_secs();
info!("Update ... Seconds elapsed: [{}s]", seconds);
info!("Update ... OK ... 100%");
match seconds {
0 => *update.lock().unwrap().msg.lock().unwrap() = format!("{}! Took 0 seconds... Do you have 10Gbit internet or something...?!{}", MSG_SUCCESS, new_pkgs),
1 => *update.lock().unwrap().msg.lock().unwrap() = format!("{}! Took 1 second... Wow!{}", MSG_SUCCESS, new_pkgs),
_ => *update.lock().unwrap().msg.lock().unwrap() = format!("{}! Took {} seconds.{}", MSG_SUCCESS, seconds, new_pkgs),
}
*update.lock().unwrap().prog.lock().unwrap() = 100.0;
*update.lock().unwrap().updating.lock().unwrap() = false; *update.lock().unwrap().updating.lock().unwrap() = false;
Ok(()) Ok(())
} }
} }
#[derive(Debug,Clone)] #[derive(Debug,Clone)]
enum ClientEnum { pub enum ClientEnum {
Tor(Client<ArtiHttpConnector<tor_rtcompat::PreferredRuntime, tls_api_native_tls::TlsConnector>>), Tor(Client<ArtiHttpConnector<tor_rtcompat::PreferredRuntime, tls_api_native_tls::TlsConnector>>),
Https(Client<hyper_tls::HttpsConnector<hyper::client::HttpConnector>>), Https(Client<hyper_tls::HttpsConnector<hyper::client::HttpConnector>>),
} }
//---------------------------------------------------------------------------------------------------- Pkg struct/impl //---------------------------------------------------------------------------------------------------- Pkg struct/impl
#[derive(Debug)] #[derive(Debug,Clone)]
pub struct Pkg { pub struct Pkg {
name: Name, name: Name,
link_metadata: &'static str, link_metadata: &'static str,
@@ -437,7 +607,7 @@ pub struct Pkg {
link_suffix: &'static str, link_suffix: &'static str,
link_extension: &'static str, link_extension: &'static str,
tmp_dir: String, tmp_dir: String,
prog: Arc<Mutex<u8>>, prog: Arc<Mutex<f32>>,
msg: Arc<Mutex<String>>, msg: Arc<Mutex<String>>,
bytes: Arc<Mutex<hyper::body::Bytes>>, bytes: Arc<Mutex<hyper::body::Bytes>>,
old_ver: String, old_ver: String,
@@ -445,7 +615,7 @@ pub struct Pkg {
} }
impl Pkg { impl Pkg {
pub fn new(name: Name, tmp_dir: &String, prog: Arc<Mutex<u8>>, msg: Arc<Mutex<String>>) -> Self { pub fn new(name: Name, tmp_dir: &String, prog: Arc<Mutex<f32>>, msg: Arc<Mutex<String>>) -> Self {
let link_metadata = match name { let link_metadata = match name {
Gupax => GUPAX_METADATA, Gupax => GUPAX_METADATA,
P2pool => P2POOL_METADATA, P2pool => P2POOL_METADATA,
@@ -481,9 +651,17 @@ impl Pkg {
} }
} }
//---------------------------------------------------------------------------------------------------- Pkg functions
// Generate fake [User-Agent] HTTP header
pub fn get_user_agent() -> &'static str {
let rand = thread_rng().gen_range(0..50);
let user_agent = FAKE_USER_AGENT[rand];
info!("Update | Randomly selecting User-Agent ({}/50) ... {}", rand, user_agent);
user_agent
}
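
Because the User-Agent is now chosen once in start() and handed to every request, the whole update run presents the same (fake) browser string. A self-contained sketch with a stand-in list — the real FAKE_USER_AGENT array holds 50 entries, and the strings below are merely plausible examples:

use rand::{thread_rng, Rng};

const FAKE_USER_AGENT: [&str; 3] = [
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:106.0) Gecko/20100101 Firefox/106.0",
    "Mozilla/5.0 (X11; Linux x86_64; rv:106.0) Gecko/20100101 Firefox/106.0",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:106.0) Gecko/20100101 Firefox/106.0",
];

fn get_user_agent() -> &'static str {
    let rand = thread_rng().gen_range(0..FAKE_USER_AGENT.len());
    FAKE_USER_AGENT[rand]
}
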
// Generate GET request based off input URI + fake user agent // Generate GET request based off input URI + fake user agent
pub fn get_request(link: String) -> Result<Request<Body>, anyhow::Error> { pub fn get_request(link: String, user_agent: &'static str) -> Result<Request<Body>, anyhow::Error> {
let user_agent = FAKE_USER_AGENT[thread_rng().gen_range(0..50)];
let request = Request::builder() let request = Request::builder()
.method("GET") .method("GET")
.uri(link) .uri(link)
@@ -494,50 +672,41 @@ impl Pkg {
// Get metadata using [Generic hyper::client<C>] & [Request] // Get metadata using [Generic hyper::client<C>] & [Request]
// and change [version, prog] under an Arc<Mutex> // and change [version, prog] under an Arc<Mutex>
pub async fn get_metadata<C>(name: Name, new_ver: Arc<Mutex<String>>, prog: Arc<Mutex<u8>>, client: Client<C>, link: String) -> Result<(), Error> pub async fn get_metadata<C>(name: Name, new_ver: Arc<Mutex<String>>, client: Client<C>, link: String, user_agent: &'static str) -> Result<(), Error>
where C: hyper::client::connect::Connect + Clone + Send + Sync + 'static, { where C: hyper::client::connect::Connect + Clone + Send + Sync + 'static, {
// Retry [3] times if version is not [v*] let request = Pkg::get_request(link.clone(), user_agent)?;
for i in 0..3 { let mut response = client.request(request).await?;
let request = Pkg::get_request(link.clone())?; let body = hyper::body::to_bytes(response.body_mut()).await?;
let mut response = client.request(request).await?; let body: TagName = serde_json::from_slice(&body)?;
let body = hyper::body::to_bytes(response.body_mut()).await?; *new_ver.lock().unwrap() = body.tag_name.clone();
let body: TagName = serde_json::from_slice(&body)?; Ok(())
if body.tag_name.starts_with('v') {
*new_ver.lock().unwrap() = body.tag_name.clone();
*prog.lock().unwrap() += 5;
info!("Update | Metadata | {} {} ... {}%", name, body.tag_name, *prog.lock().unwrap());
return Ok(())
}
warn!("Update | Metadata | {} failed, retry [{}/3]...", name, i);
}
error!("Update | Metadata | {} failed", name);
Err(anyhow!("{} | Metadata fetch failed", name))
} }
// Takes a [Request], fills the appropriate [Pkg] // Takes a [Request], fills the appropriate [Pkg]
// [bytes] field with the [Archive/Standalone] // [bytes] field with the [Archive/Standalone]
pub async fn get_bytes<C>(name: Name, bytes: Arc<Mutex<bytes::Bytes>>, prog: Arc<Mutex<u8>>, client: Client<C>, link: String) -> Result<(), anyhow::Error> pub async fn get_bytes<C>(name: Name, bytes: Arc<Mutex<bytes::Bytes>>, client: Client<C>, link: String, user_agent: &'static str) -> Result<(), anyhow::Error>
where C: hyper::client::connect::Connect + Clone + Send + Sync + 'static, { where C: hyper::client::connect::Connect + Clone + Send + Sync + 'static, {
// GitHub sends a 302 redirect, so we must follow // GitHub sends a 302 redirect, so we must follow
// the [Location] header... only if Reqwest had custom // the [Location] header... only if Reqwest had custom
// connectors so I didn't have to manually do this... // connectors so I didn't have to manually do this...
// Also, retry [3] times if [Bytes] == empty let request = Self::get_request(link.clone(), user_agent)?;
for i in 0..3 { let response = client.request(request).await?;
let request = Pkg::get_request(link.clone())?; let request = Self::get_request(response.headers().get(hyper::header::LOCATION).unwrap().to_str()?.to_string(), user_agent)?;
let response = client.request(request).await?; let response = client.request(request).await?;
let request = Self::get_request(response.headers().get(hyper::header::LOCATION).unwrap().to_str()?.to_string())?; let body = hyper::body::to_bytes(response.into_body()).await?;
let response = client.request(request).await?; *bytes.lock().unwrap() = body;
let body = hyper::body::to_bytes(response.into_body()).await?; Ok(())
if ! body.is_empty() { }
*bytes.lock().unwrap() = body;
*prog.lock().unwrap() += 10; // Take in a [Name] and [Vec] of [Pkg]s, find
info!("Update | Download | {} ... {}%", name, *prog.lock().unwrap()); // that [Name]'s corresponding new version.
return Ok(()) fn get_new_pkg_version(name: Name, vec: &Vec<&Pkg>) -> Result<String, Error> {
for pkg in vec.iter() {
if pkg.name == name {
return Ok(pkg.new_ver.lock().unwrap().to_string())
} }
warn!("Update | Metadata | {} download bytes are empty, retry [{}/3]...", name, i);
} }
error!("Update | Download | {} failed", name); Err(anyhow::Error::msg("Couldn't find new_pkg_version"))
Err(anyhow!("{} | Download failed", name))
} }
} }
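
For context on the two back-to-back requests in get_bytes() above: plain hyper does not follow redirects, and GitHub answers release-asset URLs with a 302 plus a Location header, so the code requests the original link, reads Location, and requests that. A sketch of just that dance, assuming a hyper 0.14-style client; the function name is illustrative:

use hyper::{body::Bytes, header::{LOCATION, USER_AGENT}, Body, Client, Request};

async fn get_following_one_redirect<C>(client: &Client<C>, link: &str, user_agent: &str) -> Result<Bytes, anyhow::Error>
where C: hyper::client::connect::Connect + Clone + Send + Sync + 'static {
    let request = Request::builder()
        .method("GET")
        .uri(link)
        .header(USER_AGENT, user_agent)
        .body(Body::empty())?;
    let response = client.request(request).await?;
    // GitHub's answer is a 302; the real asset lives behind [Location]...
    let location = response
        .headers()
        .get(LOCATION)
        .ok_or_else(|| anyhow::Error::msg("no Location header in response"))?
        .to_str()?;
    // ...so issue a second request against the redirect target.
    let request = Request::builder()
        .method("GET")
        .uri(location)
        .header(USER_AGENT, user_agent)
        .body(Body::empty())?;
    let response = client.request(request).await?;
    Ok(hyper::body::to_bytes(response.into_body()).await?)
}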