feat: new update logic

- feat: update xmrig and p2pool only if the bundle button is checked
- feat: default value of the bundle button depends on whether the build is the
standalone or bundle version, selected with the bundle feature.
- feat: update CI to use the bundle feature and produce different Gupaxx
binaries for the standalone and bundle versions.
- feat: ask the user to restart Gupaxx after updating.
- feat: prevent the user from updating twice without a restart
- feat: use the XMRig and P2Pool bundled with Gupaxx instead of the upstream
versions.
- feat: update tests
- feat: update DIFFERENCES and ARCHITECTURE to reflect the new update
logic
- fix: temporary update directories not deleted, a regression introduced in the fork
- fix: #4
This commit is contained in:
Cyrix126 2024-04-21 14:59:04 +02:00
parent 697fd99124
commit 6b27ff499f
12 changed files with 317 additions and 689 deletions

View file

@ -54,12 +54,22 @@ jobs:
cargo bundle --release --target aarch64-apple-darwin cargo bundle --release --target aarch64-apple-darwin
mv target/release/bundle/osx/Gupaxx.app Gupaxx-macos-x64.app mv target/release/bundle/osx/Gupaxx.app Gupaxx-macos-x64.app
mv target/aarch64-apple-darwin/release/bundle/osx/Gupaxx.app Gupaxx-macos-arm64.app mv target/aarch64-apple-darwin/release/bundle/osx/Gupaxx.app Gupaxx-macos-arm64.app
tar -cf macos.tar Gupaxx-macos-arm64.app Gupaxx-macos-x64.app cargo bundle --release --features=bundle
cargo bundle --release --target aarch64-apple-darwin --features=bundle
mv target/release/bundle/osx/Gupaxx.app Gupaxx-macos-x64_b.app
mv target/aarch64-apple-darwin/release/bundle/osx/Gupaxx.app Gupaxx-macos-arm64_b.app
tar -cf macos.tar Gupaxx-macos-arm64.app Gupaxx-macos-x64.app Gupaxx-macos-arm64_b.app Gupaxx-macos-x64_b.app
elif [ "$RUNNER_OS" == "Linux" ]; then elif [ "$RUNNER_OS" == "Linux" ]; then
cargo build --release --target x86_64-unknown-linux-gnu cargo build --release --target x86_64-unknown-linux-gnu
mv target/x86_64-unknown-linux-gnu/release/gupaxx . mv target/x86_64-unknown-linux-gnu/release/gupaxx .
tar -cf linux.tar gupaxx cargo build --release --target x86_64-unknown-linux-gnu --features=bundle
mv target/x86_64-unknown-linux-gnu/release/gupaxx gupaxx_b
tar -cf linux.tar gupaxx gupaxx_b
cargo build --release --target x86_64-pc-windows-gnu cargo build --release --target x86_64-pc-windows-gnu
mv target/x86_64-pc-windows-gnu/release/gupaxx.exe .
cargo build --release --target x86_64-pc-windows-gnu --features=bundle
mv target/x86_64-pc-windows-gnu/release/gupaxx.exe gupaxx_b.exe
tar -cf windows.tar gupaxx.exe gupaxx_b.exe
fi fi
shell: bash shell: bash
@ -68,7 +78,7 @@ jobs:
uses: actions/upload-artifact@v4 uses: actions/upload-artifact@v4
with: with:
name: windows name: windows
path: target/x86_64-pc-windows-gnu/release/gupaxx.exe path: windows.tar
- name: Archive - name: Archive
if: ${{ runner.os == 'macOS' }} if: ${{ runner.os == 'macOS' }}

View file

@ -29,3 +29,14 @@ Status of process for Xmrig use for some information an image of data when the p
The node of xmrig in upstream can not change without a restart of the process.In this fork, the node used by xmrig needs to be updated without restart (using the config HTTP API of xmrig). The node of xmrig in upstream can not change without a restart of the process.In this fork, the node used by xmrig needs to be updated without restart (using the config HTTP API of xmrig).
So Gupaxx need to refresh the value of status tab submenu process for xmrig where before the values could not change without a restart of the process. So Gupaxx need to refresh the value of status tab submenu process for xmrig where before the values could not change without a restart of the process.
The field node from ImgXmrig needs to be moved to PubXvbApi. This value must be updated by xmrig at start and by XvB process at runtime. The field node from ImgXmrig needs to be moved to PubXvbApi. This value must be updated by xmrig at start and by XvB process at runtime.
## Updates
A new option in the Gupaxx Advanced tab enables bundled updates.
The Gupaxx binary ships with a default value for bundled updates that depends on whether it comes from the standalone or the bundle release.
Updates from Gupaxx will do the following differently from upstream:
- check the state to see whether standalone or bundled updates are selected; update only the Gupaxx binary in the standalone case, or also XMRig and P2Pool from the bundle release in the bundled case.
- prevent the user from running updates twice without a restart.
- ask the user to restart Gupaxx.
- do not verify that the p2pool or xmrig files exist (so that the update can create them).
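A minimal sketch of this flow, assuming illustrative stand-in names (`run_update`, `update_gupaxx_binary` and `update_bundled_binaries` are not the actual functions of the updater):

```rust
use anyhow::{anyhow, Result};

#[derive(PartialEq)]
enum Restart { No, Yes }

// Stand-ins for the real download/extract/rename steps performed by the updater.
fn update_gupaxx_binary() -> Result<()> { Ok(()) }
fn update_bundled_binaries() -> Result<()> { Ok(()) }

// Standalone mode replaces only the Gupaxx binary; bundled mode also installs
// the XMRig and P2Pool shipped with the release. Once an update has run, the
// restart flag blocks further updates until Gupaxx is restarted.
fn run_update(bundled: bool, restart: &mut Restart) -> Result<()> {
    if *restart == Restart::Yes {
        return Err(anyhow!("Gupaxx was already updated, restart it first"));
    }
    update_gupaxx_binary()?;
    if bundled {
        update_bundled_binaries()?;
    }
    *restart = Restart::Yes;
    Ok(())
}

fn main() -> Result<()> {
    let mut restart = Restart::No;
    // Bundled mode: also replaces XMRig and P2Pool.
    run_update(true, &mut restart)
}
```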

1
Cargo.lock generated
View file

@ -2075,6 +2075,7 @@ dependencies = [
"benri", "benri",
"bounded-vec-deque", "bounded-vec-deque",
"bytes", "bytes",
"cfg-if",
"chrono", "chrono",
"derive_more", "derive_more",
"dirs", "dirs",

View file

@ -25,6 +25,7 @@ incremental = true
[features] [features]
default = [] default = []
bundle = []
distro = [] distro = []
[dependencies] [dependencies]
@ -73,6 +74,7 @@ readable = "0.16"
chrono = {version="0.4.37", default-features=false, features=["clock", "std"]} chrono = {version="0.4.37", default-features=false, features=["clock", "std"]}
enclose = "1.1.8" enclose = "1.1.8"
bounded-vec-deque = {version="0.1.1", default-features=false} bounded-vec-deque = {version="0.1.1", default-features=false}
cfg-if = "1.0"
# Unix dependencies # Unix dependencies
[target.'cfg(unix)'.dependencies] [target.'cfg(unix)'.dependencies]
eframe = { version = "0.27.2", features = ["wgpu"] } eframe = { version = "0.27.2", features = ["wgpu"] }

View file

@ -23,6 +23,10 @@ The rendering of Tabs has been modified so that the minimum stated size of the w
The rendering of the benchmark table and of console outputs were calculating every line at the same time. Now it only renders what you see. It is a significant improvement for your processor, and you can feel the difference if it is not very powerful. The rendering of the benchmark table and of console outputs were calculating every line at the same time. Now it only renders what you see. It is a significant improvement for your processor, and you can feel the difference if it is not very powerful.
Updates from Gupaxx no longer retrieve xmrig and p2pool from upstream; instead they use the versions shipped in the bundled release. This change prevents bad surprises (see #3).
It also allows advanced users to use their own versions of p2pool and xmrig: the standalone version of Gupaxx will not replace them.
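As a rough illustration of the new single-archive approach (this is not the actual updater code; `gupaxx_download_link` and the hard-coded "linux", "x64" and "tar.gz" values are assumptions for the example), the download link now points at a Gupaxx release archive whose name encodes the standalone or bundle flavor:

```rust
// Illustrative only: the real updater picks the OS, architecture and archive
// extension per target at compile time and reads the flavor from the state.
fn gupaxx_download_link(version: &str, bundled: bool) -> String {
    let flavor = if bundled { "bundle" } else { "standalone" };
    format!(
        "https://github.com/Cyrix126/gupaxx/releases/download/{version}/gupaxx-{version}-linux-x64-{flavor}.tar.gz"
    )
}

fn main() {
    // e.g. https://github.com/Cyrix126/gupaxx/releases/download/v1.0.0/gupaxx-v1.0.0-linux-x64-bundle.tar.gz
    println!("{}", gupaxx_download_link("v1.0.0", true));
}
```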
## Security ## Security
With the upgrade of dependencies, cargo audit show no warnings instead of 5 vulnerabilities and 4 allowed warnings for Gupax. With the upgrade of dependencies, cargo audit show no warnings instead of 5 vulnerabilities and 4 allowed warnings for Gupax.

View file

@ -49,7 +49,7 @@ impl Gupax {
ui.add_sized([width, button], Button::new("Updates are disabled")) ui.add_sized([width, button], Button::new("Updates are disabled"))
.on_disabled_hover_text(DISTRO_NO_UPDATE); .on_disabled_hover_text(DISTRO_NO_UPDATE);
#[cfg(not(feature = "distro"))] #[cfg(not(feature = "distro"))]
ui.set_enabled(!updating); ui.set_enabled(!updating && *lock!(restart) == Restart::No);
#[cfg(not(feature = "distro"))] #[cfg(not(feature = "distro"))]
if ui if ui
.add_sized([width, button], Button::new("Check for updates")) .add_sized([width, button], Button::new("Check for updates"))
@ -78,7 +78,7 @@ impl Gupax {
debug!("Gupaxx Tab | Rendering bool buttons"); debug!("Gupaxx Tab | Rendering bool buttons");
ui.horizontal(|ui| { ui.horizontal(|ui| {
ui.group(|ui| { ui.group(|ui| {
let width = (size.x - SPACE * 12.0) / 6.0; let width = (size.x - SPACE * 15.0) / 7.0;
let height = if self.simple { let height = if self.simple {
size.y / 10.0 size.y / 10.0
} else { } else {
@ -89,6 +89,9 @@ impl Gupax {
ui.add_sized(size, Checkbox::new(&mut self.auto_update, "Auto-Update")) ui.add_sized(size, Checkbox::new(&mut self.auto_update, "Auto-Update"))
.on_hover_text(GUPAX_AUTO_UPDATE); .on_hover_text(GUPAX_AUTO_UPDATE);
ui.separator(); ui.separator();
ui.add_sized(size, Checkbox::new(&mut self.bundled, "Bundle"))
.on_hover_text(GUPAX_BUNDLED_UPDATE);
ui.separator();
ui.add_sized(size, Checkbox::new(&mut self.auto_p2pool, "Auto-P2Pool")) ui.add_sized(size, Checkbox::new(&mut self.auto_p2pool, "Auto-P2Pool"))
.on_hover_text(GUPAX_AUTO_P2POOL); .on_hover_text(GUPAX_AUTO_P2POOL);
ui.separator(); ui.separator();
@ -100,13 +103,13 @@ impl Gupax {
ui.separator(); ui.separator();
ui.add_sized( ui.add_sized(
size, size,
Checkbox::new(&mut self.ask_before_quit, "Ask before quit"), Checkbox::new(&mut self.ask_before_quit, "Confirm quit"),
) )
.on_hover_text(GUPAX_ASK_BEFORE_QUIT); .on_hover_text(GUPAX_ASK_BEFORE_QUIT);
ui.separator(); ui.separator();
ui.add_sized( ui.add_sized(
size, size,
Checkbox::new(&mut self.save_before_quit, "Save before quit"), Checkbox::new(&mut self.save_before_quit, "Save on quit"),
) )
.on_hover_text(GUPAX_SAVE_BEFORE_QUIT); .on_hover_text(GUPAX_SAVE_BEFORE_QUIT);
}); });

View file

@ -15,13 +15,12 @@
// You should have received a copy of the GNU General Public License // You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>. // along with this program. If not, see <https://www.gnu.org/licenses/>.
use crate::components::update::Pkg; use crate::components::update::get_user_agent;
use crate::{constants::*, macros::*}; use crate::{constants::*, macros::*};
use egui::Color32; use egui::Color32;
use log::*; use log::*;
use rand::{thread_rng, Rng}; use rand::{thread_rng, Rng};
use reqwest::{Client, RequestBuilder}; use reqwest::{Client, RequestBuilder};
use std::sync::{Arc, Mutex}; use std::sync::{Arc, Mutex};
use std::time::{Duration, Instant}; use std::time::{Duration, Instant};
@ -384,7 +383,7 @@ impl Ping {
let client = Client::new(); let client = Client::new();
// Random User Agent // Random User Agent
let rand_user_agent = Pkg::get_user_agent(); let rand_user_agent = get_user_agent();
// Handle vector // Handle vector
let mut handles = Vec::with_capacity(REMOTE_NODE_LENGTH); let mut handles = Vec::with_capacity(REMOTE_NODE_LENGTH);
let node_vec = arc_mut!(Vec::with_capacity(REMOTE_NODE_LENGTH)); let node_vec = arc_mut!(Vec::with_capacity(REMOTE_NODE_LENGTH));
@ -486,8 +485,7 @@ mod test {
use crate::components::node::{ use crate::components::node::{
format_ip, REMOTE_NODES, REMOTE_NODE_LENGTH, REMOTE_NODE_MAX_CHARS, format_ip, REMOTE_NODES, REMOTE_NODE_LENGTH, REMOTE_NODE_MAX_CHARS,
}; };
use crate::components::update::Pkg; use crate::components::update::get_user_agent;
#[test] #[test]
fn validate_node_ips() { fn validate_node_ips() {
for (ip, location, rpc, zmq) in REMOTE_NODES { for (ip, location, rpc, zmq) in REMOTE_NODES {
@ -524,7 +522,7 @@ mod test {
let client = Client::new(); let client = Client::new();
// Random User Agent // Random User Agent
let rand_user_agent = Pkg::get_user_agent(); let rand_user_agent = get_user_agent();
// Only fail this test if >50% of nodes fail. // Only fail this test if >50% of nodes fail.
const HALF_REMOTE_NODES: usize = REMOTE_NODE_LENGTH / 2; const HALF_REMOTE_NODES: usize = REMOTE_NODE_LENGTH / 2;

View file

@ -24,20 +24,15 @@
// b. auto-update at startup // b. auto-update at startup
//---------------------------------------------------------------------------------------------------- Imports //---------------------------------------------------------------------------------------------------- Imports
use crate::components::update::Name::*;
use crate::{ use crate::{
app::Restart, app::Restart,
constants::GUPAX_VERSION, constants::GUPAX_VERSION,
disk::{ disk::{state::State, *},
state::{State, Version},
*,
},
macros::*, macros::*,
miscs::get_exe_dir, miscs::get_exe_dir,
utils::errors::{ErrorButtons, ErrorFerris, ErrorState}, utils::errors::{ErrorButtons, ErrorFerris, ErrorState},
}; };
use anyhow::{anyhow, Error}; use anyhow::{anyhow, Error};
use bytes::Bytes;
use log::*; use log::*;
use rand::distributions::Alphanumeric; use rand::distributions::Alphanumeric;
use rand::{thread_rng, Rng}; use rand::{thread_rng, Rng};
@ -46,20 +41,8 @@ use reqwest::{Client, RequestBuilder};
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use std::path::{Path, PathBuf}; use std::path::{Path, PathBuf};
use std::sync::{Arc, Mutex}; use std::sync::{Arc, Mutex};
use tokio::task::JoinHandle;
use walkdir::WalkDir; use walkdir::WalkDir;
// On apple-darwin targets there is an issue with the native and rustls
// tls implementation so this makes it fall back to the openssl variant.
//
// https://gitlab.torproject.org/tpo/core/arti/-/issues/715
// #[cfg(not(target_os = "macos"))]
// use tls_api_native_tls::TlsConnector;
// #[cfg(target_os = "macos")]
// use tls_api_openssl::TlsConnector;
// use tls_api::{TlsConnector as TlsConnectorTrait, TlsConnectorBuilder};
#[cfg(target_os = "windows")] #[cfg(target_os = "windows")]
use zip::ZipArchive; use zip::ZipArchive;
//#[cfg(target_family = "unix")] //#[cfg(target_family = "unix")]
@ -68,114 +51,41 @@ use zip::ZipArchive;
//---------------------------------------------------------------------------------------------------- Constants //---------------------------------------------------------------------------------------------------- Constants
// Package naming schemes: // Package naming schemes:
// gupax | gupax-vX.X.X-(windows|macos|linux)-(x64|arm64)-(standalone|bundle).(zip|tar.gz) // gupax | gupax-vX.X.X-(windows|macos|linux)-(x64|arm64)-(standalone|bundle).(zip|tar.gz)
// p2pool | p2pool-vX.X.X-(windows|macos|linux)-(x64|aarch64).(zip|tar.gz)
// xmrig | xmrig-X.X.X-(msvc-win64|macos-x64|macos-arm64|linux-static-x64).(zip|tar.gz)
//
// Download link = PREFIX + Version (found at runtime) + SUFFIX + Version + EXT // Download link = PREFIX + Version (found at runtime) + SUFFIX + Version + EXT
// Example: https://github.com/hinto-janai/gupax/releases/download/v0.0.1/gupax-v0.0.1-linux-standalone-x64.tar.gz // Example: https://github.com/hinto-janai/gupax/releases/download/v0.0.1/gupax-v0.0.1-linux-standalone-x64.tar.gz
// //
// Exceptions (there are always exceptions...):
// - XMRig doesn't have a [v], so it is [xmrig-6.18.0-...]
// - XMRig separates the hash and signature
// - P2Pool hashes are in UPPERCASE
const GUPAX_METADATA: &str = "https://api.github.com/repos/Cyrix126/gupaxx/releases/latest"; const GUPAX_METADATA: &str = "https://api.github.com/repos/Cyrix126/gupaxx/releases/latest";
const P2POOL_METADATA: &str = "https://api.github.com/repos/SChernykh/p2pool/releases/latest";
const XMRIG_METADATA: &str = "https://api.github.com/repos/xmrig/xmrig/releases/latest";
const GUPAX_PREFIX: &str = "https://github.com/Cyrix126/gupaxx/releases/download/"; cfg_if::cfg_if! {
const P2POOL_PREFIX: &str = "https://github.com/SChernykh/p2pool/releases/download/"; if #[cfg(target_family = "unix")] {
const XMRIG_PREFIX: &str = "https://github.com/xmrig/xmrig/releases/download/"; pub(super) const GUPAX_BINARY: &str = "gupaxx";
pub(super) const P2POOL_BINARY: &str = "p2pool";
const GUPAX_SUFFIX: &str = "/gupaxx-"; pub(super) const XMRIG_BINARY: &str = "xmrig";
const P2POOL_SUFFIX: &str = "/p2pool-"; }
const XMRIG_SUFFIX: &str = "/xmrig-"; }
cfg_if::cfg_if! {
// const GUPAX_HASH: &str = "SHA256SUMS"; if #[cfg(target_os = "windows")] {
// const P2POOL_HASH: &str = "sha256sums.txt.asc"; pub(super) const OS_TARGET: &str = "windows";
// const XMRIG_HASH: &str = "SHA256SUMS"; pub(super) const ARCHIVE_EXT: &str = "zip";
#[cfg(target_os = "windows")]
mod impl_platform {
pub(super) const GUPAX_EXTENSION: &str = "-windows-x64-standalone.zip";
pub(super) const P2POOL_EXTENSION: &str = "-windows-x64.zip";
pub(super) const XMRIG_EXTENSION: &str = "-msvc-win64.zip";
pub(super) const GUPAX_BINARY: &str = "Gupaxx.exe"; pub(super) const GUPAX_BINARY: &str = "Gupaxx.exe";
pub(super) const P2POOL_BINARY: &str = "p2pool.exe"; pub(super) const P2POOL_BINARY: &str = "p2pool.exe";
pub(super) const XMRIG_BINARY: &str = "xmrig.exe"; pub(super) const XMRIG_BINARY: &str = "xmrig.exe";
pub(super) const VALID_GUPAX_1: &str = "GUPAXX.exe"; } else if #[cfg(target_os = "linux")] {
pub(super) const VALID_GUPAX_2: &str = "Gupaxx.exe"; pub(super) const OS_TARGET: &str = "linux";
pub(super) const VALID_GUPAX_3: &str = "gupaxx.exe"; pub(super) const ARCHIVE_EXT: &str = "tar.gz";
pub(super) const VALID_XMRIG_1: &str = "XMRIG.exe"; } else if #[cfg(target_os = "macos")] {
pub(super) const VALID_XMRIG_2: &str = "XMRig.exe"; pub(super) const OS_TARGET: &str = "macos";
pub(super) const VALID_XMRIG_3: &str = "Xmrig.exe"; pub(super) const ARCHIVE_EXT: &str = "tar.gz";
pub(super) const VALID_XMRIG_4: &str = "xmrig.exe"; }
pub(super) const VALID_P2POOL_1: &str = "P2POOL.exe";
pub(super) const VALID_P2POOL_2: &str = "P2Pool.exe";
pub(super) const VALID_P2POOL_3: &str = "P2pool.exe";
pub(super) const VALID_P2POOL_4: &str = "p2pool.exe";
} }
#[cfg(target_family = "unix")]
mod impl_unix {
pub(super) const GUPAX_BINARY: &str = "gupaxx";
// pub(super) const P2POOL_BINARY: &str = "p2pool";
// pub(super) const XMRIG_BINARY: &str = "xmrig";
pub(super) const VALID_GUPAX_1: &str = "GUPAXX";
pub(super) const VALID_GUPAX_2: &str = "Gupaxx";
pub(super) const VALID_GUPAX_3: &str = "gupaxx";
pub(super) const VALID_XMRIG_1: &str = "XMRIG";
pub(super) const VALID_XMRIG_2: &str = "XMRig";
pub(super) const VALID_XMRIG_3: &str = "Xmrig";
pub(super) const VALID_XMRIG_4: &str = "xmrig";
pub(super) const VALID_P2POOL_1: &str = "P2POOL";
pub(super) const VALID_P2POOL_2: &str = "P2Pool";
pub(super) const VALID_P2POOL_3: &str = "P2pool";
pub(super) const VALID_P2POOL_4: &str = "p2pool";
}
#[cfg(target_os = "macos")]
#[cfg(target_arch = "x86_64")] #[cfg(target_arch = "x86_64")]
mod impl_platform { pub(super) const ARCH_TARGET: &str = "x64";
pub(super) use super::impl_unix::*;
pub(super) const GUPAX_EXTENSION: &str = "-macos-x64-standalone.tar.gz";
pub(super) const P2POOL_EXTENSION: &str = "-macos-x64.tar.gz";
pub(super) const XMRIG_EXTENSION: &str = "-macos-x64.tar.gz";
}
#[cfg(target_os = "macos")]
#[cfg(target_arch = "aarch64")] #[cfg(target_arch = "aarch64")]
mod impl_platform { pub(super) const ARCH_TARGET: &str = "arm64";
pub(super) use super::impl_unix::*;
pub(super) const GUPAX_EXTENSION: &str = "-macos-arm64-standalone.tar.gz"; // Some fake Curl/Wget user-agents because GitHub API requires one// user-agent might be fingerprintable without all the associated headers.
pub(super) const P2POOL_EXTENSION: &str = "-macos-aarch64.tar.gz";
pub(super) const XMRIG_EXTENSION: &str = "-macos-arm64.tar.gz";
}
#[cfg(target_os = "linux")]
mod impl_platform {
pub(super) use super::impl_unix::*;
pub(super) const GUPAX_EXTENSION: &str = "-linux-x64-standalone.tar.gz";
pub(super) const P2POOL_EXTENSION: &str = "-linux-x64.tar.gz";
pub(super) const XMRIG_EXTENSION: &str = "-linux-static-x64.tar.gz";
}
use impl_platform::*;
// const VALID_GUPAX: [&str; 3] = [VALID_GUPAX_1, VALID_GUPAX_2, VALID_GUPAX_3];
const VALID_XMRIG: [&str; 4] = [VALID_XMRIG_1, VALID_XMRIG_2, VALID_XMRIG_3, VALID_XMRIG_4];
const VALID_P2POOL: [&str; 4] = [
VALID_P2POOL_1,
VALID_P2POOL_2,
VALID_P2POOL_3,
VALID_P2POOL_4,
];
// Some fake Curl/Wget user-agents because GitHub API requires one and a Tor browser
// user-agent might be fingerprintable without all the associated headers.
const FAKE_USER_AGENT: [&str; 25] = [ const FAKE_USER_AGENT: [&str; 25] = [
"Wget/1.16.3", "Wget/1.16.3",
"Wget/1.17", "Wget/1.17",
@ -207,17 +117,13 @@ const FAKE_USER_AGENT: [&str; 25] = [
const MSG_NONE: &str = "No update in progress"; const MSG_NONE: &str = "No update in progress";
const MSG_START: &str = "Starting update"; const MSG_START: &str = "Starting update";
const MSG_TMP: &str = "Creating temporary directory"; const MSG_TMP: &str = "Creating temporary directory";
// const MSG_TOR: &str = "Creating Tor+HTTPS client";
const MSG_HTTPS: &str = "Creating HTTPS client"; const MSG_HTTPS: &str = "Creating HTTPS client";
const MSG_METADATA: &str = "Fetching package metadata"; const MSG_METADATA: &str = "Fetching package metadata";
const MSG_METADATA_RETRY: &str = "Fetching package metadata failed, attempt";
const MSG_COMPARE: &str = "Compare package versions"; const MSG_COMPARE: &str = "Compare package versions";
const MSG_UP_TO_DATE: &str = "All packages already up-to-date"; const MSG_UP_TO_DATE: &str = "All packages already up-to-date";
const MSG_DOWNLOAD: &str = "Downloading packages"; const MSG_DOWNLOAD: &str = "Downloading packages";
const MSG_DOWNLOAD_RETRY: &str = "Downloading packages failed, attempt";
const MSG_EXTRACT: &str = "Extracting packages"; const MSG_EXTRACT: &str = "Extracting packages";
const MSG_UPGRADE: &str = "Upgrading packages"; const MSG_UPGRADE: &str = "Upgrading packages";
pub const MSG_SUCCESS: &str = "Update successful";
pub const MSG_FAILED: &str = "Update failed"; pub const MSG_FAILED: &str = "Update failed";
pub const MSG_FAILED_HELP: &str = pub const MSG_FAILED_HELP: &str =
"Consider manually replacing your executable from github releases: https://github.com/Cyrix126/gupaxx/releases"; "Consider manually replacing your executable from github releases: https://github.com/Cyrix126/gupaxx/releases";
@ -241,10 +147,8 @@ pub fn check_p2pool_path(path: &str) -> bool {
return false; return false;
} }
}; };
path == VALID_P2POOL[0]
|| path == VALID_P2POOL[1] path == P2POOL_BINARY
|| path == VALID_P2POOL[2]
|| path == VALID_P2POOL[3]
} }
pub fn check_xmrig_path(path: &str) -> bool { pub fn check_xmrig_path(path: &str) -> bool {
@ -259,10 +163,7 @@ pub fn check_xmrig_path(path: &str) -> bool {
return false; return false;
} }
}; };
path == VALID_XMRIG[0] path == XMRIG_BINARY
|| path == VALID_XMRIG[1]
|| path == VALID_XMRIG[2]
|| path == VALID_XMRIG[3]
} }
//---------------------------------------------------------------------------------------------------- Update struct/impl //---------------------------------------------------------------------------------------------------- Update struct/impl
@ -270,7 +171,7 @@ pub fn check_xmrig_path(path: &str) -> bool {
// Progress bar structure: // Progress bar structure:
// 0% | Start // 0% | Start
// 5% | Create tmp directory, pkg list, fake user-agent // 5% | Create tmp directory, pkg list, fake user-agent
// 5% | Create Tor/HTTPS client // 5% | Create HTTPS client
// 30% | Download Metadata (x3) // 30% | Download Metadata (x3)
// 5% | Compare Versions (x3) // 5% | Compare Versions (x3)
// 30% | Download Archive (x3) // 30% | Download Archive (x3)
@ -282,6 +183,7 @@ pub struct Update {
pub path_gupax: String, // Full path to current gupax pub path_gupax: String, // Full path to current gupax
pub path_p2pool: String, // Full path to current p2pool pub path_p2pool: String, // Full path to current p2pool
pub path_xmrig: String, // Full path to current xmrig pub path_xmrig: String, // Full path to current xmrig
#[cfg(target_os = "windows")]
pub tmp_dir: String, // Full path to temporary directory pub tmp_dir: String, // Full path to temporary directory
pub updating: Arc<Mutex<bool>>, // Is an update in progress? pub updating: Arc<Mutex<bool>>, // Is an update in progress?
pub prog: Arc<Mutex<f32>>, // Holds the 0-100% progress bar number pub prog: Arc<Mutex<f32>>, // Holds the 0-100% progress bar number
@ -295,6 +197,7 @@ impl Update {
path_gupax, path_gupax,
path_p2pool: path_p2pool.display().to_string(), path_p2pool: path_p2pool.display().to_string(),
path_xmrig: path_xmrig.display().to_string(), path_xmrig: path_xmrig.display().to_string(),
#[cfg(target_os = "windows")]
tmp_dir: "".to_string(), tmp_dir: "".to_string(),
updating: arc_mut!(false), updating: arc_mut!(false),
prog: arc_mut!(0.0), prog: arc_mut!(0.0),
@ -321,21 +224,6 @@ impl Update {
Ok(tmp_dir) Ok(tmp_dir)
} }
// #[cold]
// #[inline(never)]
// Get an HTTPS client. Uses [Arti] if Tor is enabled.
// The base type looks something like [hyper::Client<...>].
// This is then wrapped with the custom [ClientEnum] type to implement
// dynamically returning either a [Tor+TLS|TLS-only] client at based on user settings.
// tor == true? => return Tor client
// tor == false? => return normal TLS client
//
// Since functions that take generic INPUT are much easier to implement,
// [get_response()] just takes a [hyper::Client<C>], which is passed to
// it via deconstructing this [ClientEnum] with a match, like so:
// ClientEnum::Tor(T) => get_response(... T ...)
// ClientEnum::Https(H) => get_response(... H ...)
//
#[cold] #[cold]
#[inline(never)] #[inline(never)]
// Intermediate function that spawns a new thread // Intermediate function that spawns a new thread
@ -358,9 +246,11 @@ impl Update {
error!("Update | This is the [Linux distro] version of Gupax, updates are disabled"); error!("Update | This is the [Linux distro] version of Gupax, updates are disabled");
#[cfg(feature = "distro")] #[cfg(feature = "distro")]
return; return;
// verify validity of absolute path for p2pool and xmrig only if we want to update them.
if lock!(og).gupax.bundled {
// Check P2Pool path for safety // Check P2Pool path for safety
// Attempt relative to absolute path // Attempt relative to absolute path
// it's ok if the file doesn't exist; the user could be enabling the bundled version for the first time.
let p2pool_path = match into_absolute_path(gupax.p2pool_path.clone()) { let p2pool_path = match into_absolute_path(gupax.p2pool_path.clone()) {
Ok(p) => p, Ok(p) => p,
Err(e) => { Err(e) => {
@ -375,43 +265,6 @@ impl Update {
return; return;
} }
}; };
// Attempt to get basename
let file = match p2pool_path.file_name() {
Some(p) => {
// Attempt to turn into str
match p.to_str() {
Some(p) => p,
None => {
error_state.set("Provided P2Pool path could not be turned into a UTF-8 string (are you using non-English characters?)", ErrorFerris::Error, ErrorButtons::Okay);
return;
}
}
}
None => {
error_state.set(
"Provided P2Pool path could not be found",
ErrorFerris::Error,
ErrorButtons::Okay,
);
return;
}
};
// If it doesn't look like [P2Pool], its probably a bad move
// to overwrite it with an update, so set an error.
// Doesnt seem like you can [match] on array indexes
// so that explains the ridiculous if/else.
if check_p2pool_path(file) {
info!("Update | Using P2Pool path: [{}]", p2pool_path.display());
} else {
warn!(
"Update | Aborting update, incorrect P2Pool path: [{}]",
file
);
let text = format!("Provided P2Pool path seems incorrect. Not starting update for safety.\nTry one of these: {:?}", VALID_P2POOL);
error_state.set(text, ErrorFerris::Error, ErrorButtons::Okay);
return;
}
// Check XMRig path for safety // Check XMRig path for safety
let xmrig_path = match into_absolute_path(gupax.xmrig_path.clone()) { let xmrig_path = match into_absolute_path(gupax.xmrig_path.clone()) {
Ok(p) => p, Ok(p) => p,
@ -427,37 +280,9 @@ impl Update {
return; return;
} }
}; };
let file = match xmrig_path.file_name() {
Some(p) => {
// Attempt to turn into str
match p.to_str() {
Some(p) => p,
None => {
error_state.set("Provided XMRig path could not be turned into a UTF-8 string (are you using non-English characters?)", ErrorFerris::Error, ErrorButtons::Okay);
return;
}
}
}
None => {
error_state.set(
"Provided XMRig path could not be found",
ErrorFerris::Error,
ErrorButtons::Okay,
);
return;
}
};
if check_xmrig_path(file) {
info!("Update | Using XMRig path: [{}]", xmrig_path.display());
} else {
warn!("Update | Aborting update, incorrect XMRig path: [{}]", file);
let text = format!("Provided XMRig path seems incorrect. Not starting update for safety.\nTry one of these: {:?}", VALID_XMRIG);
error_state.set(text, ErrorFerris::Error, ErrorButtons::Okay);
return;
}
lock!(update).path_p2pool = p2pool_path.display().to_string(); lock!(update).path_p2pool = p2pool_path.display().to_string();
lock!(update).path_xmrig = xmrig_path.display().to_string(); lock!(update).path_xmrig = xmrig_path.display().to_string();
}
// Clone before thread spawn // Clone before thread spawn
let og = Arc::clone(og); let og = Arc::clone(og);
@ -467,7 +292,7 @@ impl Update {
let restart = Arc::clone(restart); let restart = Arc::clone(restart);
info!("Spawning update thread..."); info!("Spawning update thread...");
std::thread::spawn(move || { std::thread::spawn(move || {
match Update::start(update.clone(), og.clone(), state_ver.clone(), restart) { match Update::start(update.clone(), og.clone(), restart) {
Ok(_) => { Ok(_) => {
info!("Update | Saving state..."); info!("Update | Saving state...");
let original_version = lock!(og).version.clone(); let original_version = lock!(og).version.clone();
@ -504,8 +329,7 @@ impl Update {
#[tokio::main] #[tokio::main]
pub async fn start( pub async fn start(
update: Arc<Mutex<Self>>, update: Arc<Mutex<Self>>,
_og: Arc<Mutex<State>>, og: Arc<Mutex<State>>,
state_ver: Arc<Mutex<Version>>,
restart: Arc<Mutex<Restart>>, restart: Arc<Mutex<Restart>>,
) -> Result<(), anyhow::Error> { ) -> Result<(), anyhow::Error> {
#[cfg(feature = "distro")] #[cfg(feature = "distro")]
@ -532,14 +356,11 @@ impl Update {
let tmp_dir = Self::get_tmp_dir()?; let tmp_dir = Self::get_tmp_dir()?;
std::fs::create_dir(&tmp_dir)?; std::fs::create_dir(&tmp_dir)?;
// Make Pkg vector
let mut vec = vec![Pkg::new(Gupax), Pkg::new(P2pool), Pkg::new(Xmrig)];
// Generate fake user-agent // Generate fake user-agent
let user_agent = Pkg::get_user_agent(); let user_agent = get_user_agent();
*lock2!(update, prog) = 5.0; *lock2!(update, prog) = 5.0;
// Create Tor/HTTPS client // Create HTTPS client
let lock = lock!(update); let lock = lock!(update);
let msg = MSG_HTTPS.to_string(); let msg = MSG_HTTPS.to_string();
info!("Update | {}", msg); info!("Update | {}", msg);
@ -552,237 +373,115 @@ impl Update {
//---------------------------------------------------------------------------------------------------- Metadata //---------------------------------------------------------------------------------------------------- Metadata
*lock2!(update, msg) = MSG_METADATA.to_string(); *lock2!(update, msg) = MSG_METADATA.to_string();
info!("Update | {}", METADATA); info!("Update | {}", METADATA);
let mut vec2 = vec![];
// Loop process: // Loop process:
// 1. Start all async metadata fetches // reqwest will retry by itself
// 2. Wait for all to finish
// 3. Iterate over all [pkg.new_ver], check if empty
// 4. If not empty, move [pkg] to different vec
// 5. At end, if original vec isn't empty, that means something failed
// 6. Redo loop [3] times (rebuild circuit if using Tor), with the original vec (that now only has the failed pkgs)
//
// This logic was originally in the [Pkg::get_metadata()]
// function itself but for some reason, it was getting skipped over,
// so the [new_ver] check is now here, in the outer scope.
for i in 1..=3 {
if i > 1 {
*lock2!(update, msg) = format!("{} [{}/3]", MSG_METADATA_RETRY, i);
}
let mut handles: Vec<JoinHandle<Result<(), anyhow::Error>>> = vec![];
for pkg in vec.iter() {
// Clone data before sending to async
let new_ver = Arc::clone(&pkg.new_ver);
let client = client.clone();
let link = pkg.link_metadata.to_string();
// Send to async // Send to async
let handle: JoinHandle<Result<(), anyhow::Error>> = tokio::spawn(async move { let new_ver = if let Ok(new_ver) =
Pkg::get_metadata(new_ver, &client, link, user_agent).await get_metadata(&client, GUPAX_METADATA.to_string(), user_agent).await
}); {
handles.push(handle); new_ver
}
// Handle await
for handle in handles {
// Two [??] will send the error.
// We don't actually want to return the error here since we
// prefer looping and retrying over immediately erroring out.
if let Err(e) = handle.await? {
warn!("Update | {}", e)
}
}
// Check for empty version
let mut indexes = vec![];
for (index, pkg) in vec.iter().enumerate() {
if lock!(pkg.new_ver).is_empty() {
warn!("Update | {} failed, attempt [{}/3]...", pkg.name, i + 1);
} else {
indexes.push(index);
vec2.push(pkg.clone());
*lock2!(update, prog) += 10.0;
info!("Update | {} {} ... OK", pkg.name, lock!(pkg.new_ver));
}
}
// Order indexes from biggest to smallest
// This prevents shifting the whole vector and causing panics.
indexes.sort();
indexes.reverse();
for index in indexes {
vec.remove(index);
}
if vec.is_empty() {
break;
}
// Some Tor exit nodes seem to be blocked by GitHub's API,
// so recreate the circuit every loop.
}
if vec.is_empty() {
info!("Update | Metadata ... OK ... {}%", lock2!(update, prog));
} else { } else {
error!("Update | Metadata ... FAIL"); error!("Update | Metadata ... FAIL");
return Err(anyhow!("Metadata fetch failed")); return Err(anyhow!("Metadata fetch failed"));
} };
*lock2!(update, prog) += 10.0;
info!("Update | Gupaxx {} ... OK", new_ver);
//---------------------------------------------------------------------------------------------------- Compare //---------------------------------------------------------------------------------------------------- Compare
*lock2!(update, msg) = MSG_COMPARE.to_string(); *lock2!(update, msg) = MSG_COMPARE.to_string();
info!("Update | {}", COMPARE); info!("Update | {}", COMPARE);
let mut vec3 = vec![]; let diff = GUPAX_VERSION != new_ver;
let mut new_pkgs = vec![];
for pkg in vec2.iter() {
let new_ver = lock!(pkg.new_ver).clone();
let diff;
let old_ver;
let name;
match pkg.name {
Gupax => {
// Compare against the built-in compiled version as well as an in-memory version
// that gets updated during an update. This prevents the updater always thinking
// there's a new Gupax update since the user didnt restart and is still technically
// using the old version (even though the underlying binary was updated).
old_ver = lock!(state_ver).gupax.clone();
diff = old_ver != new_ver && GUPAX_VERSION != new_ver;
name = "Gupaxx";
}
P2pool => {
old_ver = lock!(state_ver).p2pool.clone();
diff = old_ver != new_ver;
name = "P2Pool";
}
Xmrig => {
old_ver = lock!(state_ver).xmrig.clone();
diff = old_ver != new_ver;
name = "XMRig";
}
}
if diff { if diff {
info!( info!(
"Update | {} {} != {} ... ADDING", "Update | Gupaxx {} != {} ... ADDING",
pkg.name, old_ver, new_ver GUPAX_VERSION, new_ver
); );
new_pkgs.push(format!("\n{} {} -> {}", name, old_ver, new_ver));
vec3.push(pkg);
} else { } else {
info!( info!(
"Update | {} {} == {} ... SKIPPING", "Update | Gupaxx {} == {} ... SKIPPING",
pkg.name, old_ver, new_ver GUPAX_VERSION, new_ver
); );
} info!("Update | All packages up-to-date ... RETURNING");
*lock2!(update, prog) = 100.0;
*lock2!(update, msg) = MSG_UP_TO_DATE.to_string();
return Ok(());
} }
*lock2!(update, prog) += 5.0; *lock2!(update, prog) += 5.0;
info!("Update | Compare ... OK ... {}%", lock2!(update, prog)); info!("Update | Compare ... OK ... {}%", lock2!(update, prog));
// Return if 0 (all packages up-to-date) // Return if 0 (all packages up-to-date)
// Get amount of packages to divide up the percentage increases // Get amount of packages to divide up the percentage increases
let pkg_amount = vec3.len() as f32;
if pkg_amount == 0.0 {
info!("Update | All packages up-to-date ... RETURNING");
*lock2!(update, prog) = 100.0;
*lock2!(update, msg) = MSG_UP_TO_DATE.to_string();
return Ok(());
}
let new_pkgs: String = new_pkgs.concat();
//---------------------------------------------------------------------------------------------------- Download //---------------------------------------------------------------------------------------------------- Download
*lock2!(update, msg) = format!("{}{}", MSG_DOWNLOAD, new_pkgs); *lock2!(update, msg) = format!("{} Gupaxx", MSG_DOWNLOAD);
info!("Update | {}", DOWNLOAD); info!("Update | {}", DOWNLOAD);
let mut vec4 = vec![];
for i in 1..=3 {
if i > 1 {
*lock2!(update, msg) = format!("{} [{}/3]{}", MSG_DOWNLOAD_RETRY, i, new_pkgs);
}
let mut handles: Vec<JoinHandle<Result<(), anyhow::Error>>> = vec![];
for pkg in vec3.iter() {
// Clone data before async // Clone data before async
let bytes = Arc::clone(&pkg.bytes); let version = new_ver;
let client = client.clone();
let version = lock!(pkg.new_ver);
// Download link = PREFIX + Version (found at runtime) + SUFFIX + Version + EXT // Download link = PREFIX + Version (found at runtime) + SUFFIX + Version + EXT
// Example: https://github.com/hinto-janai/gupax/releases/download/v0.0.1/gupax-v0.0.1-linux-x64-standalone // Example: https://github.com/Cyrix126/gupaxx/releases/download/v1.0.0/gupaxx-v1.0.0-linux-x64-standalone.tar.gz
// XMRig doesn't have a [v], so slice it out // prefix: https://github.com/Cyrix126/gupaxx/releases/download
let link = match pkg.name { // version: v1.0.0
Name::Xmrig => { // suffix: gupaxx
pkg.link_prefix.to_string() // version: v1.0.0
+ &version // os
+ pkg.link_suffix // arch
+ &version[1..] // standalone or bundled
+ pkg.link_extension // archive extension
} let bundle = if lock!(og).gupax.bundled {
_ => { "bundle"
pkg.link_prefix.to_string()
+ &version
+ pkg.link_suffix
+ &version
+ pkg.link_extension
}
};
info!("Update | {} ... {}", pkg.name, link);
let handle: JoinHandle<Result<(), anyhow::Error>> =
tokio::spawn(
async move { Pkg::get_bytes(bytes, &client, link, user_agent).await },
);
handles.push(handle);
}
// Handle await
for handle in handles {
if let Err(e) = handle.await? {
warn!("Update | {}", e)
}
}
// Check for empty bytes
let mut indexes = vec![];
for (index, pkg) in vec3.iter().enumerate() {
if lock!(pkg.bytes).is_empty() {
warn!("Update | {} failed, attempt [{}/3]...", pkg.name, i);
} else { } else {
indexes.push(index); "standalone"
vec4.push(*pkg); };
*lock2!(update, prog) += (30.0 / pkg_amount).round(); let link = [
info!("Update | {} ... OK", pkg.name); "https://github.com/Cyrix126/gupaxx/releases/download/",
} &version,
} "/gupaxx-",
// Order indexes from biggest to smallest &version,
// This prevents shifting the whole vector and causing panics. "-",
indexes.sort(); OS_TARGET,
indexes.reverse(); "-",
for index in indexes { ARCH_TARGET,
vec3.remove(index); "-",
} bundle,
if vec3.is_empty() { ".",
break; ARCHIVE_EXT,
} ]
} .concat();
if vec3.is_empty() { info!("Update | Gupaxx ... {}", link);
info!("Update | Download ... OK ... {}%", *lock2!(update, prog)); let bytes = if let Ok(bytes) = get_bytes(&client, link, user_agent).await {
bytes
} else { } else {
error!("Update | Download ... FAIL"); error!("Update | Download ... FAIL");
return Err(anyhow!("Download failed")); return Err(anyhow!("Download failed"));
} };
*lock2!(update, prog) += 30.0;
info!("Update | Gupaxx ... OK");
info!("Update | Download ... OK ... {}%", *lock2!(update, prog));
//---------------------------------------------------------------------------------------------------- Extract //---------------------------------------------------------------------------------------------------- Extract
*lock2!(update, msg) = format!("{}{}", MSG_EXTRACT, new_pkgs); *lock2!(update, msg) = format!("{} Gupaxx", MSG_EXTRACT);
info!("Update | {}", EXTRACT); info!("Update | {}", EXTRACT);
for pkg in vec4.iter() { let tmp = tmp_dir.to_owned();
let tmp = match pkg.name {
Name::Gupax => tmp_dir.to_owned() + GUPAX_BINARY,
_ => tmp_dir.to_owned() + &pkg.name.to_string(),
};
#[cfg(target_os = "windows")] #[cfg(target_os = "windows")]
ZipArchive::extract( ZipArchive::extract(
&mut ZipArchive::new(std::io::Cursor::new(lock!(pkg.bytes).as_ref()))?, &mut ZipArchive::new(std::io::Cursor::new(bytes.as_ref()))?,
tmp, tmp,
)?; )?;
#[cfg(target_family = "unix")] #[cfg(target_family = "unix")]
tar::Archive::new(flate2::read::GzDecoder::new(lock!(pkg.bytes).as_ref())) tar::Archive::new(flate2::read::GzDecoder::new(bytes.as_ref())).unpack(tmp)?;
.unpack(tmp)?; *lock2!(update, prog) += 5.0;
*lock2!(update, prog) += (5.0 / pkg_amount).round(); info!("Update | Gupaxx ... OK");
info!("Update | {} ... OK", pkg.name);
}
info!("Update | Extract ... OK ... {}%", *lock2!(update, prog)); info!("Update | Extract ... OK ... {}%", *lock2!(update, prog));
//---------------------------------------------------------------------------------------------------- Upgrade //---------------------------------------------------------------------------------------------------- Upgrade
// if bundled, directories p2pool and xmrig will exist.
// if not, only gupaxx binary will be present.
// 1. Walk directories // 1. Walk directories
// 2. If basename matches known binary name, start //
// 3. Rename tmp path into current path // 3. Rename tmp path into current path
// 4. Update [State/Version] // 4. Update [State/Version]
*lock2!(update, msg) = format!("{}{}", MSG_UPGRADE, new_pkgs); *lock2!(update, msg) = format!("Gupaxx {}", MSG_UPGRADE);
info!("Update | {}", UPGRADE); info!("Update | {}", UPGRADE);
// If this bool doesn't get set, something has gone wrong because // If this bool doesn't get set, something has gone wrong because
// we _didn't_ find a binary even though we downloaded it. // we _didn't_ find a binary even though we downloaded it.
@ -793,25 +492,17 @@ impl Update {
if !entry.file_type().is_file() { if !entry.file_type().is_file() {
continue; continue;
} }
let basename = entry let name = entry
.file_name() .file_name()
.to_str() .to_str()
.ok_or_else(|| anyhow!("WalkDir basename failed"))?; .ok_or_else(|| anyhow!("WalkDir basename failed"))?;
match basename {
VALID_GUPAX_1 | VALID_GUPAX_2 | VALID_GUPAX_3 | VALID_P2POOL_1 | VALID_P2POOL_2
| VALID_P2POOL_3 | VALID_P2POOL_4 | VALID_XMRIG_1 | VALID_XMRIG_2
| VALID_XMRIG_3 | VALID_XMRIG_4 => {
found = true;
let name = match basename {
VALID_GUPAX_1 | VALID_GUPAX_2 | VALID_GUPAX_3 => Gupax,
VALID_P2POOL_1 | VALID_P2POOL_2 | VALID_P2POOL_3 | VALID_P2POOL_4 => P2pool,
_ => Xmrig,
};
let path = match name { let path = match name {
Gupax => lock!(update).path_gupax.clone(), GUPAX_BINARY => lock!(update).path_gupax.clone(),
P2pool => lock!(update).path_p2pool.clone(), P2POOL_BINARY => lock!(update).path_p2pool.clone(),
Xmrig => lock!(update).path_xmrig.clone(), XMRIG_BINARY => lock!(update).path_xmrig.clone(),
_ => continue,
}; };
found = true;
let path = Path::new(&path); let path = Path::new(&path);
// Unix can replace running binaries no problem (they're loaded into memory) // Unix can replace running binaries no problem (they're loaded into memory)
// Windows locks binaries in place, so we must move (rename) current binary // Windows locks binaries in place, so we must move (rename) current binary
@ -821,9 +512,10 @@ impl Update {
#[cfg(target_os = "windows")] #[cfg(target_os = "windows")]
if path.exists() { if path.exists() {
let tmp_windows = match name { let tmp_windows = match name {
Gupax => tmp_dir.clone() + "gupaxx_old.exe", GUPAX_BINARY => tmp_dir.clone() + "gupaxx_old.exe",
P2pool => tmp_dir.clone() + "p2pool_old.exe", P2POOL_BINARY => tmp_dir.clone() + "p2pool_old.exe",
Xmrig => tmp_dir.clone() + "xmrig_old.exe", XMRIG_BINARY => tmp_dir.clone() + "xmrig_old.exe",
_ => continue,
}; };
info!( info!(
"Update | WINDOWS ONLY ... Moving old [{}] -> [{}]", "Update | WINDOWS ONLY ... Moving old [{}] -> [{}]",
@ -837,31 +529,20 @@ impl Update {
entry.path().display(), entry.path().display(),
path.display() path.display()
); );
// Create folder for [P2Pool/XMRig] // if bundled, create directory for p2pool and xmrig if not present
if name == P2pool || name == Xmrig { if lock!(og).gupax.bundled {
if name == P2POOL_BINARY || name == XMRIG_BINARY {
std::fs::create_dir_all( std::fs::create_dir_all(
path.parent() path.parent()
.ok_or_else(|| anyhow!(format!("{} path failed", name)))?, .ok_or_else(|| anyhow!(format!("{} path failed", name)))?,
)?; )?;
} }
}
// Move downloaded path into old path // Move downloaded path into old path
std::fs::rename(entry.path(), path)?; std::fs::rename(entry.path(), path)?;
// Update [State] version
match name {
Gupax => {
lock!(state_ver).gupax = Pkg::get_new_pkg_version(Gupax, &vec4)?;
// If we're updating Gupax, set the [Restart] state so that the user knows to restart // If we're updating Gupax, set the [Restart] state so that the user knows to restart
*lock!(restart) = Restart::Yes; *lock!(restart) = Restart::Yes;
} *lock2!(update, prog) += 5.0;
P2pool => {
lock!(state_ver).p2pool = Pkg::get_new_pkg_version(P2pool, &vec4)?
}
Xmrig => lock!(state_ver).xmrig = Pkg::get_new_pkg_version(Xmrig, &vec4)?,
};
*lock2!(update, prog) += (5.0 / pkg_amount).round();
}
_ => (),
}
} }
if !found { if !found {
return Err(anyhow!("Fatal error: Package binary could not be found")); return Err(anyhow!("Fatal error: Package binary could not be found"));
@ -875,130 +556,68 @@ impl Update {
let seconds = now.elapsed().as_secs(); let seconds = now.elapsed().as_secs();
info!("Update | Seconds elapsed ... [{}s]", seconds); info!("Update | Seconds elapsed ... [{}s]", seconds);
match seconds { *lock2!(update, msg) = format!(
0 => { "Updated from {} to {}\nYou need to restart Gupaxx.",
*lock2!(update, msg) = GUPAX_VERSION, version
format!("{}! Took 0 seconds... What...?!{}", MSG_SUCCESS, new_pkgs) );
}
1 => {
*lock2!(update, msg) = format!("{}! Took 1 second... Wow!{}", MSG_SUCCESS, new_pkgs)
}
_ => {
*lock2!(update, msg) =
format!("{}! Took {} seconds.{}", MSG_SUCCESS, seconds, new_pkgs)
}
}
*lock2!(update, prog) = 100.0; *lock2!(update, prog) = 100.0;
Ok(()) Ok(())
} }
} }
//---------------------------------------------------------------------------------------------------- Pkg struct/impl //---------------------------------------------------------------------------------------------------- Pkg functions
#[derive(Debug, Clone)] #[cold]
pub struct Pkg { #[inline(never)]
name: Name, // Generate fake [User-Agent] HTTP header
link_metadata: &'static str, pub fn get_user_agent() -> &'static str {
link_prefix: &'static str,
link_suffix: &'static str,
link_extension: &'static str,
bytes: Arc<Mutex<Bytes>>,
new_ver: Arc<Mutex<String>>,
}
impl Pkg {
#[cold]
#[inline(never)]
pub fn new(name: Name) -> Self {
let link_metadata = match name {
Gupax => GUPAX_METADATA,
P2pool => P2POOL_METADATA,
Xmrig => XMRIG_METADATA,
};
let link_prefix = match name {
Gupax => GUPAX_PREFIX,
P2pool => P2POOL_PREFIX,
Xmrig => XMRIG_PREFIX,
};
let link_suffix = match name {
Gupax => GUPAX_SUFFIX,
P2pool => P2POOL_SUFFIX,
Xmrig => XMRIG_SUFFIX,
};
let link_extension = match name {
Gupax => GUPAX_EXTENSION,
P2pool => P2POOL_EXTENSION,
Xmrig => XMRIG_EXTENSION,
};
Self {
name,
link_metadata,
link_prefix,
link_suffix,
link_extension,
bytes: arc_mut!(bytes::Bytes::new()),
new_ver: arc_mut!(String::new()),
}
}
//---------------------------------------------------------------------------------------------------- Pkg functions
#[cold]
#[inline(never)]
// Generate fake [User-Agent] HTTP header
pub fn get_user_agent() -> &'static str {
let index = FAKE_USER_AGENT.len() - 1; let index = FAKE_USER_AGENT.len() - 1;
let rand = thread_rng().gen_range(0..index); let rand = thread_rng().gen_range(0..index);
let user_agent = FAKE_USER_AGENT[rand]; let user_agent = FAKE_USER_AGENT[rand];
info!("Randomly selected User-Agent ({rand}/{index}) ... {user_agent}"); info!("Randomly selected User-Agent ({rand}/{index}) ... {user_agent}");
user_agent user_agent
} }
#[cold] #[cold]
#[inline(never)] #[inline(never)]
// Generate GET request based off input URI + fake user agent // Generate GET request based off input URI + fake user agent
fn get_request( fn get_request(
client: &Client, client: &Client,
link: String, link: String,
user_agent: &'static str, user_agent: &'static str,
) -> Result<RequestBuilder, anyhow::Error> { ) -> Result<RequestBuilder, anyhow::Error> {
Ok(client.get(link).header(USER_AGENT, user_agent)) Ok(client.get(link).header(USER_AGENT, user_agent))
} }
#[cold] #[cold]
#[inline(never)] #[inline(never)]
// Get metadata using [Generic hyper::client<C>] & [Request] // Get metadata using [Generic hyper::client<C>] & [Request]
// and change [version, prog] under an Arc<Mutex> // and change [version, prog] under an Arc<Mutex>
async fn get_metadata( async fn get_metadata(
new_ver: Arc<Mutex<String>>,
client: &Client, client: &Client,
link: String, link: String,
user_agent: &'static str, user_agent: &'static str,
) -> Result<(), Error> { ) -> Result<String, Error> {
let request = Pkg::get_request(client, link, user_agent)?; let request = get_request(client, link, user_agent)?;
let response = request.send().await?; let response = request.send().await?;
dbg!(&response);
let body = response.json::<TagName>().await?; let body = response.json::<TagName>().await?;
*lock!(new_ver) = body.tag_name; Ok(body.tag_name)
Ok(()) }
}
#[cold] #[cold]
#[inline(never)] #[inline(never)]
// Takes a [Request], fills the appropriate [Pkg] async fn get_bytes(
// [bytes] field with the [Archive/Standalone]
async fn get_bytes(
bytes: Arc<Mutex<bytes::Bytes>>,
client: &Client, client: &Client,
link: String, link: String,
user_agent: &'static str, user_agent: &'static str,
) -> Result<(), anyhow::Error> { ) -> Result<bytes::Bytes, anyhow::Error> {
let request = Self::get_request(client, link, user_agent)?; let request = get_request(client, link, user_agent)?;
let mut response = request.send().await?; let mut response = request.send().await?;
// GitHub sends a 302 redirect, so we must follow // GitHub sends a 302 redirect, so we must follow
// the [Location] header... only if Reqwest had custom // the [Location] header... only if Reqwest had custom
// connectors so I didn't have to manually do this... // connectors so I didn't have to manually do this...
if response.headers().contains_key(LOCATION) { if response.headers().contains_key(LOCATION) {
response = Self::get_request( response = get_request(
client, client,
response response
.headers() .headers()
@ -1012,22 +631,7 @@ impl Pkg {
.await?; .await?;
} }
let body = response.bytes().await?; let body = response.bytes().await?;
*lock!(bytes) = body; Ok(body)
Ok(())
}
#[cold]
#[inline(never)]
// Take in a [Name] and [Vec] of [Pkg]s, find
// that [Name]'s corresponding new version.
fn get_new_pkg_version(name: Name, vec: &[&Pkg]) -> Result<String, Error> {
for pkg in vec.iter() {
if pkg.name == name {
return Ok(lock!(pkg.new_ver).to_string());
}
}
Err(anyhow!("Couldn't find new_pkg_version"))
}
} }
// This inherits the value of [tag_name] from GitHub's JSON API // This inherits the value of [tag_name] from GitHub's JSON API
@ -1035,16 +639,3 @@ impl Pkg {
struct TagName { struct TagName {
tag_name: String, tag_name: String,
} }
#[derive(Copy, Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
pub enum Name {
Gupax,
P2pool,
Xmrig,
}
impl std::fmt::Display for Name {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "{:?}", self)
}
}

View file

@ -191,6 +191,7 @@ pub struct Gupax {
pub selected_scale: f32, pub selected_scale: f32,
pub tab: Tab, pub tab: Tab,
pub ratio: Ratio, pub ratio: Ratio,
pub bundled: bool,
} }
#[derive(Clone, Eq, PartialEq, Debug, Deserialize, Serialize)] #[derive(Clone, Eq, PartialEq, Debug, Deserialize, Serialize)]
@ -288,6 +289,10 @@ impl Default for Gupax {
selected_scale: APP_DEFAULT_SCALE, selected_scale: APP_DEFAULT_SCALE,
ratio: Ratio::Width, ratio: Ratio::Width,
tab: Tab::Xvb, tab: Tab::Xvb,
#[cfg(feature = "bundle")]
bundled: true,
#[cfg(not(feature = "bundle"))]
bundled: false,
} }
} }
} }

View file

@ -43,6 +43,7 @@ mod test {
selected_scale = 0.0 selected_scale = 0.0
tab = "About" tab = "About"
ratio = "Width" ratio = "Width"
bundled = false
[status] [status]
submenu = "P2pool" submenu = "P2pool"

View file

@ -141,7 +141,7 @@ pub fn get_exe_dir() -> Result<String, std::io::Error> {
#[cold] #[cold]
#[inline(never)] #[inline(never)]
pub fn clean_dir() -> Result<(), anyhow::Error> { pub fn clean_dir() -> Result<(), anyhow::Error> {
let regex = Regex::new("^gupax_update_[A-Za-z0-9]{10}$").unwrap(); let regex = Regex::new("^gupaxx_update_[A-Za-z0-9]{10}$").unwrap();
for entry in std::fs::read_dir(get_exe_dir()?)? { for entry in std::fs::read_dir(get_exe_dir()?)? {
let entry = entry?; let entry = entry?;
if !entry.path().is_dir() { if !entry.path().is_dir() {

View file

@ -264,8 +264,10 @@ pub const STATUS_SUBMENU_OTHER_BENCHMARKS: &str =
// Gupaxx // Gupaxx
pub const GUPAX_UPDATE: &str = pub const GUPAX_UPDATE: &str =
"Check for updates on Gupaxx, P2Pool, and XMRig via GitHub's API and upgrade automatically"; "Check for updates on Gupaxx and bundled versions of P2Pool and XMRig via GitHub's API and upgrade automatically";
pub const GUPAX_AUTO_UPDATE: &str = "Automatically check for updates at startup"; pub const GUPAX_AUTO_UPDATE: &str = "Automatically check for updates at startup";
pub const GUPAX_BUNDLED_UPDATE: &str =
"Update XMRig and P2Pool with bundled versions of latest Gupaxx. It will replace any present xmrig and p2pool binary in their specified path.";
pub const GUPAX_SHOULD_RESTART: &str = pub const GUPAX_SHOULD_RESTART: &str =
"Gupaxx was updated. A restart is recommended but not required"; "Gupaxx was updated. A restart is recommended but not required";
// #[cfg(not(target_os = "macos"))] // #[cfg(not(target_os = "macos"))]