mirror of https://github.com/serai-dex/serai.git
Don't clear cache within a batch build
A caller *can* call batch from a threaded environment and still trigger this at this time. I'm unsure that use case exists/matters. If GITHUB_CI is set, build in two batches to try and avoid storage limits.
commit aa666afc08
parent 9da1d714b3
2 changed files with 25 additions and 9 deletions
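In outline, the change splits the old `build` into a private `build_inner` (which only builds the image and marks it as built) plus a standalone `clear_cache_if_github` helper, so the Docker cache is cleared once per call to `build` or `build_batch` rather than once per image. A condensed sketch of the resulting structure, with bodies elided (see the diff below for the actual code):

async fn build_inner(name: String) {
  // ... builds the Docker image for `name`, guarded so each image is only built once ...
}

async fn clear_cache_if_github() {
  // Clears the Docker cache, but only when the GITHUB_CI environment variable is set.
}

pub async fn build(name: String) {
  build_inner(name).await;
  clear_cache_if_github().await;
}

pub async fn build_batch(names: Vec<String>) {
  let mut handles = vec![];
  for name in names.into_iter().collect::<std::collections::HashSet<_>>() {
    handles.push(tokio::spawn(build_inner(name)));
  }
  for handle in handles {
    handle.await.unwrap();
  }
  // The cache is now cleared once after the whole batch, not within it.
  clear_cache_if_github().await;
}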
@@ -9,7 +9,7 @@ use std::{
 use tokio::{sync::Mutex, process::Command};
 
 static BUILT: OnceLock<Mutex<HashMap<String, Arc<Mutex<bool>>>>> = OnceLock::new();
-pub async fn build(name: String) {
+async fn build_inner(name: String) {
   let built = BUILT.get_or_init(|| Mutex::new(HashMap::new()));
   // Only one call to build will acquire this lock
   let mut built_lock = built.lock().await;
@@ -202,6 +202,11 @@ pub async fn build(name: String) {
 
   println!("Built!");
 
+  // Set built
+  *built_lock = true;
+}
+
+async fn clear_cache_if_github() {
   if std::env::var("GITHUB_CI").is_ok() {
     println!("In CI, so clearing cache to prevent hitting the storage limits.");
     if !Command::new("docker")
@@ -215,20 +220,23 @@ pub async fn build(name: String) {
       .status
       .success()
     {
-      println!("failed to clear cache after building {name}\n");
+      println!("failed to clear cache\n");
     }
   }
+}
 
-  // Set built
-  *built_lock = true;
+pub async fn build(name: String) {
+  build_inner(name).await;
+  clear_cache_if_github().await;
 }
 
 pub async fn build_batch(names: Vec<String>) {
   let mut handles = vec![];
   for name in names.into_iter().collect::<HashSet<_>>() {
-    handles.push(tokio::spawn(build(name)));
+    handles.push(tokio::spawn(build_inner(name)));
   }
   for handle in handles {
     handle.await.unwrap();
   }
+  clear_cache_if_github().await;
 }
@@ -32,13 +32,21 @@ pub struct Handles {
 }
 
 pub async fn full_stack(name: &str) -> (Handles, Vec<TestBodySpecification>) {
-  let mut docker_names = serai_processor_tests::docker_names(NetworkId::Bitcoin);
-  docker_names.append(&mut serai_processor_tests::docker_names(NetworkId::Monero));
-  docker_names.extend([
+  let mut processor_docker_names = serai_processor_tests::docker_names(NetworkId::Bitcoin);
+  processor_docker_names.extend(serai_processor_tests::docker_names(NetworkId::Monero));
+
+  let mut docker_names = vec![
     serai_message_queue_tests::docker_name(),
     serai_coordinator_tests::serai_docker_name(),
     serai_coordinator_tests::coordinator_docker_name(),
-  ]);
+  ];
+
+  // If this is in the GH CI, build in two stages so we don't hit storage limits
+  if std::env::var("GITHUB_CI").is_ok() {
+    serai_docker_tests::build_batch(processor_docker_names).await;
+  } else {
+    docker_names.extend(processor_docker_names);
+  }
   serai_docker_tests::build_batch(docker_names).await;
 
   let (coord_key, message_queue_keys, message_queue_composition) = message_queue_instance().await;
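For illustration, a hypothetical caller-side sketch of the new behaviour (the image names here are placeholders, not necessarily names produced by the repo's docker_name helpers): duplicate names are collapsed by the HashSet inside build_batch, and when GITHUB_CI is set the cache is cleared exactly once, after every image in the batch has been built.

async fn example() {
  serai_docker_tests::build_batch(vec![
    // Placeholder image names, for illustration only.
    "bitcoin".to_string(),
    "bitcoin".to_string(), // deduplicated inside build_batch
    "monero".to_string(),
  ])
  .await;
  // With GITHUB_CI set, the Docker cache was cleared once, after both builds finished.
}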