Use debug builds in our Dockerfiles to reduce CI times (#462)

* Use debug builds in our Dockerfiles to reduce CI times

Also makes the coordinator only spawn the mdns service in debug builds.

* Correct underflow in processor

Previously undetected because release builds don't enable overflow checks.
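
For context, Cargo's default profiles enable overflow checks (alongside debug assertions) in debug builds and disable them in release builds, which is why the arithmetic underflow only surfaced once CI switched to debug binaries. A minimal, standalone sketch of the difference (not Serai code):

```rust
// Sketch only: demonstrates the debug/release difference behind the fix below.
fn main() {
  let msg_id: u64 = 0;

  // In a debug build this line panics with "attempt to subtract with overflow";
  // in a default release build it silently wraps to u64::MAX:
  // let previous = msg_id - 1;

  // Explicit alternatives behave identically in both profiles:
  println!("wrapping: {}", msg_id.wrapping_sub(1)); // 18446744073709551615
  println!("checked: {:?}", msg_id.checked_sub(1)); // None
}
```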

* Restore the Serai release build due to CI/RPC failures caused by compiling it in debug mode

This is *probably* worth filing as an upstream issue, if the cause can be tracked down.

* Correct failing debug asserts in Monero

These debug asserts assumed there was a change address to take the remainder.
If there's no change address, the remainder is shunted to the fee, causing the
actual fee to differ from the estimate (see the sketch below).

We presumably need to modify monero-serai such that change: None isn't valid,
and users must use Change::Fingerprintable(None).
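
The arithmetic behind those asserts, illustrated (plain numbers, not monero-serai's API): with a change output the fee can be pinned to the estimate and the change absorbs the remainder, while without one the remainder has nowhere to go except the fee, so the actual fee exceeds the estimate.

```rust
// Illustrative arithmetic only; the identifiers and amounts are made up and
// are not monero-serai's API.
fn main() {
  let in_amount: u64 = 1_000_000;
  let payments_sum: u64 = 949_900;
  let estimated_fee: u64 = 50_000;

  // With a change address: the change output absorbs the remainder, so the
  // actual fee equals the estimate and the old debug asserts hold.
  let change = in_amount - payments_sum - estimated_fee; // 100
  let fee_with_change = estimated_fee;
  assert_eq!(payments_sum + change + fee_with_change, in_amount);
  assert_eq!(fee_with_change, estimated_fee);

  // Without a change address: the remainder is shunted into the fee, so the
  // actual fee no longer matches the estimate and the old asserts would trip.
  let fee_without_change = in_amount - payments_sum; // 50_100
  assert_eq!(payments_sum + fee_without_change, in_amount);
  assert_ne!(fee_without_change, estimated_fee);
}
```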
Luke Parker 2023-11-29 00:24:37 -05:00 committed by GitHub
parent 695d1f0ecf
commit f0ff3a18d2
11 changed files with 62 additions and 46 deletions


@@ -314,6 +314,7 @@ pub struct SignableTransaction {
   protocol: Protocol,
   r_seed: Option<Zeroizing<[u8; 32]>>,
   inputs: Vec<(SpendableOutput, Decoys)>,
+  has_change: bool,
   payments: Vec<InternalPayment>,
   data: Vec<Vec<u8>>,
   fee: u64,
@@ -385,7 +386,7 @@ fn need_additional(payments: &[InternalPayment]) -> (bool, bool) {
   (subaddresses, additional)
 }
 
-fn sanity_check_change_payment(payments: &[InternalPayment], has_change_address: bool) {
+fn sanity_check_change_payment_quantity(payments: &[InternalPayment], has_change_address: bool) {
   debug_assert_eq!(
     payments
       .iter()
@@ -516,7 +517,7 @@ impl SignableTransaction {
     }
 
     // Sanity check we have the expected number of change outputs
-    sanity_check_change_payment(&payments, change_address.is_some());
+    sanity_check_change_payment_quantity(&payments, change_address.is_some());
 
     // Modify the amount of the change output
     if change_address.is_some() {
@@ -527,23 +528,34 @@ impl SignableTransaction {
     }
 
     // Sanity check the change again after modifying
-    sanity_check_change_payment(&payments, change_address.is_some());
+    sanity_check_change_payment_quantity(&payments, change_address.is_some());
 
     // Sanity check outgoing amount + fee == incoming amount
-    debug_assert_eq!(
-      payments
-        .iter()
-        .map(|payment| match *payment {
-          InternalPayment::Payment(payment) => payment.1,
-          InternalPayment::Change(_, amount) => amount,
-        })
-        .sum::<u64>() +
-        fee,
-      in_amount,
-      "Outgoing amount + fee != incoming amount"
-    );
+    if change_address.is_some() {
+      debug_assert_eq!(
+        payments
+          .iter()
+          .map(|payment| match *payment {
+            InternalPayment::Payment(payment) => payment.1,
+            InternalPayment::Change(_, amount) => amount,
+          })
+          .sum::<u64>() +
+          fee,
+        in_amount,
+        "Outgoing amount + fee != incoming amount"
+      );
+    }
 
-    Ok(SignableTransaction { protocol, r_seed, inputs, payments, data, fee, fee_rate })
+    Ok(SignableTransaction {
+      protocol,
+      r_seed,
+      inputs,
+      payments,
+      has_change: change_address.is_some(),
+      data,
+      fee,
+      fee_rate,
+    })
   }
 
   pub fn fee(&self) -> u64 {
@@ -778,7 +790,9 @@ impl SignableTransaction {
       });
       encrypted_amounts.push(EncryptedAmount::Compact { amount: output.amount });
     }
 
-    debug_assert_eq!(self.fee, fee, "transaction will use an unexpected fee");
+    if self.has_change {
+      debug_assert_eq!(self.fee, fee, "transaction will use an unexpected fee");
+    }
     (
       Transaction {
@@ -844,11 +858,13 @@ impl SignableTransaction {
       _ => unreachable!("attempted to sign a TX which wasn't CLSAG"),
     }
 
-    debug_assert_eq!(
-      self.fee_rate.calculate_fee_from_weight(tx.weight()),
-      tx.rct_signatures.base.fee,
-      "transaction used unexpected fee",
-    );
+    if self.has_change {
+      debug_assert_eq!(
+        self.fee_rate.calculate_fee_from_weight(tx.weight()),
+        tx.rct_signatures.base.fee,
+        "transaction used unexpected fee",
+      );
+    }
 
     Ok(tx)
   }


@@ -191,7 +191,7 @@ pub trait P2p: Send + Sync + Clone + fmt::Debug + TributaryP2p {
 #[derive(NetworkBehaviour)]
 struct Behavior {
   gossipsub: GsBehavior,
-  //#[cfg(debug_assertions)]
+  #[cfg(debug_assertions)]
   mdns: libp2p::mdns::tokio::Behaviour,
 }
 
@@ -261,8 +261,7 @@ impl LibP2p {
 
       },
       // Only use MDNS in debug environments, as it should have no value in a release build
-      // TODO: We do tests on release binaries as of right now...
-      //#[cfg(debug_assertions)]
+      #[cfg(debug_assertions)]
       mdns: {
         log::info!("creating mdns service");
         libp2p::mdns::tokio::Behaviour::new(libp2p::mdns::Config::default(), throwaway_peer_id)
@@ -371,7 +370,7 @@ impl LibP2p {
           // Handle new incoming messages
           event = swarm.next() => {
             match event {
-              //#[cfg(debug_assertions)]
+              #[cfg(debug_assertions)]
              Some(SwarmEvent::Behaviour(BehaviorEvent::Mdns(
                libp2p::mdns::Event::Discovered(list),
              ))) => {
@@ -383,7 +382,7 @@ impl LibP2p {
                  }
                }
              }
-              //#[cfg(debug_assertions)]
+              #[cfg(debug_assertions)]
              Some(SwarmEvent::Behaviour(BehaviorEvent::Mdns(
                libp2p::mdns::Event::Expired(list),
              ))) => {
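
For reference, a field gated on debug_assertions has to carry the same cfg at its declaration, its construction, and every use site, which is why all three hunks above change together. A standalone sketch of the pattern (illustrative names, not the coordinator's actual types):

```rust
// Sketch of a cfg(debug_assertions)-gated field; names are illustrative.
struct Services {
  gossip: String,
  // The field only exists in debug builds.
  #[cfg(debug_assertions)]
  mdns: String,
}

fn main() {
  let services = Services {
    gossip: "gossipsub".to_string(),
    // The initializer must be gated too, or release builds fail to compile.
    #[cfg(debug_assertions)]
    mdns: "mdns".to_string(),
  };

  println!("always running: {}", services.gossip);
  // And so must every use of the field.
  #[cfg(debug_assertions)]
  println!("debug only: {}", services.mdns);
}
```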


@@ -46,8 +46,8 @@ RUN --mount=type=cache,target=/root/.cargo \
   --mount=type=cache,target=/usr/local/cargo/git \
   --mount=type=cache,target=/serai/target \
   mkdir /serai/bin && \
-  cargo build -p serai-coordinator --release --all-features && \
-  mv /serai/target/release/serai-coordinator /serai/bin
+  cargo build -p serai-coordinator --all-features && \
+  mv /serai/target/debug/serai-coordinator /serai/bin
 
 FROM debian:bookworm-slim as image
 COPY --from=mimalloc libmimalloc.so /usr/lib


@@ -1,2 +1,2 @@
-cargo build -p serai-coordinator --release --all-features && \
-mv /serai/target/release/serai-coordinator /serai/bin
+cargo build -p serai-coordinator --all-features && \
+mv /serai/target/debug/serai-coordinator /serai/bin


@@ -46,8 +46,8 @@ RUN --mount=type=cache,target=/root/.cargo \
   --mount=type=cache,target=/usr/local/cargo/git \
   --mount=type=cache,target=/serai/target \
   mkdir /serai/bin && \
-  cargo build --release --all-features -p serai-message-queue && \
-  mv /serai/target/release/serai-message-queue /serai/bin
+  cargo build --all-features -p serai-message-queue && \
+  mv /serai/target/debug/serai-message-queue /serai/bin
 
 FROM debian:bookworm-slim as image
 COPY --from=mimalloc libmimalloc.so /usr/lib


@@ -1,2 +1,2 @@
-cargo build --release --all-features -p serai-message-queue && \
-mv /serai/target/release/serai-message-queue /serai/bin
+cargo build --all-features -p serai-message-queue && \
+mv /serai/target/debug/serai-message-queue /serai/bin


@@ -46,8 +46,8 @@ RUN --mount=type=cache,target=/root/.cargo \
   --mount=type=cache,target=/usr/local/cargo/git \
   --mount=type=cache,target=/serai/target \
   mkdir /serai/bin && \
-  cargo build --release --features "binaries bitcoin" -p serai-processor && \
-  mv /serai/target/release/serai-processor /serai/bin
+  cargo build --features "binaries bitcoin" -p serai-processor && \
+  mv /serai/target/debug/serai-processor /serai/bin
 
 FROM debian:bookworm-slim as image
 COPY --from=mimalloc libmimalloc.so /usr/lib


@@ -1,2 +1,2 @@
-cargo build --release --features "binaries bitcoin" -p serai-processor && \
-mv /serai/target/release/serai-processor /serai/bin
+cargo build --features "binaries bitcoin" -p serai-processor && \
+mv /serai/target/debug/serai-processor /serai/bin


@@ -46,8 +46,8 @@ RUN --mount=type=cache,target=/root/.cargo \
   --mount=type=cache,target=/usr/local/cargo/git \
   --mount=type=cache,target=/serai/target \
   mkdir /serai/bin && \
-  cargo build --release --features "binaries monero" -p serai-processor && \
-  mv /serai/target/release/serai-processor /serai/bin
+  cargo build --features "binaries monero" -p serai-processor && \
+  mv /serai/target/debug/serai-processor /serai/bin
 
 FROM debian:bookworm-slim as image
 COPY --from=mimalloc libmimalloc.so /usr/lib


@@ -1,2 +1,2 @@
-cargo build --release --features "binaries monero" -p serai-processor && \
-mv /serai/target/release/serai-processor /serai/bin
+cargo build --features "binaries monero" -p serai-processor && \
+mv /serai/target/debug/serai-processor /serai/bin


@@ -569,12 +569,13 @@ async fn run<N: Network, D: Db, Co: Coordinator>(mut raw_db: D, network: N, mut
       // KeyGen specifically may take a notable amount of processing time
       // While that shouldn't be an issue in practice, as after processing an attempt it'll handle
       // the other messages in the queue, it may be beneficial to parallelize these
-      // They could likely be parallelized by type (KeyGen, Sign, Substrate) without issue
+      // They could potentially be parallelized by type (KeyGen, Sign, Substrate) without issue
       msg = coordinator.recv() => {
-        assert_eq!(msg.id, (last_coordinator_msg.unwrap_or(msg.id - 1) + 1));
+        if let Some(last_coordinator_msg) = last_coordinator_msg {
+          assert_eq!(msg.id, last_coordinator_msg + 1);
+        }
         last_coordinator_msg = Some(msg.id);
 
         // Only handle this if we haven't already
         if HandledMessageDb::get(&main_db, msg.id).is_none() {
           HandledMessageDb::set(&mut txn, msg.id, &());
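
A note on the hunk above: the old assert's fallback, msg.id - 1, presumably underflows when the very first message arrives with an id of 0; there is no prior id yet, and 0u64 - 1 panics under debug overflow checks while a default release build silently wraps. The rewritten form skips the continuity check until a prior id exists. A minimal sketch of the before/after shape (not Serai code; the concrete id values are assumptions for illustration):

```rust
// Sketch of the before/after shape. Assumes the first coordinator message
// arrives with id 0, which is what makes the old fallback expression underflow.
fn main() {
  let last_msg: Option<u64> = None; // no message handled yet
  let msg_id: u64 = 0;              // first message's id (assumed)

  // Old shape: the fallback `msg_id - 1` is 0u64 - 1, which panics with
  // "attempt to subtract with overflow" in a debug build and silently wraps
  // to u64::MAX in a default release build:
  // assert_eq!(msg_id, last_msg.unwrap_or(msg_id - 1) + 1);

  // New shape: only check continuity once a prior id exists, so nothing is
  // ever subtracted from 0.
  if let Some(last_msg) = last_msg {
    assert_eq!(msg_id, last_msg + 1);
  }
  println!("handled message {msg_id}");
}
```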