machete, drain > mem::swap for clarity reasons

This commit is contained in:
Luke Parker 2024-09-20 02:30:08 -04:00
parent a0ed043372
commit 2c8af04781
4 changed files with 8 additions and 14 deletions

2
Cargo.lock generated
View file

@ -8326,7 +8326,6 @@ version = "0.1.0"
name = "serai-ethereum-processor" name = "serai-ethereum-processor"
version = "0.1.0" version = "0.1.0"
dependencies = [ dependencies = [
"alloy-consensus",
"alloy-core", "alloy-core",
"alloy-provider", "alloy-provider",
"alloy-rlp", "alloy-rlp",
@ -8716,7 +8715,6 @@ dependencies = [
"build-solidity-contracts", "build-solidity-contracts",
"ethereum-schnorr-contract", "ethereum-schnorr-contract",
"group", "group",
"k256",
"serai-client", "serai-client",
"serai-processor-ethereum-deployer", "serai-processor-ethereum-deployer",
"serai-processor-ethereum-erc20", "serai-processor-ethereum-erc20",

View file

@ -284,21 +284,19 @@ pub async fn main_loop<
let key_to_activate = let key_to_activate =
KeyToActivate::<KeyFor<S>>::try_recv(txn.as_mut().unwrap()).map(|key| key.0); KeyToActivate::<KeyFor<S>>::try_recv(txn.as_mut().unwrap()).map(|key| key.0);
/*
`acknowledge_batch` takes burns to optimize handling returns with standard payments.
That's why handling these with a Batch (and not waiting until the following potential
`queue_burns` call) makes sense. As for which Batch, the first is equally valid unless
we want to start introspecting (and should be our only Batch anyways).
*/
let mut this_batchs_burns = vec![];
std::mem::swap(&mut burns, &mut this_batchs_burns);
// This is a cheap call as it internally just queues this to be done later // This is a cheap call as it internally just queues this to be done later
let _: () = scanner.acknowledge_batch( let _: () = scanner.acknowledge_batch(
txn.take().unwrap(), txn.take().unwrap(),
id, id,
in_instructions, in_instructions,
this_batchs_burns, /*
`acknowledge_batch` takes burns to optimize handling returns with standard
payments. That's why handling these with a Batch (and not waiting until the
following potential `queue_burns` call) makes sense. As for which Batch, the first
is equally valid unless we want to start introspecting (and should be our only
Batch anyways).
*/
burns.drain(..).collect(),
key_to_activate, key_to_activate,
); );
} }

View file

@ -32,7 +32,6 @@ k256 = { version = "^0.13.1", default-features = false, features = ["std"] }
alloy-core = { version = "0.8", default-features = false } alloy-core = { version = "0.8", default-features = false }
alloy-rlp = { version = "0.3", default-features = false } alloy-rlp = { version = "0.3", default-features = false }
alloy-consensus = { version = "0.3", default-features = false }
alloy-rpc-types-eth = { version = "0.3", default-features = false } alloy-rpc-types-eth = { version = "0.3", default-features = false }
alloy-transport = { version = "0.3", default-features = false } alloy-transport = { version = "0.3", default-features = false }

View file

@ -18,7 +18,6 @@ workspace = true
[dependencies] [dependencies]
group = { version = "0.13", default-features = false } group = { version = "0.13", default-features = false }
k256 = { version = "^0.13.1", default-features = false, features = ["std", "ecdsa", "arithmetic"] }
alloy-core = { version = "0.8", default-features = false } alloy-core = { version = "0.8", default-features = false }
alloy-consensus = { version = "0.3", default-features = false } alloy-consensus = { version = "0.3", default-features = false }