Mirror of https://github.com/monero-project/monero.git
core: speed up output index unique set calculation
A sort+uniq step was done for every tx in a 200-block chunk, causing a lot of repeated scanning as the offset map grew with every added tx. We now do the step only once, at the end of the loop. Doing it this way potentially uses more memory, but testing shows it's currently only about 2% more.
parent 19d7f568ce
commit 5d4ef719b9
1 changed file with 8 additions and 8 deletions
src/cryptonote_core/blockchain.cpp

@@ -3866,17 +3866,17 @@ bool Blockchain::prepare_handle_incoming_blocks(const std::list<block_complete_e
             offset_map[in_to_key.amount].push_back(offset);
 
         }
-
-        // sort and remove duplicate absolute_offsets in offset_map
-        for (auto &offsets : offset_map)
-        {
-          std::sort(offsets.second.begin(), offsets.second.end());
-          auto last = std::unique(offsets.second.begin(), offsets.second.end());
-          offsets.second.erase(last, offsets.second.end());
-        }
       }
     }
   }
 
+  // sort and remove duplicate absolute_offsets in offset_map
+  for (auto &offsets : offset_map)
+  {
+    std::sort(offsets.second.begin(), offsets.second.end());
+    auto last = std::unique(offsets.second.begin(), offsets.second.end());
+    offsets.second.erase(last, offsets.second.end());
+  }
+
   // [output] stores all transactions for each tx_out_index::hash found
   std::vector<std::unordered_map<crypto::hash, cryptonote::transaction>> transactions(amounts.size());
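The change is the classic hoisting pattern: instead of re-sorting and deduplicating the growing per-amount vectors after every transaction, offsets are appended unsorted during the scan and a single sort+unique pass runs once the whole 200-block chunk has been walked. Below is a minimal standalone sketch of that pattern; it is not code from the monero tree, and the names (offset_map_t, sort_and_dedup) and the sample data are invented for illustration.

#include <algorithm>
#include <cstdint>
#include <map>
#include <vector>

// Hypothetical stand-in for the per-amount output-offset map in the diff.
using offset_map_t = std::map<uint64_t, std::vector<uint64_t>>;

// One sort+unique+erase pass per amount, run once after all insertions;
// this mirrors the block the commit moved out of the per-tx loop.
static void sort_and_dedup(offset_map_t &offset_map)
{
  for (auto &offsets : offset_map)
  {
    std::sort(offsets.second.begin(), offsets.second.end());
    auto last = std::unique(offsets.second.begin(), offsets.second.end());
    offsets.second.erase(last, offsets.second.end());
  }
}

int main()
{
  offset_map_t offset_map;

  // Simulate many txs appending (frequently duplicated) absolute offsets.
  // Before the commit, the equivalent of sort_and_dedup ran inside this
  // loop, rescanning the ever-growing vectors on every iteration.
  for (uint64_t tx = 0; tx < 1000; ++tx)
    for (uint64_t i = 0; i < 10; ++i)
      offset_map[i % 3].push_back((tx * 7 + i) % 100);

  // After the commit: a single pass at the end.
  sort_and_dedup(offset_map);
  return 0;
}

The trade-off matches the commit message: duplicates now sit in memory until the final pass (the roughly 2% extra the author measured), in exchange for eliminating the repeated sort+uniq rescans as the map grows.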