// Copyright (c) 2014-2016, The Monero Project
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification, are
// permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of
//    conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice, this list
//    of conditions and the following disclaimer in the documentation and/or other
//    materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its contributors may be
//    used to endorse or promote products derived from this software without specific
//    prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
// THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
// THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Parts of this file are originally copyright (c) 2012-2013 The Cryptonote developers

#pragma once

#include <boost/serialization/serialization.hpp>
#include <boost/serialization/version.hpp>
#include <boost/serialization/list.hpp>
#include <boost/multi_index_container.hpp>
#include <boost/multi_index/global_fun.hpp>
#include <boost/multi_index/hashed_index.hpp>
#include <boost/multi_index/member.hpp>
#include <boost/foreach.hpp>
#include <atomic>
#include <unordered_map>
#include <unordered_set>

#include "syncobj.h"
#include "string_tools.h"
#include "cryptonote_basic.h"
#include "common/util.h"
#include "cryptonote_protocol/cryptonote_protocol_defs.h"
#include "rpc/core_rpc_server_commands_defs.h"
#include "difficulty.h"
#include "cryptonote_core/cryptonote_format_utils.h"
#include "verification_context.h"
#include "crypto/hash.h"
#include "checkpoints.h"
#include "hardfork.h"
#include "blockchain_db/blockchain_db.h"

namespace cryptonote
{
  class tx_memory_pool;
  struct test_options;

  /* ** CHANGES ARE EXPERIMENTAL (FOR TESTING ONLY) **
   *
   * Blockchain:
   * 1. Optim: Multi-thread long-hash computation when encountering groups of blocks.
   * 2. Optim: Cache verified txs and return the result from cache instead of re-checking whenever possible.
   * 3. Optim: Preload output-keys when encountering groups of blocks. Sort by amount and global-index before bulk querying the database, and multi-thread when possible.
   * 4. Optim: Disable the double-spend check on block verification; double spends are already detected when trying to add blocks.
   * 5. Optim: Multi-thread signature computation whenever possible.
   * 6. Patch: Disable locking (recursive mutex) on functions called from check_tx_inputs, which causes slowdowns (only seems to happen on Ubuntu/VMs??? Reason: TBD).
   * 7. Optim: Removed looped full-tx hash computation when retrieving transactions from the pool (???).
   * 8. Optim: Cache difficulty/timestamps (735 blocks) for next-difficulty calculations, so that only 2 db reads are needed when a new block arrives (instead of 1470 reads).
   *
   * Berkeley DB:
   * 1. Fix: 32-bit data errors causing wrong output global indices and failure to send blocks to peers (etc.).
   * 2. Fix: Unable to pop blocks on reorganize due to transaction errors.
   * 3. Patch: Large number of transaction aborts when running multi-threaded bulk queries.
   * 4. Patch: "Insufficient locks" error when running a full sync.
   * 5. Patch: Incorrect db stats when returning from an immediate exit from the "pop block" operation.
   * 6. Optim: Added bulk queries to get output global indices.
   * 7. Optim: Modified the output_keys table to store public_key+unlock_time+height for a single lookup per transaction (vs. 3 lookups).
   * 8. Optim: Used the output_keys table to retrieve public_keys instead of going through output_amounts->output_txs+output_indices->txs->output:public_key.
   * 9. Optim: Added thread-safe buffers used when multi-threading bulk queries.
   * 10. Optim: Added support for nosync/write_nosync options for improved performance (*see the --db-sync-mode option for details).
   * 11. Mod: Added a checkpoint thread and an auto-remove-logs option.
   * 12. *Now usable on 32-bit systems like the RPI2.
   *
   * LMDB:
   * 1. Optim: Added custom comparison for 256-bit key tables (minor speed-up; TBD: measure the actual effect).
   * 2. Optim: Modified the output_keys table to store public_key+unlock_time+height for a single lookup per transaction (vs. 3 lookups).
   * 3. Optim: Used the output_keys table to retrieve public_keys instead of going through output_amounts->output_txs+output_indices->txs->output:public_key.
   * 4. Optim: Added support for sync/writemap options for improved performance (*see the --db-sync-mode option for details).
   * 5. Mod: Auto-resize by +1GB instead of a x1.5 multiplier.
   *
   * ETC:
   * 1. Minor optimizations of slow-hash for ARM (RPI2). Incomplete.
   * 2. Fix: 32-bit saturation bug when computing the next difficulty on large blocks.
   *
   * [PENDING ISSUES]
   * 1. Berkeley DB has a very slow "pop block" operation. This is very noticeable on the RPI2, where it sometimes takes > 10 MINUTES to pop a block during reorganization.
   *    This does not happen very often, however; most reorgs seem to take a few seconds, but it possibly depends on the number of outputs present. TBD.
   * 2. Berkeley DB: possible "unable to allocate memory" bug. TBD.
   *
   * [NEW OPTIONS] (*Currently all enabled for testing purposes)
   * 1. --fast-block-sync arg=[0:1] (default: 1)
   *    a. 0 = Compute the long hash per block (may take a while depending on CPU).
   *    b. 1 = Skip the long hash and verify blocks based on embedded known-good block hashes (faster, minimal CPU dependence).
   * 2. --db-sync-mode arg=[[safe|fast|fastest]:[sync|async]:[nblocks_per_sync]] (default: fastest:async:1000)
   *    a. safe = fdatasync/fsync (or equivalent) per stored block. Very slow, but the safest option to protect against power-loss/crash conditions.
   *    b. fast/fastest = Enables asynchronous fdatasync/fsync (or equivalent). Useful for battery-operated devices, or stable systems with a UPS and/or a battery-backed write cache/solid-state cache.
   *       Fast    - Write metadata but defer the data flush.
   *       Fastest - Defer both metadata and data flush.
   *       Sync    - Flush data after nblocks_per_sync blocks and wait.
   *       Async   - Flush data after nblocks_per_sync blocks but do not wait for the operation to finish.
   * 3. --prep-blocks-threads arg=[n] (default: 4 or the system max threads, whichever is lower)
   *    Max number of threads to use when computing long hashes in groups.
   * 4. --show-time-stats arg=[0:1] (default: 1)
   *    Show benchmark-related time stats.
   * 5. --db-auto-remove-logs arg=[0:1] (default: 1)
   *    For Berkeley DB only: auto-remove logs if enabled.
   * **Note: LMDB and Berkeley DB have changes to the tables and are not compatible with the official git HEAD version.
   *   At the moment, you need a full resync to use this optimized version.
   *
   * [PERFORMANCE COMPARISON]
   * **Some figures are approximations only.
   * Using a baseline machine of an i7-2600K + SSD (with full PoW computation):
   * 1. The optimized lmdb/blockchain core can process blocks up to 585K in ~1.25 hours + download time, so it usually takes 2.5 hours to sync the full chain.
   * 2. The current head with the in-memory db can process blocks up to 585K in ~4.2 hours + download time, so it usually takes 5.5 hours to sync the full chain.
   * 3. The current head with lmdb can process blocks up to 585K in ~32 hours + download time, and usually takes 36 hours to sync the full chain.
   *
   * Average processing times (with full PoW computation):
   *   lmdb-optimized:
   *     1. tx_ave    = 2.5 ms / tx
   *     2. block_ave = 5.87 ms / block
   *   memory-official-repo:
   *     1. tx_ave    = 8.85 ms / tx
   *     2. block_ave = 19.68 ms / block
   *   lmdb-official-repo (0f4a036437fd41a5498ee5e74e2422ea6177aa3e):
   *     1. tx_ave    = 47.8 ms / tx
   *     2. block_ave = 64.2 ms / block
   *
   * **Note: The following data denotes processing times only (does not include p2p download time).
   * lmdb-optimized processing times (with full PoW computation):
   *   1. Desktop, quad-core / 8 threads, 2600K (8MB) - 1.25 hours processing time (--db-sync-mode=fastest:async:1000).
   *   2. Laptop, dual-core / 4 threads, U4200 (3MB) - 4.90 hours processing time (--db-sync-mode=fastest:async:1000).
   *   3. Embedded, quad-core / 4 threads, Z3735F (2x1MB) - 12.0 hours processing time (--db-sync-mode=fastest:async:1000).
   * lmdb-optimized processing times (with per-block checkpoint):
   *   1. Desktop, quad-core / 8 threads, 2600K (8MB) - 10 minutes processing time (--db-sync-mode=fastest:async:1000).
   * Berkeley DB optimized processing times (with full PoW computation):
   *   1. Desktop, quad-core / 8 threads, 2600K (8MB) - 1.8 hours processing time (--db-sync-mode=fastest:async:1000).
   *   2. RPI2: improved from an estimated 3 months (???) to 2.5 days (*needs a 2A supply + 1GHz clock + [usb+ssd] to achieve this speed) (--db-sync-mode=fastest:async:1000).
   * Berkeley DB optimized processing times (with per-block checkpoint):
   *   1. RPI2: 12-15 hours (*needs a 2A supply + 1GHz clock + [usb+ssd] to achieve this speed) (--db-sync-mode=fastest:async:1000).
   */

  /** Declares ways in which the BlockchainDB backend should be told to sync
   */
  enum blockchain_db_sync_mode
  {
    db_sync,  //!< handle syncing calls instead of the backing db, synchronously
    db_async, //!< handle syncing calls instead of the backing db, asynchronously
    db_nosync //!< leave syncing up to the backing db (safest, but slowest because of disk I/O)
  };
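
  // A minimal sketch (illustrative only, not part of this header) of how a
  // --db-sync-mode string such as "fastest:async:1000" could map onto this
  // enum; parse_db_sync_mode is a hypothetical helper, and the safe->db_nosync
  // mapping follows the option description above (per-block fsync is left to
  // the backing db):
  //
  //   blockchain_db_sync_mode parse_db_sync_mode(const std::string& mode)
  //   {
  //     if (mode == "safe")
  //       return db_nosync;  // backing db syncs itself, once per stored block
  //     if (mode == "sync")
  //       return db_sync;    // Blockchain flushes every nblocks_per_sync and waits
  //     return db_async;     // "async": flush every nblocks_per_sync without waiting
  //   }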

  /************************************************************************/
  /*                                                                      */
  /************************************************************************/
  class Blockchain
  {
  public:
    /**
     * @brief Now-defunct (TODO: remove) struct from in-memory blockchain
     */
    struct transaction_chain_entry
    {
      transaction tx;
      uint64_t m_keeper_block_height;
      size_t m_blob_size;
      std::vector<uint64_t> m_global_output_indexes;
    };

    /**
     * @brief container for passing a block and metadata about it on the blockchain
     */
    struct block_extended_info
    {
      block bl; //!< the block
      uint64_t height; //!< the height of the block in the blockchain
      size_t block_cumulative_size; //!< the size (in bytes) of the block
      difficulty_type cumulative_difficulty; //!< the accumulated difficulty after that block
      uint64_t already_generated_coins; //!< the total coins minted after that block
    };

    /**
     * @brief Blockchain constructor
     *
     * @param tx_pool a reference to the transaction pool to be kept by the Blockchain
     */
    Blockchain(tx_memory_pool& tx_pool);

    /**
     * @brief Initialize the Blockchain state
     *
     * @param db a pointer to the backing store to use for the blockchain
     * @param testnet true if on testnet, else false
     * @param test_options test parameters
     *
     * @return true on success, false if any initialization steps fail
     */
    bool init(BlockchainDB* db, const bool testnet = false, const cryptonote::test_options *test_options = NULL);

    /**
     * @brief Initialize the Blockchain state
     *
     * @param db a pointer to the backing store to use for the blockchain
     * @param hf a structure containing hardfork information
     * @param testnet true if on testnet, else false
     *
     * @return true on success, false if any initialization steps fail
     */
    bool init(BlockchainDB* db, HardFork*& hf, const bool testnet = false);

    /**
     * @brief Uninitializes the blockchain state
     *
     * Saves to disk any state that needs to be maintained
     *
     * @return true on success, false if any uninitialization steps fail
     */
    bool deinit();

    /**
     * @brief assign a set of blockchain checkpoint hashes
     *
     * @param chk_pts the set of checkpoints to assign
     */
    void set_checkpoints(checkpoints&& chk_pts) { m_checkpoints = chk_pts; }

    /**
     * @brief get blocks and transactions from blocks based on start height and count
     *
     * @param start_offset the height on the blockchain to start at
     * @param count the number of blocks to get, if there are as many after start_offset
     * @param blocks return-by-reference container to put result blocks in
     * @param txs return-by-reference container to put result transactions in
     *
     * @return false if start_offset > blockchain height, else true
     */
    bool get_blocks(uint64_t start_offset, size_t count, std::list<block>& blocks, std::list<transaction>& txs) const;
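
    // Usage sketch (hypothetical caller code, illustrative only): fetch the
    // first 100 blocks and every transaction they contain via the
    // return-by-reference lists.
    //
    //   std::list<block> blocks;
    //   std::list<transaction> txs;
    //   if (bc.get_blocks(0, 100, blocks, txs))
    //     handle_blocks(blocks, txs);  // handle_blocks() is a placeholder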

    /**
     * @brief get blocks based on start height and count
     *
     * @param start_offset the height on the blockchain to start at
     * @param count the number of blocks to get, if there are as many after start_offset
     * @param blocks return-by-reference container to put result blocks in
     *
     * @return false if start_offset > blockchain height, else true
     */
    bool get_blocks(uint64_t start_offset, size_t count, std::list<block>& blocks) const;

    /**
     * @brief compiles a list of all blocks stored as alternative chains
     *
     * @param blocks return-by-reference container to put result blocks in
     *
     * @return true
     */
    bool get_alternative_blocks(std::list<block>& blocks) const;

    /**
     * @brief returns the number of alternative blocks stored
     *
     * @return the number of alternative blocks stored
     */
    size_t get_alternative_blocks_count() const;

    /**
     * @brief gets a block's hash given a height
     *
     * @param height the height of the block
     *
     * @return the hash of the block at the requested height, or a zeroed hash if there is no such block
     */
    crypto::hash get_block_id_by_height(uint64_t height) const;

    /**
     * @brief gets the block with a given hash
     *
     * @param h the hash to look for
     * @param blk return-by-reference variable to put result block in
     *
     * @return true if the block was found, else false
     */
    bool get_block_by_hash(const crypto::hash &h, block &blk) const;

    /**
     * @brief get all block hashes (main chain, alt chains, and invalid blocks)
     *
     * @param main return-by-reference container to put result main chain blocks' hashes in
     * @param alt return-by-reference container to put result alt chain blocks' hashes in
     * @param invalid return-by-reference container to put result invalid blocks' hashes in
     */
    void get_all_known_block_ids(std::list<crypto::hash> &main, std::list<crypto::hash> &alt, std::list<crypto::hash> &invalid) const;

    /**
     * @brief performs some preprocessing on a group of incoming blocks to speed up verification
     *
     * @param blocks a list of incoming blocks
     *
     * @return false on erroneous blocks, else true
     */
    bool prepare_handle_incoming_blocks(const std::list<block_complete_entry> &blocks);

    /**
     * @brief incoming blocks post-processing, cleanup, and disk sync
     *
     * @param force_sync if true, and Blockchain is handling syncing to disk, always sync
     *
     * @return true
     */
    bool cleanup_handle_incoming_blocks(bool force_sync = false);
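
    // Sketch of the intended prepare/cleanup pairing around a batch of
    // incoming blocks (illustrative only; add_block_to_chain() is a
    // placeholder for the caller's per-block handling):
    //
    //   bc.prepare_handle_incoming_blocks(blocks_entry);  // preprocess/parallelize the batch
    //   for (const block_complete_entry& entry : blocks_entry)
    //     add_block_to_chain(entry);
    //   bc.cleanup_handle_incoming_blocks(true);          // post-process and force a disk sync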

    /**
     * @brief search the blockchain for a transaction by hash
     *
     * @param id the hash to search for
     *
     * @return true if the tx exists, else false
     */
    bool have_tx(const crypto::hash &id) const;

    /**
     * @brief check if any key image in a transaction has already been spent
     *
     * @param tx the transaction to check
     *
     * @return true if any key image is already spent in the blockchain, else false
     */
    bool have_tx_keyimges_as_spent(const transaction &tx) const;

    /**
     * @brief check if a key image is already spent on the blockchain
     *
     * Whenever a transaction output is used as an input for another transaction
     * (a true input, not just one of a mixing set), a key image is generated
     * and stored in the transaction in order to prevent double spending. If
     * this key image is seen again, the transaction using it is rejected.
     *
     * @param key_im the key image to search for
     *
     * @return true if the key image is already spent in the blockchain, else false
     */
    bool have_tx_keyimg_as_spent(const crypto::key_image &key_im) const;
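
    // Double-spend check sketch (illustrative only): a transaction must be
    // rejected if any key image among its inputs has been seen before. Real
    // code must first check each input's variant type before extracting
    // txin_to_key.
    //
    //   for (const txin_v& in : tx.vin)
    //   {
    //     const txin_to_key& in_to_key = boost::get<txin_to_key>(in);
    //     if (bc.have_tx_keyimg_as_spent(in_to_key.k_image))
    //       return false;  // key image already spent on the chain
    //   }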

    /**
     * @brief get the current height of the blockchain
     *
     * @return the height
     */
    uint64_t get_current_blockchain_height() const;

    /**
     * @brief get the hash of the most recent block on the blockchain
     *
     * @return the hash
     */
    crypto::hash get_tail_id() const;

    /**
     * @brief get the height and hash of the most recent block on the blockchain
     *
     * @param height return-by-reference variable to store the height in
     *
     * @return the hash
     */
    crypto::hash get_tail_id(uint64_t& height) const;

    /**
     * @brief returns the difficulty target the next block to be added must meet
     *
     * @return the target
     */
    difficulty_type get_difficulty_for_next_block();
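
    // Note on the difficulty/timestamp caching described in the changelog
    // above: the next-difficulty calculation consumes the last 735 blocks'
    // timestamps and cumulative difficulties, i.e. 2 * 735 = 1470 db reads if
    // fetched from scratch. With a rolling cache, only the newest block's
    // timestamp and cumulative difficulty must be fetched, i.e. 2 db reads
    // per new block.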

    /**
     * @brief adds a block to the blockchain
     *
     * Adds a new block to the blockchain. If the block's parent is not the
     * current top of the blockchain, the block may be added to an alternate
     * chain. If the block does not belong, is already in the blockchain
     * or an alternate chain, or is invalid, return false.
     *
     * @param bl_ the block to be added
     * @param bvc metadata about the block addition's success/failure
     *
     * @return true on successful addition to the blockchain, else false
     */
    bool add_new_block(const block& bl_, block_verification_context& bvc);
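
    // Usage sketch (hypothetical caller code, illustrative only): submit a
    // received block and inspect the verification context afterwards. Field
    // names follow verification_context.h, including its historical spelling
    // of m_verifivation_failed.
    //
    //   block_verification_context bvc = boost::value_initialized<block_verification_context>();
    //   bc.add_new_block(bl, bvc);
    //   if (bvc.m_verifivation_failed)
    //     drop_peer();           // placeholder: the block was invalid
    //   else if (bvc.m_added_to_main_chain)
    //     relay_block(bl);       // placeholder: announce the new top block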

    /**
     * @brief clears the blockchain and starts a new one
     *
     * @param b the first block in the new chain (the genesis block)
     *
     * @return true on success, else false
     */
    bool reset_and_set_genesis_block(const block& b);

    /**
     * @brief creates a new block to mine against
     *
     * @param b return-by-reference block to be filled in
     * @param miner_address address new coins for the block will go to
     * @param di return-by-reference tells the miner what the difficulty target is
     * @param height return-by-reference tells the miner what height it's mining against
     * @param ex_nonce extra data to be added to the miner transaction's extra
     *
     * @return true if block template filled in successfully, else false
     */
    bool create_block_template(block& b, const account_public_address& miner_address, difficulty_type& di, uint64_t& height, const blobdata& ex_nonce);
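
    // Mining-loop sketch (illustrative only; search_for_nonce() is a
    // placeholder for the caller's proof-of-work search):
    //
    //   block b;
    //   difficulty_type diff;
    //   uint64_t height;
    //   if (bc.create_block_template(b, miner_address, diff, height, blobdata()))
    //   {
    //     search_for_nonce(b, diff, height);  // vary b.nonce until the PoW meets diff
    //     block_verification_context bvc = boost::value_initialized<block_verification_context>();
    //     bc.add_new_block(b, bvc);
    //   }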

    /**
     * @brief checks if a block with a given hash is known
     *
     * This function checks the main chain, alternate chains, and invalid blocks
     * for a block with the given hash
     *
     * @param id the hash to search for
     *
     * @return true if the block is known, else false
     */
    bool have_block(const crypto::hash& id) const;

    /**
     * @brief gets the total number of transactions on the main chain
     *
     * @return the number of transactions on the main chain
     */
    size_t get_total_transactions() const;
|
2016-03-25 06:22:06 +00:00
|
|
|
|
|
|
|
/**
|
|
|
|
* @brief gets the hashes for a subset of the blockchain
|
|
|
|
*
|
|
|
|
* puts into list <ids> a list of hashes representing certain blocks
|
|
|
|
* from the blockchain in reverse chronological order
|
|
|
|
*
|
|
|
|
* the blocks chosen, at the time of this writing, are:
|
|
|
|
* the most recent 11
|
|
|
|
* powers of 2 less recent from there, so 13, 17, 25, etc...
|
|
|
|
*
|
|
|
|
* @param ids return-by-reference list to put the resulting hashes in
|
|
|
|
*
|
|
|
|
* @return true
|
|
|
|
*/
|
2014-12-06 22:40:33 +00:00
|
|
|
bool get_short_chain_history(std::list<crypto::hash>& ids) const;
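/**
 * A minimal sketch of the selection pattern documented above (an
 * illustration, not the actual implementation): offsets back from the
 * chain tip, for a chain of `height` blocks.
 *
 * @code
 * std::vector<uint64_t> sparse_offsets(uint64_t height)
 * {
 *   std::vector<uint64_t> offsets;
 *   for (uint64_t i = 1; i <= 11 && i <= height; ++i)
 *     offsets.push_back(i);             // the most recent 11 blocks
 *   uint64_t o = 11;
 *   for (uint64_t step = 2; o + step <= height; step *= 2)
 *     offsets.push_back(o += step);     // then 13, 17, 25, 41, ...
 *   return offsets;
 * }
 * @endcode
 */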
|
2016-03-25 06:22:06 +00:00
|
|
|
|
|
|
|
/**
|
|
|
|
* @brief get recent block hashes for a foreign chain
|
|
|
|
*
|
|
|
|
* Find the split point between our chain and the foreign blockchain and return
|
|
|
|
* (by reference) the most recent common block hash along with up to
|
|
|
|
* BLOCKS_IDS_SYNCHRONIZING_DEFAULT_COUNT additional (more recent) hashes.
|
|
|
|
*
|
|
|
|
* @param qblock_ids the foreign chain's "short history" (see get_short_chain_history)
|
|
|
|
* @param resp return-by-reference the split height and subsequent blocks' hashes
|
|
|
|
*
|
|
|
|
* @return true if a block found in common, else false
|
|
|
|
*/
|
2014-12-06 22:40:33 +00:00
|
|
|
bool find_blockchain_supplement(const std::list<crypto::hash>& qblock_ids, NOTIFY_RESPONSE_CHAIN_ENTRY::request& resp) const;
|
2016-03-25 06:22:06 +00:00
|
|
|
|
|
|
|
/**
|
|
|
|
* @brief find the most recent common point between ours and a foreign chain
|
|
|
|
*
|
|
|
|
* This function takes a list of block hashes from another node
|
|
|
|
* on the network to find where the split point is between us and them.
|
|
|
|
* This is used to see what to send another node that needs to sync.
|
|
|
|
*
|
|
|
|
* @param qblock_ids the foreign chain's "short history" (see get_short_chain_history)
|
|
|
|
* @param starter_offset return-by-reference the most recent common block's height
|
|
|
|
*
|
|
|
|
* @return true if a block found in common, else false
|
|
|
|
*/
|
2014-12-06 22:40:33 +00:00
|
|
|
bool find_blockchain_supplement(const std::list<crypto::hash>& qblock_ids, uint64_t& starter_offset) const;
|
2016-03-25 06:22:06 +00:00
|
|
|
|
|
|
|
/**
|
|
|
|
* @brief get recent blocks for a foreign chain
|
|
|
|
*
|
|
|
|
* This function gets recent blocks relative to a foreign chain, starting either at
|
|
|
|
* a requested height or at the most recent height that ours and the foreign
|
|
|
|
* chain have in common.
|
|
|
|
*
|
|
|
|
* @param req_start_block if non-zero, specifies a start point (otherwise find most recent commonality)
|
|
|
|
* @param qblock_ids the foreign chain's "short history" (see get_short_chain_history)
|
|
|
|
* @param blocks return-by-reference the blocks and their transactions
|
|
|
|
* @param total_height return-by-reference our current blockchain height
|
|
|
|
* @param start_height return-by-reference the height of the first block returned
|
|
|
|
* @param max_count the max number of blocks to get
|
|
|
|
*
|
|
|
|
* @return true if a block found in common or req_start_block specified, else false
|
|
|
|
*/
|
2014-12-06 22:40:33 +00:00
|
|
|
bool find_blockchain_supplement(const uint64_t req_start_block, const std::list<crypto::hash>& qblock_ids, std::list<std::pair<block, std::list<transaction> > >& blocks, uint64_t& total_height, uint64_t& start_height, size_t max_count) const;
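/**
 * Usage sketch (illustrative; assumes a Blockchain instance `bc` and a
 * peer-supplied short history `qblock_ids`):
 *
 * @code
 * std::list<std::pair<block, std::list<transaction> > > blocks;
 * uint64_t total_height = 0, start_height = 0;
 * if (bc.find_blockchain_supplement(0, qblock_ids, blocks, total_height, start_height, 200))
 * {
 *   // send `blocks` to the peer: our chain is `total_height` blocks long
 *   // and this batch starts at `start_height`
 * }
 * @endcode
 */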
|
2016-03-25 06:22:06 +00:00
|
|
|
|
|
|
|
/**
|
|
|
|
* @brief retrieves a set of blocks and their transactions, and possibly other transactions
|
|
|
|
*
|
|
|
|
* the request object encapsulates a list of block hashes and a (possibly empty) list of
|
|
|
|
* transaction hashes. For each block hash, the block is fetched along with all of that
|
|
|
|
* block's transactions. Any transactions requested separately are fetched afterwards.
|
|
|
|
*
|
|
|
|
* @param arg the request
|
|
|
|
* @param rsp return-by-reference the response to fill in
|
|
|
|
*
|
|
|
|
* @return true unless any blocks or transactions are missing
|
|
|
|
*/
|
2014-10-06 23:46:25 +00:00
|
|
|
bool handle_get_objects(NOTIFY_REQUEST_GET_OBJECTS::request& arg, NOTIFY_RESPONSE_GET_OBJECTS::request& rsp);
|
2016-03-25 06:22:06 +00:00
|
|
|
|
|
|
|
/**
|
|
|
|
* @brief gets random outputs to mix with
|
|
|
|
*
|
|
|
|
* This function takes an RPC request for outputs to mix with
|
|
|
|
* and creates an RPC response with the resultant output indices.
|
|
|
|
*
|
|
|
|
* Outputs to mix with are randomly selected from the UTXO set
|
|
|
|
* for each output amount in the request.
|
|
|
|
*
|
|
|
|
* @param req the output amounts and number of mixins to select
|
|
|
|
* @param res return-by-reference the resultant output indices
|
|
|
|
*
|
|
|
|
* @return true
|
|
|
|
*/
|
2014-12-06 22:40:33 +00:00
|
|
|
bool get_random_outs_for_amounts(const COMMAND_RPC_GET_RANDOM_OUTPUTS_FOR_AMOUNTS::request& req, COMMAND_RPC_GET_RANDOM_OUTPUTS_FOR_AMOUNTS::response& res) const;
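/**
 * Usage sketch (illustrative; field names as declared in
 * rpc/core_rpc_server_commands_defs.h, values are placeholders):
 *
 * @code
 * COMMAND_RPC_GET_RANDOM_OUTPUTS_FOR_AMOUNTS::request req = AUTO_VAL_INIT(req);
 * COMMAND_RPC_GET_RANDOM_OUTPUTS_FOR_AMOUNTS::response res = AUTO_VAL_INIT(res);
 * req.amounts.push_back(1000000000000);  // one amount to mix for
 * req.outs_count = 5;                    // five candidates per amount
 * if (bc.get_random_outs_for_amounts(req, res))
 * {
 *   // res.outs now holds randomly selected output indices per amount
 * }
 * @endcode
 */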
|
2016-03-25 06:22:06 +00:00
|
|
|
|
|
|
|
/**
|
|
|
|
* @brief gets the global indices for outputs from a given transaction
|
|
|
|
*
|
|
|
|
* This function gets the global indices for all outputs belonging
|
|
|
|
* to a specific transaction.
|
|
|
|
*
|
|
|
|
* @param tx_id the hash of the transaction to fetch indices for
|
|
|
|
* @param indexs return-by-reference the global indices for the transaction's outputs
|
|
|
|
*
|
|
|
|
* @return false if the transaction does not exist, or if no indices are found, otherwise true
|
|
|
|
*/
|
2014-12-06 22:40:33 +00:00
|
|
|
bool get_tx_outputs_gindexs(const crypto::hash& tx_id, std::vector<uint64_t>& indexs) const;
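/**
 * Usage sketch (illustrative; assumes a transaction hash `tx_hash`):
 *
 * @code
 * std::vector<uint64_t> indexs;
 * if (bc.get_tx_outputs_gindexs(tx_hash, indexs))
 * {
 *   // indexs[i] is the global index of the transaction's i-th output
 * }
 * @endcode
 */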
|
2016-03-25 06:22:06 +00:00
|
|
|
|
|
|
|
/**
|
|
|
|
* @brief stores the blockchain
|
|
|
|
*
|
|
|
|
* If Blockchain is handling storing of the blockchain (rather than BlockchainDB),
|
|
|
|
* this initiates a blockchain save.
|
|
|
|
*
|
|
|
|
* @return true unless saving the blockchain fails
|
|
|
|
*/
|
2014-10-06 23:46:25 +00:00
|
|
|
bool store_blockchain();
|
2015-07-10 20:09:32 +00:00
|
|
|
|
2016-03-25 06:22:06 +00:00
|
|
|
/**
|
|
|
|
* @brief validates a transaction's inputs
|
|
|
|
*
|
|
|
|
* validates a transaction's inputs as correctly used and not previously
|
|
|
|
* spent. Also returns the hash and height of the most recent block
|
|
|
|
* which contains an output that was used as an input to the transaction.
|
|
|
|
*
|
|
|
|
* @param tx the transaction to validate
|
|
|
|
* @param pmax_used_block_height return-by-reference block height of most recent input
|
|
|
|
* @param max_used_block_id return-by-reference block hash of most recent input
|
2016-03-27 11:35:36 +00:00
|
|
|
* @param tvc returned information about tx verification
|
2016-03-25 06:22:06 +00:00
|
|
|
* @param kept_by_block whether or not the transaction is from a previously-verified block
|
|
|
|
*
|
|
|
|
* @return false if any input is invalid, otherwise true
|
|
|
|
*/
|
2016-03-27 11:35:36 +00:00
|
|
|
bool check_tx_inputs(const transaction& tx, uint64_t& pmax_used_block_height, crypto::hash& max_used_block_id, tx_verification_context &tvc, bool kept_by_block = false);
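/**
 * Usage sketch (illustrative; assumes a transaction `tx`, e.g. one arriving
 * via the pool, so kept_by_block stays false):
 *
 * @code
 * uint64_t max_used_height = 0;
 * crypto::hash max_used_id;
 * tx_verification_context tvc = AUTO_VAL_INIT(tvc);
 * if (!bc.check_tx_inputs(tx, max_used_height, max_used_id, tvc, false))
 * {
 *   // at least one input is invalid; tvc carries the failure details
 * }
 * @endcode
 */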
|
2016-03-25 06:22:06 +00:00
|
|
|
|
|
|
|
/**
|
|
|
|
* @brief check that a transaction's outputs conform to current standards
|
|
|
|
*
|
|
|
|
* This function checks, for example at the time of this writing, that
|
|
|
|
* each output amount is of the form a * 10^b (phrased differently, that the
|
|
|
|
* amount, written out in base 10, has only one non-zero digit).
|
|
|
|
*
|
|
|
|
* @param tx the transaction to check the outputs of
|
2016-03-27 11:35:36 +00:00
|
|
|
* @param tvc returned info about tx verification
|
2016-03-25 06:22:06 +00:00
|
|
|
*
|
|
|
|
* @return false if any outputs do not conform, otherwise true
|
|
|
|
*/
|
2016-03-27 11:35:36 +00:00
|
|
|
bool check_tx_outputs(const transaction& tx, tx_verification_context &tvc);
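/**
 * The "one non-zero digit" rule above, restated as a standalone sketch
 * (a hypothetical helper for illustration, not this class's code):
 *
 * @code
 * bool is_single_digit_amount(uint64_t amount)
 * {
 *   if (amount == 0)
 *     return false;
 *   while (amount % 10 == 0)
 *     amount /= 10;       // strip the 10^b factor
 *   return amount <= 9;   // the remaining a must be a single digit
 * }
 * @endcode
 */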
|
2016-03-25 06:22:06 +00:00
|
|
|
|
|
|
|
/**
|
|
|
|
* @brief gets the blocksize limit based on recent blocks
|
|
|
|
*
|
|
|
|
* @return the limit
|
|
|
|
*/
|
2014-12-06 22:40:33 +00:00
|
|
|
uint64_t get_current_cumulative_blocksize_limit() const;
|
2016-03-25 06:22:06 +00:00
|
|
|
|
|
|
|
/**
|
|
|
|
* @brief checks if the blockchain is currently being stored
|
|
|
|
*
|
|
|
|
* Note: this should be meaningless in cases where Blockchain is not
|
|
|
|
* directly managing saving the blockchain to disk.
|
|
|
|
*
|
|
|
|
* @return true if Blockchain is having the chain stored currently, else false
|
|
|
|
*/
|
2014-12-06 22:40:33 +00:00
|
|
|
bool is_storing_blockchain() const { return m_is_blockchain_storing; }
|
2016-03-25 06:22:06 +00:00
|
|
|
|
|
|
|
/**
|
|
|
|
* @brief gets the difficulty of the block with a given height
|
|
|
|
*
|
|
|
|
* @param i the height
|
|
|
|
*
|
|
|
|
* @return the difficulty
|
|
|
|
*/
|
2014-12-06 22:40:33 +00:00
|
|
|
uint64_t block_difficulty(uint64_t i) const;
|
2014-10-06 23:46:25 +00:00
|
|
|
|
2016-03-25 06:22:06 +00:00
|
|
|
/**
|
|
|
|
* @brief gets blocks based on a list of block hashes
|
|
|
|
*
|
|
|
|
* @tparam t_ids_container a standard-iterable container
|
|
|
|
* @tparam t_blocks_container a standard-iterable container
|
|
|
|
* @tparam t_missed_container a standard-iterable container
|
|
|
|
* @param block_ids a container of block hashes for which to get the corresponding blocks
|
|
|
|
* @param blocks return-by-reference a container to store result blocks in
|
|
|
|
* @param missed_bs return-by-reference a container to store missed blocks in
|
|
|
|
*
|
|
|
|
* @return false if an unexpected exception occurs, else true
|
|
|
|
*/
|
2014-10-06 23:46:25 +00:00
|
|
|
template<class t_ids_container, class t_blocks_container, class t_missed_container>
|
2014-12-06 22:40:33 +00:00
|
|
|
bool get_blocks(const t_ids_container& block_ids, t_blocks_container& blocks, t_missed_container& missed_bs) const;
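/**
 * Usage sketch (illustrative): any standard-iterable containers work, e.g.
 *
 * @code
 * std::vector<crypto::hash> wanted;   // filled with hashes to look up
 * std::list<block> found;
 * std::list<crypto::hash> missed;
 * bc.get_blocks(wanted, found, missed);
 * // `found` holds the blocks we have; `missed` the hashes we do not
 * @endcode
 */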
|
2014-10-06 23:46:25 +00:00
|
|
|
|
2016-03-25 06:22:06 +00:00
|
|
|
/**
|
|
|
|
* @brief gets transactions based on a list of transaction hashes
|
|
|
|
*
|
|
|
|
* @tparam t_ids_container a standard-iterable container
|
|
|
|
* @tparam t_tx_container a standard-iterable container
|
|
|
|
* @tparam t_missed_container a standard-iterable container
|
|
|
|
* @param txs_ids a container of hashes for which to get the corresponding transactions
|
|
|
|
* @param txs return-by-reference a container to store result transactions in
|
|
|
|
* @param missed_txs return-by-reference a container to store missed transactions in
|
|
|
|
*
|
|
|
|
* @return false if an unexpected exception occurs, else true
|
|
|
|
*/
|
2014-10-06 23:46:25 +00:00
|
|
|
template<class t_ids_container, class t_tx_container, class t_missed_container>
|
2014-12-06 22:40:33 +00:00
|
|
|
bool get_transactions(const t_ids_container& txs_ids, t_tx_container& txs, t_missed_container& missed_txs) const;
|
2014-10-06 23:46:25 +00:00
|
|
|
|
2016-03-25 06:22:06 +00:00
|
|
|
|
2014-10-06 23:46:25 +00:00
|
|
|
//debug functions
|
2016-03-25 06:22:06 +00:00
|
|
|
|
|
|
|
/**
|
|
|
|
* @brief prints data about a snippet of the blockchain
|
|
|
|
*
|
|
|
|
* if start_index is greater than the blockchain height, do nothing
|
|
|
|
*
|
|
|
|
* @param start_index height on chain to start at
|
|
|
|
* @param end_index height on chain to end at
|
|
|
|
*/
|
2015-09-19 10:25:57 +00:00
|
|
|
void print_blockchain(uint64_t start_index, uint64_t end_index) const;
|
2016-03-25 06:22:06 +00:00
|
|
|
|
|
|
|
/**
|
|
|
|
* @brief prints every block's hash
|
|
|
|
*
|
|
|
|
* WARNING: This function will absolutely flood a terminal with output, so
|
|
|
|
* it is recommended to redirect this output to a log file (or null sink
|
|
|
|
* if a log file is already set up, as should be the default)
|
|
|
|
*/
|
2015-09-19 10:25:57 +00:00
|
|
|
void print_blockchain_index() const;
|
2016-03-25 06:22:06 +00:00
|
|
|
|
|
|
|
/**
|
|
|
|
* @brief currently does nothing, candidate for removal
|
|
|
|
*
|
|
|
|
* @param file
|
|
|
|
*/
|
2015-09-19 10:25:57 +00:00
|
|
|
void print_blockchain_outs(const std::string& file) const;
|
2014-10-06 23:46:25 +00:00
|
|
|
|
2016-03-25 06:22:06 +00:00
|
|
|
/**
|
|
|
|
* @brief check the blockchain against a set of checkpoints
|
|
|
|
*
|
|
|
|
* If a block fails a checkpoint and enforce is enabled, the blockchain
|
|
|
|
* will be rolled back to two blocks prior to that block. If enforce
|
|
|
|
* is disabled, as is currently the default case with DNS-based checkpoints,
|
|
|
|
* an error will be printed to the user but no other action will be taken.
|
|
|
|
*
|
|
|
|
* @param points the checkpoints to check against
|
|
|
|
* @param enforce whether or not to take action on failure
|
|
|
|
*/
|
2015-02-10 23:25:15 +00:00
|
|
|
void check_against_checkpoints(const checkpoints& points, bool enforce);
|
2016-03-25 06:22:06 +00:00
|
|
|
|
|
|
|
/**
|
|
|
|
* @brief configure whether or not to enforce DNS-based checkpoints
|
|
|
|
*
|
|
|
|
* @param enforce the new enforcement setting
|
|
|
|
*/
|
2014-11-01 23:03:37 +00:00
|
|
|
void set_enforce_dns_checkpoints(bool enforce);
|
2016-03-25 06:22:06 +00:00
|
|
|
|
|
|
|
/**
|
|
|
|
* @brief loads new checkpoints from a file and optionally from DNS
|
|
|
|
*
|
|
|
|
* @param file_path the path of the file to look for and load checkpoints from
|
|
|
|
* @param check_dns whether or not to check for new DNS-based checkpoints
|
|
|
|
*
|
|
|
|
* @return false if any enforced checkpoint type fails to load, otherwise true
|
|
|
|
*/
|
2014-11-01 23:03:37 +00:00
|
|
|
bool update_checkpoints(const std::string& file_path, bool check_dns);
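/**
 * Usage sketch (illustrative; the file path is a placeholder):
 *
 * @code
 * if (!bc.update_checkpoints("checkpoints.json", true))
 *   LOG_PRINT_L0("one or more enforced checkpoints failed to load");
 * @endcode
 */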
|
2014-10-28 03:44:45 +00:00
|
|
|
|
2016-03-25 06:22:06 +00:00
|
|
|
|
2015-07-10 20:09:32 +00:00
|
|
|
// user options, must be called before calling init()
|
2016-03-25 06:22:06 +00:00
|
|
|
|
|
|
|
//FIXME: parameter names don't match function definition in .cpp file
|
|
|
|
/**
|
|
|
|
* @brief sets various performance options
|
|
|
|
*
|
|
|
|
* @param block_threads max number of threads when preparing blocks for addition
|
|
|
|
* @param blocks_per_sync number of blocks to cache before syncing to database
|
|
|
|
* @param sync_mode the ::blockchain_db_sync_mode to use
|
|
|
|
* @param fast_sync sync using built-in block hashes as trusted
|
|
|
|
*/
|
2015-07-10 20:09:32 +00:00
|
|
|
void set_user_options(uint64_t block_threads, uint64_t blocks_per_sync,
|
2015-12-14 04:54:39 +00:00
|
|
|
blockchain_db_sync_mode sync_mode, bool fast_sync);
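/**
 * Usage sketch (illustrative values; db_async is assumed to be one of the
 * blockchain_db_sync_mode values declared in this header):
 *
 * @code
 * bc.set_user_options(4,        // up to 4 block-preparation threads
 *                     1000,     // sync the db every 1000 blocks
 *                     db_async, // flush asynchronously
 *                     true);    // trust the embedded known block hashes
 * @endcode
 */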
|
2015-07-10 20:09:32 +00:00
|
|
|
|
2016-03-25 06:22:06 +00:00
|
|
|
/**
|
|
|
|
* @brief set whether or not to show/print time statistics
|
|
|
|
*
|
|
|
|
* @param stats the new time stats setting
|
|
|
|
*/
|
2015-07-10 20:09:32 +00:00
|
|
|
void set_show_time_stats(bool stats) { m_show_time_stats = stats; }
|
2015-12-14 04:54:39 +00:00
|
|
|
|
2016-03-25 06:22:06 +00:00
|
|
|
/**
|
|
|
|
* @brief gets the hardfork voting state object
|
|
|
|
*
|
|
|
|
* @return the current HardFork::State
|
|
|
|
*/
|
2015-09-13 17:09:57 +00:00
|
|
|
HardFork::State get_hard_fork_state() const;
|
2016-03-25 06:22:06 +00:00
|
|
|
|
|
|
|
/**
|
|
|
|
* @brief gets the current hardfork version in use/voted for
|
|
|
|
*
|
|
|
|
* @return the version
|
|
|
|
*/
|
2015-09-20 17:41:38 +00:00
|
|
|
uint8_t get_current_hard_fork_version() const { return m_hardfork->get_current_version(); }
|
2016-03-25 06:22:06 +00:00
|
|
|
|
|
|
|
/**
|
|
|
|
* @brief returns the newest hardfork version known to the blockchain
|
|
|
|
*
|
|
|
|
* @return the version
|
|
|
|
*/
|
2015-09-20 17:41:38 +00:00
|
|
|
uint8_t get_ideal_hard_fork_version() const { return m_hardfork->get_ideal_version(); }
|
2016-03-25 06:22:06 +00:00
|
|
|
|
|
|
|
/**
|
|
|
|
* @brief returns the newest hardfork version voted to be enabled
|
|
|
|
* as of a certain height
|
|
|
|
*
|
|
|
|
* @param height the height for which to check version info
|
|
|
|
*
|
|
|
|
* @return the version
|
|
|
|
*/
|
2015-11-13 08:24:47 +00:00
|
|
|
uint8_t get_ideal_hard_fork_version(uint64_t height) const { return m_hardfork->get_ideal_version(height); }
|
|
|
|
|
2016-03-25 06:22:06 +00:00
|
|
|
/**
|
|
|
|
* @brief get information about hardfork voting for a version
|
|
|
|
*
|
|
|
|
* @param version the version in question
|
|
|
|
* @param window the size of the voting window
|
|
|
|
* @param votes the number of votes to enable <version>
|
|
|
|
* @param threshold the number of votes required to enable <version>
|
|
|
|
* @param earliest_height the earliest height at which <version> is allowed
|
|
|
|
* @param voting which version this node is voting for/using
|
|
|
|
*
|
|
|
|
* @return whether the version queried is enabled
|
|
|
|
*/
|
2015-12-19 14:52:30 +00:00
|
|
|
bool get_hard_fork_voting_info(uint8_t version, uint32_t &window, uint32_t &votes, uint32_t &threshold, uint64_t &earliest_height, uint8_t &voting) const;
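/**
 * Usage sketch (illustrative, querying version 2):
 *
 * @code
 * uint32_t window, votes, threshold;
 * uint64_t earliest_height;
 * uint8_t voting;
 * if (bc.get_hard_fork_voting_info(2, window, votes, threshold, earliest_height, voting))
 * {
 *   // version 2 is enabled: `votes` of the last `window` blocks voted
 *   // for it, against a required threshold of `threshold`
 * }
 * @endcode
 */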
|
2015-09-13 17:09:57 +00:00
|
|
|
|
2016-03-25 06:22:06 +00:00
|
|
|
/**
|
|
|
|
* @brief remove transactions from the transaction pool (if present)
|
|
|
|
*
|
|
|
|
* @param txids a list of hashes of transactions to be removed
|
|
|
|
*
|
|
|
|
* @return false if any removals fail, otherwise true
|
|
|
|
*/
|
2016-01-30 13:28:26 +00:00
|
|
|
bool flush_txes_from_pool(const std::list<crypto::hash> &txids);
|
|
|
|
|
2016-03-26 14:30:23 +00:00
|
|
|
/**
|
|
|
|
* @brief return a histogram of outputs on the blockchain
|
|
|
|
*
|
|
|
|
* @param amounts optional set of amounts to lookup
|
2016-08-01 21:16:00 +00:00
|
|
|
* @param unlocked whether to restrict instances to unlocked ones
|
2016-03-26 14:30:23 +00:00
|
|
|
*
|
|
|
|
* @return a map of amount to number of instances
|
|
|
|
*/
|
2016-08-01 21:16:00 +00:00
|
|
|
std::map<uint64_t, uint64_t> get_output_histogram(const std::vector<uint64_t> &amounts, bool unlocked) const;
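/**
 * Usage sketch (illustrative; assumes, per the "optional" note above, that
 * an empty amounts vector requests the histogram for all amounts):
 *
 * @code
 * std::vector<uint64_t> amounts;  // empty: look up every amount
 * std::map<uint64_t, uint64_t> histogram = bc.get_output_histogram(amounts, true);
 * for (const auto &i : histogram)
 *   std::cout << i.first << ": " << i.second << " unlocked instances\n";
 * @endcode
 */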
|
2016-03-26 14:30:23 +00:00
|
|
|
|
2016-03-25 06:22:06 +00:00
|
|
|
/**
|
|
|
|
* @brief perform a check on all key images in the blockchain
|
|
|
|
*
|
|
|
|
* @param std::function the check to perform, pass/fail
|
|
|
|
*
|
|
|
|
* @return false if any key image fails the check, otherwise true
|
|
|
|
*/
|
2015-10-25 10:45:25 +00:00
|
|
|
bool for_all_key_images(std::function<bool(const crypto::key_image&)>) const;
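/**
 * Usage sketch (illustrative): a check that never fails, counting every
 * key image; returning false from the check would abort the scan.
 *
 * @code
 * uint64_t n_key_images = 0;
 * bc.for_all_key_images([&n_key_images](const crypto::key_image&) {
 *   ++n_key_images;
 *   return true;
 * });
 * @endcode
 */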
|
2016-03-25 06:22:06 +00:00
|
|
|
|
|
|
|
/**
|
|
|
|
* @brief perform a check on all blocks in the blockchain
|
|
|
|
*
|
|
|
|
* @param std::function the check to perform, pass/fail
|
|
|
|
*
|
|
|
|
* @return false if any block fails the check, otherwise true
|
|
|
|
*/
|
2015-10-25 10:45:25 +00:00
|
|
|
bool for_all_blocks(std::function<bool(uint64_t, const crypto::hash&, const block&)>) const;
|
2016-03-25 06:22:06 +00:00
|
|
|
|
|
|
|
/**
|
|
|
|
* @brief perform a check on all transactions in the blockchain
|
|
|
|
*
|
|
|
|
* @param std::function the check to perform, pass/fail
|
|
|
|
*
|
|
|
|
* @return false if any transaction fails the check, otherwise true
|
|
|
|
*/
|
2015-10-25 10:45:25 +00:00
|
|
|
bool for_all_transactions(std::function<bool(const crypto::hash&, const cryptonote::transaction&)>) const;
|
2016-03-25 06:22:06 +00:00
|
|
|
|
|
|
|
/**
|
|
|
|
* @brief perform a check on all outputs in the blockchain
|
|
|
|
*
|
|
|
|
* @param std::function the check to perform, pass/fail
|
|
|
|
*
|
|
|
|
* @return false if any output fails the check, otherwise true
|
|
|
|
*/
|
2015-10-25 10:45:25 +00:00
|
|
|
bool for_all_outputs(std::function<bool(uint64_t amount, const crypto::hash &tx_hash, size_t tx_idx)>) const;
|
|
|
|
|
2016-03-25 06:22:06 +00:00
|
|
|
/**
|
|
|
|
* @brief get a reference to the BlockchainDB in use by Blockchain
|
|
|
|
*
|
|
|
|
* @return a reference to the BlockchainDB instance
|
|
|
|
*/
|
2015-03-22 17:57:18 +00:00
|
|
|
BlockchainDB& get_db()
|
2015-02-11 00:44:32 +00:00
|
|
|
{
|
2015-03-22 17:57:18 +00:00
|
|
|
return *m_db;
|
2015-02-11 00:44:32 +00:00
|
|
|
}
|
|
|
|
|
2016-03-25 06:22:06 +00:00
|
|
|
/**
|
|
|
|
* @brief get a number of outputs of a specific amount
|
|
|
|
*
|
|
|
|
* @param amount the amount
|
|
|
|
* @param offsets the indices (indexed to the amount) of the outputs
|
|
|
|
* @param outputs return-by-reference the outputs collected
|
|
|
|
* @param txs unused, candidate for removal
|
|
|
|
*/
|
2015-12-14 04:54:39 +00:00
|
|
|
void output_scan_worker(const uint64_t amount,const std::vector<uint64_t> &offsets,
|
|
|
|
std::vector<output_data_t> &outputs, std::unordered_map<crypto::hash,
|
|
|
|
cryptonote::transaction> &txs) const;
|
** CHANGES ARE EXPERIMENTAL (FOR TESTING ONLY)
Blockchain:
1. Optim: Multi-thread long-hash computation when encountering groups of blocks.
2. Optim: Cache verified txs and return the result from cache instead of re-checking whenever possible.
3. Optim: Preload output-keys when encountering groups of blocks. Sort by amount and global-index before bulk querying the database, and multi-thread when possible.
4. Optim: Disable the double spend check on block verification; double spends are already detected when trying to add blocks.
5. Optim: Multi-thread signature computation whenever possible.
6. Patch: Disable locking (recursive mutex) on functions called from check_tx_inputs, which causes slowdowns (only seems to happen on ubuntu/VMs??? Reason: TBD)
7. Optim: Removed looped full-tx hash computation when retrieving transactions from the pool (???).
8. Optim: Cache difficulty/timestamps (735 blocks) for next-difficulty calculations so that only 2 db reads are needed when a new block arrives (instead of 1470 reads).
Berkeley-DB:
1. Fix: 32-bit data errors causing wrong output global indices and failure to send blocks to peers (etc.).
2. Fix: Unable to pop blocks on reorganize due to transaction errors.
3. Patch: Large number of transaction aborts when running multi-threaded bulk queries.
4. Patch: Insufficient locks error when running a full sync.
5. Patch: Incorrect db stats when returning from an immediate exit from the "pop block" operation.
6. Optim: Add bulk queries to get output global indices.
7. Optim: Modified the output_keys table to store public_key+unlock_time+height for a single lookup per output (instead of 3).
8. Optim: Used the output_keys table to retrieve public_keys instead of going through output_amounts->output_txs+output_indices->txs->output:public_key.
9. Optim: Added thread-safe buffers used when multi-threading bulk queries.
10. Optim: Added support for nosync/write_nosync options for improved performance (*see the --db-sync-mode option for details).
11. Mod: Added a checkpoint thread and an auto-remove-logs option.
12. *Now usable on 32-bit systems like the RPI2.
LMDB:
1. Optim: Added custom comparison for 256-bit key tables (minor speed-up; TBD: measure the actual effect).
2. Optim: Modified the output_keys table to store public_key+unlock_time+height for a single lookup per output (instead of 3).
3. Optim: Used the output_keys table to retrieve public_keys instead of going through output_amounts->output_txs+output_indices->txs->output:public_key.
4. Optim: Added support for sync/writemap options for improved performance (*see the --db-sync-mode option for details).
5. Mod: Auto-resize by +1GB instead of multiplying by 1.5.
ETC:
1. Minor optimizations of slow-hash for ARM (RPI2). Incomplete.
2. Fix: 32-bit saturation bug when computing the next difficulty on large blocks.
[PENDING ISSUES]
1. Berkeley DB has a very slow "pop-block" operation. This is very noticeable on the RPI2, where it sometimes takes > 10 MINUTES to pop a block during reorganization.
   This does not happen very often, however; most reorgs seem to take a few seconds, but it possibly depends on the number of outputs present. TBD.
2. Berkeley DB, possible bug: "unable to allocate memory". TBD.
[NEW OPTIONS] (*Currently all enabled for testing purposes)
1. --fast-block-sync arg=[0:1] (default: 1)
   a. 0 = Compute the long hash per block (may take a while depending on CPU)
   b. 1 = Skip the long hash and verify blocks based on embedded known-good block hashes (faster, minimal CPU dependence)
2. --db-sync-mode arg=[[safe|fast|fastest]:[sync|async]:[nblocks_per_sync]] (default: fastest:async:1000)
   a. safe = fdatasync/fsync (or equivalent) per stored block. Very slow, but the safest option to protect against power-out/crash conditions.
   b. fast/fastest = Enables asynchronous fdatasync/fsync (or equivalent). Useful for battery-operated devices or STABLE systems with a UPS, and/or systems with a battery-backed write cache/solid-state cache.
      Fast - Write meta-data but defer the data flush.
      Fastest - Defer both the meta-data and data flush.
      Sync - Flush data after nblocks_per_sync and wait.
      Async - Flush data after nblocks_per_sync but do not wait for the operation to finish.
3. --prep-blocks-threads arg=[n] (default: 4 or system max threads, whichever is lower)
   Max number of threads to use when computing long hashes in groups.
4. --show-time-stats arg=[0:1] (default: 1)
   Show benchmark-related time stats.
5. --db-auto-remove-logs arg=[0:1] (default: 1)
   For Berkeley DB only. Auto-remove logs if enabled.
**Note: lmdb and berkeley-db have changes to the tables and are not compatible with the official git head version.
At the moment, you need a full resync to use this optimized version.
[PERFORMANCE COMPARISON]
**Some figures are approximations only.
Using a baseline machine of an i7-2600K + SSD (with full PoW computation):
1. The optimized lmdb/blockchain core can process blocks up to 585K in ~1.25 hours + download time, so it usually takes 2.5 hours to sync the full chain.
2. The current head with the in-memory db can process blocks up to 585K in ~4.2 hours + download time, so it usually takes 5.5 hours to sync the full chain.
3. The current head with lmdb can process blocks up to 585K in ~32 hours + download time, and usually takes 36 hours to sync the full chain.
Average processing times (with full PoW computation):
lmdb-optimized:
1. tx_ave = 2.5 ms / tx
2. block_ave = 5.87 ms / block
memory-official-repo:
1. tx_ave = 8.85 ms / tx
2. block_ave = 19.68 ms / block
lmdb-official-repo (0f4a036437fd41a5498ee5e74e2422ea6177aa3e):
1. tx_ave = 47.8 ms / tx
2. block_ave = 64.2 ms / block
**Note: The following data denotes processing times only (does not include p2p download time).
lmdb-optimized processing times (with full PoW computation):
1. Desktop, quad-core / 8 threads, 2600K (8MB) - 1.25 hours processing time (--db-sync-mode=fastest:async:1000).
2. Laptop, dual-core / 4 threads, U4200 (3MB) - 4.90 hours processing time (--db-sync-mode=fastest:async:1000).
3. Embedded, quad-core / 4 threads, Z3735F (2x1MB) - 12.0 hours processing time (--db-sync-mode=fastest:async:1000).
lmdb-optimized processing times (with per-block checkpoints):
1. Desktop, quad-core / 8 threads, 2600K (8MB) - 10 minutes processing time (--db-sync-mode=fastest:async:1000).
berkeley-db optimized processing times (with full PoW computation):
1. Desktop, quad-core / 8 threads, 2600K (8MB) - 1.8 hours processing time (--db-sync-mode=fastest:async:1000).
2. RPI2: improved from an estimated 3 months (???) to 2.5 days (*needs a 2A supply + 1GHz clock + [USB+SSD] to achieve this speed) (--db-sync-mode=fastest:async:1000).
berkeley-db optimized processing times (with per-block checkpoints):
1. RPI2: 12-15 hours (*needs a 2A supply + 1GHz clock + [USB+SSD] to achieve this speed) (--db-sync-mode=fastest:async:1000).
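/* Minimal sketch of parsing the --db-sync-mode triplet described above
   (mode:sync-type:blocks-per-sync, with the later fields optional). This
   helper and its struct are hypothetical, not the daemon's actual option
   parser; assumes <sstream> is available and a well-formed count field. */
struct db_sync_options_example
{
  std::string mode = "fastest";  // safe | fast | fastest
  bool async_flush = true;       // sync | async
  uint64_t blocks_per_sync = 1000;
};
static bool parse_db_sync_mode_example(const std::string &arg, db_sync_options_example &opts)
{
  std::vector<std::string> parts;
  std::istringstream ss(arg);
  std::string token;
  while (std::getline(ss, token, ':'))
    parts.push_back(token);
  if (parts.empty() || parts.size() > 3)
    return false;
  if (parts[0] != "safe" && parts[0] != "fast" && parts[0] != "fastest")
    return false;
  opts.mode = parts[0];
  if (parts.size() > 1)
  {
    if (parts[1] != "sync" && parts[1] != "async")
      return false;
    opts.async_flush = (parts[1] == "async");
  }
  if (parts.size() > 2)
    opts.blocks_per_sync = std::stoull(parts[2]); // throws on a malformed count
  return true;
}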
/**
 * @brief computes the "short" and "long" hashes for a set of blocks
 *
 * @param height the height of the first block
 * @param blocks the blocks to be hashed
 * @param map return-by-reference the hashes for each block
 */
void block_longhash_worker(const uint64_t height, const std::vector<block> &blocks,
                           std::unordered_map<crypto::hash, crypto::hash> &map) const;
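/* Illustrative sketch (hypothetical helper): slicing a group of blocks across
   threads for block_longhash_worker, per the multi-threaded long-hash
   computation in the experimental notes above. Each thread fills its own map,
   merged after join_all, since a shared std::unordered_map must not be
   written concurrently. Assumes <algorithm> and boost thread headers. */
void example_longhash_blocks(uint64_t first_height, const std::vector<block> &blocks,
    unsigned thread_count, std::unordered_map<crypto::hash, crypto::hash> &out) const
{
  if (thread_count == 0)
    thread_count = 1;
  const size_t per_thread = (blocks.size() + thread_count - 1) / thread_count;
  std::vector<std::vector<block>> slices;
  slices.reserve(thread_count); // keep pointers into `slices` stable
  std::vector<std::unordered_map<crypto::hash, crypto::hash>> maps(thread_count);
  boost::thread_group threads;
  for (unsigned i = 0; i < thread_count; ++i)
  {
    const size_t begin = i * per_thread;
    if (begin >= blocks.size())
      break;
    const size_t end = std::min(blocks.size(), begin + per_thread);
    slices.emplace_back(blocks.begin() + begin, blocks.begin() + end);
    const std::vector<block> *slice = &slices.back();
    std::unordered_map<crypto::hash, crypto::hash> *map = &maps[i];
    threads.create_thread([this, first_height, begin, slice, map] {
      block_longhash_worker(first_height + begin, *slice, *map);
    });
  }
  threads.join_all();
  for (const auto &m : maps)
    out.insert(m.begin(), m.end());
}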
private:
// TODO: evaluate whether each of these typedefs is left over from blockchain_storage
typedef std::unordered_map<crypto::hash, size_t> blocks_by_id_index;
typedef std::unordered_map<crypto::hash, transaction_chain_entry> transactions_container;
typedef std::unordered_set<crypto::key_image> key_images_container;
typedef std::vector<block_extended_info> blocks_container;
typedef std::unordered_map<crypto::hash, block_extended_info> blocks_ext_by_hash;
typedef std::unordered_map<crypto::hash, block> blocks_by_hash;
typedef std::map<uint64_t, std::vector<std::pair<crypto::hash, size_t>>> outputs_container; // crypto::hash - tx hash, size_t - index of the output within the transaction
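// Layout sketch for outputs_container (illustrative values only): an output's
// position in the per-amount vector is its global output index for that
// amount. `amount` and `tx_hash` here are hypothetical placeholders.
//
//   outputs_container outputs;
//   outputs[amount].push_back(std::make_pair(tx_hash, 2)); // 3rd output of tx_hash
//   const size_t global_index = outputs[amount].size() - 1; // index it was appended at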
BlockchainDB* m_db;
tx_memory_pool& m_tx_pool;
mutable epee::critical_section m_blockchain_lock; // TODO: add here reader/writer lock
// main chain
transactions_container m_transactions;
size_t m_current_block_cumul_sz_limit;
// metadata containers
std::unordered_map<crypto::hash, std::unordered_map<crypto::key_image, std::vector<output_data_t>>> m_scan_table;
std::unordered_map<crypto::hash, crypto::hash> m_blocks_longhash_table;
std::unordered_map<crypto::hash, std::unordered_map<crypto::key_image, bool>> m_check_txin_table;
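/* Illustrative sketch (hypothetical helper, not the real verification path):
   how m_check_txin_table can short-circuit input re-verification, per the
   verified-tx caching in the experimental notes above. Keyed first by tx
   prefix hash, then by key image; the bool holds the cached result. */
bool example_check_txin_cached(const crypto::hash &tx_prefix_hash,
    const txin_to_key &txin, bool &result) const
{
  const auto it = m_check_txin_table.find(tx_prefix_hash);
  if (it == m_check_txin_table.end())
    return false; // nothing cached for this tx
  const auto it2 = it->second.find(txin.k_image);
  if (it2 == it->second.end())
    return false; // this input not cached yet
  result = it2->second; // cached pass/fail, skip re-checking
  return true;
}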
// SHA-3 hashes for each block and for fast pow checking
std::vector<crypto::hash> m_blocks_hash_check;
std::vector<crypto::hash> m_blocks_txs_check;
blockchain_db_sync_mode m_db_sync_mode;
bool m_fast_sync;
bool m_show_time_stats;
uint64_t m_db_blocks_per_sync;
uint64_t m_max_prepare_blocks_threads;
uint64_t m_fake_pow_calc_time;
uint64_t m_fake_scan_time;
uint64_t m_sync_counter;
std::vector<uint64_t> m_timestamps;
std::vector<difficulty_type> m_difficulties;
uint64_t m_timestamps_and_difficulties_height;
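/* Illustrative sketch (hypothetical helper; the BlockchainDB getters named
   here are assumptions): keeping the three cache members above rolling so
   that a new block costs two db reads instead of re-reading the whole
   735-block window, per note 8 of the experimental changes above. The
   full-rebuild path for a cold or stale cache is omitted. */
void example_update_difficulty_cache()
{
  const size_t window = 735; // cached window per the notes above
  const uint64_t height = m_db->height();
  if (m_timestamps_and_difficulties_height + 1 == height && !m_timestamps.empty())
  {
    // common case: exactly one new block arrived; append it and trim the front
    m_timestamps.push_back(m_db->get_top_block_timestamp());
    m_difficulties.push_back(m_db->get_block_cumulative_difficulty(height - 1));
    if (m_timestamps.size() > window)
    {
      m_timestamps.erase(m_timestamps.begin());
      m_difficulties.erase(m_difficulties.begin());
    }
  }
  m_timestamps_and_difficulties_height = height;
}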
boost::asio::io_service m_async_service;
boost::thread_group m_async_pool;
std::unique_ptr<boost::asio::io_service::work> m_async_work_idle;
// all alternative chains
blocks_ext_by_hash m_alternative_chains; // crypto::hash -> block_extended_info
// some invalid blocks
blocks_ext_by_hash m_invalid_blocks; // crypto::hash -> block_extended_info
checkpoints m_checkpoints;
std::atomic<bool> m_is_in_checkpoint_zone;
std::atomic<bool> m_is_blockchain_storing;
bool m_enforce_dns_checkpoints;
HardFork *m_hardfork;
bool m_testnet;
/**
 * @brief collects the keys for all outputs being "spent" as an input
 *
 * This function makes sure that each output referenced by an input
 * (i.e. each mixin) exists, and collects the public key for each from the
 * transaction it was included in, via the visitor passed to it.
 *
 * If pmax_related_block_height is not NULL, its value is set to the height
 * of the most recent block which contains an output used in the input set.
 *
 * @tparam visitor_t a class that checks that the source transaction is
 * unlocked and collects the output key
 * @param tx_in_to_key a transaction input instance
 * @param vis an instance of the visitor to use
 * @param tx_prefix_hash the hash of the associated transaction_prefix
 * @param pmax_related_block_height return-by-pointer the height of the most recent block in the input set
 *
 * @return false if any keys are not found or any inputs are not unlocked, otherwise true
 */
template<class visitor_t>
inline bool scan_outputkeys_for_indexes(const txin_to_key& tx_in_to_key, visitor_t &vis, const crypto::hash &tx_prefix_hash, uint64_t* pmax_related_block_height = NULL) const;
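/* Illustrative visitor sketch for scan_outputkeys_for_indexes, based only on
   the @tparam description above. The callback name and signature are
   assumptions; the real visitors live in blockchain.cpp. */
struct example_outputs_visitor
{
  std::vector<crypto::public_key> m_output_keys;
  bool handle_output(const transaction &/*tx*/, const tx_out &out)
  {
    // a real visitor also verifies here that the source tx is unlocked
    if (out.target.type() != typeid(txout_to_key))
      return false; // not a spendable key output
    m_output_keys.push_back(boost::get<txout_to_key>(out.target).key);
    return true;
  }
};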
/**
 * @brief collect output public keys of a transaction input set
 *
 * This function locates all outputs associated with a given input set (mixins)
 * and validates that they exist and are usable
 * (unlocked; spent status is checked elsewhere).
 *
 * If pmax_related_block_height is not NULL, its value is set to the height
 * of the most recent block which contains an output used in the input set.
 *
 * @param txin the transaction input
 * @param tx_prefix_hash the transaction prefix hash, for caching organization
 * @param sig the input signature
 * @param output_keys return-by-reference the public keys of the outputs in the input set
 * @param pmax_related_block_height return-by-pointer the height of the most recent block in the input set
 *
 * @return false if any output is not yet unlocked, or is missing, otherwise true
 */
bool check_tx_input(const txin_to_key& txin, const crypto::hash& tx_prefix_hash, const std::vector<crypto::signature>& sig, std::vector<crypto::public_key> &output_keys, uint64_t* pmax_related_block_height);
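/* Illustrative sketch (hypothetical helper): the typical call pattern around
   check_tx_input, collecting the referenced output keys and handing them to
   ring signature verification. The crypto::check_ring_signature overload
   taking a vector of key pointers is assumed from crypto/crypto.h. */
bool example_verify_input(const txin_to_key &txin, const crypto::hash &tx_prefix_hash,
    const std::vector<crypto::signature> &sig)
{
  std::vector<crypto::public_key> output_keys;
  uint64_t max_related_height = 0;
  if (!check_tx_input(txin, tx_prefix_hash, sig, output_keys, &max_related_height))
    return false; // a referenced output is missing or still locked
  std::vector<const crypto::public_key *> key_ptrs;
  for (const auto &key : output_keys)
    key_ptrs.push_back(&key);
  return crypto::check_ring_signature(tx_prefix_hash, txin.k_image, key_ptrs, sig.data());
}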
/**
 * @brief validate a transaction's inputs and their keys
 *
 * This function validates transaction inputs and their keys. Previously
 * it also performed double spend checking, but that has been moved to its
 * own function.
 *
 * If pmax_used_block_height is not NULL, its value is set to the height
 * of the most recent block which contains an output used in any input set.
 *
 * Currently this function calls ring signature validation for each
 * transaction.
 *
 * @param tx the transaction to validate
 * @param tvc returned information about tx verification
 * @param pmax_used_block_height return-by-pointer the height of the most recent block in the input set
 *
 * @return false if any validation step fails, otherwise true
 */
bool check_tx_inputs(const transaction& tx, tx_verification_context &tvc, uint64_t* pmax_used_block_height = NULL);
|
** CHANGES ARE EXPERIMENTAL (FOR TESTING ONLY)
Bockchain:
1. Optim: Multi-thread long-hash computation when encountering groups of blocks.
2. Optim: Cache verified txs and return result from cache instead of re-checking whenever possible.
3. Optim: Preload output-keys when encoutering groups of blocks. Sort by amount and global-index before bulk querying database and multi-thread when possible.
4. Optim: Disable double spend check on block verification, double spend is already detected when trying to add blocks.
5. Optim: Multi-thread signature computation whenever possible.
6. Patch: Disable locking (recursive mutex) on called functions from check_tx_inputs which causes slowdowns (only seems to happen on ubuntu/VMs??? Reason: TBD)
7. Optim: Removed looped full-tx hash computation when retrieving transactions from pool (???).
8. Optim: Cache difficulty/timestamps (735 blocks) for next-difficulty calculations so that only 2 db reads per new block is needed when a new block arrives (instead of 1470 reads).
Berkeley-DB:
1. Fix: 32-bit data errors causing wrong output global indices and failure to send blocks to peers (etc).
2. Fix: Unable to pop blocks on reorganize due to transaction errors.
3. Patch: Large number of transaction aborts when running multi-threaded bulk queries.
4. Patch: Insufficient locks error when running full sync.
5. Patch: Incorrect db stats when returning from an immediate exit from "pop block" operation.
6. Optim: Add bulk queries to get output global indices.
7. Optim: Modified output_keys table to store public_key+unlock_time+height for single transaction lookup (vs 3)
8. Optim: Used output_keys table retrieve public_keys instead of going through output_amounts->output_txs+output_indices->txs->output:public_key
9. Optim: Added thread-safe buffers used when multi-threading bulk queries.
10. Optim: Added support for nosync/write_nosync options for improved performance (*see --db-sync-mode option for details)
11. Mod: Added checkpoint thread and auto-remove-logs option.
12. *Now usable on 32-bit systems like RPI2.
LMDB:
1. Optim: Added custom comparison for 256-bit key tables (minor speed-up, TBD: get actual effect)
2. Optim: Modified output_keys table to store public_key+unlock_time+height for single transaction lookup (vs 3)
3. Optim: Used output_keys table retrieve public_keys instead of going through output_amounts->output_txs+output_indices->txs->output:public_key
4. Optim: Added support for sync/writemap options for improved performance (*see --db-sync-mode option for details)
5. Mod: Auto resize to +1GB instead of multiplier x1.5
ETC:
1. Minor optimizations for slow-hash for ARM (RPI2). Incomplete.
2. Fix: 32-bit saturation bug when computing next difficulty on large blocks.
[PENDING ISSUES]
1. Berkely db has a very slow "pop-block" operation. This is very noticeable on the RPI2 as it sometimes takes > 10 MINUTES to pop a block during reorganization.
This does not happen very often however, most reorgs seem to take a few seconds but it possibly depends on the number of outputs present. TBD.
2. Berkeley db, possible bug "unable to allocate memory". TBD.
[NEW OPTIONS] (*Currently all enabled for testing purposes)
1. --fast-block-sync arg=[0:1] (default: 1)
a. 0 = Compute long hash per block (may take a while depending on CPU)
b. 1 = Skip long-hash and verify blocks based on embedded known good block hashes (faster, minimal CPU dependence)
2. --db-sync-mode arg=[[safe|fast|fastest]:[sync|async]:[nblocks_per_sync]] (default: fastest:async:1000)
a. safe = fdatasync/fsync (or equivalent) per stored block. Very slow, but safest option to protect against power-out/crash conditions.
b. fast/fastest = Enables asynchronous fdatasync/fsync (or equivalent). Useful for battery operated devices or STABLE systems with UPS and/or systems with battery backed write cache/solid state cache.
Fast - Write meta-data but defer data flush.
Fastest - Defer meta-data and data flush.
Sync - Flush data after nblocks_per_sync and wait.
Async - Flush data after nblocks_per_sync but do not wait for the operation to finish.
3. --prep-blocks-threads arg=[n] (default: 4 or system max threads, whichever is lower)
Max number of threads to use when computing long-hash in groups.
4. --show-time-stats arg=[0:1] (default: 1)
Show benchmark related time stats.
5. --db-auto-remove-logs arg=[0:1] (default: 1)
For berkeley-db only. Auto remove logs if enabled.
**Note: lmdb and berkeley-db have changes to the tables and are not compatible with official git head version.
At the moment, you need a full resync to use this optimized version.
[PERFORMANCE COMPARISON]
**Some figures are approximations only.
Using a baseline machine of an i7-2600K + SSD (with full pow computation):
1. The optimized lmdb/blockchain core can process blocks up to 585K in ~1.25 hours + download time, so it usually takes 2.5 hours to sync the full chain.
2. The current head with memory can process blocks up to 585K in ~4.2 hours + download time, so it usually takes 5.5 hours to sync the full chain.
3. The current head with lmdb can process blocks up to 585K in ~32 hours + download time and usually takes 36 hours to sync the full chain.
Average processing times (with full pow computation):
lmdb-optimized:
1. tx_ave = 2.5 ms / tx
2. block_ave = 5.87 ms / block
memory-official-repo:
1. tx_ave = 8.85 ms / tx
2. block_ave = 19.68 ms / block
lmdb-official-repo (0f4a036437fd41a5498ee5e74e2422ea6177aa3e):
1. tx_ave = 47.8 ms / tx
2. block_ave = 64.2 ms / block
**Note: The following data denotes processing times only (does not include p2p download time)
lmdb-optimized processing times (with full pow computation):
1. Desktop, Quad-core / 8-threads 2600k (8Mb) - 1.25 hours processing time (--db-sync-mode=fastest:async:1000).
2. Laptop, Dual-core / 4-threads U4200 (3Mb) - 4.90 hours processing time (--db-sync-mode=fastest:async:1000).
3. Embedded, Quad-core / 4-threads Z3735F (2x1Mb) - 12.0 hours processing time (--db-sync-mode=fastest:async:1000).
lmdb-optimized processing times (with per-block-checkpoint)
1. Desktop, Quad-core / 8-threads 2600k (8Mb) - 10 minutes processing time (--db-sync-mode=fastest:async:1000).
berkeley-db optimized processing times (with full pow computation)
1. Desktop, Quad-core / 8-threads 2600k (8Mb) - 1.8 hours processing time (--db-sync-mode=fastest:async:1000).
2. RPI2: Improved from an estimated 3 months(???) to 2.5 days (*Need 2AMP supply + Clock:1GHz + [usb+ssd] to achieve this speed) (--db-sync-mode=fastest:async:1000).
berkeley-db optimized processing times (with per-block-checkpoint)
1. RPI2: 12-15 hours (*Need 2AMP supply + Clock:1GHz + [usb+ssd] to achieve this speed) (--db-sync-mode=fastest:async:1000).

/**
 * @brief performs a blockchain reorganization according to the longest chain rule
 *
 * This function aggregates all the actions necessary to switch to a
 * newly-longer chain. If any step in the reorganization process fails,
 * the blockchain is reverted to its previous state.
 *
 * @param alt_chain the chain to switch to
 * @param discard_disconnected_chain whether or not to keep the old chain as an alternate
 *
 * @return false if the reorganization fails, otherwise true
 */
bool switch_to_alternative_blockchain(std::list<blocks_ext_by_hash::iterator>& alt_chain, bool discard_disconnected_chain);
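
// Illustrative outline of the reorganization flow (a sketch, not the exact
// implementation; names like disconnected_chain/split_height are illustrative):
//
//   1. pop blocks off the main chain down to the fork point, remembering
//      them in a disconnected_chain list
//   2. append each block from alt_chain via handle_block_to_main_chain()
//   3. on any failure, restore the old state with
//      rollback_blockchain_switching(disconnected_chain, split_height)
//   4. unless discard_disconnected_chain is set, re-queue the disconnected
//      blocks as an alternative chain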

/**
 * @brief removes the most recent block from the blockchain
 *
 * @return the block removed
 */
block pop_block_from_blockchain();

/**
 * @brief validate and add a new block to the end of the blockchain
 *
 * This function is merely a convenience wrapper around the overload of
 * the same name: it passes the block's hash to that function along with
 * the block and verification context.
 *
 * @param bl the block to be added
 * @param bvc metadata concerning the block's validity
 *
 * @return true if the block was added successfully, otherwise false
 */
bool handle_block_to_main_chain(const block& bl, block_verification_context& bvc);
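
// A minimal sketch of what this convenience overload amounts to (assuming
// get_block_hash() from cryptonote_format_utils.h; not necessarily verbatim):
//
//   crypto::hash id = get_block_hash(bl);
//   return handle_block_to_main_chain(bl, id, bvc);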

/**
 * @brief validate and add a new block to the end of the blockchain
 *
 * When a block is given to Blockchain to be added to the blockchain, it
 * is passed here if it is determined to belong at the end of the current
 * chain.
 *
 * @param bl the block to be added
 * @param id the hash of the block
 * @param bvc metadata concerning the block's validity
 *
 * @return true if the block was added successfully, otherwise false
 */
bool handle_block_to_main_chain(const block& bl, const crypto::hash& id, block_verification_context& bvc);

/**
 * @brief validate and add a new block to an alternate blockchain
 *
 * If a block to be added does not belong to the main chain, but there
 * is an alternate chain to which it should be added, that is handled
 * here.
 *
 * @param b the block to be added
 * @param id the hash of the block
 * @param bvc metadata concerning the block's validity
 *
 * @return true if the block was added successfully, otherwise false
 */
bool handle_alternative_block(const block& b, const crypto::hash& id, block_verification_context& bvc);

/**
 * @brief gets the difficulty requirement for a new block on an alternate chain
 *
 * @param alt_chain the chain to be added to
 * @param bei the block being added (and metadata, see ::block_extended_info)
 *
 * @return the difficulty requirement
 */
difficulty_type get_next_difficulty_for_alternative_chain(const std::list<blocks_ext_by_hash::iterator>& alt_chain, block_extended_info& bei) const;
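
// Sketch of the alt-chain difficulty calculation (illustrative only; constant
// names are from cryptonote_config.h, next_difficulty() from difficulty.h):
//
//   std::vector<uint64_t> timestamps;
//   std::vector<difficulty_type> cumulative_difficulties;
//   // fill both from the main chain below the fork point, then from
//   // alt_chain itself, up to DIFFICULTY_BLOCKS_COUNT entries
//   return next_difficulty(timestamps, cumulative_difficulties, DIFFICULTY_TARGET);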

/**
 * @brief sanity checks a miner transaction before validating an entire block
 *
 * This function merely checks basic things like the structure of the miner
 * transaction, the unlock time, and that the amount doesn't overflow.
 *
 * @param b the block containing the miner transaction
 * @param height the height at which the block will be added
 *
 * @return false if anything is found wrong with the miner transaction, otherwise true
 */
bool prevalidate_miner_transaction(const block& b, uint64_t height);
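
// The "basic things" checked here are roughly (illustrative; constants per
// cryptonote_config.h, check_outs_overflow() per cryptonote_format_utils.h):
//
//   - b.miner_tx.vin.size() == 1, and the input holds a txin_gen
//   - the txin_gen height equals the height the block will be added at
//   - b.miner_tx.unlock_time == height + CRYPTONOTE_MINED_MONEY_UNLOCK_WINDOW
//   - the sum of the outputs does not overflow (check_outs_overflow(b.miner_tx))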

/**
 * @brief validates a miner (coinbase) transaction
 *
 * This function makes sure that the miner calculated his reward correctly
 * and that his miner transaction totals reward + fee.
 *
 * @param b the block containing the miner transaction to be validated
 * @param cumulative_block_size the block's size
 * @param fee the total fees collected in the block
 * @param base_reward return-by-reference the new block's generated coins
 * @param already_generated_coins the amount of currency generated prior to this block
 * @param partial_block_reward return-by-reference true if the miner accepted only a partial reward
 * @param version hard fork version for that transaction
 *
 * @return false if anything is found wrong with the miner transaction, otherwise true
 */
bool validate_miner_transaction(const block& b, size_t cumulative_block_size, uint64_t fee, uint64_t& base_reward, uint64_t already_generated_coins, bool &partial_block_reward, uint8_t version);
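
// Illustrative outline (a sketch assuming the get_block_reward() helper;
// median_size stands for the median of recent block sizes, see
// get_last_n_blocks_sizes()):
//
//   uint64_t money_in_use = 0;
//   for (const auto& o : b.miner_tx.vout) money_in_use += o.amount;
//   if (!get_block_reward(median_size, cumulative_block_size,
//                         already_generated_coins, base_reward, version))
//     return false;                      // block too large for the penalty zone
//   if (base_reward + fee < money_in_use)
//     return false;                      // miner claimed more than allowed
//   partial_block_reward = (money_in_use < base_reward + fee);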

/**
 * @brief reverts the blockchain to its previous state following a failed switch
 *
 * If Blockchain fails to switch to an alternate chain when it means
 * to do so, this function reverts the blockchain to how it was before
 * the attempted switch.
 *
 * @param original_chain the chain to switch back to
 * @param rollback_height the height to revert to before appending the original chain
 *
 * @return false if something goes wrong with reverting (very bad), otherwise true
 */
bool rollback_blockchain_switching(std::list<block>& original_chain, uint64_t rollback_height);

/**
 * @brief gets recent block sizes for median calculation
 *
 * get the block sizes of the last <count> blocks, and return by reference <sz>.
 *
 * @param sz return-by-reference the list of sizes
 * @param count the number of blocks to get sizes for
 */
void get_last_n_blocks_sizes(std::vector<size_t>& sz, size_t count) const;
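
// Roughly equivalent to (a sketch; m_db is the BlockchainDB handle):
//
//   uint64_t h = m_db->height();
//   uint64_t start = (count >= h) ? 0 : h - count;
//   for (uint64_t i = start; i < h; ++i)
//     sz.push_back(m_db->get_block_size(i));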

/**
 * @brief adds the given output to the requested set of random outputs
 *
 * @param result_outs return-by-reference the set the output is to be added to
 * @param amount the output amount
 * @param i the output index (indexed to amount)
 */
void add_out_to_get_random_outs(COMMAND_RPC_GET_RANDOM_OUTPUTS_FOR_AMOUNTS::outs_for_amount& result_outs, uint64_t amount, size_t i) const;

/**
 * @brief checks if a transaction is unlocked (its outputs spendable)
 *
 * This function checks to see if a transaction is unlocked.
 * unlock_time is either a block index or a unix time.
 *
 * @param unlock_time the unlock parameter (height or time)
 *
 * @return true if spendable, otherwise false
 */
bool is_tx_spendtime_unlocked(uint64_t unlock_time) const;
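
// Sketch of the height-vs-time convention (constants from cryptonote_config.h;
// not necessarily the exact implementation):
//
//   if (unlock_time < CRYPTONOTE_MAX_BLOCK_NUMBER)   // value is a block index
//     return m_db->height() + CRYPTONOTE_LOCKED_TX_ALLOWED_DELTA_BLOCKS - 1 >= unlock_time;
//   uint64_t now = static_cast<uint64_t>(time(NULL)); // value is a unix time
//   return now + CRYPTONOTE_LOCKED_TX_ALLOWED_DELTA_SECONDS >= unlock_time;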

/**
 * @brief stores an invalid block in a separate container
 *
 * Storing invalid blocks allows quick dismissal of the same block
 * if it is seen again.
 *
 * @param bl the invalid block
 * @param h the block's hash
 *
 * @return false if the block cannot be stored for some reason, otherwise true
 */
bool add_block_as_invalid(const block& bl, const crypto::hash& h);

/**
 * @brief stores an invalid block in a separate container
 *
 * Storing invalid blocks allows quick dismissal of the same block
 * if it is seen again.
 *
 * @param bei the invalid block (see ::block_extended_info)
 * @param h the block's hash
 *
 * @return false if the block cannot be stored for some reason, otherwise true
 */
bool add_block_as_invalid(const block_extended_info& bei, const crypto::hash& h);

/**
 * @brief checks a block's timestamp
 *
 * This function grabs the timestamps from the most recent <n> blocks,
 * where n = BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW. If there are fewer than
 * that many blocks in the blockchain, the timestamp is assumed to be
 * valid. If there are, this function returns:
 *   true if the block's timestamp is not less than the timestamp of the
 *   median of the selected blocks
 *   false otherwise
 *
 * @param b the block to be checked
 *
 * @return true if the block's timestamp is valid, otherwise false
 */
bool check_block_timestamp(const block& b) const;

/**
 * @brief checks a block's timestamp
 *
 * If the block's timestamp is less than the median of the recent
 * timestamps passed here, it is considered invalid.
 *
 * @param timestamps a list of the most recent timestamps to check against
 * @param b the block to be checked
 *
 * @return true if the block's timestamp is valid, otherwise false
 */
bool check_block_timestamp(std::vector<uint64_t>& timestamps, const block& b) const;
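
// The two-argument overload boils down to a median comparison (illustrative;
// epee::misc_utils::median() is assumed from the epee contrib library):
//
//   uint64_t median_ts = epee::misc_utils::median(timestamps);
//   return b.timestamp >= median_ts;
//
// while the single-argument overload additionally rejects timestamps more than
// CRYPTONOTE_BLOCK_FUTURE_TIME_LIMIT seconds ahead of the local clock.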

/**
 * @brief get the "adjusted time"
 *
 * Currently this simply returns the current time according to the
 * user's machine.
 *
 * @return the current time
 */
uint64_t get_adjusted_time() const;

/**
 * @brief finish an alternate chain's timestamp window from the main chain
 *
 * for an alternate chain, get the timestamps from the main chain to complete
 * the needed number of timestamps for the BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW.
 *
 * @param start_height the alternate chain's attachment height to the main chain
 * @param timestamps return-by-reference the timestamps set to be populated
 *
 * @return true unless start_height is greater than the current blockchain height
 */
bool complete_timestamps_vector(uint64_t start_height, std::vector<uint64_t>& timestamps);

/**
 * @brief calculate the block size limit for the next block to be added
 *
 * @return true
 */
bool update_next_cumulative_size_limit();
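
// Sketch of the limit calculation (constants per cryptonote_config.h;
// m_current_block_cumul_sz_limit is the member this updates; illustrative only):
//
//   std::vector<size_t> sz;
//   get_last_n_blocks_sizes(sz, CRYPTONOTE_REWARD_BLOCKS_WINDOW);
//   uint64_t median = epee::misc_utils::median(sz);
//   if (median <= CRYPTONOTE_BLOCK_GRANTED_FULL_REWARD_ZONE)
//     median = CRYPTONOTE_BLOCK_GRANTED_FULL_REWARD_ZONE;
//   m_current_block_cumul_sz_limit = median * 2;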

blockchain: fix a few block addition bugs
If the block reward was too high, the verification-failed flag
was set, but the function continued. The code which was supposed
to trap this flag and return failure failed to trap it and,
while the block was not added to the chain, the function would
return success.
The reason for not returning when the block reward problem was
detected was to be able to return any transactions to the pool
if needed. This is now mooted by moving the transaction-return
code to a separate function, which is now called at all
appropriate points, making the logic much simpler, and hopefully
correct now.
We also move the hard fork version check after the prev_id check,
as a block which does not go on top of the chain might not have
the expected version there, without being invalid just for this
reason.
Last, we trap the case where a block fails to be added due to
using already-spent key images, to set the verification-failed
flag.
void return_tx_to_pool(const std::vector<transaction> &txs);

/**
 * @brief make sure a transaction isn't attempting a double-spend
 *
 * @param tx the transaction to check
 * @param keys_this_block a cumulative list of spent keys for the current block
 *
 * @return false if a double spend was detected, otherwise true
 */
bool check_for_double_spend(const transaction& tx, key_images_container& keys_this_block) const;
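
// Conceptually, for each txin_to_key input of tx (a sketch; the real code
// walks the inputs with a visitor):
//
//   if (!keys_this_block.insert(in.k_image).second)
//     return false;   // key image already seen in this block: double spend
//
// Key images spent in earlier blocks are caught separately when the block is
// added to the database.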

/**
 * @brief validates a transaction input's ring signature
 *
 * @param tx_prefix_hash the transaction prefix hash
 * @param key_image the key image generated from the true input
 * @param pubkeys the public keys for each input in the ring signature
 * @param sig the signature generated for each input in the ring signature
 * @param result return-by-reference result: zero if the ring signature is invalid, otherwise nonzero
 */
void check_ring_signature(const crypto::hash &tx_prefix_hash, const crypto::key_image &key_image,
    const std::vector<crypto::public_key> &pubkeys, const std::vector<crypto::signature> &sig, uint64_t &result);
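
// A sketch of the wrapper's core (crypto::check_ring_signature() per
// crypto/crypto.h; illustrative only):
//
//   std::vector<const crypto::public_key *> p_output_keys;
//   for (auto &key : pubkeys)
//     p_output_keys.push_back(&key);
//   result = crypto::check_ring_signature(tx_prefix_hash, key_image,
//                                         p_output_keys, sig.data()) ? 1 : 0;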

/**
 * @brief loads block hashes from compiled-in data set
 *
 * A (possibly empty) set of block hashes can be compiled into the
 * monero daemon binary. This function loads those hashes into
 * a useful state.
 */
void load_compiled_in_block_hashes();
};
} // namespace cryptonote