// Copyright (c) 2014-2019, The Monero Project
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification, are
// permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of
// conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice, this list
// of conditions and the following disclaimer in the documentation and/or other
// materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its contributors may be
// used to endorse or promote products derived from this software without specific
// prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
// THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
// THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Parts of this file are originally copyright (c) 2012-2013 The Cryptonote developers
2014-03-03 22:07:58 +00:00
2019-04-25 16:35:27 +00:00
# include <boost/preprocessor/stringize.hpp>
2014-03-03 22:07:58 +00:00
# include "include_base_utils.h"
2017-11-25 22:25:05 +00:00
# include "string_tools.h"
2014-03-03 22:07:58 +00:00
using namespace epee ;
# include "core_rpc_server.h"
# include "common/command_line.h"
2017-02-24 23:16:13 +00:00
# include "common/updates.h"
# include "common/download.h"
# include "common/util.h"
2017-10-29 21:10:46 +00:00
# include "common/perf_timer.h"
2017-01-26 15:07:23 +00:00
# include "cryptonote_basic/cryptonote_format_utils.h"
# include "cryptonote_basic/account.h"
# include "cryptonote_basic/cryptonote_basic_impl.h"
2019-04-12 20:20:20 +00:00
# include "cryptonote_core/tx_sanity_check.h"
2014-03-03 22:07:58 +00:00
# include "misc_language.h"
2018-12-16 17:57:44 +00:00
# include "net/parse.h"
2018-01-20 10:38:14 +00:00
# include "storages/http_abstract_invoke.h"
2014-03-03 22:07:58 +00:00
# include "crypto/hash.h"
2017-02-05 22:48:03 +00:00
# include "rpc/rpc_args.h"
2018-10-20 02:06:03 +00:00
# include "rpc/rpc_handler.h"
2014-03-03 22:07:58 +00:00
# include "core_rpc_server_error_codes.h"
2017-11-29 22:53:58 +00:00
# include "p2p/net_node.h"
# include "version.h"
2014-03-03 22:07:58 +00:00
2017-02-27 20:25:35 +00:00
# undef MONERO_DEFAULT_LOG_CATEGORY
# define MONERO_DEFAULT_LOG_CATEGORY "daemon.rpc"
2016-06-29 19:43:14 +00:00
# define MAX_RESTRICTED_FAKE_OUTS_COUNT 40
2017-11-23 19:15:45 +00:00
# define MAX_RESTRICTED_GLOBAL_FAKE_OUTS_COUNT 5000
2016-06-29 19:43:14 +00:00
2019-05-12 13:27:34 +00:00
# define OUTPUT_HISTOGRAM_RECENT_CUTOFF_RESTRICTION (3 * 86400) // 3 days max, the wallet requests 1.8 days
2017-09-16 09:31:49 +00:00
namespace
{
void add_reason ( std : : string & reasons , const char * reason )
{
if ( ! reasons . empty ( ) )
reasons + = " , " ;
reasons + = reason ;
}
2019-02-21 12:55:55 +00:00
uint64_t round_up ( uint64_t value , uint64_t quantum )
{
return ( value + quantum - 1 ) / quantum * quantum ;
}
2019-01-31 10:44:08 +00:00
void store_difficulty ( cryptonote : : difficulty_type difficulty , uint64_t & sdiff , std : : string & swdiff , uint64_t & stop64 )
{
2019-05-01 19:57:34 +00:00
sdiff = ( difficulty & 0xffffffffffffffff ) . convert_to < uint64_t > ( ) ;
2019-04-05 16:28:15 +00:00
swdiff = cryptonote : : hex ( difficulty ) ;
2019-05-01 19:57:34 +00:00
stop64 = ( ( difficulty > > 64 ) & 0xffffffffffffffff ) . convert_to < uint64_t > ( ) ;
2019-01-31 10:44:08 +00:00
}
2017-09-16 09:31:49 +00:00
}
2014-03-03 22:07:58 +00:00
namespace cryptonote
{
//-----------------------------------------------------------------------------------
void core_rpc_server : : init_options ( boost : : program_options : : options_description & desc )
{
command_line : : add_arg ( desc , arg_rpc_bind_port ) ;
2017-11-16 03:58:11 +00:00
command_line : : add_arg ( desc , arg_rpc_restricted_bind_port ) ;
2015-11-27 18:24:29 +00:00
command_line : : add_arg ( desc , arg_restricted_rpc ) ;
2018-01-20 10:38:14 +00:00
command_line : : add_arg ( desc , arg_bootstrap_daemon_address ) ;
command_line : : add_arg ( desc , arg_bootstrap_daemon_login ) ;
2019-05-22 04:09:11 +00:00
cryptonote : : rpc_args : : init_options ( desc , true ) ;
2014-03-03 22:07:58 +00:00
}
//------------------------------------------------------------------------------------------------------------------------------
2014-09-09 14:58:53 +00:00
core_rpc_server : : core_rpc_server (
core & cr
, nodetool : : node_server < cryptonote : : t_cryptonote_protocol_handler < cryptonote : : core > > & p2p
)
: m_core ( cr )
, m_p2p ( p2p )
2019-08-20 15:05:54 +00:00
, m_was_bootstrap_ever_used ( false )
2014-03-03 22:07:58 +00:00
{ }
//------------------------------------------------------------------------------------------------------------------------------
2019-06-10 15:03:18 +00:00
bool core_rpc_server : : set_bootstrap_daemon ( const std : : string & address , const std : : string & username_password )
{
boost : : optional < epee : : net_utils : : http : : login > credentials ;
const auto loc = username_password . find ( ' : ' ) ;
if ( loc ! = std : : string : : npos )
{
credentials = epee : : net_utils : : http : : login ( username_password . substr ( 0 , loc ) , username_password . substr ( loc + 1 ) ) ;
}
return set_bootstrap_daemon ( address , credentials ) ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-08-27 12:01:49 +00:00
boost : : optional < std : : string > core_rpc_server : : get_random_public_node ( )
{
COMMAND_RPC_GET_PUBLIC_NODES : : request request ;
COMMAND_RPC_GET_PUBLIC_NODES : : response response ;
request . gray = true ;
request . white = true ;
if ( ! on_get_public_nodes ( request , response ) | | response . status ! = CORE_RPC_STATUS_OK )
{
return boost : : none ;
}
const auto get_random_node_address = [ ] ( const std : : vector < public_node > & public_nodes ) - > std : : string {
const auto & random_node = public_nodes [ crypto : : rand_idx ( public_nodes . size ( ) ) ] ;
const auto address = random_node . host + " : " + std : : to_string ( random_node . rpc_port ) ;
return address ;
} ;
if ( ! response . white . empty ( ) )
{
return get_random_node_address ( response . white ) ;
}
MDEBUG ( " No white public node found, checking gray peers " ) ;
if ( ! response . gray . empty ( ) )
{
return get_random_node_address ( response . gray ) ;
}
MERROR ( " Failed to find any suitable public node " ) ;
return boost : : none ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-06-10 15:03:18 +00:00
bool core_rpc_server : : set_bootstrap_daemon ( const std : : string & address , const boost : : optional < epee : : net_utils : : http : : login > & credentials )
{
boost : : unique_lock < boost : : shared_mutex > lock ( m_bootstrap_daemon_mutex ) ;
2019-08-27 12:01:49 +00:00
if ( address . empty ( ) )
2019-06-10 15:03:18 +00:00
{
2019-08-27 12:01:49 +00:00
m_bootstrap_daemon . reset ( nullptr ) ;
}
else if ( address = = " auto " )
{
m_bootstrap_daemon . reset ( new bootstrap_daemon ( [ this ] { return get_random_public_node ( ) ; } ) ) ;
}
else
{
m_bootstrap_daemon . reset ( new bootstrap_daemon ( address , credentials ) ) ;
2019-06-10 15:03:18 +00:00
}
2019-08-27 12:01:49 +00:00
m_should_use_bootstrap_daemon = m_bootstrap_daemon . get ( ) ! = nullptr ;
2019-06-10 15:03:18 +00:00
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2017-02-05 22:48:03 +00:00
bool core_rpc_server : : init (
2014-09-08 17:07:15 +00:00
const boost : : program_options : : variables_map & vm
2017-11-16 03:58:11 +00:00
, const bool restricted
, const std : : string & port
2014-09-08 17:07:15 +00:00
)
2014-03-03 22:07:58 +00:00
{
2017-11-16 03:58:11 +00:00
m_restricted = restricted ;
2017-02-05 22:48:03 +00:00
m_net_server . set_threads_prefix ( " RPC " ) ;
2019-06-13 07:47:06 +00:00
m_net_server . set_connection_filter ( & m_p2p ) ;
2017-02-05 22:48:03 +00:00
2019-05-22 04:09:11 +00:00
auto rpc_config = cryptonote : : rpc_args : : process ( vm , true ) ;
2017-02-05 22:48:03 +00:00
if ( ! rpc_config )
return false ;
2019-06-10 15:03:18 +00:00
if ( ! set_bootstrap_daemon ( command_line : : get_arg ( vm , arg_bootstrap_daemon_address ) ,
command_line : : get_arg ( vm , arg_bootstrap_daemon_login ) ) )
2018-01-20 10:38:14 +00:00
{
2019-06-10 15:03:18 +00:00
MERROR ( " Failed to parse bootstrap daemon address " ) ;
return false ;
2018-01-20 10:38:14 +00:00
}
2017-02-05 22:48:03 +00:00
boost : : optional < epee : : net_utils : : http : : login > http_login { } ;
2017-11-16 03:58:11 +00:00
2017-02-05 22:48:03 +00:00
if ( rpc_config - > login )
http_login . emplace ( std : : move ( rpc_config - > login - > username ) , std : : move ( rpc_config - > login - > password ) . password ( ) ) ;
2017-12-21 11:45:01 +00:00
auto rng = [ ] ( size_t len , uint8_t * ptr ) { return crypto : : rand ( len , ptr ) ; } ;
2017-02-05 22:48:03 +00:00
return epee : : http_server_impl_base < core_rpc_server , connection_context > : : init (
2019-04-10 22:34:30 +00:00
rng , std : : move ( port ) , std : : move ( rpc_config - > bind_ip ) ,
std : : move ( rpc_config - > bind_ipv6_address ) , std : : move ( rpc_config - > use_ipv6 ) , std : : move ( rpc_config - > require_ipv4 ) ,
std : : move ( rpc_config - > access_control_origins ) , std : : move ( http_login ) , std : : move ( rpc_config - > ssl_options )
2017-02-05 22:48:03 +00:00
) ;
2014-03-03 22:07:58 +00:00
}
2014-06-01 21:53:44 +00:00
//------------------------------------------------------------------------------------------------------------------------------
bool core_rpc_server : : check_core_ready ( )
{
if ( ! m_p2p . get_payload_object ( ) . is_synchronized ( ) )
2014-03-20 11:46:11 +00:00
{
return false ;
}
2017-10-20 19:49:23 +00:00
return true ;
2014-03-20 11:46:11 +00:00
}
2019-06-13 07:47:06 +00:00
//------------------------------------------------------------------------------------------------------------------------------
bool core_rpc_server : : add_host_fail ( const connection_context * ctx )
{
if ( ! ctx | | ! ctx - > m_remote_address . is_blockable ( ) )
return false ;
CRITICAL_REGION_LOCAL ( m_host_fails_score_lock ) ;
uint64_t fails = + + m_host_fails_score [ ctx - > m_remote_address . host_str ( ) ] ;
MDEBUG ( " Host " < < ctx - > m_remote_address . host_str ( ) < < " fail score= " < < fails ) ;
if ( fails > RPC_IP_FAILS_BEFORE_BLOCK )
{
auto it = m_host_fails_score . find ( ctx - > m_remote_address . host_str ( ) ) ;
CHECK_AND_ASSERT_MES ( it ! = m_host_fails_score . end ( ) , false , " internal error " ) ;
it - > second = RPC_IP_FAILS_BEFORE_BLOCK / 2 ;
m_p2p . block_host ( ctx - > m_remote_address ) ;
}
return true ;
}
2014-10-06 13:18:16 +00:00
# define CHECK_CORE_READY() do { if(!check_core_ready()){res.status = CORE_RPC_STATUS_BUSY;return true;} } while(0)
2014-03-20 11:46:11 +00:00
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 19:09:39 +00:00
bool core_rpc_server : : on_get_height ( const COMMAND_RPC_GET_HEIGHT : : request & req , COMMAND_RPC_GET_HEIGHT : : response & res , const connection_context * ctx )
2014-03-20 11:46:11 +00:00
{
2017-10-29 21:10:46 +00:00
PERF_TIMER ( on_get_height ) ;
2018-01-20 10:38:14 +00:00
bool r ;
if ( use_bootstrap_daemon_if_necessary < COMMAND_RPC_GET_HEIGHT > ( invoke_http_mode : : JON , " /getheight " , req , res , r ) )
return r ;
2019-04-01 00:02:58 +00:00
crypto : : hash hash ;
m_core . get_blockchain_top ( res . height , hash ) ;
2019-04-11 17:23:25 +00:00
+ + res . height ; // block height to chain height
2019-04-01 00:02:58 +00:00
res . hash = string_tools : : pod_to_hex ( hash ) ;
2014-03-20 11:46:11 +00:00
res . status = CORE_RPC_STATUS_OK ;
2014-03-03 22:07:58 +00:00
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 19:09:39 +00:00
bool core_rpc_server : : on_get_info ( const COMMAND_RPC_GET_INFO : : request & req , COMMAND_RPC_GET_INFO : : response & res , const connection_context * ctx )
2014-03-03 22:07:58 +00:00
{
2017-10-29 21:10:46 +00:00
PERF_TIMER ( on_get_info ) ;
2018-01-20 10:38:14 +00:00
bool r ;
if ( use_bootstrap_daemon_if_necessary < COMMAND_RPC_GET_INFO > ( invoke_http_mode : : JON , " /getinfo " , req , res , r ) )
{
2019-06-10 15:03:18 +00:00
{
boost : : shared_lock < boost : : shared_mutex > lock ( m_bootstrap_daemon_mutex ) ;
2019-08-27 12:01:49 +00:00
if ( m_bootstrap_daemon . get ( ) ! = nullptr )
{
res . bootstrap_daemon_address = m_bootstrap_daemon - > address ( ) ;
}
2019-06-10 15:03:18 +00:00
}
2018-01-20 10:38:14 +00:00
crypto : : hash top_hash ;
m_core . get_blockchain_top ( res . height_without_bootstrap , top_hash ) ;
+ + res . height_without_bootstrap ; // turn top block height into blockchain height
res . was_bootstrap_ever_used = true ;
return r ;
}
2019-01-11 19:09:39 +00:00
const bool restricted = m_restricted & & ctx ;
2015-12-18 19:56:17 +00:00
crypto : : hash top_hash ;
2017-09-09 11:06:24 +00:00
m_core . get_blockchain_top ( res . height , top_hash ) ;
2015-12-18 19:56:17 +00:00
+ + res . height ; // turn top block height into blockchain height
res . top_block_hash = string_tools : : pod_to_hex ( top_hash ) ;
2014-06-04 20:50:13 +00:00
res . target_height = m_core . get_target_blockchain_height ( ) ;
2019-01-31 10:44:08 +00:00
store_difficulty ( m_core . get_blockchain_storage ( ) . get_difficulty_for_next_block ( ) , res . difficulty , res . wide_difficulty , res . difficulty_top64 ) ;
2017-04-06 19:01:07 +00:00
res . target = m_core . get_blockchain_storage ( ) . get_difficulty_target ( ) ;
2014-03-03 22:07:58 +00:00
res . tx_count = m_core . get_blockchain_storage ( ) . get_total_transactions ( ) - res . height ; //without coinbase
res . tx_pool_size = m_core . get_pool_transactions_count ( ) ;
2019-01-11 19:09:39 +00:00
res . alt_blocks_count = restricted ? 0 : m_core . get_blockchain_storage ( ) . get_alternative_blocks_count ( ) ;
2018-12-16 17:57:44 +00:00
uint64_t total_conn = restricted ? 0 : m_p2p . get_public_connections_count ( ) ;
res . outgoing_connections_count = restricted ? 0 : m_p2p . get_public_outgoing_connections_count ( ) ;
2019-01-11 19:09:39 +00:00
res . incoming_connections_count = restricted ? 0 : ( total_conn - res . outgoing_connections_count ) ;
res . rpc_connections_count = restricted ? 0 : get_connections_count ( ) ;
2018-12-16 17:57:44 +00:00
res . white_peerlist_size = restricted ? 0 : m_p2p . get_public_white_peers_count ( ) ;
res . grey_peerlist_size = restricted ? 0 : m_p2p . get_public_gray_peers_count ( ) ;
2018-11-16 04:32:05 +00:00
cryptonote : : network_type net_type = nettype ( ) ;
res . mainnet = net_type = = MAINNET ;
res . testnet = net_type = = TESTNET ;
res . stagenet = net_type = = STAGENET ;
res . nettype = net_type = = MAINNET ? " mainnet " : net_type = = TESTNET ? " testnet " : net_type = = STAGENET ? " stagenet " : " fakechain " ;
2019-01-31 10:44:08 +00:00
store_difficulty ( m_core . get_blockchain_storage ( ) . get_db ( ) . get_block_cumulative_difficulty ( res . height - 1 ) ,
res . cumulative_difficulty , res . wide_cumulative_difficulty , res . cumulative_difficulty_top64 ) ;
2018-07-18 21:24:53 +00:00
res . block_size_limit = res . block_weight_limit = m_core . get_blockchain_storage ( ) . get_current_cumulative_block_weight_limit ( ) ;
res . block_size_median = res . block_weight_median = m_core . get_blockchain_storage ( ) . get_current_cumulative_block_weight_median ( ) ;
2019-01-11 19:09:39 +00:00
res . start_time = restricted ? 0 : ( uint64_t ) m_core . get_start_time ( ) ;
res . free_space = restricted ? std : : numeric_limits < uint64_t > : : max ( ) : m_core . get_free_space ( ) ;
2017-11-30 15:44:01 +00:00
res . offline = m_core . offline ( ) ;
2019-01-11 19:09:39 +00:00
res . height_without_bootstrap = restricted ? 0 : res . height ;
if ( restricted )
2019-06-10 15:03:18 +00:00
{
res . bootstrap_daemon_address = " " ;
2018-11-20 21:41:03 +00:00
res . was_bootstrap_ever_used = false ;
2019-06-10 15:03:18 +00:00
}
2018-11-20 21:41:03 +00:00
else
2018-01-20 10:38:14 +00:00
{
boost : : shared_lock < boost : : shared_mutex > lock ( m_bootstrap_daemon_mutex ) ;
2019-08-27 12:01:49 +00:00
if ( m_bootstrap_daemon . get ( ) ! = nullptr )
{
res . bootstrap_daemon_address = m_bootstrap_daemon - > address ( ) ;
}
2018-01-20 10:38:14 +00:00
res . was_bootstrap_ever_used = m_was_bootstrap_ever_used ;
}
2019-02-21 12:55:55 +00:00
res . database_size = m_core . get_blockchain_storage ( ) . get_db ( ) . get_database_size ( ) ;
if ( restricted )
res . database_size = round_up ( res . database_size , 5ull * 1024 * 1024 * 1024 ) ;
2019-01-11 19:09:39 +00:00
res . update_available = restricted ? false : m_core . is_update_available ( ) ;
res . version = restricted ? " " : MONERO_VERSION ;
2019-03-29 12:56:47 +00:00
res . status = CORE_RPC_STATUS_OK ;
2014-03-03 22:07:58 +00:00
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-03-21 10:03:24 +00:00
bool core_rpc_server : : on_get_net_stats ( const COMMAND_RPC_GET_NET_STATS : : request & req , COMMAND_RPC_GET_NET_STATS : : response & res , const connection_context * ctx )
{
PERF_TIMER ( on_get_net_stats ) ;
// No bootstrap daemon check: Only ever get stats about local server
res . start_time = ( uint64_t ) m_core . get_start_time ( ) ;
{
CRITICAL_REGION_LOCAL ( epee : : net_utils : : network_throttle_manager : : m_lock_get_global_throttle_in ) ;
epee : : net_utils : : network_throttle_manager : : get_global_throttle_in ( ) . get_stats ( res . total_packets_in , res . total_bytes_in ) ;
}
{
CRITICAL_REGION_LOCAL ( epee : : net_utils : : network_throttle_manager : : m_lock_get_global_throttle_out ) ;
epee : : net_utils : : network_throttle_manager : : get_global_throttle_out ( ) . get_stats ( res . total_packets_out , res . total_bytes_out ) ;
}
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2018-10-14 07:54:07 +00:00
class pruned_transaction {
transaction & tx ;
public :
pruned_transaction ( transaction & tx ) : tx ( tx ) { }
BEGIN_SERIALIZE_OBJECT ( )
bool r = tx . serialize_base ( ar ) ;
if ( ! r ) return false ;
END_SERIALIZE ( )
} ;
2018-09-18 00:38:40 +00:00
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 19:09:39 +00:00
bool core_rpc_server : : on_get_blocks ( const COMMAND_RPC_GET_BLOCKS_FAST : : request & req , COMMAND_RPC_GET_BLOCKS_FAST : : response & res , const connection_context * ctx )
2014-03-03 22:07:58 +00:00
{
2017-10-29 21:10:46 +00:00
PERF_TIMER ( on_get_blocks ) ;
2018-01-20 10:38:14 +00:00
bool r ;
if ( use_bootstrap_daemon_if_necessary < COMMAND_RPC_GET_BLOCKS_FAST > ( invoke_http_mode : : BIN , " /getblocks.bin " , req , res , r ) )
return r ;
2018-04-16 19:16:00 +00:00
std : : vector < std : : pair < std : : pair < cryptonote : : blobdata , crypto : : hash > , std : : vector < std : : pair < crypto : : hash , cryptonote : : blobdata > > > > bs ;
2014-08-01 08:17:50 +00:00
2018-04-16 19:16:00 +00:00
if ( ! m_core . find_blockchain_supplement ( req . start_height , req . block_ids , bs , res . current_height , res . start_height , req . prune , ! req . no_miner_tx , COMMAND_RPC_GET_BLOCKS_FAST_MAX_COUNT ) )
2014-03-03 22:07:58 +00:00
{
res . status = " Failed " ;
2019-06-13 07:47:06 +00:00
add_host_fail ( ctx ) ;
2014-03-03 22:07:58 +00:00
return false ;
}
2017-02-27 20:26:17 +00:00
size_t pruned_size = 0 , unpruned_size = 0 , ntxes = 0 ;
2018-04-15 23:16:02 +00:00
res . blocks . reserve ( bs . size ( ) ) ;
res . output_indices . reserve ( bs . size ( ) ) ;
2017-01-15 16:05:55 +00:00
for ( auto & bd : bs )
2014-03-03 22:07:58 +00:00
{
res . blocks . resize ( res . blocks . size ( ) + 1 ) ;
2018-04-16 19:16:00 +00:00
res . blocks . back ( ) . block = bd . first . first ;
pruned_size + = bd . first . first . size ( ) ;
unpruned_size + = bd . first . first . size ( ) ;
2016-07-13 18:26:11 +00:00
res . output_indices . push_back ( COMMAND_RPC_GET_BLOCKS_FAST : : block_output_indices ( ) ) ;
2017-02-27 20:26:17 +00:00
ntxes + = bd . second . size ( ) ;
2018-12-16 13:28:49 +00:00
res . output_indices . back ( ) . indices . reserve ( 1 + bd . second . size ( ) ) ;
if ( req . no_miner_tx )
res . output_indices . back ( ) . indices . push_back ( COMMAND_RPC_GET_BLOCKS_FAST : : tx_output_indices ( ) ) ;
2018-04-15 23:16:02 +00:00
res . blocks . back ( ) . txs . reserve ( bd . second . size ( ) ) ;
2018-04-16 19:16:00 +00:00
for ( std : : vector < std : : pair < crypto : : hash , cryptonote : : blobdata > > : : iterator i = bd . second . begin ( ) ; i ! = bd . second . end ( ) ; + + i )
2014-03-03 22:07:58 +00:00
{
2018-04-16 19:16:00 +00:00
unpruned_size + = i - > second . size ( ) ;
res . blocks . back ( ) . txs . push_back ( std : : move ( i - > second ) ) ;
i - > second . clear ( ) ;
i - > second . shrink_to_fit ( ) ;
2017-02-27 20:26:17 +00:00
pruned_size + = res . blocks . back ( ) . txs . back ( ) . size ( ) ;
2018-12-16 13:28:49 +00:00
}
2017-08-25 17:59:29 +00:00
2018-12-16 13:28:49 +00:00
const size_t n_txes_to_lookup = bd . second . size ( ) + ( req . no_miner_tx ? 0 : 1 ) ;
if ( n_txes_to_lookup > 0 )
{
std : : vector < std : : vector < uint64_t > > indices ;
bool r = m_core . get_tx_outputs_gindexs ( req . no_miner_tx ? bd . second . front ( ) . first : bd . first . second , n_txes_to_lookup , indices ) ;
2016-07-13 18:26:11 +00:00
if ( ! r )
{
res . status = " Failed " ;
return false ;
}
2018-12-16 13:28:49 +00:00
if ( indices . size ( ) ! = n_txes_to_lookup | | res . output_indices . back ( ) . indices . size ( ) ! = ( req . no_miner_tx ? 1 : 0 ) )
{
res . status = " Failed " ;
return false ;
}
for ( size_t i = 0 ; i < indices . size ( ) ; + + i )
res . output_indices . back ( ) . indices . push_back ( { std : : move ( indices [ i ] ) } ) ;
2014-03-03 22:07:58 +00:00
}
}
2017-02-27 20:26:17 +00:00
MDEBUG ( " on_get_blocks: " < < bs . size ( ) < < " blocks, " < < ntxes < < " txes, pruned size " < < pruned_size < < " , unpruned size " < < unpruned_size ) ;
2014-03-03 22:07:58 +00:00
res . status = CORE_RPC_STATUS_OK ;
return true ;
2017-07-04 04:32:44 +00:00
}
2019-01-11 19:09:39 +00:00
bool core_rpc_server : : on_get_alt_blocks_hashes ( const COMMAND_RPC_GET_ALT_BLOCKS_HASHES : : request & req , COMMAND_RPC_GET_ALT_BLOCKS_HASHES : : response & res , const connection_context * ctx )
2017-07-04 04:32:44 +00:00
{
2017-10-29 21:10:46 +00:00
PERF_TIMER ( on_get_alt_blocks_hashes ) ;
2018-01-20 10:38:14 +00:00
bool r ;
if ( use_bootstrap_daemon_if_necessary < COMMAND_RPC_GET_ALT_BLOCKS_HASHES > ( invoke_http_mode : : JON , " /get_alt_blocks_hashes " , req , res , r ) )
return r ;
2018-04-15 23:16:02 +00:00
std : : vector < block > blks ;
2017-07-04 04:32:44 +00:00
if ( ! m_core . get_alternative_blocks ( blks ) )
{
res . status = " Failed " ;
return false ;
}
res . blks_hashes . reserve ( blks . size ( ) ) ;
for ( auto const & blk : blks )
{
res . blks_hashes . push_back ( epee : : string_tools : : pod_to_hex ( get_block_hash ( blk ) ) ) ;
}
MDEBUG ( " on_get_alt_blocks_hashes: " < < blks . size ( ) < < " blocks " ) ;
res . status = CORE_RPC_STATUS_OK ;
return true ;
2014-03-03 22:07:58 +00:00
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 19:09:39 +00:00
bool core_rpc_server : : on_get_blocks_by_height ( const COMMAND_RPC_GET_BLOCKS_BY_HEIGHT : : request & req , COMMAND_RPC_GET_BLOCKS_BY_HEIGHT : : response & res , const connection_context * ctx )
2016-12-25 08:18:15 +00:00
{
2017-10-29 21:10:46 +00:00
PERF_TIMER ( on_get_blocks_by_height ) ;
2018-01-20 10:38:14 +00:00
bool r ;
if ( use_bootstrap_daemon_if_necessary < COMMAND_RPC_GET_BLOCKS_BY_HEIGHT > ( invoke_http_mode : : BIN , " /getblocks_by_height.bin " , req , res , r ) )
return r ;
2016-12-25 08:18:15 +00:00
res . status = " Failed " ;
res . blocks . clear ( ) ;
res . blocks . reserve ( req . heights . size ( ) ) ;
for ( uint64_t height : req . heights )
{
block blk ;
try
{
blk = m_core . get_blockchain_storage ( ) . get_db ( ) . get_block_from_height ( height ) ;
}
catch ( . . . )
{
2017-01-26 18:11:37 +00:00
res . status = " Error retrieving block at height " + std : : to_string ( height ) ;
2016-12-25 08:18:15 +00:00
return true ;
}
2018-04-15 23:16:02 +00:00
std : : vector < transaction > txs ;
std : : vector < crypto : : hash > missed_txs ;
2016-12-25 08:18:15 +00:00
m_core . get_transactions ( blk . tx_hashes , txs , missed_txs ) ;
res . blocks . resize ( res . blocks . size ( ) + 1 ) ;
res . blocks . back ( ) . block = block_to_blob ( blk ) ;
for ( auto & tx : txs )
res . blocks . back ( ) . txs . push_back ( tx_to_blob ( tx ) ) ;
}
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 19:09:39 +00:00
bool core_rpc_server : : on_get_hashes ( const COMMAND_RPC_GET_HASHES_FAST : : request & req , COMMAND_RPC_GET_HASHES_FAST : : response & res , const connection_context * ctx )
2016-04-13 22:45:02 +00:00
{
2017-10-29 21:10:46 +00:00
PERF_TIMER ( on_get_hashes ) ;
2018-01-20 10:38:14 +00:00
bool r ;
if ( use_bootstrap_daemon_if_necessary < COMMAND_RPC_GET_HASHES_FAST > ( invoke_http_mode : : BIN , " /gethashes.bin " , req , res , r ) )
return r ;
2019-04-15 12:29:47 +00:00
res . start_height = req . start_height ;
if ( ! m_core . get_blockchain_storage ( ) . find_blockchain_supplement ( req . block_ids , res . m_block_ids , res . start_height , res . current_height , false ) )
2016-04-13 22:45:02 +00:00
{
res . status = " Failed " ;
2019-06-13 07:47:06 +00:00
add_host_fail ( ctx ) ;
2016-04-13 22:45:02 +00:00
return false ;
}
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 19:09:39 +00:00
bool core_rpc_server : : on_get_outs_bin ( const COMMAND_RPC_GET_OUTPUTS_BIN : : request & req , COMMAND_RPC_GET_OUTPUTS_BIN : : response & res , const connection_context * ctx )
2016-08-02 20:48:09 +00:00
{
2017-10-29 21:10:46 +00:00
PERF_TIMER ( on_get_outs_bin ) ;
2018-01-20 10:38:14 +00:00
bool r ;
if ( use_bootstrap_daemon_if_necessary < COMMAND_RPC_GET_OUTPUTS_BIN > ( invoke_http_mode : : BIN , " /get_outs.bin " , req , res , r ) )
return r ;
2016-08-02 20:48:09 +00:00
res . status = " Failed " ;
2019-01-11 19:09:39 +00:00
const bool restricted = m_restricted & & ctx ;
if ( restricted )
2016-08-02 20:48:09 +00:00
{
if ( req . outputs . size ( ) > MAX_RESTRICTED_GLOBAL_FAKE_OUTS_COUNT )
{
res . status = " Too many outs requested " ;
return true ;
}
}
if ( ! m_core . get_outs ( req , res ) )
{
return true ;
}
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 19:09:39 +00:00
bool core_rpc_server : : on_get_outs ( const COMMAND_RPC_GET_OUTPUTS : : request & req , COMMAND_RPC_GET_OUTPUTS : : response & res , const connection_context * ctx )
2016-11-22 20:00:40 +00:00
{
2017-10-29 21:10:46 +00:00
PERF_TIMER ( on_get_outs ) ;
2018-01-20 10:38:14 +00:00
bool r ;
if ( use_bootstrap_daemon_if_necessary < COMMAND_RPC_GET_OUTPUTS > ( invoke_http_mode : : JON , " /get_outs " , req , res , r ) )
return r ;
2016-11-22 20:00:40 +00:00
res . status = " Failed " ;
2019-01-11 19:09:39 +00:00
const bool restricted = m_restricted & & ctx ;
if ( restricted )
2016-11-22 20:00:40 +00:00
{
if ( req . outputs . size ( ) > MAX_RESTRICTED_GLOBAL_FAKE_OUTS_COUNT )
{
res . status = " Too many outs requested " ;
return true ;
}
}
cryptonote : : COMMAND_RPC_GET_OUTPUTS_BIN : : request req_bin ;
req_bin . outputs = req . outputs ;
2019-03-23 16:20:08 +00:00
req_bin . get_txid = req . get_txid ;
2016-11-22 20:00:40 +00:00
cryptonote : : COMMAND_RPC_GET_OUTPUTS_BIN : : response res_bin ;
if ( ! m_core . get_outs ( req_bin , res_bin ) )
{
return true ;
}
// convert to text
for ( const auto & i : res_bin . outs )
{
res . outs . push_back ( cryptonote : : COMMAND_RPC_GET_OUTPUTS : : outkey ( ) ) ;
cryptonote : : COMMAND_RPC_GET_OUTPUTS : : outkey & outkey = res . outs . back ( ) ;
outkey . key = epee : : string_tools : : pod_to_hex ( i . key ) ;
outkey . mask = epee : : string_tools : : pod_to_hex ( i . mask ) ;
outkey . unlocked = i . unlocked ;
2016-12-23 12:04:54 +00:00
outkey . height = i . height ;
outkey . txid = epee : : string_tools : : pod_to_hex ( i . txid ) ;
2016-11-22 20:00:40 +00:00
}
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 19:09:39 +00:00
bool core_rpc_server : : on_get_indexes ( const COMMAND_RPC_GET_TX_GLOBAL_OUTPUTS_INDEXES : : request & req , COMMAND_RPC_GET_TX_GLOBAL_OUTPUTS_INDEXES : : response & res , const connection_context * ctx )
2014-03-03 22:07:58 +00:00
{
2017-10-29 21:10:46 +00:00
PERF_TIMER ( on_get_indexes ) ;
2018-01-20 10:38:14 +00:00
bool ok ;
if ( use_bootstrap_daemon_if_necessary < COMMAND_RPC_GET_TX_GLOBAL_OUTPUTS_INDEXES > ( invoke_http_mode : : BIN , " /get_o_indexes.bin " , req , res , ok ) )
return ok ;
2014-03-03 22:07:58 +00:00
bool r = m_core . get_tx_outputs_gindexs ( req . txid , res . o_indexes ) ;
if ( ! r )
{
res . status = " Failed " ;
return true ;
}
res . status = CORE_RPC_STATUS_OK ;
LOG_PRINT_L2 ( " COMMAND_RPC_GET_TX_GLOBAL_OUTPUTS_INDEXES: [ " < < res . o_indexes . size ( ) < < " ] " ) ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
  //------------------------------------------------------------------------------------------------------------------------------
  // Fetch transactions by hash, looking first on chain and then in the tx pool.
  // For each found tx the response entry carries either the combined blob (as_hex)
  // or the split pruned/prunable form, optionally a JSON decoding, pool/chain
  // location info, and (for on-chain txes) the global output indices.
  // Always returns true; errors are reported via res.status.
  bool core_rpc_server::on_get_transactions(const COMMAND_RPC_GET_TRANSACTIONS::request& req, COMMAND_RPC_GET_TRANSACTIONS::response& res, const connection_context *ctx)
  {
    PERF_TIMER(on_get_transactions);
    bool ok;
    if (use_bootstrap_daemon_if_necessary<COMMAND_RPC_GET_TRANSACTIONS>(invoke_http_mode::JON, "/gettransactions", req, res, ok))
      return ok;

    // Parse every requested hex hash into a binary crypto::hash, rejecting
    // malformed or wrongly-sized input up front.
    std::vector<crypto::hash> vh;
    for (const auto &tx_hex_str: req.txs_hashes)
    {
      blobdata b;
      if(!string_tools::parse_hexstr_to_binbuff(tx_hex_str, b))
      {
        res.status = "Failed to parse hex representation of transaction hash";
        return true;
      }
      if(b.size() != sizeof(crypto::hash))
      {
        res.status = "Failed, size of data mismatch";
        return true;
      }
      vh.push_back(*reinterpret_cast<const crypto::hash*>(b.data()));
    }

    std::vector<crypto::hash> missed_txs;
    // Each tuple is (tx hash, pruned blob, prunable hash, prunable blob).
    std::vector<std::tuple<crypto::hash, cryptonote::blobdata, crypto::hash, cryptonote::blobdata>> txs;
    bool r = m_core.get_split_transactions_blobs(vh, txs, missed_txs);
    if(!r)
    {
      res.status = "Failed";
      return true;
    }
    LOG_PRINT_L2("Found " << txs.size() << "/" << vh.size() << " transactions on the blockchain");

    // try the pool for any missing txes
    size_t found_in_pool = 0;
    std::unordered_set<crypto::hash> pool_tx_hashes;
    std::unordered_map<crypto::hash, tx_info> per_tx_pool_tx_info;
    if (!missed_txs.empty())
    {
      std::vector<tx_info> pool_tx_info;
      std::vector<spent_key_image_info> pool_key_image_info;
      bool r = m_core.get_pool_transactions_and_spent_keys_info(pool_tx_info, pool_key_image_info);
      if(r)
      {
        // sort to match original request
        std::vector<std::tuple<crypto::hash, cryptonote::blobdata, crypto::hash, cryptonote::blobdata>> sorted_txs;
        std::vector<tx_info>::const_iterator i;
        unsigned txs_processed = 0;
        for (const crypto::hash &h: vh)
        {
          if (std::find(missed_txs.begin(), missed_txs.end(), h) == missed_txs.end())
          {
            // Not missed, so it must come from the on-chain result set; guard
            // against running past its end.
            if (txs.size() == txs_processed)
            {
              res.status = "Failed: internal error - txs is empty";
              return true;
            }
            // core returns the ones it finds in the right order
            if (std::get<0>(txs[txs_processed]) != h)
            {
              res.status = "Failed: tx hash mismatch";
              return true;
            }
            sorted_txs.push_back(std::move(txs[txs_processed]));
            ++txs_processed;
          }
          else if ((i = std::find_if(pool_tx_info.begin(), pool_tx_info.end(), [h](const tx_info &txi) { return epee::string_tools::pod_to_hex(h) == txi.id_hash; })) != pool_tx_info.end())
          {
            // Found in the pool: rebuild the same split (pruned/prunable)
            // representation the blockchain query produces.
            cryptonote::transaction tx;
            if (!cryptonote::parse_and_validate_tx_from_blob(i->tx_blob, tx))
            {
              res.status = "Failed to parse and validate tx from blob";
              return true;
            }
            // Serialize only the tx base to get the pruned part of the blob.
            std::stringstream ss;
            binary_archive<true> ba(ss);
            bool r = const_cast<cryptonote::transaction&>(tx).serialize_base(ba);
            if (!r)
            {
              res.status = "Failed to serialize transaction base";
              return true;
            }
            const cryptonote::blobdata pruned = ss.str();
            // v1 txes have no prunable hash component.
            const crypto::hash prunable_hash = tx.version == 1 ? crypto::null_hash : get_transaction_prunable_hash(tx);
            // std::string(blob, pos) takes the suffix after the pruned part, i.e. the prunable blob.
            sorted_txs.push_back(std::make_tuple(h, pruned, prunable_hash, std::string(i->tx_blob, pruned.size())));
            missed_txs.erase(std::find(missed_txs.begin(), missed_txs.end(), h));
            pool_tx_hashes.insert(h);
            // Remember this tx's pool metadata for the response-filling loop below.
            const std::string hash_string = epee::string_tools::pod_to_hex(h);
            for (const auto &ti: pool_tx_info)
            {
              if (ti.id_hash == hash_string)
              {
                per_tx_pool_tx_info.insert(std::make_pair(h, ti));
                break;
              }
            }
            ++found_in_pool;
          }
        }
        txs = sorted_txs;
      }
      LOG_PRINT_L2("Found " << found_in_pool << "/" << vh.size() << " transactions in the pool");
    }

    // Walk the found txes in request order; txhi/vhi advance in lock-step with
    // txs, which was re-sorted above to match the original request order.
    std::vector<std::string>::const_iterator txhi = req.txs_hashes.begin();
    std::vector<crypto::hash>::const_iterator vhi = vh.begin();
    for(auto& tx: txs)
    {
      res.txs.push_back(COMMAND_RPC_GET_TRANSACTIONS::entry());
      COMMAND_RPC_GET_TRANSACTIONS::entry &e = res.txs.back();

      crypto::hash tx_hash = *vhi++;
      e.tx_hash = *txhi++;
      e.prunable_hash = epee::string_tools::pod_to_hex(std::get<2>(tx));
      if (req.split || req.prune || std::get<3>(tx).empty())
      {
        // use splitted form with pruned and prunable (filled only when prune=false and the daemon has it), leaving as_hex as empty
        e.pruned_as_hex = string_tools::buff_to_hex_nodelimer(std::get<1>(tx));
        if (!req.prune)
          e.prunable_as_hex = string_tools::buff_to_hex_nodelimer(std::get<3>(tx));
        if (req.decode_as_json)
        {
          cryptonote::blobdata tx_data;
          cryptonote::transaction t;
          if (req.prune || std::get<3>(tx).empty())
          {
            // decode pruned tx to JSON
            tx_data = std::get<1>(tx);
            if (cryptonote::parse_and_validate_tx_base_from_blob(tx_data, t))
            {
              pruned_transaction pruned_tx{t};
              e.as_json = obj_to_json_str(pruned_tx);
            }
            else
            {
              res.status = "Failed to parse and validate pruned tx from blob";
              return true;
            }
          }
          else
          {
            // decode full tx to JSON
            tx_data = std::get<1>(tx) + std::get<3>(tx);
            if (cryptonote::parse_and_validate_tx_from_blob(tx_data, t))
            {
              e.as_json = obj_to_json_str(t);
            }
            else
            {
              res.status = "Failed to parse and validate tx from blob";
              return true;
            }
          }
        }
      }
      else
      {
        // use non-splitted form, leaving pruned_as_hex and prunable_as_hex as empty
        cryptonote::blobdata tx_data = std::get<1>(tx) + std::get<3>(tx);
        e.as_hex = string_tools::buff_to_hex_nodelimer(tx_data);
        if (req.decode_as_json)
        {
          cryptonote::transaction t;
          if (cryptonote::parse_and_validate_tx_from_blob(tx_data, t))
          {
            e.as_json = obj_to_json_str(t);
          }
          else
          {
            res.status = "Failed to parse and validate tx from blob";
            return true;
          }
        }
      }
      e.in_pool = pool_tx_hashes.find(tx_hash) != pool_tx_hashes.end();
      if (e.in_pool)
      {
        // Pool txes have no block yet; use max() as the "not mined" sentinel.
        e.block_height = e.block_timestamp = std::numeric_limits<uint64_t>::max();
        auto it = per_tx_pool_tx_info.find(tx_hash);
        if (it != per_tx_pool_tx_info.end())
        {
          e.double_spend_seen = it->second.double_spend_seen;
          e.relayed = it->second.relayed;
        }
        else
        {
          // Should not happen: every pool hit above recorded its info.
          MERROR("Failed to determine pool info for " << tx_hash);
          e.double_spend_seen = false;
          e.relayed = false;
        }
      }
      else
      {
        e.block_height = m_core.get_blockchain_storage().get_db().get_tx_block_height(tx_hash);
        e.block_timestamp = m_core.get_blockchain_storage().get_db().get_block_timestamp(e.block_height);
        e.double_spend_seen = false;
        e.relayed = false;
      }

      // fill up old style responses too, in case an old wallet asks
      res.txs_as_hex.push_back(e.as_hex);
      if (req.decode_as_json)
        res.txs_as_json.push_back(e.as_json);

      // output indices too if not in pool
      if (pool_tx_hashes.find(tx_hash) == pool_tx_hashes.end())
      {
        bool r = m_core.get_tx_outputs_gindexs(tx_hash, e.output_indices);
        if (!r)
        {
          res.status = "Failed";
          // NOTE(review): this is the only error path here returning false
          // (connection error) rather than true — confirm that is intended.
          return false;
        }
      }
    }

    // Anything still in missed_txs was found neither on chain nor in the pool.
    for(const auto& miss_tx: missed_txs)
    {
      res.missed_tx.push_back(string_tools::pod_to_hex(miss_tx));
    }

    LOG_PRINT_L2(res.txs.size() << " transactions found, " << res.missed_tx.size() << " not found");
    res.status = CORE_RPC_STATUS_OK;
    return true;
  }
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 19:09:39 +00:00
bool core_rpc_server : : on_is_key_image_spent ( const COMMAND_RPC_IS_KEY_IMAGE_SPENT : : request & req , COMMAND_RPC_IS_KEY_IMAGE_SPENT : : response & res , const connection_context * ctx )
2015-08-11 09:49:15 +00:00
{
2017-10-29 21:10:46 +00:00
PERF_TIMER ( on_is_key_image_spent ) ;
2018-01-20 10:38:14 +00:00
bool ok ;
if ( use_bootstrap_daemon_if_necessary < COMMAND_RPC_IS_KEY_IMAGE_SPENT > ( invoke_http_mode : : JON , " /is_key_image_spent " , req , res , ok ) )
return ok ;
2019-01-11 19:09:39 +00:00
const bool restricted = m_restricted & & ctx ;
const bool request_has_rpc_origin = ctx ! = NULL ;
2015-08-11 09:49:15 +00:00
std : : vector < crypto : : key_image > key_images ;
2017-01-22 20:38:10 +00:00
for ( const auto & ki_hex_str : req . key_images )
2015-08-11 09:49:15 +00:00
{
blobdata b ;
if ( ! string_tools : : parse_hexstr_to_binbuff ( ki_hex_str , b ) )
{
res . status = " Failed to parse hex representation of key image " ;
return true ;
}
if ( b . size ( ) ! = sizeof ( crypto : : key_image ) )
{
res . status = " Failed, size of data mismatch " ;
}
key_images . push_back ( * reinterpret_cast < const crypto : : key_image * > ( b . data ( ) ) ) ;
}
2015-08-13 15:33:28 +00:00
std : : vector < bool > spent_status ;
bool r = m_core . are_key_images_spent ( key_images , spent_status ) ;
2015-08-11 09:49:15 +00:00
if ( ! r )
{
res . status = " Failed " ;
return true ;
}
2015-08-13 15:33:28 +00:00
res . spent_status . clear ( ) ;
for ( size_t n = 0 ; n < spent_status . size ( ) ; + + n )
2016-01-05 21:57:43 +00:00
res . spent_status . push_back ( spent_status [ n ] ? COMMAND_RPC_IS_KEY_IMAGE_SPENT : : SPENT_IN_BLOCKCHAIN : COMMAND_RPC_IS_KEY_IMAGE_SPENT : : UNSPENT ) ;
// check the pool too
std : : vector < cryptonote : : tx_info > txs ;
std : : vector < cryptonote : : spent_key_image_info > ki ;
2019-01-11 19:09:39 +00:00
r = m_core . get_pool_transactions_and_spent_keys_info ( txs , ki , ! request_has_rpc_origin | | ! restricted ) ;
2016-01-05 21:57:43 +00:00
if ( ! r )
{
res . status = " Failed " ;
return true ;
}
for ( std : : vector < cryptonote : : spent_key_image_info > : : const_iterator i = ki . begin ( ) ; i ! = ki . end ( ) ; + + i )
{
crypto : : hash hash ;
crypto : : key_image spent_key_image ;
if ( parse_hash256 ( i - > id_hash , hash ) )
{
memcpy ( & spent_key_image , & hash , sizeof ( hash ) ) ; // a bit dodgy, should be other parse functions somewhere
for ( size_t n = 0 ; n < res . spent_status . size ( ) ; + + n )
{
if ( res . spent_status [ n ] = = COMMAND_RPC_IS_KEY_IMAGE_SPENT : : UNSPENT )
{
if ( key_images [ n ] = = spent_key_image )
{
res . spent_status [ n ] = COMMAND_RPC_IS_KEY_IMAGE_SPENT : : SPENT_IN_POOL ;
break ;
}
}
}
}
}
2015-08-11 09:49:15 +00:00
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
  //------------------------------------------------------------------------------------------------------------------------------
  // Accept a hex-encoded raw transaction, verify it, add it to the pool, and
  // relay it to peers unless relaying is suppressed.
  // Always returns true; success/failure is reported via res.status/res.reason
  // plus per-cause boolean flags in the response.
  bool core_rpc_server::on_send_raw_tx(const COMMAND_RPC_SEND_RAW_TX::request& req, COMMAND_RPC_SEND_RAW_TX::response& res, const connection_context *ctx)
  {
    PERF_TIMER(on_send_raw_tx);
    bool ok;
    if (use_bootstrap_daemon_if_necessary<COMMAND_RPC_SEND_RAW_TX>(invoke_http_mode::JON, "/sendrawtransaction", req, res, ok))
      return ok;
    CHECK_CORE_READY();

    // Decode the hex payload into the raw transaction blob.
    std::string tx_blob;
    if(!string_tools::parse_hexstr_to_binbuff(req.tx_as_hex, tx_blob))
    {
      LOG_PRINT_L0("[on_send_raw_tx]: Failed to parse tx from hexbuff: " << req.tx_as_hex);
      res.status = "Failed";
      return true;
    }

    // Optional cheap pre-checks against the current blockchain state, done
    // before the (more expensive) full verification below.
    if (req.do_sanity_checks && !cryptonote::tx_sanity_check(m_core.get_blockchain_storage(), tx_blob))
    {
      res.status = "Failed";
      res.reason = "Sanity check failed";
      res.sanity_check_failed = true;
      return true;
    }
    res.sanity_check_failed = false;

    cryptonote_connection_context fake_context = AUTO_VAL_INIT(fake_context);
    tx_verification_context tvc = AUTO_VAL_INIT(tvc);
    if(!m_core.handle_incoming_tx(tx_blob, tvc, false, false, req.do_not_relay) || tvc.m_verifivation_failed)
    {
      res.status = "Failed";
      // Collect every failure cause into the reason string while also setting
      // the corresponding per-cause response flag (note: the assignments
      // inside the conditions are intentional).
      std::string reason = "";
      if ((res.low_mixin = tvc.m_low_mixin))
        add_reason(reason, "bad ring size");
      if ((res.double_spend = tvc.m_double_spend))
        add_reason(reason, "double spend");
      if ((res.invalid_input = tvc.m_invalid_input))
        add_reason(reason, "invalid input");
      if ((res.invalid_output = tvc.m_invalid_output))
        add_reason(reason, "invalid output");
      if ((res.too_big = tvc.m_too_big))
        add_reason(reason, "too big");
      if ((res.overspend = tvc.m_overspend))
        add_reason(reason, "overspend");
      if ((res.fee_too_low = tvc.m_fee_too_low))
        add_reason(reason, "fee too low");
      if ((res.not_rct = tvc.m_not_rct))
        add_reason(reason, "tx is not ringct");
      if ((res.too_few_outputs = tvc.m_too_few_outputs))
        add_reason(reason, "too few outputs");
      const std::string punctuation = reason.empty() ? "" : ": ";
      if (tvc.m_verifivation_failed)
      {
        LOG_PRINT_L0("[on_send_raw_tx]: tx verification failed" << punctuation << reason);
      }
      else
      {
        LOG_PRINT_L0("[on_send_raw_tx]: Failed to process tx" << punctuation << reason);
      }
      return true;
    }

    // Accepted but flagged as not-to-relay (e.g. req.do_not_relay): report OK
    // with the not_relayed flag set.
    if(!tvc.m_should_be_relayed)
    {
      LOG_PRINT_L0("[on_send_raw_tx]: tx accepted, but not relayed");
      res.reason = "Not relayed";
      res.not_relayed = true;
      res.status = CORE_RPC_STATUS_OK;
      return true;
    }

    // Relay to peers.
    NOTIFY_NEW_TRANSACTIONS::request r;
    r.txs.push_back(tx_blob);
    m_core.get_protocol()->relay_transactions(r, fake_context);
    //TODO: make sure that tx has reached other nodes here, probably wait to receive reflections from other nodes
    res.status = CORE_RPC_STATUS_OK;
    return true;
  }
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 19:09:39 +00:00
bool core_rpc_server : : on_start_mining ( const COMMAND_RPC_START_MINING : : request & req , COMMAND_RPC_START_MINING : : response & res , const connection_context * ctx )
2014-03-03 22:07:58 +00:00
{
2017-10-29 21:10:46 +00:00
PERF_TIMER ( on_start_mining ) ;
2014-03-20 11:46:11 +00:00
CHECK_CORE_READY ( ) ;
2017-02-19 02:42:10 +00:00
cryptonote : : address_parse_info info ;
2018-11-16 04:32:05 +00:00
if ( ! get_account_address_from_str ( info , nettype ( ) , req . miner_address ) )
2014-03-03 22:07:58 +00:00
{
res . status = " Failed, wrong address " ;
2015-05-28 22:13:32 +00:00
LOG_PRINT_L0 ( res . status ) ;
2014-03-03 22:07:58 +00:00
return true ;
}
2017-02-19 02:42:10 +00:00
if ( info . is_subaddress )
{
res . status = " Mining to subaddress isn't supported yet " ;
LOG_PRINT_L0 ( res . status ) ;
return true ;
}
2014-03-03 22:07:58 +00:00
2017-01-26 19:31:56 +00:00
unsigned int concurrency_count = boost : : thread : : hardware_concurrency ( ) * 4 ;
// if we couldn't detect threads, set it to a ridiculously high number
if ( concurrency_count = = 0 )
{
concurrency_count = 257 ;
}
// if there are more threads requested than the hardware supports
// then we fail and log that.
if ( req . threads_count > concurrency_count )
{
res . status = " Failed, too many threads relative to CPU cores. " ;
LOG_PRINT_L0 ( res . status ) ;
return true ;
}
2018-09-09 15:26:50 +00:00
cryptonote : : miner & miner = m_core . get_miner ( ) ;
if ( miner . is_mining ( ) )
{
res . status = " Already mining " ;
return true ;
}
2019-06-09 08:51:18 +00:00
if ( ! miner . start ( info . address , static_cast < size_t > ( req . threads_count ) , req . do_background_mining , req . ignore_battery ) )
2014-03-03 22:07:58 +00:00
{
res . status = " Failed, mining not started " ;
2015-05-28 22:13:32 +00:00
LOG_PRINT_L0 ( res . status ) ;
2014-03-03 22:07:58 +00:00
return true ;
}
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 19:09:39 +00:00
bool core_rpc_server : : on_stop_mining ( const COMMAND_RPC_STOP_MINING : : request & req , COMMAND_RPC_STOP_MINING : : response & res , const connection_context * ctx )
2014-03-03 22:07:58 +00:00
{
2017-10-29 21:10:46 +00:00
PERF_TIMER ( on_stop_mining ) ;
2018-10-28 13:50:33 +00:00
cryptonote : : miner & miner = m_core . get_miner ( ) ;
if ( ! miner . is_mining ( ) )
{
res . status = " Mining never started " ;
LOG_PRINT_L0 ( res . status ) ;
return true ;
}
if ( ! miner . stop ( ) )
2014-03-03 22:07:58 +00:00
{
res . status = " Failed, mining not stopped " ;
2015-05-28 22:13:32 +00:00
LOG_PRINT_L0 ( res . status ) ;
2014-03-03 22:07:58 +00:00
return true ;
}
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 19:09:39 +00:00
bool core_rpc_server : : on_mining_status ( const COMMAND_RPC_MINING_STATUS : : request & req , COMMAND_RPC_MINING_STATUS : : response & res , const connection_context * ctx )
2014-05-25 19:36:12 +00:00
{
2017-10-29 21:10:46 +00:00
PERF_TIMER ( on_mining_status ) ;
2014-05-25 19:36:12 +00:00
const miner & lMiner = m_core . get_miner ( ) ;
res . active = lMiner . is_mining ( ) ;
2017-02-08 21:17:50 +00:00
res . is_background_mining_enabled = lMiner . get_is_background_mining_enabled ( ) ;
2019-03-30 19:21:30 +00:00
store_difficulty ( m_core . get_blockchain_storage ( ) . get_difficulty_for_next_block ( ) , res . difficulty , res . wide_difficulty , res . difficulty_top64 ) ;
2016-10-10 20:41:24 +00:00
2019-02-22 20:17:45 +00:00
res . block_target = m_core . get_blockchain_storage ( ) . get_current_hard_fork_version ( ) < 2 ? DIFFICULTY_TARGET_V1 : DIFFICULTY_TARGET_V2 ;
2014-05-25 19:36:12 +00:00
if ( lMiner . is_mining ( ) ) {
res . speed = lMiner . get_speed ( ) ;
res . threads_count = lMiner . get_threads_count ( ) ;
2019-02-22 20:17:45 +00:00
res . block_reward = lMiner . get_block_reward ( ) ;
}
const account_public_address & lMiningAdr = lMiner . get_mining_address ( ) ;
2019-05-27 14:14:11 +00:00
if ( lMiner . is_mining ( ) | | lMiner . get_is_background_mining_enabled ( ) )
res . address = get_account_address_as_str ( nettype ( ) , false , lMiningAdr ) ;
2019-02-22 20:17:45 +00:00
const uint8_t major_version = m_core . get_blockchain_storage ( ) . get_current_hard_fork_version ( ) ;
const unsigned variant = major_version > = 7 ? major_version - 6 : 0 ;
switch ( variant )
{
case 0 : res . pow_algorithm = " Cryptonight " ; break ;
case 1 : res . pow_algorithm = " CNv1 (Cryptonight variant 1) " ; break ;
case 2 : case 3 : res . pow_algorithm = " CNv2 (Cryptonight variant 2) " ; break ;
case 4 : case 5 : res . pow_algorithm = " CNv4 (Cryptonight variant 4) " ; break ;
default : res . pow_algorithm = " I'm not sure actually " ; break ;
}
if ( res . is_background_mining_enabled )
{
res . bg_idle_threshold = lMiner . get_idle_threshold ( ) ;
res . bg_min_idle_seconds = lMiner . get_min_idle_seconds ( ) ;
res . bg_ignore_battery = lMiner . get_ignore_battery ( ) ;
res . bg_target = lMiner . get_mining_target ( ) ;
2014-05-25 19:36:12 +00:00
}
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 19:09:39 +00:00
bool core_rpc_server : : on_save_bc ( const COMMAND_RPC_SAVE_BC : : request & req , COMMAND_RPC_SAVE_BC : : response & res , const connection_context * ctx )
2014-05-15 22:21:43 +00:00
{
2017-10-29 21:10:46 +00:00
PERF_TIMER ( on_save_bc ) ;
2014-05-15 22:21:43 +00:00
if ( ! m_core . get_blockchain_storage ( ) . store_blockchain ( ) )
{
2018-03-01 11:36:19 +00:00
res . status = " Error while storing blockchain " ;
2014-05-15 22:21:43 +00:00
return true ;
}
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 19:09:39 +00:00
bool core_rpc_server : : on_get_peer_list ( const COMMAND_RPC_GET_PEER_LIST : : request & req , COMMAND_RPC_GET_PEER_LIST : : response & res , const connection_context * ctx )
2015-02-05 09:11:20 +00:00
{
2017-10-29 21:10:46 +00:00
PERF_TIMER ( on_get_peer_list ) ;
2018-12-05 22:25:27 +00:00
std : : vector < nodetool : : peerlist_entry > white_list ;
std : : vector < nodetool : : peerlist_entry > gray_list ;
2015-05-28 13:07:31 +00:00
2019-06-26 09:14:23 +00:00
if ( req . public_only )
{
m_p2p . get_public_peerlist ( gray_list , white_list ) ;
}
else
{
m_p2p . get_peerlist ( gray_list , white_list ) ;
}
2015-02-05 09:11:20 +00:00
for ( auto & entry : white_list )
{
2018-12-16 17:57:44 +00:00
if ( entry . adr . get_type_id ( ) = = epee : : net_utils : : ipv4_network_address : : get_type_id ( ) )
2017-05-27 10:35:54 +00:00
res . white_list . emplace_back ( entry . id , entry . adr . as < epee : : net_utils : : ipv4_network_address > ( ) . ip ( ) ,
2019-02-24 08:47:49 +00:00
entry . adr . as < epee : : net_utils : : ipv4_network_address > ( ) . port ( ) , entry . last_seen , entry . pruning_seed , entry . rpc_port ) ;
2019-04-10 22:34:30 +00:00
else if ( entry . adr . get_type_id ( ) = = epee : : net_utils : : ipv6_network_address : : get_type_id ( ) )
res . white_list . emplace_back ( entry . id , entry . adr . as < epee : : net_utils : : ipv6_network_address > ( ) . host_str ( ) ,
entry . adr . as < epee : : net_utils : : ipv6_network_address > ( ) . port ( ) , entry . last_seen , entry . pruning_seed , entry . rpc_port ) ;
2017-05-27 10:35:54 +00:00
else
2019-02-24 08:47:49 +00:00
res . white_list . emplace_back ( entry . id , entry . adr . str ( ) , entry . last_seen , entry . pruning_seed , entry . rpc_port ) ;
2015-02-05 09:11:20 +00:00
}
for ( auto & entry : gray_list )
{
2018-12-16 17:57:44 +00:00
if ( entry . adr . get_type_id ( ) = = epee : : net_utils : : ipv4_network_address : : get_type_id ( ) )
2017-05-27 10:35:54 +00:00
res . gray_list . emplace_back ( entry . id , entry . adr . as < epee : : net_utils : : ipv4_network_address > ( ) . ip ( ) ,
2019-02-24 08:47:49 +00:00
entry . adr . as < epee : : net_utils : : ipv4_network_address > ( ) . port ( ) , entry . last_seen , entry . pruning_seed , entry . rpc_port ) ;
2019-04-10 22:34:30 +00:00
else if ( entry . adr . get_type_id ( ) = = epee : : net_utils : : ipv6_network_address : : get_type_id ( ) )
2019-06-26 09:14:23 +00:00
res . gray_list . emplace_back ( entry . id , entry . adr . as < epee : : net_utils : : ipv6_network_address > ( ) . host_str ( ) ,
2019-04-10 22:34:30 +00:00
entry . adr . as < epee : : net_utils : : ipv6_network_address > ( ) . port ( ) , entry . last_seen , entry . pruning_seed , entry . rpc_port ) ;
2017-05-27 10:35:54 +00:00
else
2019-02-24 08:47:49 +00:00
res . gray_list . emplace_back ( entry . id , entry . adr . str ( ) , entry . last_seen , entry . pruning_seed , entry . rpc_port ) ;
2015-02-05 09:11:20 +00:00
}
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-07-29 10:51:15 +00:00
bool core_rpc_server : : on_get_public_nodes ( const COMMAND_RPC_GET_PUBLIC_NODES : : request & req , COMMAND_RPC_GET_PUBLIC_NODES : : response & res , const connection_context * ctx )
{
PERF_TIMER ( on_get_public_nodes ) ;
COMMAND_RPC_GET_PEER_LIST : : response peer_list_res ;
const bool success = on_get_peer_list ( COMMAND_RPC_GET_PEER_LIST : : request ( ) , peer_list_res , ctx ) ;
res . status = peer_list_res . status ;
if ( ! success )
{
return false ;
}
if ( res . status ! = CORE_RPC_STATUS_OK )
{
return true ;
}
const auto collect = [ ] ( const std : : vector < peer > & peer_list , std : : vector < public_node > & public_nodes )
{
for ( const auto & entry : peer_list )
{
if ( entry . rpc_port ! = 0 )
{
public_nodes . emplace_back ( entry ) ;
}
}
} ;
if ( req . white )
{
collect ( peer_list_res . white_list , res . white ) ;
}
if ( req . gray )
{
collect ( peer_list_res . gray_list , res . gray ) ;
}
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 19:09:39 +00:00
bool core_rpc_server : : on_set_log_hash_rate ( const COMMAND_RPC_SET_LOG_HASH_RATE : : request & req , COMMAND_RPC_SET_LOG_HASH_RATE : : response & res , const connection_context * ctx )
2015-02-05 09:11:20 +00:00
{
2017-10-29 21:10:46 +00:00
PERF_TIMER ( on_set_log_hash_rate ) ;
2015-02-05 09:11:20 +00:00
if ( m_core . get_miner ( ) . is_mining ( ) )
{
m_core . get_miner ( ) . do_print_hashrate ( req . visible ) ;
res . status = CORE_RPC_STATUS_OK ;
}
else
{
res . status = CORE_RPC_STATUS_NOT_MINING ;
}
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 19:09:39 +00:00
bool core_rpc_server : : on_set_log_level ( const COMMAND_RPC_SET_LOG_LEVEL : : request & req , COMMAND_RPC_SET_LOG_LEVEL : : response & res , const connection_context * ctx )
2015-02-05 09:11:20 +00:00
{
2017-10-29 21:10:46 +00:00
PERF_TIMER ( on_set_log_level ) ;
Change logging to easylogging++
This replaces the epee and data_loggers logging systems with
a single one, and also adds filename:line and explicit severity
levels. Categories may be defined, and logging severity set
by category (or set of categories). epee style 0-4 log level
maps to a sensible severity configuration. Log files now also
rotate when reaching 100 MB.
To select which logs to output, use the MONERO_LOGS environment
variable, with a comma separated list of categories (globs are
supported), with their requested severity level after a colon.
If a log matches more than one such setting, the last one in
the configuration string applies. A few examples:
This one is (mostly) silent, only outputting fatal errors:
MONERO_LOGS=*:FATAL
This one is very verbose:
MONERO_LOGS=*:TRACE
This one is totally silent (logwise):
MONERO_LOGS=""
This one outputs all errors and warnings, except for the
"verify" category, which prints just fatal errors (the verify
category is used for logs about incoming transactions and
blocks, and it is expected that some/many will fail to verify,
hence we don't want the spam):
MONERO_LOGS=*:WARNING,verify:FATAL
Log levels are, in decreasing order of priority:
FATAL, ERROR, WARNING, INFO, DEBUG, TRACE
Subcategories may be added using prefixes and globs. This
example will output net.p2p logs at the TRACE level, but all
other net* logs only at INFO:
MONERO_LOGS=*:ERROR,net*:INFO,net.p2p:TRACE
Logs which are intended for the user (which Monero was using
a lot through epee, but really isn't a nice way to go things)
should use the "global" category. There are a few helper macros
for using this category, eg: MGINFO("this shows up by default")
or MGINFO_RED("this is red"), to try to keep a similar look
and feel for now.
Existing epee log macros still exist, and map to the new log
levels, but since they're used as a "user facing" UI element
as much as a logging system, they often don't map well to log
severities (ie, a log level 0 log may be an error, or may be
something we want the user to see, such as an important info).
In those cases, I tried to use the new macros. In other cases,
I left the existing macros in. When modifying logs, it is
probably best to switch to the new macros with explicit levels.
The --log-level options and set_log commands now also accept
category settings, in addition to the epee style log levels.
2017-01-01 16:34:23 +00:00
if ( req . level < 0 | | req . level > 4 )
2015-02-05 09:11:20 +00:00
{
res . status = " Error: log level not valid " ;
Change logging to easylogging++
This replaces the epee and data_loggers logging systems with
a single one, and also adds filename:line and explicit severity
levels. Categories may be defined, and logging severity set
by category (or set of categories). epee style 0-4 log level
maps to a sensible severity configuration. Log files now also
rotate when reaching 100 MB.
To select which logs to output, use the MONERO_LOGS environment
variable, with a comma separated list of categories (globs are
supported), with their requested severity level after a colon.
If a log matches more than one such setting, the last one in
the configuration string applies. A few examples:
This one is (mostly) silent, only outputting fatal errors:
MONERO_LOGS=*:FATAL
This one is very verbose:
MONERO_LOGS=*:TRACE
This one is totally silent (logwise):
MONERO_LOGS=""
This one outputs all errors and warnings, except for the
"verify" category, which prints just fatal errors (the verify
category is used for logs about incoming transactions and
blocks, and it is expected that some/many will fail to verify,
hence we don't want the spam):
MONERO_LOGS=*:WARNING,verify:FATAL
Log levels are, in decreasing order of priority:
FATAL, ERROR, WARNING, INFO, DEBUG, TRACE
Subcategories may be added using prefixes and globs. This
example will output net.p2p logs at the TRACE level, but all
other net* logs only at INFO:
MONERO_LOGS=*:ERROR,net*:INFO,net.p2p:TRACE
Logs which are intended for the user (which Monero was using
a lot through epee, but really isn't a nice way to go things)
should use the "global" category. There are a few helper macros
for using this category, eg: MGINFO("this shows up by default")
or MGINFO_RED("this is red"), to try to keep a similar look
and feel for now.
Existing epee log macros still exist, and map to the new log
levels, but since they're used as a "user facing" UI element
as much as a logging system, they often don't map well to log
severities (ie, a log level 0 log may be an error, or may be
something we want the user to see, such as an important info).
In those cases, I tried to use the new macros. In other cases,
I left the existing macros in. When modifying logs, it is
probably best to switch to the new macros with explicit levels.
The --log-level options and set_log commands now also accept
category settings, in addition to the epee style log levels.
2017-01-01 16:34:23 +00:00
return true ;
2015-02-05 09:11:20 +00:00
}
Change logging to easylogging++
This replaces the epee and data_loggers logging systems with
a single one, and also adds filename:line and explicit severity
levels. Categories may be defined, and logging severity set
by category (or set of categories). epee style 0-4 log level
maps to a sensible severity configuration. Log files now also
rotate when reaching 100 MB.
To select which logs to output, use the MONERO_LOGS environment
variable, with a comma separated list of categories (globs are
supported), with their requested severity level after a colon.
If a log matches more than one such setting, the last one in
the configuration string applies. A few examples:
This one is (mostly) silent, only outputting fatal errors:
MONERO_LOGS=*:FATAL
This one is very verbose:
MONERO_LOGS=*:TRACE
This one is totally silent (logwise):
MONERO_LOGS=""
This one outputs all errors and warnings, except for the
"verify" category, which prints just fatal errors (the verify
category is used for logs about incoming transactions and
blocks, and it is expected that some/many will fail to verify,
hence we don't want the spam):
MONERO_LOGS=*:WARNING,verify:FATAL
Log levels are, in decreasing order of priority:
FATAL, ERROR, WARNING, INFO, DEBUG, TRACE
Subcategories may be added using prefixes and globs. This
example will output net.p2p logs at the TRACE level, but all
other net* logs only at INFO:
MONERO_LOGS=*:ERROR,net*:INFO,net.p2p:TRACE
Logs which are intended for the user (which Monero was using
a lot through epee, but really isn't a nice way to go things)
should use the "global" category. There are a few helper macros
for using this category, eg: MGINFO("this shows up by default")
or MGINFO_RED("this is red"), to try to keep a similar look
and feel for now.
Existing epee log macros still exist, and map to the new log
levels, but since they're used as a "user facing" UI element
as much as a logging system, they often don't map well to log
severities (ie, a log level 0 log may be an error, or may be
something we want the user to see, such as an important info).
In those cases, I tried to use the new macros. In other cases,
I left the existing macros in. When modifying logs, it is
probably best to switch to the new macros with explicit levels.
The --log-level options and set_log commands now also accept
category settings, in addition to the epee style log levels.
2017-01-01 16:34:23 +00:00
mlog_set_log_level ( req . level ) ;
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 19:09:39 +00:00
bool core_rpc_server : : on_set_log_categories ( const COMMAND_RPC_SET_LOG_CATEGORIES : : request & req , COMMAND_RPC_SET_LOG_CATEGORIES : : response & res , const connection_context * ctx )
Change logging to easylogging++
This replaces the epee and data_loggers logging systems with
a single one, and also adds filename:line and explicit severity
levels. Categories may be defined, and logging severity set
by category (or set of categories). epee style 0-4 log level
maps to a sensible severity configuration. Log files now also
rotate when reaching 100 MB.
To select which logs to output, use the MONERO_LOGS environment
variable, with a comma separated list of categories (globs are
supported), with their requested severity level after a colon.
If a log matches more than one such setting, the last one in
the configuration string applies. A few examples:
This one is (mostly) silent, only outputting fatal errors:
MONERO_LOGS=*:FATAL
This one is very verbose:
MONERO_LOGS=*:TRACE
This one is totally silent (logwise):
MONERO_LOGS=""
This one outputs all errors and warnings, except for the
"verify" category, which prints just fatal errors (the verify
category is used for logs about incoming transactions and
blocks, and it is expected that some/many will fail to verify,
hence we don't want the spam):
MONERO_LOGS=*:WARNING,verify:FATAL
Log levels are, in decreasing order of priority:
FATAL, ERROR, WARNING, INFO, DEBUG, TRACE
Subcategories may be added using prefixes and globs. This
example will output net.p2p logs at the TRACE level, but all
other net* logs only at INFO:
MONERO_LOGS=*:ERROR,net*:INFO,net.p2p:TRACE
Logs which are intended for the user (which Monero was using
a lot through epee, but really isn't a nice way to go things)
should use the "global" category. There are a few helper macros
for using this category, eg: MGINFO("this shows up by default")
or MGINFO_RED("this is red"), to try to keep a similar look
and feel for now.
Existing epee log macros still exist, and map to the new log
levels, but since they're used as a "user facing" UI element
as much as a logging system, they often don't map well to log
severities (ie, a log level 0 log may be an error, or may be
something we want the user to see, such as an important info).
In those cases, I tried to use the new macros. In other cases,
I left the existing macros in. When modifying logs, it is
probably best to switch to the new macros with explicit levels.
The --log-level options and set_log commands now also accept
category settings, in addition to the epee style log levels.
2017-01-01 16:34:23 +00:00
{
2017-10-29 21:10:46 +00:00
PERF_TIMER ( on_set_log_categories ) ;
2017-02-12 11:37:09 +00:00
mlog_set_log ( req . categories . c_str ( ) ) ;
2017-09-22 16:54:58 +00:00
res . categories = mlog_get_categories ( ) ;
Change logging to easylogging++
This replaces the epee and data_loggers logging systems with
a single one, and also adds filename:line and explicit severity
levels. Categories may be defined, and logging severity set
by category (or set of categories). epee style 0-4 log level
maps to a sensible severity configuration. Log files now also
rotate when reaching 100 MB.
To select which logs to output, use the MONERO_LOGS environment
variable, with a comma separated list of categories (globs are
supported), with their requested severity level after a colon.
If a log matches more than one such setting, the last one in
the configuration string applies. A few examples:
This one is (mostly) silent, only outputting fatal errors:
MONERO_LOGS=*:FATAL
This one is very verbose:
MONERO_LOGS=*:TRACE
This one is totally silent (logwise):
MONERO_LOGS=""
This one outputs all errors and warnings, except for the
"verify" category, which prints just fatal errors (the verify
category is used for logs about incoming transactions and
blocks, and it is expected that some/many will fail to verify,
hence we don't want the spam):
MONERO_LOGS=*:WARNING,verify:FATAL
Log levels are, in decreasing order of priority:
FATAL, ERROR, WARNING, INFO, DEBUG, TRACE
Subcategories may be added using prefixes and globs. This
example will output net.p2p logs at the TRACE level, but all
other net* logs only at INFO:
MONERO_LOGS=*:ERROR,net*:INFO,net.p2p:TRACE
Logs which are intended for the user (which Monero was using
a lot through epee, but really isn't a nice way to go things)
should use the "global" category. There are a few helper macros
for using this category, eg: MGINFO("this shows up by default")
or MGINFO_RED("this is red"), to try to keep a similar look
and feel for now.
Existing epee log macros still exist, and map to the new log
levels, but since they're used as a "user facing" UI element
as much as a logging system, they often don't map well to log
severities (ie, a log level 0 log may be an error, or may be
something we want the user to see, such as an important info).
In those cases, I tried to use the new macros. In other cases,
I left the existing macros in. When modifying logs, it is
probably best to switch to the new macros with explicit levels.
The --log-level options and set_log commands now also accept
category settings, in addition to the epee style log levels.
2017-01-01 16:34:23 +00:00
res . status = CORE_RPC_STATUS_OK ;
2015-02-05 09:11:20 +00:00
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 19:09:39 +00:00
bool core_rpc_server : : on_get_transaction_pool ( const COMMAND_RPC_GET_TRANSACTION_POOL : : request & req , COMMAND_RPC_GET_TRANSACTION_POOL : : response & res , const connection_context * ctx )
2015-02-05 09:11:20 +00:00
{
2017-10-29 21:10:46 +00:00
PERF_TIMER ( on_get_transaction_pool ) ;
2018-01-20 10:38:14 +00:00
bool r ;
if ( use_bootstrap_daemon_if_necessary < COMMAND_RPC_GET_TRANSACTION_POOL > ( invoke_http_mode : : JON , " /get_transaction_pool " , req , res , r ) )
return r ;
2019-01-11 19:09:39 +00:00
const bool restricted = m_restricted & & ctx ;
const bool request_has_rpc_origin = ctx ! = NULL ;
m_core . get_pool_transactions_and_spent_keys_info ( res . transactions , res . spent_key_images , ! request_has_rpc_origin | | ! restricted ) ;
2018-10-17 23:01:56 +00:00
for ( tx_info & txi : res . transactions )
txi . tx_blob = epee : : string_tools : : buff_to_hex_nodelimer ( txi . tx_blob ) ;
2015-02-05 09:11:20 +00:00
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 19:09:39 +00:00
bool core_rpc_server : : on_get_transaction_pool_hashes_bin ( const COMMAND_RPC_GET_TRANSACTION_POOL_HASHES_BIN : : request & req , COMMAND_RPC_GET_TRANSACTION_POOL_HASHES_BIN : : response & res , const connection_context * ctx )
2017-03-22 18:03:23 +00:00
{
2017-10-29 21:10:46 +00:00
PERF_TIMER ( on_get_transaction_pool_hashes ) ;
2018-01-20 10:38:14 +00:00
bool r ;
2018-06-20 11:48:10 +00:00
if ( use_bootstrap_daemon_if_necessary < COMMAND_RPC_GET_TRANSACTION_POOL_HASHES_BIN > ( invoke_http_mode : : JON , " /get_transaction_pool_hashes.bin " , req , res , r ) )
2018-01-20 10:38:14 +00:00
return r ;
2019-01-11 19:09:39 +00:00
const bool restricted = m_restricted & & ctx ;
const bool request_has_rpc_origin = ctx ! = NULL ;
m_core . get_pool_transaction_hashes ( res . tx_hashes , ! request_has_rpc_origin | | ! restricted ) ;
2017-03-22 18:03:23 +00:00
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 19:09:39 +00:00
bool core_rpc_server : : on_get_transaction_pool_hashes ( const COMMAND_RPC_GET_TRANSACTION_POOL_HASHES : : request & req , COMMAND_RPC_GET_TRANSACTION_POOL_HASHES : : response & res , const connection_context * ctx )
2018-06-20 11:48:10 +00:00
{
PERF_TIMER ( on_get_transaction_pool_hashes ) ;
bool r ;
if ( use_bootstrap_daemon_if_necessary < COMMAND_RPC_GET_TRANSACTION_POOL_HASHES > ( invoke_http_mode : : JON , " /get_transaction_pool_hashes " , req , res , r ) )
return r ;
2019-01-11 19:09:39 +00:00
const bool restricted = m_restricted & & ctx ;
const bool request_has_rpc_origin = ctx ! = NULL ;
2018-06-20 11:48:10 +00:00
std : : vector < crypto : : hash > tx_hashes ;
2019-01-11 19:09:39 +00:00
m_core . get_pool_transaction_hashes ( tx_hashes , ! request_has_rpc_origin | | ! restricted ) ;
2018-06-20 11:48:10 +00:00
res . tx_hashes . reserve ( tx_hashes . size ( ) ) ;
for ( const crypto : : hash & tx_hash : tx_hashes )
res . tx_hashes . push_back ( epee : : string_tools : : pod_to_hex ( tx_hash ) ) ;
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 19:09:39 +00:00
bool core_rpc_server : : on_get_transaction_pool_stats ( const COMMAND_RPC_GET_TRANSACTION_POOL_STATS : : request & req , COMMAND_RPC_GET_TRANSACTION_POOL_STATS : : response & res , const connection_context * ctx )
2017-05-31 18:11:56 +00:00
{
2017-10-29 21:10:46 +00:00
PERF_TIMER ( on_get_transaction_pool_stats ) ;
2018-01-20 10:38:14 +00:00
bool r ;
if ( use_bootstrap_daemon_if_necessary < COMMAND_RPC_GET_TRANSACTION_POOL_STATS > ( invoke_http_mode : : JON , " /get_transaction_pool_stats " , req , res , r ) )
return r ;
2019-01-11 19:09:39 +00:00
const bool restricted = m_restricted & & ctx ;
const bool request_has_rpc_origin = ctx ! = NULL ;
m_core . get_pool_transaction_stats ( res . pool_stats , ! request_has_rpc_origin | | ! restricted ) ;
2017-05-31 18:11:56 +00:00
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-06-10 15:03:18 +00:00
bool core_rpc_server : : on_set_bootstrap_daemon ( const COMMAND_RPC_SET_BOOTSTRAP_DAEMON : : request & req , COMMAND_RPC_SET_BOOTSTRAP_DAEMON : : response & res , const connection_context * ctx )
{
PERF_TIMER ( on_set_bootstrap_daemon ) ;
boost : : optional < epee : : net_utils : : http : : login > credentials ;
if ( ! req . username . empty ( ) | | ! req . password . empty ( ) )
{
credentials = epee : : net_utils : : http : : login ( req . username , req . password ) ;
}
if ( set_bootstrap_daemon ( req . address , credentials ) )
{
res . status = CORE_RPC_STATUS_OK ;
}
else
{
res . status = " Failed to set bootstrap daemon " ;
}
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 19:09:39 +00:00
bool core_rpc_server : : on_stop_daemon ( const COMMAND_RPC_STOP_DAEMON : : request & req , COMMAND_RPC_STOP_DAEMON : : response & res , const connection_context * ctx )
2015-02-05 09:11:20 +00:00
{
2017-10-29 21:10:46 +00:00
PERF_TIMER ( on_stop_daemon ) ;
2015-02-05 09:11:20 +00:00
// FIXME: replace back to original m_p2p.send_stop_signal() after
// investigating why that isn't working quite right.
2015-02-05 10:38:49 +00:00
m_p2p . send_stop_signal ( ) ;
2015-02-05 09:11:20 +00:00
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 19:09:39 +00:00
bool core_rpc_server : : on_getblockcount ( const COMMAND_RPC_GETBLOCKCOUNT : : request & req , COMMAND_RPC_GETBLOCKCOUNT : : response & res , const connection_context * ctx )
2014-03-03 22:07:58 +00:00
{
2017-10-29 21:10:46 +00:00
PERF_TIMER ( on_getblockcount ) ;
2018-01-20 10:38:14 +00:00
{
boost : : shared_lock < boost : : shared_mutex > lock ( m_bootstrap_daemon_mutex ) ;
if ( m_should_use_bootstrap_daemon )
{
res . status = " This command is unsupported for bootstrap daemon " ;
return false ;
}
}
2014-03-20 11:46:11 +00:00
res . count = m_core . get_current_blockchain_height ( ) ;
res . status = CORE_RPC_STATUS_OK ;
2014-03-03 22:07:58 +00:00
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 19:09:39 +00:00
bool core_rpc_server : : on_getblockhash ( const COMMAND_RPC_GETBLOCKHASH : : request & req , COMMAND_RPC_GETBLOCKHASH : : response & res , epee : : json_rpc : : error & error_resp , const connection_context * ctx )
2014-03-03 22:07:58 +00:00
{
2017-10-29 21:10:46 +00:00
PERF_TIMER ( on_getblockhash ) ;
2018-01-20 10:38:14 +00:00
{
boost : : shared_lock < boost : : shared_mutex > lock ( m_bootstrap_daemon_mutex ) ;
if ( m_should_use_bootstrap_daemon )
{
res = " This command is unsupported for bootstrap daemon " ;
return false ;
}
}
2014-03-03 22:07:58 +00:00
if ( req . size ( ) ! = 1 )
{
error_resp . code = CORE_RPC_ERROR_CODE_WRONG_PARAM ;
error_resp . message = " Wrong parameters, expected height " ;
return false ;
}
uint64_t h = req [ 0 ] ;
if ( m_core . get_current_blockchain_height ( ) < = h )
{
error_resp . code = CORE_RPC_ERROR_CODE_TOO_BIG_HEIGHT ;
2018-11-02 23:31:31 +00:00
error_resp . message = std : : string ( " Requested block height: " ) + std : : to_string ( h ) + " greater than current top block height: " + std : : to_string ( m_core . get_current_blockchain_height ( ) - 1 ) ;
2014-03-03 22:07:58 +00:00
}
res = string_tools : : pod_to_hex ( m_core . get_block_id_by_height ( h ) ) ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2014-10-06 09:27:34 +00:00
// equivalent of strstr, but with arbitrary bytes (ie, NULs)
// This does not differentiate between "not found" and "found at offset 0"
2018-11-23 13:11:40 +00:00
size_t slow_memmem ( const void * start_buff , size_t buflen , const void * pat , size_t patlen )
2014-03-03 22:07:58 +00:00
{
2014-10-06 09:27:34 +00:00
const void * buf = start_buff ;
const void * end = ( const char * ) buf + buflen ;
if ( patlen > buflen | | patlen = = 0 ) return 0 ;
while ( buflen > 0 & & ( buf = memchr ( buf , ( ( const char * ) pat ) [ 0 ] , buflen - patlen + 1 ) ) )
2014-03-03 22:07:58 +00:00
{
if ( memcmp ( buf , pat , patlen ) = = 0 )
2014-10-06 09:27:34 +00:00
return ( const char * ) buf - ( const char * ) start_buff ;
buf = ( const char * ) buf + 1 ;
buflen = ( const char * ) end - ( const char * ) buf ;
2014-03-03 22:07:58 +00:00
}
return 0 ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 19:09:39 +00:00
bool core_rpc_server : : on_getblocktemplate ( const COMMAND_RPC_GETBLOCKTEMPLATE : : request & req , COMMAND_RPC_GETBLOCKTEMPLATE : : response & res , epee : : json_rpc : : error & error_resp , const connection_context * ctx )
2014-03-03 22:07:58 +00:00
{
2017-10-29 21:10:46 +00:00
PERF_TIMER ( on_getblocktemplate ) ;
2018-01-20 10:38:14 +00:00
bool r ;
if ( use_bootstrap_daemon_if_necessary < COMMAND_RPC_GETBLOCKTEMPLATE > ( invoke_http_mode : : JON_RPC , " getblocktemplate " , req , res , r ) )
return r ;
2014-03-20 11:46:11 +00:00
if ( ! check_core_ready ( ) )
{
error_resp . code = CORE_RPC_ERROR_CODE_CORE_BUSY ;
error_resp . message = " Core is busy " ;
return false ;
}
2014-03-03 22:07:58 +00:00
if ( req . reserve_size > 255 )
{
error_resp . code = CORE_RPC_ERROR_CODE_TOO_BIG_RESERVE_SIZE ;
2018-03-01 11:36:19 +00:00
error_resp . message = " Too big reserved size, maximum 255 " ;
2014-03-03 22:07:58 +00:00
return false ;
}
2019-07-02 04:29:32 +00:00
if ( req . reserve_size & & ! req . extra_nonce . empty ( ) )
{
error_resp . code = CORE_RPC_ERROR_CODE_WRONG_PARAM ;
error_resp . message = " Cannot specify both a reserve_size and an extra_nonce " ;
return false ;
}
if ( req . extra_nonce . size ( ) > 510 )
{
error_resp . code = CORE_RPC_ERROR_CODE_TOO_BIG_RESERVE_SIZE ;
error_resp . message = " Too big extra_nonce size, maximum 510 hex chars " ;
return false ;
}
2017-02-19 02:42:10 +00:00
cryptonote : : address_parse_info info ;
2014-03-03 22:07:58 +00:00
2018-11-16 04:32:05 +00:00
if ( ! req . wallet_address . size ( ) | | ! cryptonote : : get_account_address_from_str ( info , nettype ( ) , req . wallet_address ) )
2014-03-03 22:07:58 +00:00
{
error_resp . code = CORE_RPC_ERROR_CODE_WRONG_WALLET_ADDRESS ;
error_resp . message = " Failed to parse wallet address " ;
return false ;
}
2017-02-19 02:42:10 +00:00
if ( info . is_subaddress )
{
error_resp . code = CORE_RPC_ERROR_CODE_MINING_TO_SUBADDRESS ;
error_resp . message = " Mining to subaddress is not supported yet " ;
return false ;
}
2014-03-03 22:07:58 +00:00
2018-11-19 17:55:53 +00:00
block b ;
2014-03-03 22:07:58 +00:00
cryptonote : : blobdata blob_reserve ;
2019-07-02 04:29:32 +00:00
if ( ! req . extra_nonce . empty ( ) )
{
if ( ! string_tools : : parse_hexstr_to_binbuff ( req . extra_nonce , blob_reserve ) )
{
error_resp . code = CORE_RPC_ERROR_CODE_WRONG_PARAM ;
error_resp . message = " Parameter extra_nonce should be a hex string " ;
return false ;
}
}
else
blob_reserve . resize ( req . reserve_size , 0 ) ;
2019-01-31 10:44:08 +00:00
cryptonote : : difficulty_type wdiff ;
2019-03-23 16:20:08 +00:00
crypto : : hash prev_block ;
if ( ! req . prev_block . empty ( ) )
{
if ( ! epee : : string_tools : : hex_to_pod ( req . prev_block , prev_block ) )
{
error_resp . code = CORE_RPC_ERROR_CODE_INTERNAL_ERROR ;
error_resp . message = " Invalid prev_block " ;
return false ;
}
}
if ( ! m_core . get_block_template ( b , req . prev_block . empty ( ) ? NULL : & prev_block , info . address , wdiff , res . height , res . expected_reward , blob_reserve ) )
2014-03-03 22:07:58 +00:00
{
error_resp . code = CORE_RPC_ERROR_CODE_INTERNAL_ERROR ;
error_resp . message = " Internal error: failed to create block template " ;
LOG_ERROR ( " Failed to create block template " ) ;
return false ;
}
2019-01-31 10:44:08 +00:00
store_difficulty ( wdiff , res . difficulty , res . wide_difficulty , res . difficulty_top64 ) ;
2014-03-03 22:07:58 +00:00
blobdata block_blob = t_serializable_object_to_blob ( b ) ;
2014-05-03 16:19:43 +00:00
crypto : : public_key tx_pub_key = cryptonote : : get_tx_pub_key_from_extra ( b . miner_tx ) ;
2017-10-10 14:47:08 +00:00
if ( tx_pub_key = = crypto : : null_pkey )
2014-03-03 22:07:58 +00:00
{
error_resp . code = CORE_RPC_ERROR_CODE_INTERNAL_ERROR ;
error_resp . message = " Internal error: failed to create block template " ;
2018-10-05 02:12:53 +00:00
LOG_ERROR ( " Failed to get tx pub key in coinbase extra " ) ;
2014-03-03 22:07:58 +00:00
return false ;
}
res . reserved_offset = slow_memmem ( ( void * ) block_blob . data ( ) , block_blob . size ( ) , & tx_pub_key , sizeof ( tx_pub_key ) ) ;
if ( ! res . reserved_offset )
{
error_resp . code = CORE_RPC_ERROR_CODE_INTERNAL_ERROR ;
error_resp . message = " Internal error: failed to create block template " ;
LOG_ERROR ( " Failed to find tx pub key in blockblob " ) ;
return false ;
}
2019-04-12 03:28:54 +00:00
if ( req . reserve_size )
res . reserved_offset + = sizeof ( tx_pub_key ) + 2 ; //2 bytes: tag for TX_EXTRA_NONCE(1 byte), counter in TX_EXTRA_NONCE(1 byte)
else
res . reserved_offset = 0 ;
2014-03-03 22:07:58 +00:00
if ( res . reserved_offset + req . reserve_size > block_blob . size ( ) )
{
error_resp . code = CORE_RPC_ERROR_CODE_INTERNAL_ERROR ;
error_resp . message = " Internal error: failed to create block template " ;
LOG_ERROR ( " Failed to calculate offset for " ) ;
return false ;
}
2016-03-30 01:50:51 +00:00
blobdata hashing_blob = get_block_hashing_blob ( b ) ;
2015-01-06 16:37:10 +00:00
res . prev_hash = string_tools : : pod_to_hex ( b . prev_id ) ;
2014-03-03 22:07:58 +00:00
res . blocktemplate_blob = string_tools : : buff_to_hex_nodelimer ( block_blob ) ;
2016-03-30 01:50:51 +00:00
res . blockhashing_blob = string_tools : : buff_to_hex_nodelimer ( hashing_blob ) ;
2014-05-25 17:06:40 +00:00
res . status = CORE_RPC_STATUS_OK ;
2014-03-03 22:07:58 +00:00
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 19:09:39 +00:00
bool core_rpc_server : : on_submitblock ( const COMMAND_RPC_SUBMITBLOCK : : request & req , COMMAND_RPC_SUBMITBLOCK : : response & res , epee : : json_rpc : : error & error_resp , const connection_context * ctx )
2014-03-03 22:07:58 +00:00
{
2017-10-29 21:10:46 +00:00
PERF_TIMER ( on_submitblock ) ;
2018-01-20 10:38:14 +00:00
{
boost : : shared_lock < boost : : shared_mutex > lock ( m_bootstrap_daemon_mutex ) ;
if ( m_should_use_bootstrap_daemon )
{
res . status = " This command is unsupported for bootstrap daemon " ;
return false ;
}
}
2014-03-20 11:46:11 +00:00
CHECK_CORE_READY ( ) ;
2014-03-03 22:07:58 +00:00
if ( req . size ( ) ! = 1 )
{
error_resp . code = CORE_RPC_ERROR_CODE_WRONG_PARAM ;
error_resp . message = " Wrong param " ;
return false ;
}
blobdata blockblob ;
if ( ! string_tools : : parse_hexstr_to_binbuff ( req [ 0 ] , blockblob ) )
{
error_resp . code = CORE_RPC_ERROR_CODE_WRONG_BLOCKBLOB ;
error_resp . message = " Wrong block blob " ;
return false ;
}
2016-10-10 20:41:24 +00:00
2014-06-11 14:46:56 +00:00
// Fixing of high orphan issue for most pools
// Thanks Boolberry!
2018-11-19 17:55:53 +00:00
block b ;
2014-06-11 14:46:56 +00:00
if ( ! parse_and_validate_block_from_blob ( blockblob , b ) )
{
error_resp . code = CORE_RPC_ERROR_CODE_WRONG_BLOCKBLOB ;
error_resp . message = " Wrong block blob " ;
return false ;
}
2014-06-11 21:32:53 +00:00
// Fix from Boolberry neglects to check block
// size, do that with the function below
if ( ! m_core . check_incoming_block_size ( blockblob ) )
{
error_resp . code = CORE_RPC_ERROR_CODE_WRONG_BLOCKBLOB_SIZE ;
error_resp . message = " Block bloc size is too big, rejecting block " ;
return false ;
}
2019-03-23 16:20:08 +00:00
block_verification_context bvc ;
if ( ! m_core . handle_block_found ( b , bvc ) )
2014-03-03 22:07:58 +00:00
{
error_resp . code = CORE_RPC_ERROR_CODE_BLOCK_NOT_ACCEPTED ;
error_resp . message = " Block not accepted " ;
return false ;
}
2014-05-25 17:06:40 +00:00
res . status = CORE_RPC_STATUS_OK ;
2014-03-03 22:07:58 +00:00
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 19:09:39 +00:00
bool core_rpc_server : : on_generateblocks ( const COMMAND_RPC_GENERATEBLOCKS : : request & req , COMMAND_RPC_GENERATEBLOCKS : : response & res , epee : : json_rpc : : error & error_resp , const connection_context * ctx )
2018-06-14 19:11:49 +00:00
{
PERF_TIMER ( on_generateblocks ) ;
CHECK_CORE_READY ( ) ;
res . status = CORE_RPC_STATUS_OK ;
if ( m_core . get_nettype ( ) ! = FAKECHAIN )
{
error_resp . code = CORE_RPC_ERROR_CODE_REGTEST_REQUIRED ;
error_resp . message = " Regtest required when generating blocks " ;
return false ;
}
COMMAND_RPC_GETBLOCKTEMPLATE : : request template_req ;
COMMAND_RPC_GETBLOCKTEMPLATE : : response template_res ;
COMMAND_RPC_SUBMITBLOCK : : request submit_req ;
COMMAND_RPC_SUBMITBLOCK : : response submit_res ;
template_req . reserve_size = 1 ;
template_req . wallet_address = req . wallet_address ;
2019-03-23 16:20:08 +00:00
template_req . prev_block = req . prev_block ;
2019-06-26 14:39:41 +00:00
submit_req . push_back ( std : : string { } ) ;
2018-06-14 19:11:49 +00:00
res . height = m_core . get_blockchain_storage ( ) . get_current_blockchain_height ( ) ;
for ( size_t i = 0 ; i < req . amount_of_blocks ; i + + )
{
2019-04-14 09:21:09 +00:00
bool r = on_getblocktemplate ( template_req , template_res , error_resp , ctx ) ;
2018-06-14 19:11:49 +00:00
res . status = template_res . status ;
2019-03-23 16:20:08 +00:00
template_req . prev_block . clear ( ) ;
2018-06-14 19:11:49 +00:00
if ( ! r ) return false ;
blobdata blockblob ;
if ( ! string_tools : : parse_hexstr_to_binbuff ( template_res . blocktemplate_blob , blockblob ) )
{
error_resp . code = CORE_RPC_ERROR_CODE_WRONG_BLOCKBLOB ;
error_resp . message = " Wrong block blob " ;
return false ;
}
2018-11-19 17:55:53 +00:00
block b ;
2018-06-14 19:11:49 +00:00
if ( ! parse_and_validate_block_from_blob ( blockblob , b ) )
{
error_resp . code = CORE_RPC_ERROR_CODE_WRONG_BLOCKBLOB ;
error_resp . message = " Wrong block blob " ;
return false ;
}
2019-03-23 16:20:08 +00:00
b . nonce = req . starting_nonce ;
2018-06-14 19:11:49 +00:00
miner : : find_nonce_for_given_block ( b , template_res . difficulty , template_res . height ) ;
submit_req . front ( ) = string_tools : : buff_to_hex_nodelimer ( block_to_blob ( b ) ) ;
2019-01-11 19:09:39 +00:00
r = on_submitblock ( submit_req , submit_res , error_resp , ctx ) ;
2018-06-14 19:11:49 +00:00
res . status = submit_res . status ;
if ( ! r ) return false ;
2019-03-23 16:20:08 +00:00
res . blocks . push_back ( epee : : string_tools : : pod_to_hex ( get_block_hash ( b ) ) ) ;
template_req . prev_block = res . blocks . back ( ) ;
2018-06-14 19:11:49 +00:00
res . height = template_res . height ;
}
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2014-04-09 12:14:35 +00:00
uint64_t core_rpc_server : : get_block_reward ( const block & blk )
{
uint64_t reward = 0 ;
2017-01-22 20:38:10 +00:00
for ( const tx_out & out : blk . miner_tx . vout )
2014-04-09 12:14:35 +00:00
{
reward + = out . amount ;
}
return reward ;
}
//------------------------------------------------------------------------------------------------------------------------------
2018-04-05 14:15:15 +00:00
bool core_rpc_server : : fill_block_header_response ( const block & blk , bool orphan_status , uint64_t height , const crypto : : hash & hash , block_header_response & response , bool fill_pow_hash )
2016-09-29 13:38:12 +00:00
{
2017-10-29 21:10:46 +00:00
PERF_TIMER ( fill_block_header_response ) ;
2016-09-29 13:38:12 +00:00
response . major_version = blk . major_version ;
response . minor_version = blk . minor_version ;
response . timestamp = blk . timestamp ;
response . prev_hash = string_tools : : pod_to_hex ( blk . prev_id ) ;
response . nonce = blk . nonce ;
response . orphan_status = orphan_status ;
response . height = height ;
response . depth = m_core . get_current_blockchain_height ( ) - height - 1 ;
response . hash = string_tools : : pod_to_hex ( hash ) ;
2019-01-31 10:44:08 +00:00
store_difficulty ( m_core . get_blockchain_storage ( ) . block_difficulty ( height ) ,
response . difficulty , response . wide_difficulty , response . difficulty_top64 ) ;
store_difficulty ( m_core . get_blockchain_storage ( ) . get_db ( ) . get_block_cumulative_difficulty ( height ) ,
response . cumulative_difficulty , response . wide_cumulative_difficulty , response . cumulative_difficulty_top64 ) ;
2016-09-29 13:38:12 +00:00
response . reward = get_block_reward ( blk ) ;
2018-07-18 21:24:53 +00:00
response . block_size = response . block_weight = m_core . get_blockchain_storage ( ) . get_db ( ) . get_block_weight ( height ) ;
2017-01-08 11:14:11 +00:00
response . num_txes = blk . tx_hashes . size ( ) ;
2018-04-05 14:15:15 +00:00
response . pow_hash = fill_pow_hash ? string_tools : : pod_to_hex ( get_block_longhash ( blk , height ) ) : " " ;
ArticMine's new block weight algorithm
This curbs runaway growth while still allowing substantial
spikes in block weight
Original specification from ArticMine:
here is the scaling proposal
Define: LongTermBlockWeight
Before fork:
LongTermBlockWeight = BlockWeight
At or after fork:
LongTermBlockWeight = min(BlockWeight, 1.4*LongTermEffectiveMedianBlockWeight)
Note: To avoid possible consensus issues over rounding the LongTermBlockWeight for a given block should be calculated to the nearest byte, and stored as a integer in the block itself. The stored LongTermBlockWeight is then used for future calculations of the LongTermEffectiveMedianBlockWeight and not recalculated each time.
Define: LongTermEffectiveMedianBlockWeight
LongTermEffectiveMedianBlockWeight = max(300000, MedianOverPrevious100000Blocks(LongTermBlockWeight))
Change Definition of EffectiveMedianBlockWeight
From (current definition)
EffectiveMedianBlockWeight = max(300000, MedianOverPrevious100Blocks(BlockWeight))
To (proposed definition)
EffectiveMedianBlockWeight = min(max(300000, MedianOverPrevious100Blocks(BlockWeight)), 50*LongTermEffectiveMedianBlockWeight)
Notes:
1) There are no other changes to the existing penalty formula, median calculation, fees etc.
2) There is the requirement to store the LongTermBlockWeight of a block unencrypted in the block itself. This is to avoid possible consensus issues over rounding and also to prevent the calculations from becoming unwieldy as we move away from the fork.
3) When the EffectiveMedianBlockWeight cap is reached it is still possible to mine blocks up to 2x the EffectiveMedianBlockWeight by paying the corresponding penalty.
Note: the long term block weight is stored in the database, but not in the actual block itself,
since it requires recalculating anyway for verification.
2019-01-21 17:18:50 +00:00
response . long_term_weight = m_core . get_blockchain_storage ( ) . get_db ( ) . get_block_long_term_weight ( height ) ;
2019-03-21 00:23:23 +00:00
response . miner_tx_hash = string_tools : : pod_to_hex ( cryptonote : : get_transaction_hash ( blk . miner_tx ) ) ;
2014-04-09 12:14:35 +00:00
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2018-01-20 10:38:14 +00:00
template < typename COMMAND_TYPE >
bool core_rpc_server : : use_bootstrap_daemon_if_necessary ( const invoke_http_mode & mode , const std : : string & command_name , const typename COMMAND_TYPE : : request & req , typename COMMAND_TYPE : : response & res , bool & r )
{
res . untrusted = false ;
2019-06-10 15:03:18 +00:00
boost : : upgrade_lock < boost : : shared_mutex > upgrade_lock ( m_bootstrap_daemon_mutex ) ;
2019-08-27 12:01:49 +00:00
if ( m_bootstrap_daemon . get ( ) = = nullptr )
{
2018-01-20 10:38:14 +00:00
return false ;
2019-08-27 12:01:49 +00:00
}
2018-01-20 10:38:14 +00:00
if ( ! m_should_use_bootstrap_daemon )
{
MINFO ( " The local daemon is fully synced. Not switching back to the bootstrap daemon " ) ;
return false ;
}
auto current_time = std : : chrono : : system_clock : : now ( ) ;
if ( current_time - m_bootstrap_height_check_time > std : : chrono : : seconds ( 30 ) ) // update every 30s
{
2019-06-10 15:03:18 +00:00
{
boost : : upgrade_to_unique_lock < boost : : shared_mutex > lock ( upgrade_lock ) ;
m_bootstrap_height_check_time = current_time ;
}
2018-01-20 10:38:14 +00:00
2019-08-27 12:01:49 +00:00
boost : : optional < uint64_t > bootstrap_daemon_height = m_bootstrap_daemon - > get_height ( ) ;
if ( ! bootstrap_daemon_height )
{
MERROR ( " Failed to fetch bootstrap daemon height " ) ;
return false ;
}
2018-01-20 10:38:14 +00:00
2019-08-27 12:01:49 +00:00
uint64_t target_height = m_core . get_target_blockchain_height ( ) ;
if ( * bootstrap_daemon_height < target_height )
{
MINFO ( " Bootstrap daemon is out of sync " ) ;
return m_bootstrap_daemon - > handle_result ( false ) ;
}
2018-01-20 10:38:14 +00:00
2019-08-27 12:01:49 +00:00
uint64_t top_height = m_core . get_current_blockchain_height ( ) ;
m_should_use_bootstrap_daemon = top_height + 10 < * bootstrap_daemon_height ;
MINFO ( ( m_should_use_bootstrap_daemon ? " Using " : " Not using " ) < < " the bootstrap daemon (our height: " < < top_height < < " , bootstrap daemon's height: " < < * bootstrap_daemon_height < < " ) " ) ;
2018-01-20 10:38:14 +00:00
}
if ( ! m_should_use_bootstrap_daemon )
return false ;
if ( mode = = invoke_http_mode : : JON )
{
2019-08-27 12:01:49 +00:00
r = m_bootstrap_daemon - > invoke_http_json ( command_name , req , res ) ;
2018-01-20 10:38:14 +00:00
}
else if ( mode = = invoke_http_mode : : BIN )
{
2019-08-27 12:01:49 +00:00
r = m_bootstrap_daemon - > invoke_http_bin ( command_name , req , res ) ;
2018-01-20 10:38:14 +00:00
}
else if ( mode = = invoke_http_mode : : JON_RPC )
{
2019-08-27 12:01:49 +00:00
r = m_bootstrap_daemon - > invoke_http_json_rpc ( command_name , req , res ) ;
2018-01-20 10:38:14 +00:00
}
else
{
MERROR ( " Unknown invoke_http_mode: " < < mode ) ;
return false ;
}
2019-06-10 15:03:18 +00:00
{
boost : : upgrade_to_unique_lock < boost : : shared_mutex > lock ( upgrade_lock ) ;
m_was_bootstrap_ever_used = true ;
}
2018-01-20 10:38:14 +00:00
r = r & & res . status = = CORE_RPC_STATUS_OK ;
res . untrusted = true ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 19:09:39 +00:00
bool core_rpc_server : : on_get_last_block_header ( const COMMAND_RPC_GET_LAST_BLOCK_HEADER : : request & req , COMMAND_RPC_GET_LAST_BLOCK_HEADER : : response & res , epee : : json_rpc : : error & error_resp , const connection_context * ctx )
2014-04-09 12:14:35 +00:00
{
2017-10-29 21:10:46 +00:00
PERF_TIMER ( on_get_last_block_header ) ;
2018-01-20 10:38:14 +00:00
bool r ;
if ( use_bootstrap_daemon_if_necessary < COMMAND_RPC_GET_LAST_BLOCK_HEADER > ( invoke_http_mode : : JON_RPC , " getlastblockheader " , req , res , r ) )
return r ;
2017-10-20 19:49:23 +00:00
CHECK_CORE_READY ( ) ;
2014-04-09 12:14:35 +00:00
uint64_t last_block_height ;
crypto : : hash last_block_hash ;
2017-09-09 11:06:24 +00:00
m_core . get_blockchain_top ( last_block_height , last_block_hash ) ;
2014-04-09 12:14:35 +00:00
block last_block ;
bool have_last_block = m_core . get_block_by_hash ( last_block_hash , last_block ) ;
if ( ! have_last_block )
{
error_resp . code = CORE_RPC_ERROR_CODE_INTERNAL_ERROR ;
error_resp . message = " Internal error: can't get last block. " ;
return false ;
}
2019-03-01 16:14:51 +00:00
const bool restricted = m_restricted & & ctx ;
bool response_filled = fill_block_header_response ( last_block , false , last_block_height , last_block_hash , res . block_header , req . fill_pow_hash & & ! restricted ) ;
2016-09-29 13:38:12 +00:00
if ( ! response_filled )
2014-04-09 12:14:35 +00:00
{
error_resp . code = CORE_RPC_ERROR_CODE_INTERNAL_ERROR ;
error_resp . message = " Internal error: can't produce valid response. " ;
return false ;
}
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 19:09:39 +00:00
bool core_rpc_server : : on_get_block_header_by_hash ( const COMMAND_RPC_GET_BLOCK_HEADER_BY_HASH : : request & req , COMMAND_RPC_GET_BLOCK_HEADER_BY_HASH : : response & res , epee : : json_rpc : : error & error_resp , const connection_context * ctx )
{
2017-10-29 21:10:46 +00:00
PERF_TIMER ( on_get_block_header_by_hash ) ;
2018-01-20 10:38:14 +00:00
bool r ;
if ( use_bootstrap_daemon_if_necessary < COMMAND_RPC_GET_BLOCK_HEADER_BY_HASH > ( invoke_http_mode : : JON_RPC , " getblockheaderbyhash " , req , res , r ) )
return r ;
2019-05-09 10:13:12 +00:00
auto get = [ this ] ( const std : : string & hash , bool fill_pow_hash , block_header_response & block_header , bool restricted , epee : : json_rpc : : error & error_resp ) - > bool {
crypto : : hash block_hash ;
bool hash_parsed = parse_hash256 ( hash , block_hash ) ;
if ( ! hash_parsed )
{
error_resp . code = CORE_RPC_ERROR_CODE_WRONG_PARAM ;
error_resp . message = " Failed to parse hex representation of block hash. Hex = " + hash + ' . ' ;
return false ;
}
block blk ;
bool orphan = false ;
bool have_block = m_core . get_block_by_hash ( block_hash , blk , & orphan ) ;
if ( ! have_block )
{
error_resp . code = CORE_RPC_ERROR_CODE_INTERNAL_ERROR ;
error_resp . message = " Internal error: can't get block by hash. Hash = " + hash + ' . ' ;
return false ;
}
if ( blk . miner_tx . vin . size ( ) ! = 1 | | blk . miner_tx . vin . front ( ) . type ( ) ! = typeid ( txin_gen ) )
{
error_resp . code = CORE_RPC_ERROR_CODE_INTERNAL_ERROR ;
error_resp . message = " Internal error: coinbase transaction in the block has the wrong type " ;
return false ;
}
uint64_t block_height = boost : : get < txin_gen > ( blk . miner_tx . vin . front ( ) ) . height ;
bool response_filled = fill_block_header_response ( blk , orphan , block_height , block_hash , block_header , fill_pow_hash & & ! restricted ) ;
if ( ! response_filled )
{
error_resp . code = CORE_RPC_ERROR_CODE_INTERNAL_ERROR ;
error_resp . message = " Internal error: can't produce valid response. " ;
return false ;
}
return true ;
} ;
const bool restricted = m_restricted & & ctx ;
if ( ! req . hash . empty ( ) )
2014-04-09 12:14:35 +00:00
{
2019-05-09 10:13:12 +00:00
if ( ! get ( req . hash , req . fill_pow_hash , res . block_header , restricted , error_resp ) )
return false ;
2014-04-09 12:14:35 +00:00
}
2019-05-09 10:13:12 +00:00
res . block_headers . reserve ( req . hashes . size ( ) ) ;
for ( const std : : string & hash : req . hashes )
2014-04-09 12:14:35 +00:00
{
2019-05-09 10:13:12 +00:00
res . block_headers . push_back ( { } ) ;
if ( ! get ( hash , req . fill_pow_hash , res . block_headers . back ( ) , restricted , error_resp ) )
return false ;
2014-04-09 12:14:35 +00:00
}
2019-05-09 10:13:12 +00:00
2014-04-09 12:14:35 +00:00
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 19:09:39 +00:00
bool core_rpc_server : : on_get_block_headers_range ( const COMMAND_RPC_GET_BLOCK_HEADERS_RANGE : : request & req , COMMAND_RPC_GET_BLOCK_HEADERS_RANGE : : response & res , epee : : json_rpc : : error & error_resp , const connection_context * ctx )
{
2017-10-29 21:10:46 +00:00
PERF_TIMER ( on_get_block_headers_range ) ;
2018-01-20 10:38:14 +00:00
bool r ;
if ( use_bootstrap_daemon_if_necessary < COMMAND_RPC_GET_BLOCK_HEADERS_RANGE > ( invoke_http_mode : : JON_RPC , " getblockheadersrange " , req , res , r ) )
return r ;
2016-10-02 09:21:21 +00:00
const uint64_t bc_height = m_core . get_current_blockchain_height ( ) ;
if ( req . start_height > = bc_height | | req . end_height > = bc_height | | req . start_height > req . end_height )
{
error_resp . code = CORE_RPC_ERROR_CODE_TOO_BIG_HEIGHT ;
error_resp . message = " Invalid start/end heights. " ;
return false ;
}
for ( uint64_t h = req . start_height ; h < = req . end_height ; + + h )
{
crypto : : hash block_hash = m_core . get_block_id_by_height ( h ) ;
block blk ;
bool have_block = m_core . get_block_by_hash ( block_hash , blk ) ;
if ( ! have_block )
{
error_resp . code = CORE_RPC_ERROR_CODE_INTERNAL_ERROR ;
error_resp . message = " Internal error: can't get block by height. Height = " + boost : : lexical_cast < std : : string > ( h ) + " . Hash = " + epee : : string_tools : : pod_to_hex ( block_hash ) + ' . ' ;
return false ;
}
2017-12-11 22:36:58 +00:00
if ( blk . miner_tx . vin . size ( ) ! = 1 | | blk . miner_tx . vin . front ( ) . type ( ) ! = typeid ( txin_gen ) )
2016-10-02 09:21:21 +00:00
{
error_resp . code = CORE_RPC_ERROR_CODE_INTERNAL_ERROR ;
error_resp . message = " Internal error: coinbase transaction in the block has the wrong type " ;
return false ;
}
uint64_t block_height = boost : : get < txin_gen > ( blk . miner_tx . vin . front ( ) ) . height ;
if ( block_height ! = h )
{
error_resp . code = CORE_RPC_ERROR_CODE_INTERNAL_ERROR ;
error_resp . message = " Internal error: coinbase transaction in the block has the wrong height " ;
return false ;
}
2016-10-04 11:55:55 +00:00
res . headers . push_back ( block_header_response ( ) ) ;
2019-03-01 16:14:51 +00:00
const bool restricted = m_restricted & & ctx ;
bool response_filled = fill_block_header_response ( blk , false , block_height , block_hash , res . headers . back ( ) , req . fill_pow_hash & & ! restricted ) ;
2017-06-18 08:12:54 +00:00
if ( ! response_filled )
2016-10-02 09:21:21 +00:00
{
error_resp . code = CORE_RPC_ERROR_CODE_INTERNAL_ERROR ;
error_resp . message = " Internal error: can't produce valid response. " ;
return false ;
}
}
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 19:09:39 +00:00
bool core_rpc_server : : on_get_block_header_by_height ( const COMMAND_RPC_GET_BLOCK_HEADER_BY_HEIGHT : : request & req , COMMAND_RPC_GET_BLOCK_HEADER_BY_HEIGHT : : response & res , epee : : json_rpc : : error & error_resp , const connection_context * ctx )
{
2017-10-29 21:10:46 +00:00
PERF_TIMER ( on_get_block_header_by_height ) ;
2018-01-20 10:38:14 +00:00
bool r ;
if ( use_bootstrap_daemon_if_necessary < COMMAND_RPC_GET_BLOCK_HEADER_BY_HEIGHT > ( invoke_http_mode : : JON_RPC , " getblockheaderbyheight " , req , res , r ) )
return r ;
2014-04-09 12:14:35 +00:00
if ( m_core . get_current_blockchain_height ( ) < = req . height )
{
error_resp . code = CORE_RPC_ERROR_CODE_TOO_BIG_HEIGHT ;
2018-11-02 23:31:31 +00:00
error_resp . message = std : : string ( " Requested block height: " ) + std : : to_string ( req . height ) + " greater than current top block height: " + std : : to_string ( m_core . get_current_blockchain_height ( ) - 1 ) ;
2014-04-09 12:14:35 +00:00
return false ;
}
crypto : : hash block_hash = m_core . get_block_id_by_height ( req . height ) ;
block blk ;
bool have_block = m_core . get_block_by_hash ( block_hash , blk ) ;
if ( ! have_block )
{
error_resp . code = CORE_RPC_ERROR_CODE_INTERNAL_ERROR ;
2014-04-30 17:52:21 +00:00
error_resp . message = " Internal error: can't get block by height. Height = " + std : : to_string ( req . height ) + ' . ' ;
2014-04-09 12:14:35 +00:00
return false ;
}
2019-03-01 16:14:51 +00:00
const bool restricted = m_restricted & & ctx ;
bool response_filled = fill_block_header_response ( blk , false , req . height , block_hash , res . block_header , req . fill_pow_hash & & ! restricted ) ;
2016-09-29 13:38:12 +00:00
if ( ! response_filled )
2014-04-09 12:14:35 +00:00
{
error_resp . code = CORE_RPC_ERROR_CODE_INTERNAL_ERROR ;
error_resp . message = " Internal error: can't produce valid response. " ;
return false ;
}
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 19:09:39 +00:00
bool core_rpc_server : : on_get_block ( const COMMAND_RPC_GET_BLOCK : : request & req , COMMAND_RPC_GET_BLOCK : : response & res , epee : : json_rpc : : error & error_resp , const connection_context * ctx )
{
2017-10-29 21:10:46 +00:00
PERF_TIMER ( on_get_block ) ;
2018-01-20 10:38:14 +00:00
bool r ;
if ( use_bootstrap_daemon_if_necessary < COMMAND_RPC_GET_BLOCK > ( invoke_http_mode : : JON_RPC , " getblock " , req , res , r ) )
return r ;
2015-10-13 20:37:35 +00:00
crypto : : hash block_hash ;
if ( ! req . hash . empty ( ) )
{
bool hash_parsed = parse_hash256 ( req . hash , block_hash ) ;
if ( ! hash_parsed )
{
error_resp . code = CORE_RPC_ERROR_CODE_WRONG_PARAM ;
error_resp . message = " Failed to parse hex representation of block hash. Hex = " + req . hash + ' . ' ;
return false ;
}
}
else
{
if ( m_core . get_current_blockchain_height ( ) < = req . height )
{
error_resp . code = CORE_RPC_ERROR_CODE_TOO_BIG_HEIGHT ;
2018-11-02 23:31:31 +00:00
error_resp . message = std : : string ( " Requested block height: " ) + std : : to_string ( req . height ) + " greater than current top block height: " + std : : to_string ( m_core . get_current_blockchain_height ( ) - 1 ) ;
2015-10-13 20:37:35 +00:00
return false ;
}
block_hash = m_core . get_block_id_by_height ( req . height ) ;
}
block blk ;
2017-01-22 12:20:55 +00:00
bool orphan = false ;
bool have_block = m_core . get_block_by_hash ( block_hash , blk , & orphan ) ;
2015-10-13 20:37:35 +00:00
if ( ! have_block )
{
error_resp . code = CORE_RPC_ERROR_CODE_INTERNAL_ERROR ;
error_resp . message = " Internal error: can't get block by hash. Hash = " + req . hash + ' . ' ;
return false ;
}
2017-12-11 22:36:58 +00:00
if ( blk . miner_tx . vin . size ( ) ! = 1 | | blk . miner_tx . vin . front ( ) . type ( ) ! = typeid ( txin_gen ) )
2015-10-13 20:37:35 +00:00
{
error_resp . code = CORE_RPC_ERROR_CODE_INTERNAL_ERROR ;
error_resp . message = " Internal error: coinbase transaction in the block has the wrong type " ;
return false ;
}
uint64_t block_height = boost : : get < txin_gen > ( blk . miner_tx . vin . front ( ) ) . height ;
2019-03-01 16:14:51 +00:00
const bool restricted = m_restricted & & ctx ;
bool response_filled = fill_block_header_response ( blk , orphan , block_height , block_hash , res . block_header , req . fill_pow_hash & & ! restricted ) ;
2016-09-29 13:38:12 +00:00
if ( ! response_filled )
2015-10-13 20:37:35 +00:00
{
error_resp . code = CORE_RPC_ERROR_CODE_INTERNAL_ERROR ;
error_resp . message = " Internal error: can't produce valid response. " ;
return false ;
}
2017-10-28 14:25:47 +00:00
res . miner_tx_hash = epee : : string_tools : : pod_to_hex ( cryptonote : : get_transaction_hash ( blk . miner_tx ) ) ;
2015-10-13 20:37:35 +00:00
for ( size_t n = 0 ; n < blk . tx_hashes . size ( ) ; + + n )
{
res . tx_hashes . push_back ( epee : : string_tools : : pod_to_hex ( blk . tx_hashes [ n ] ) ) ;
}
2016-06-09 20:48:29 +00:00
res . blob = string_tools : : buff_to_hex_nodelimer ( t_serializable_object_to_blob ( blk ) ) ;
2015-10-13 20:37:35 +00:00
res . json = obj_to_json_str ( blk ) ;
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 19:09:39 +00:00
bool core_rpc_server : : on_get_connections ( const COMMAND_RPC_GET_CONNECTIONS : : request & req , COMMAND_RPC_GET_CONNECTIONS : : response & res , epee : : json_rpc : : error & error_resp , const connection_context * ctx )
2014-07-18 23:33:03 +00:00
{
2017-10-29 21:10:46 +00:00
PERF_TIMER ( on_get_connections ) ;
2014-07-18 23:33:03 +00:00
res . connections = m_p2p . get_payload_object ( ) . get_connections ( ) ;
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 19:09:39 +00:00
bool core_rpc_server : : on_get_info_json ( const COMMAND_RPC_GET_INFO : : request & req , COMMAND_RPC_GET_INFO : : response & res , epee : : json_rpc : : error & error_resp , const connection_context * ctx )
2014-07-22 18:00:10 +00:00
{
2019-03-29 12:56:47 +00:00
return on_get_info ( req , res , ctx ) ;
2014-07-22 18:00:10 +00:00
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 19:09:39 +00:00
bool core_rpc_server : : on_hard_fork_info ( const COMMAND_RPC_HARD_FORK_INFO : : request & req , COMMAND_RPC_HARD_FORK_INFO : : response & res , epee : : json_rpc : : error & error_resp , const connection_context * ctx )
2015-09-19 15:34:29 +00:00
{
2017-10-29 21:10:46 +00:00
PERF_TIMER ( on_hard_fork_info ) ;
2018-01-20 10:38:14 +00:00
bool r ;
if ( use_bootstrap_daemon_if_necessary < COMMAND_RPC_HARD_FORK_INFO > ( invoke_http_mode : : JON_RPC , " hard_fork_info " , req , res , r ) )
return r ;
2015-09-19 15:34:29 +00:00
const Blockchain & blockchain = m_core . get_blockchain_storage ( ) ;
2016-08-12 18:19:25 +00:00
uint8_t version = req . version > 0 ? req . version : blockchain . get_next_hard_fork_version ( ) ;
2015-09-19 15:34:29 +00:00
res . version = blockchain . get_current_hard_fork_version ( ) ;
2015-12-19 14:52:30 +00:00
res . enabled = blockchain . get_hard_fork_voting_info ( version , res . window , res . votes , res . threshold , res . earliest_height , res . voting ) ;
2015-09-19 15:34:29 +00:00
res . state = blockchain . get_hard_fork_state ( ) ;
2015-10-26 10:17:48 +00:00
res . status = CORE_RPC_STATUS_OK ;
2015-09-19 15:34:29 +00:00
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 19:09:39 +00:00
bool core_rpc_server : : on_get_bans ( const COMMAND_RPC_GETBANS : : request & req , COMMAND_RPC_GETBANS : : response & res , epee : : json_rpc : : error & error_resp , const connection_context * ctx )
2015-11-26 00:04:22 +00:00
{
2017-10-29 21:10:46 +00:00
PERF_TIMER ( on_get_bans ) ;
2015-11-26 00:04:22 +00:00
2016-03-12 13:44:55 +00:00
auto now = time ( nullptr ) ;
2019-03-28 22:24:36 +00:00
std : : map < epee : : net_utils : : network_address , time_t > blocked_hosts = m_p2p . get_blocked_hosts ( ) ;
for ( std : : map < epee : : net_utils : : network_address , time_t > : : const_iterator i = blocked_hosts . begin ( ) ; i ! = blocked_hosts . end ( ) ; + + i )
2015-11-26 00:04:22 +00:00
{
2016-03-12 13:44:55 +00:00
if ( i - > second > now ) {
COMMAND_RPC_GETBANS : : ban b ;
2019-03-28 22:24:36 +00:00
b . host = i - > first . host_str ( ) ;
2017-05-27 10:35:54 +00:00
b . ip = 0 ;
uint32_t ip ;
2019-03-28 22:24:36 +00:00
if ( epee : : string_tools : : get_ip_int32_from_string ( ip , b . host ) )
2017-05-27 10:35:54 +00:00
b . ip = ip ;
2016-03-12 13:44:55 +00:00
b . seconds = i - > second - now ;
res . bans . push_back ( b ) ;
}
2015-11-26 00:04:22 +00:00
}
2019-03-29 10:47:53 +00:00
std : : map < epee : : net_utils : : ipv4_network_subnet , time_t > blocked_subnets = m_p2p . get_blocked_subnets ( ) ;
for ( std : : map < epee : : net_utils : : ipv4_network_subnet , time_t > : : const_iterator i = blocked_subnets . begin ( ) ; i ! = blocked_subnets . end ( ) ; + + i )
{
if ( i - > second > now ) {
COMMAND_RPC_GETBANS : : ban b ;
b . host = i - > first . host_str ( ) ;
b . ip = 0 ;
b . seconds = i - > second - now ;
res . bans . push_back ( b ) ;
}
}
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
bool core_rpc_server : : on_banned ( const COMMAND_RPC_BANNED : : request & req , COMMAND_RPC_BANNED : : response & res , epee : : json_rpc : : error & error_resp , const connection_context * ctx )
{
PERF_TIMER ( on_banned ) ;
auto na_parsed = net : : get_network_address ( req . address , 0 ) ;
if ( ! na_parsed )
{
error_resp . code = CORE_RPC_ERROR_CODE_WRONG_PARAM ;
error_resp . message = " Unsupported host type " ;
return false ;
}
epee : : net_utils : : network_address na = std : : move ( * na_parsed ) ;
time_t seconds ;
if ( m_p2p . is_host_blocked ( na , & seconds ) )
{
res . banned = true ;
res . seconds = seconds ;
}
else
{
res . banned = false ;
res . seconds = 0 ;
}
2015-11-26 00:04:22 +00:00
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 19:09:39 +00:00
bool core_rpc_server : : on_set_bans ( const COMMAND_RPC_SETBANS : : request & req , COMMAND_RPC_SETBANS : : response & res , epee : : json_rpc : : error & error_resp , const connection_context * ctx )
2015-11-26 00:04:22 +00:00
{
2017-10-29 21:10:46 +00:00
PERF_TIMER ( on_set_bans ) ;
2015-11-26 00:04:22 +00:00
for ( auto i = req . bans . begin ( ) ; i ! = req . bans . end ( ) ; + + i )
{
2017-05-27 10:35:54 +00:00
epee : : net_utils : : network_address na ;
2019-03-29 10:47:53 +00:00
// try subnet first
if ( ! i - > host . empty ( ) )
{
auto ns_parsed = net : : get_ipv4_subnet_address ( i - > host ) ;
if ( ns_parsed )
{
if ( i - > ban )
m_p2p . block_subnet ( * ns_parsed , i - > seconds ) ;
else
m_p2p . unblock_subnet ( * ns_parsed ) ;
continue ;
}
}
// then host
2017-05-27 10:35:54 +00:00
if ( ! i - > host . empty ( ) )
{
2018-12-16 17:57:44 +00:00
auto na_parsed = net : : get_network_address ( i - > host , 0 ) ;
if ( ! na_parsed )
2017-05-27 10:35:54 +00:00
{
error_resp . code = CORE_RPC_ERROR_CODE_WRONG_PARAM ;
2019-03-29 10:47:53 +00:00
error_resp . message = " Unsupported host/subnet type " ;
2017-05-27 10:35:54 +00:00
return false ;
}
2018-12-16 17:57:44 +00:00
na = std : : move ( * na_parsed ) ;
2017-05-27 10:35:54 +00:00
}
else
{
2017-08-25 15:14:46 +00:00
na = epee : : net_utils : : ipv4_network_address { i - > ip , 0 } ;
2017-05-27 10:35:54 +00:00
}
2015-11-26 00:04:22 +00:00
if ( i - > ban )
2017-05-27 10:35:54 +00:00
m_p2p . block_host ( na , i - > seconds ) ;
2015-11-26 00:04:22 +00:00
else
2017-05-27 10:35:54 +00:00
m_p2p . unblock_host ( na ) ;
2015-11-26 00:04:22 +00:00
}
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 19:09:39 +00:00
bool core_rpc_server : : on_flush_txpool ( const COMMAND_RPC_FLUSH_TRANSACTION_POOL : : request & req , COMMAND_RPC_FLUSH_TRANSACTION_POOL : : response & res , epee : : json_rpc : : error & error_resp , const connection_context * ctx )
2016-01-30 13:28:26 +00:00
{
2017-10-29 21:10:46 +00:00
PERF_TIMER ( on_flush_txpool ) ;
2016-01-30 13:28:26 +00:00
bool failed = false ;
2018-04-15 23:16:02 +00:00
std : : vector < crypto : : hash > txids ;
2016-01-30 13:28:26 +00:00
if ( req . txids . empty ( ) )
{
2018-04-15 23:16:02 +00:00
std : : vector < transaction > pool_txs ;
2016-01-30 13:28:26 +00:00
bool r = m_core . get_pool_transactions ( pool_txs ) ;
if ( ! r )
{
res . status = " Failed to get txpool contents " ;
return true ;
}
for ( const auto & tx : pool_txs )
{
txids . push_back ( cryptonote : : get_transaction_hash ( tx ) ) ;
}
}
else
{
for ( const auto & str : req . txids )
{
cryptonote : : blobdata txid_data ;
if ( ! epee : : string_tools : : parse_hexstr_to_binbuff ( str , txid_data ) )
{
failed = true ;
}
2017-12-07 21:33:20 +00:00
else
{
crypto : : hash txid = * reinterpret_cast < const crypto : : hash * > ( txid_data . data ( ) ) ;
txids . push_back ( txid ) ;
}
2016-01-30 13:28:26 +00:00
}
}
if ( ! m_core . get_blockchain_storage ( ) . flush_txes_from_pool ( txids ) )
{
2017-12-07 21:33:20 +00:00
res . status = " Failed to remove one or more tx(es) " ;
2016-01-30 13:28:26 +00:00
return false ;
}
if ( failed )
{
2017-12-07 21:33:20 +00:00
if ( txids . empty ( ) )
res . status = " Failed to parse txid " ;
else
res . status = " Failed to parse some of the txids " ;
2016-01-30 13:28:26 +00:00
return false ;
}
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 19:09:39 +00:00
bool core_rpc_server : : on_get_output_histogram ( const COMMAND_RPC_GET_OUTPUT_HISTOGRAM : : request & req , COMMAND_RPC_GET_OUTPUT_HISTOGRAM : : response & res , epee : : json_rpc : : error & error_resp , const connection_context * ctx )
2016-03-26 14:30:23 +00:00
{
2017-10-29 21:10:46 +00:00
PERF_TIMER ( on_get_output_histogram ) ;
2018-01-20 10:38:14 +00:00
bool r ;
if ( use_bootstrap_daemon_if_necessary < COMMAND_RPC_GET_OUTPUT_HISTOGRAM > ( invoke_http_mode : : JON_RPC , " get_output_histogram " , req , res , r ) )
return r ;
2016-03-26 14:30:23 +00:00
2019-05-12 13:27:34 +00:00
const bool restricted = m_restricted & & ctx ;
if ( restricted & & req . recent_cutoff > 0 & & req . recent_cutoff < ( uint64_t ) time ( NULL ) - OUTPUT_HISTOGRAM_RECENT_CUTOFF_RESTRICTION )
{
res . status = " Recent cutoff is too old " ;
return true ;
}
2016-09-17 14:45:51 +00:00
std : : map < uint64_t , std : : tuple < uint64_t , uint64_t , uint64_t > > histogram ;
2016-03-26 14:30:23 +00:00
try
{
2018-03-22 17:51:58 +00:00
histogram = m_core . get_blockchain_storage ( ) . get_output_histogram ( req . amounts , req . unlocked , req . recent_cutoff , req . min_count ) ;
2016-03-26 14:30:23 +00:00
}
catch ( const std : : exception & e )
{
res . status = " Failed to get output histogram " ;
return true ;
}
res . histogram . clear ( ) ;
res . histogram . reserve ( histogram . size ( ) ) ;
for ( const auto & i : histogram )
{
2016-09-17 14:45:51 +00:00
if ( std : : get < 0 > ( i . second ) > = req . min_count & & ( std : : get < 0 > ( i . second ) < = req . max_count | | req . max_count = = 0 ) )
res . histogram . push_back ( COMMAND_RPC_GET_OUTPUT_HISTOGRAM : : entry ( i . first , std : : get < 0 > ( i . second ) , std : : get < 1 > ( i . second ) , std : : get < 2 > ( i . second ) ) ) ;
2016-03-26 14:30:23 +00:00
}
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 19:09:39 +00:00
bool core_rpc_server : : on_get_version ( const COMMAND_RPC_GET_VERSION : : request & req , COMMAND_RPC_GET_VERSION : : response & res , epee : : json_rpc : : error & error_resp , const connection_context * ctx )
2016-07-10 15:49:40 +00:00
{
2017-10-29 21:10:46 +00:00
PERF_TIMER ( on_get_version ) ;
2018-01-20 10:38:14 +00:00
bool r ;
if ( use_bootstrap_daemon_if_necessary < COMMAND_RPC_GET_VERSION > ( invoke_http_mode : : JON_RPC , " get_version " , req , res , r ) )
return r ;
2016-07-10 15:49:40 +00:00
res . version = CORE_RPC_VERSION ;
2019-05-17 09:04:38 +00:00
res . release = MONERO_VERSION_IS_RELEASE ;
2016-07-10 15:49:40 +00:00
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 19:09:39 +00:00
bool core_rpc_server : : on_get_coinbase_tx_sum ( const COMMAND_RPC_GET_COINBASE_TX_SUM : : request & req , COMMAND_RPC_GET_COINBASE_TX_SUM : : response & res , epee : : json_rpc : : error & error_resp , const connection_context * ctx )
2016-10-10 19:45:51 +00:00
{
2017-10-29 21:10:46 +00:00
PERF_TIMER ( on_get_coinbase_tx_sum ) ;
2016-10-10 23:55:18 +00:00
std : : pair < uint64_t , uint64_t > amounts = m_core . get_coinbase_tx_sum ( req . height , req . count ) ;
res . emission_amount = amounts . first ;
res . fee_amount = amounts . second ;
2017-02-13 23:05:33 +00:00
res . status = CORE_RPC_STATUS_OK ;
2016-10-10 19:45:51 +00:00
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 19:09:39 +00:00
bool core_rpc_server : : on_get_base_fee_estimate ( const COMMAND_RPC_GET_BASE_FEE_ESTIMATE : : request & req , COMMAND_RPC_GET_BASE_FEE_ESTIMATE : : response & res , epee : : json_rpc : : error & error_resp , const connection_context * ctx )
2016-10-28 20:19:40 +00:00
{
2018-07-18 21:24:53 +00:00
PERF_TIMER ( on_get_base_fee_estimate ) ;
2018-01-20 10:38:14 +00:00
bool r ;
2018-07-18 21:24:53 +00:00
if ( use_bootstrap_daemon_if_necessary < COMMAND_RPC_GET_BASE_FEE_ESTIMATE > ( invoke_http_mode : : JON_RPC , " get_fee_estimate " , req , res , r ) )
2018-01-20 10:38:14 +00:00
return r ;
2018-07-18 21:24:53 +00:00
res . fee = m_core . get_blockchain_storage ( ) . get_dynamic_base_fee_estimate ( req . grace_blocks ) ;
res . quantization_mask = Blockchain : : get_fee_quantization_mask ( ) ;
2016-10-28 20:19:40 +00:00
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 19:09:39 +00:00
bool core_rpc_server : : on_get_alternate_chains ( const COMMAND_RPC_GET_ALTERNATE_CHAINS : : request & req , COMMAND_RPC_GET_ALTERNATE_CHAINS : : response & res , epee : : json_rpc : : error & error_resp , const connection_context * ctx )
2016-12-17 11:25:15 +00:00
{
2017-10-29 21:10:46 +00:00
PERF_TIMER ( on_get_alternate_chains ) ;
2016-12-17 11:25:15 +00:00
try
{
2019-05-02 22:23:00 +00:00
std : : vector < std : : pair < Blockchain : : block_extended_info , std : : vector < crypto : : hash > > > chains = m_core . get_blockchain_storage ( ) . get_alternative_chains ( ) ;
2016-12-17 11:25:15 +00:00
for ( const auto & i : chains )
{
2019-01-31 10:44:08 +00:00
difficulty_type wdiff = i . first . cumulative_difficulty ;
res . chains . push_back ( COMMAND_RPC_GET_ALTERNATE_CHAINS : : chain_info { epee : : string_tools : : pod_to_hex ( get_block_hash ( i . first . bl ) ) , i . first . height , i . second . size ( ) , 0 , " " , 0 , { } , std : : string ( ) } ) ;
store_difficulty ( wdiff , res . chains . back ( ) . difficulty , res . chains . back ( ) . wide_difficulty , res . chains . back ( ) . difficulty_top64 ) ;
2018-05-19 22:53:05 +00:00
res . chains . back ( ) . block_hashes . reserve ( i . second . size ( ) ) ;
for ( const crypto : : hash & block_id : i . second )
res . chains . back ( ) . block_hashes . push_back ( epee : : string_tools : : pod_to_hex ( block_id ) ) ;
if ( i . first . height < i . second . size ( ) )
{
res . status = " Error finding alternate chain attachment point " ;
return true ;
}
cryptonote : : block main_chain_parent_block ;
try { main_chain_parent_block = m_core . get_blockchain_storage ( ) . get_db ( ) . get_block_from_height ( i . first . height - i . second . size ( ) ) ; }
catch ( const std : : exception & e ) { res . status = " Error finding alternate chain attachment point " ; return true ; }
res . chains . back ( ) . main_chain_parent_block = epee : : string_tools : : pod_to_hex ( get_block_hash ( main_chain_parent_block ) ) ;
2016-12-17 11:25:15 +00:00
}
res . status = CORE_RPC_STATUS_OK ;
}
catch ( . . . )
{
res . status = " Error retrieving alternate chains " ;
}
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 19:09:39 +00:00
bool core_rpc_server : : on_get_limit ( const COMMAND_RPC_GET_LIMIT : : request & req , COMMAND_RPC_GET_LIMIT : : response & res , const connection_context * ctx )
2017-09-17 19:19:53 +00:00
{
2017-10-29 21:10:46 +00:00
PERF_TIMER ( on_get_limit ) ;
2018-01-20 10:38:14 +00:00
bool r ;
if ( use_bootstrap_daemon_if_necessary < COMMAND_RPC_GET_LIMIT > ( invoke_http_mode : : JON , " /get_limit " , req , res , r ) )
return r ;
2017-09-17 19:19:53 +00:00
res . limit_down = epee : : net_utils : : connection_basic : : get_rate_down_limit ( ) ;
res . limit_up = epee : : net_utils : : connection_basic : : get_rate_up_limit ( ) ;
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 19:09:39 +00:00
bool core_rpc_server : : on_set_limit ( const COMMAND_RPC_SET_LIMIT : : request & req , COMMAND_RPC_SET_LIMIT : : response & res , const connection_context * ctx )
2017-09-17 19:19:53 +00:00
{
2017-10-29 21:10:46 +00:00
PERF_TIMER ( on_set_limit ) ;
2017-09-17 19:19:53 +00:00
// -1 = reset to default
// 0 = do not modify
if ( req . limit_down > 0 )
{
epee : : net_utils : : connection_basic : : set_rate_down_limit ( req . limit_down ) ;
}
else if ( req . limit_down < 0 )
{
if ( req . limit_down ! = - 1 )
{
res . status = CORE_RPC_ERROR_CODE_WRONG_PARAM ;
return false ;
}
2017-11-26 14:26:17 +00:00
epee : : net_utils : : connection_basic : : set_rate_down_limit ( nodetool : : default_limit_down ) ;
2017-09-17 19:19:53 +00:00
}
if ( req . limit_up > 0 )
{
epee : : net_utils : : connection_basic : : set_rate_up_limit ( req . limit_up ) ;
}
else if ( req . limit_up < 0 )
{
if ( req . limit_up ! = - 1 )
{
res . status = CORE_RPC_ERROR_CODE_WRONG_PARAM ;
return false ;
}
2017-11-26 14:26:17 +00:00
epee : : net_utils : : connection_basic : : set_rate_up_limit ( nodetool : : default_limit_up ) ;
2017-09-17 19:19:53 +00:00
}
res . limit_down = epee : : net_utils : : connection_basic : : get_rate_down_limit ( ) ;
res . limit_up = epee : : net_utils : : connection_basic : : get_rate_up_limit ( ) ;
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 19:09:39 +00:00
bool core_rpc_server : : on_out_peers ( const COMMAND_RPC_OUT_PEERS : : request & req , COMMAND_RPC_OUT_PEERS : : response & res , const connection_context * ctx )
2015-04-01 17:00:45 +00:00
{
2017-10-29 21:10:46 +00:00
PERF_TIMER ( on_out_peers ) ;
2019-05-28 17:54:41 +00:00
if ( req . set )
m_p2p . change_max_out_public_peers ( req . out_peers ) ;
res . out_peers = m_p2p . get_max_out_public_peers ( ) ;
2017-10-06 07:40:14 +00:00
res . status = CORE_RPC_STATUS_OK ;
return true ;
2015-04-01 17:00:45 +00:00
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 19:09:39 +00:00
bool core_rpc_server : : on_in_peers ( const COMMAND_RPC_IN_PEERS : : request & req , COMMAND_RPC_IN_PEERS : : response & res , const connection_context * ctx )
2018-01-20 21:44:23 +00:00
{
PERF_TIMER ( on_in_peers ) ;
2019-05-28 17:54:41 +00:00
if ( req . set )
m_p2p . change_max_in_public_peers ( req . in_peers ) ;
res . in_peers = m_p2p . get_max_in_public_peers ( ) ;
2018-01-20 21:44:23 +00:00
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 19:09:39 +00:00
bool core_rpc_server : : on_update ( const COMMAND_RPC_UPDATE : : request & req , COMMAND_RPC_UPDATE : : response & res , const connection_context * ctx )
2017-02-24 23:16:13 +00:00
{
2017-10-29 21:10:46 +00:00
PERF_TIMER ( on_update ) ;
2019-05-09 19:14:17 +00:00
if ( m_core . offline ( ) )
{
res . status = " Daemon is running offline " ;
return true ;
}
2017-02-24 23:16:13 +00:00
static const char software [ ] = " monero " ;
2017-03-04 18:45:33 +00:00
# ifdef BUILD_TAG
static const char buildtag [ ] = BOOST_PP_STRINGIZE ( BUILD_TAG ) ;
2017-09-22 20:48:19 +00:00
static const char subdir [ ] = " cli " ;
2017-02-24 23:16:13 +00:00
# else
static const char buildtag [ ] = " source " ;
2017-09-22 20:48:19 +00:00
static const char subdir [ ] = " source " ;
2017-02-24 23:16:13 +00:00
# endif
if ( req . command ! = " check " & & req . command ! = " download " & & req . command ! = " update " )
{
res . status = std : : string ( " unknown command: ' " ) + req . command + " ' " ;
return true ;
}
std : : string version , hash ;
if ( ! tools : : check_updates ( software , buildtag , version , hash ) )
{
res . status = " Error checking for updates " ;
return true ;
}
if ( tools : : vercmp ( version . c_str ( ) , MONERO_VERSION ) < = 0 )
{
res . update = false ;
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
res . update = true ;
res . version = version ;
2017-09-22 20:48:19 +00:00
res . user_uri = tools : : get_update_url ( software , subdir , buildtag , version , true ) ;
res . auto_uri = tools : : get_update_url ( software , subdir , buildtag , version , false ) ;
2017-02-24 23:16:13 +00:00
res . hash = hash ;
if ( req . command = = " check " )
{
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
boost : : filesystem : : path path ;
if ( req . path . empty ( ) )
{
std : : string filename ;
const char * slash = strrchr ( res . auto_uri . c_str ( ) , ' / ' ) ;
if ( slash )
filename = slash + 1 ;
else
filename = std : : string ( software ) + " -update- " + version ;
path = epee : : string_tools : : get_current_module_folder ( ) ;
path / = filename ;
}
else
{
path = req . path ;
}
crypto : : hash file_hash ;
if ( ! tools : : sha256sum ( path . string ( ) , file_hash ) | | ( hash ! = epee : : string_tools : : pod_to_hex ( file_hash ) ) )
{
MDEBUG ( " We don't have that file already, downloading " ) ;
if ( ! tools : : download ( path . string ( ) , res . auto_uri ) )
{
MERROR ( " Failed to download " < < res . auto_uri ) ;
return false ;
}
if ( ! tools : : sha256sum ( path . string ( ) , file_hash ) )
{
MERROR ( " Failed to hash " < < path ) ;
return false ;
}
if ( hash ! = epee : : string_tools : : pod_to_hex ( file_hash ) )
{
MERROR ( " Download from " < < res . auto_uri < < " does not match the expected hash " ) ;
return false ;
}
MINFO ( " New version downloaded to " < < path ) ;
}
else
{
MDEBUG ( " We already have " < < path < < " with expected hash " ) ;
}
res . path = path . string ( ) ;
if ( req . command = = " download " )
{
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
res . status = " 'update' not implemented yet " ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 19:09:39 +00:00
bool core_rpc_server : : on_pop_blocks ( const COMMAND_RPC_POP_BLOCKS : : request & req , COMMAND_RPC_POP_BLOCKS : : response & res , const connection_context * ctx )
2018-11-25 21:08:07 +00:00
{
PERF_TIMER ( on_pop_blocks ) ;
m_core . get_blockchain_storage ( ) . pop_blocks ( req . nblocks ) ;
res . height = m_core . get_current_blockchain_height ( ) ;
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 19:09:39 +00:00
bool core_rpc_server : : on_relay_tx ( const COMMAND_RPC_RELAY_TX : : request & req , COMMAND_RPC_RELAY_TX : : response & res , epee : : json_rpc : : error & error_resp , const connection_context * ctx )
2017-04-02 11:17:35 +00:00
{
2017-10-29 21:10:46 +00:00
PERF_TIMER ( on_relay_tx ) ;
2017-04-02 11:17:35 +00:00
bool failed = false ;
2017-12-07 21:33:20 +00:00
res . status = " " ;
2017-04-02 11:17:35 +00:00
for ( const auto & str : req . txids )
{
cryptonote : : blobdata txid_data ;
if ( ! epee : : string_tools : : parse_hexstr_to_binbuff ( str , txid_data ) )
{
2017-12-07 21:33:20 +00:00
if ( ! res . status . empty ( ) ) res . status + = " , " ;
res . status + = std : : string ( " invalid transaction id: " ) + str ;
2017-04-02 11:17:35 +00:00
failed = true ;
2017-12-07 21:33:20 +00:00
continue ;
2017-04-02 11:17:35 +00:00
}
crypto : : hash txid = * reinterpret_cast < const crypto : : hash * > ( txid_data . data ( ) ) ;
2017-05-14 13:06:55 +00:00
cryptonote : : blobdata txblob ;
bool r = m_core . get_pool_transaction ( txid , txblob ) ;
2017-04-02 11:17:35 +00:00
if ( r )
{
cryptonote_connection_context fake_context = AUTO_VAL_INIT ( fake_context ) ;
NOTIFY_NEW_TRANSACTIONS : : request r ;
2017-05-14 13:06:55 +00:00
r . txs . push_back ( txblob ) ;
2017-04-02 11:17:35 +00:00
m_core . get_protocol ( ) - > relay_transactions ( r , fake_context ) ;
//TODO: make sure that tx has reached other nodes here, probably wait to receive reflections from other nodes
}
else
{
2017-12-07 21:33:20 +00:00
if ( ! res . status . empty ( ) ) res . status + = " , " ;
res . status + = std : : string ( " transaction not found in pool: " ) + str ;
2017-04-02 11:17:35 +00:00
failed = true ;
2017-12-07 21:33:20 +00:00
continue ;
2017-04-02 11:17:35 +00:00
}
}
if ( failed )
{
return false ;
}
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 19:09:39 +00:00
bool core_rpc_server : : on_sync_info ( const COMMAND_RPC_SYNC_INFO : : request & req , COMMAND_RPC_SYNC_INFO : : response & res , epee : : json_rpc : : error & error_resp , const connection_context * ctx )
2017-07-02 21:41:15 +00:00
{
2017-10-29 21:10:46 +00:00
PERF_TIMER ( on_sync_info ) ;
2017-07-02 21:41:15 +00:00
crypto : : hash top_hash ;
2017-09-09 11:06:24 +00:00
m_core . get_blockchain_top ( res . height , top_hash ) ;
2017-07-02 21:41:15 +00:00
+ + res . height ; // turn top block height into blockchain height
res . target_height = m_core . get_target_blockchain_height ( ) ;
Pruning
The blockchain prunes seven eighths of prunable tx data.
This saves about two thirds of the blockchain size, while
keeping the node useful as a sync source for an eighth
of the blockchain.
No other data is currently pruned.
There are three ways to prune a blockchain:
- run monerod with --prune-blockchain
- run "prune_blockchain" in the monerod console
- run the monero-blockchain-prune utility
The first two will prune in place. Due to how LMDB works, this
will not reduce the blockchain size on disk. Instead, it will
mark parts of the file as free, so that future data will use
that free space, causing the file to not grow until free space
grows scarce.
The third way will create a second database, a pruned copy of
the original one. Since this is a new file, this one will be
smaller than the original one.
Once the database is pruned, it will stay pruned as it syncs.
That is, there is no need to use --prune-blockchain again, etc.
2018-04-29 22:30:51 +00:00
res . next_needed_pruning_seed = m_p2p . get_payload_object ( ) . get_next_needed_pruning_stripe ( ) . second ;
2017-07-02 21:41:15 +00:00
for ( const auto & c : m_p2p . get_payload_object ( ) . get_connections ( ) )
res . peers . push_back ( { c } ) ;
const cryptonote : : block_queue & block_queue = m_p2p . get_payload_object ( ) . get_block_queue ( ) ;
block_queue . foreach ( [ & ] ( const cryptonote : : block_queue : : span & span ) {
2017-11-17 23:52:50 +00:00
const std : : string span_connection_id = epee : : string_tools : : pod_to_hex ( span . connection_id ) ;
2017-07-02 21:41:15 +00:00
uint32_t speed = ( uint32_t ) ( 100.0f * block_queue . get_speed ( span . connection_id ) + 0.5f ) ;
std : : string address = " " ;
for ( const auto & c : m_p2p . get_payload_object ( ) . get_connections ( ) )
2017-11-17 23:52:50 +00:00
if ( c . connection_id = = span_connection_id )
2017-07-02 21:41:15 +00:00
address = c . address ;
2017-11-17 23:52:50 +00:00
res . spans . push_back ( { span . start_block_height , span . nblocks , span_connection_id , ( uint32_t ) ( span . rate + 0.5f ) , speed , span . size , address } ) ;
2017-07-02 21:41:15 +00:00
return true ;
} ) ;
Pruning
The blockchain prunes seven eighths of prunable tx data.
This saves about two thirds of the blockchain size, while
keeping the node useful as a sync source for an eighth
of the blockchain.
No other data is currently pruned.
There are three ways to prune a blockchain:
- run monerod with --prune-blockchain
- run "prune_blockchain" in the monerod console
- run the monero-blockchain-prune utility
The first two will prune in place. Due to how LMDB works, this
will not reduce the blockchain size on disk. Instead, it will
mark parts of the file as free, so that future data will use
that free space, causing the file to not grow until free space
grows scarce.
The third way will create a second database, a pruned copy of
the original one. Since this is a new file, this one will be
smaller than the original one.
Once the database is pruned, it will stay pruned as it syncs.
That is, there is no need to use --prune-blockchain again, etc.
2018-04-29 22:30:51 +00:00
res . overview = block_queue . get_overview ( res . height ) ;
2017-07-02 21:41:15 +00:00
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 19:09:39 +00:00
bool core_rpc_server : : on_get_txpool_backlog ( const COMMAND_RPC_GET_TRANSACTION_POOL_BACKLOG : : request & req , COMMAND_RPC_GET_TRANSACTION_POOL_BACKLOG : : response & res , epee : : json_rpc : : error & error_resp , const connection_context * ctx )
2017-08-26 15:23:31 +00:00
{
2017-10-29 21:10:46 +00:00
PERF_TIMER ( on_get_txpool_backlog ) ;
2018-01-20 10:38:14 +00:00
bool r ;
if ( use_bootstrap_daemon_if_necessary < COMMAND_RPC_GET_TRANSACTION_POOL_BACKLOG > ( invoke_http_mode : : JON_RPC , " get_txpool_backlog " , req , res , r ) )
return r ;
2017-08-26 15:23:31 +00:00
if ( ! m_core . get_txpool_backlog ( res . backlog ) )
{
error_resp . code = CORE_RPC_ERROR_CODE_INTERNAL_ERROR ;
error_resp . message = " Failed to get txpool backlog " ;
return false ;
}
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 19:09:39 +00:00
bool core_rpc_server : : on_get_output_distribution ( const COMMAND_RPC_GET_OUTPUT_DISTRIBUTION : : request & req , COMMAND_RPC_GET_OUTPUT_DISTRIBUTION : : response & res , epee : : json_rpc : : error & error_resp , const connection_context * ctx )
2018-02-19 11:15:15 +00:00
{
2018-04-08 12:23:49 +00:00
PERF_TIMER ( on_get_output_distribution ) ;
2018-05-22 13:46:30 +00:00
bool r ;
if ( use_bootstrap_daemon_if_necessary < COMMAND_RPC_GET_OUTPUT_DISTRIBUTION > ( invoke_http_mode : : JON_RPC , " get_output_distribution " , req , res , r ) )
return r ;
2018-02-19 11:15:15 +00:00
try
{
2018-10-19 09:20:03 +00:00
// 0 is placeholder for the whole chain
const uint64_t req_to_height = req . to_height ? req . to_height : ( m_core . get_current_blockchain_height ( ) - 1 ) ;
2018-02-19 11:15:15 +00:00
for ( uint64_t amount : req . amounts )
{
2019-04-25 16:41:33 +00:00
auto data = rpc : : RpcHandler : : get_output_distribution ( [ this ] ( uint64_t amount , uint64_t from , uint64_t to , uint64_t & start_height , std : : vector < uint64_t > & distribution , uint64_t & base ) { return m_core . get_output_distribution ( amount , from , to , start_height , distribution , base ) ; } , amount , req . from_height , req_to_height , [ this ] ( uint64_t height ) { return m_core . get_blockchain_storage ( ) . get_db ( ) . get_block_hash_from_height ( height ) ; } , req . cumulative , m_core . get_current_blockchain_height ( ) ) ;
2018-10-20 02:06:03 +00:00
if ( ! data )
2018-02-19 11:15:15 +00:00
{
error_resp . code = CORE_RPC_ERROR_CODE_INTERNAL_ERROR ;
2018-10-20 02:06:03 +00:00
error_resp . message = " Failed to get output distribution " ;
2018-02-19 11:15:15 +00:00
return false ;
}
2018-05-31 15:53:56 +00:00
2018-11-08 18:26:59 +00:00
res . distributions . push_back ( { std : : move ( * data ) , amount , " " , req . binary , req . compress } ) ;
2018-02-19 11:15:15 +00:00
}
}
catch ( const std : : exception & e )
{
error_resp . code = CORE_RPC_ERROR_CODE_INTERNAL_ERROR ;
error_resp . message = " Failed to get output distribution " ;
return false ;
}
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 19:09:39 +00:00
bool core_rpc_server : : on_get_output_distribution_bin ( const COMMAND_RPC_GET_OUTPUT_DISTRIBUTION : : request & req , COMMAND_RPC_GET_OUTPUT_DISTRIBUTION : : response & res , const connection_context * ctx )
2018-11-08 18:26:59 +00:00
{
PERF_TIMER ( on_get_output_distribution_bin ) ;
bool r ;
if ( use_bootstrap_daemon_if_necessary < COMMAND_RPC_GET_OUTPUT_DISTRIBUTION > ( invoke_http_mode : : BIN , " /get_output_distribution.bin " , req , res , r ) )
return r ;
res . status = " Failed " ;
if ( ! req . binary )
{
res . status = " Binary only call " ;
return false ;
}
try
{
// 0 is placeholder for the whole chain
const uint64_t req_to_height = req . to_height ? req . to_height : ( m_core . get_current_blockchain_height ( ) - 1 ) ;
for ( uint64_t amount : req . amounts )
{
2019-04-25 16:41:33 +00:00
auto data = rpc : : RpcHandler : : get_output_distribution ( [ this ] ( uint64_t amount , uint64_t from , uint64_t to , uint64_t & start_height , std : : vector < uint64_t > & distribution , uint64_t & base ) { return m_core . get_output_distribution ( amount , from , to , start_height , distribution , base ) ; } , amount , req . from_height , req_to_height , [ this ] ( uint64_t height ) { return m_core . get_blockchain_storage ( ) . get_db ( ) . get_block_hash_from_height ( height ) ; } , req . cumulative , m_core . get_current_blockchain_height ( ) ) ;
2018-11-08 18:26:59 +00:00
if ( ! data )
{
res . status = " Failed to get output distribution " ;
return false ;
}
res . distributions . push_back ( { std : : move ( * data ) , amount , " " , req . binary , req . compress } ) ;
}
}
catch ( const std : : exception & e )
{
res . status = " Failed to get output distribution " ;
return false ;
}
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 19:09:39 +00:00
bool core_rpc_server : : on_prune_blockchain ( const COMMAND_RPC_PRUNE_BLOCKCHAIN : : request & req , COMMAND_RPC_PRUNE_BLOCKCHAIN : : response & res , epee : : json_rpc : : error & error_resp , const connection_context * ctx )
Pruning
The blockchain prunes seven eighths of prunable tx data.
This saves about two thirds of the blockchain size, while
keeping the node useful as a sync source for an eighth
of the blockchain.
No other data is currently pruned.
There are three ways to prune a blockchain:
- run monerod with --prune-blockchain
- run "prune_blockchain" in the monerod console
- run the monero-blockchain-prune utility
The first two will prune in place. Due to how LMDB works, this
will not reduce the blockchain size on disk. Instead, it will
mark parts of the file as free, so that future data will use
that free space, causing the file to not grow until free space
grows scarce.
The third way will create a second database, a pruned copy of
the original one. Since this is a new file, this one will be
smaller than the original one.
Once the database is pruned, it will stay pruned as it syncs.
That is, there is no need to use --prune-blockchain again, etc.
2018-04-29 22:30:51 +00:00
{
try
{
if ( ! ( req . check ? m_core . check_blockchain_pruning ( ) : m_core . prune_blockchain ( ) ) )
{
error_resp . code = CORE_RPC_ERROR_CODE_INTERNAL_ERROR ;
error_resp . message = req . check ? " Failed to check blockchain pruning " : " Failed to prune blockchain " ;
return false ;
}
res . pruning_seed = m_core . get_blockchain_pruning_seed ( ) ;
2019-04-16 15:14:18 +00:00
res . pruned = res . pruning_seed ! = 0 ;
Pruning
The blockchain prunes seven eighths of prunable tx data.
This saves about two thirds of the blockchain size, while
keeping the node useful as a sync source for an eighth
of the blockchain.
No other data is currently pruned.
There are three ways to prune a blockchain:
- run monerod with --prune-blockchain
- run "prune_blockchain" in the monerod console
- run the monero-blockchain-prune utility
The first two will prune in place. Due to how LMDB works, this
will not reduce the blockchain size on disk. Instead, it will
mark parts of the file as free, so that future data will use
that free space, causing the file to not grow until free space
grows scarce.
The third way will create a second database, a pruned copy of
the original one. Since this is a new file, this one will be
smaller than the original one.
Once the database is pruned, it will stay pruned as it syncs.
That is, there is no need to use --prune-blockchain again, etc.
2018-04-29 22:30:51 +00:00
}
catch ( const std : : exception & e )
{
error_resp . code = CORE_RPC_ERROR_CODE_INTERNAL_ERROR ;
error_resp . message = " Failed to prune blockchain " ;
return false ;
}
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2018-02-19 11:15:15 +00:00
2015-01-29 22:10:53 +00:00
2018-02-16 11:04:04 +00:00
// --rpc-bind-port: TCP port for the (unrestricted) RPC server.
// The default depends on the selected network: the validator lambda swaps
// in the testnet/stagenet default port when the corresponding network flag
// is set and the user did not explicitly override the port.
const command_line::arg_descriptor<std::string, false, true, 2> core_rpc_server::arg_rpc_bind_port = {
    "rpc-bind-port"
  , "Port for RPC server"
  , std::to_string(config::RPC_DEFAULT_PORT)
  , {{ &cryptonote::arg_testnet_on, &cryptonote::arg_stagenet_on }}
  , [](std::array<bool, 2> testnet_stagenet, bool defaulted, std::string val)->std::string {
      // testnet_stagenet[0] == --testnet, [1] == --stagenet; only substitute
      // the network-specific default when the user left the port defaulted.
      if (testnet_stagenet[0] && defaulted)
        return std::to_string(config::testnet::RPC_DEFAULT_PORT);
      else if (testnet_stagenet[1] && defaulted)
        return std::to_string(config::stagenet::RPC_DEFAULT_PORT);
      return val;
    }
};
2017-11-16 03:58:11 +00:00
// --rpc-restricted-bind-port: optional TCP port for the restricted RPC
// server (empty by default, i.e. no restricted server is started).
const command_line::arg_descriptor<std::string> core_rpc_server::arg_rpc_restricted_bind_port = {
    "rpc-restricted-bind-port"
  , "Port for restricted RPC server"
  , ""
};
2015-11-27 18:24:29 +00:00
// --restricted-rpc: when set, the main RPC server only exposes view-only
// commands and omits privacy-sensitive data from responses.
const command_line::arg_descriptor<bool> core_rpc_server::arg_restricted_rpc = {
    "restricted-rpc"
  , "Restrict RPC to view only commands and do not return privacy sensitive data in RPC calls"
  , false
};
2018-01-20 10:38:14 +00:00
// --bootstrap-daemon-address: remote daemon to proxy wallet requests to
// while this daemon is still syncing; "auto" enables automatic public-node
// discovery and switching.
const command_line::arg_descriptor<std::string> core_rpc_server::arg_bootstrap_daemon_address = {
    "bootstrap-daemon-address"
  , "URL of a 'bootstrap' remote daemon that the connected wallets can use while this daemon is still not fully synced.\n"
    "Use 'auto' to enable automatic public nodes discovering and bootstrap daemon switching"
  , ""
};
// --bootstrap-daemon-login: credentials ("username:password") used when the
// bootstrap daemon requires authentication; empty by default.
const command_line::arg_descriptor<std::string> core_rpc_server::arg_bootstrap_daemon_login = {
    "bootstrap-daemon-login"
  , "Specify username:password for the bootstrap daemon login"
  , ""
};
2015-01-29 22:10:53 +00:00
} // namespace cryptonote