// Copyright (c) 2014-2018, The Monero Project
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification, are
// permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of
//    conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice, this list
//    of conditions and the following disclaimer in the documentation and/or other
//    materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its contributors may be
//    used to endorse or promote products derived from this software without specific
//    prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
// THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
// THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Parts of this file are originally copyright (c) 2012-2013 The Cryptonote developers

// IP blocking adapted from Boolberry

#include <algorithm>
#include <boost/date_time/posix_time/posix_time.hpp>
#include <boost/thread/thread.hpp>
#include <boost/bind.hpp>
#include <atomic>

#include "version.h"
#include "string_tools.h"
#include "common/util.h"
#include "common/dns_utils.h"
#include "net/net_helper.h"
#include "math_helper.h"
#include "p2p_protocol_defs.h"
#include "net_peerlist_boost_serialization.h"
#include "net/local_ip.h"
#include "crypto/crypto.h"
#include "storages/levin_abstract_invoke2.h"
#include "cryptonote_core/cryptonote_core.h"

#include <miniupnp/miniupnpc/miniupnpc.h>
#include <miniupnp/miniupnpc/upnpcommands.h>
#include <miniupnp/miniupnpc/upnperrors.h>

#undef MONERO_DEFAULT_LOG_CATEGORY
#define MONERO_DEFAULT_LOG_CATEGORY "net.p2p"

#define NET_MAKE_IP(b1,b2,b3,b4) ((LPARAM)(((DWORD)(b1)<<24)+((DWORD)(b2)<<16)+((DWORD)(b3)<<8)+((DWORD)(b4))))
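// NET_MAKE_IP above packs four address octets into a single 32-bit value, b1 being the most significant byte.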

#define MIN_WANTED_SEED_NODES 12
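// If DNS lookups yield fewer than MIN_WANTED_SEED_NODES seed addresses, init() below also appends the hardcoded fallback list.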

namespace nodetool
{
  //-----------------------------------------------------------------------------------
  template<class t_payload_net_handler>
  void node_server<t_payload_net_handler>::init_options(boost::program_options::options_description& desc)
  {
    command_line::add_arg(desc, arg_p2p_bind_ip);
    command_line::add_arg(desc, arg_p2p_bind_port, false);
    command_line::add_arg(desc, arg_p2p_external_port);
    command_line::add_arg(desc, arg_p2p_allow_local_ip);
    command_line::add_arg(desc, arg_p2p_add_peer);
    command_line::add_arg(desc, arg_p2p_add_priority_node);
    command_line::add_arg(desc, arg_p2p_add_exclusive_node);
    command_line::add_arg(desc, arg_p2p_seed_node);
    command_line::add_arg(desc, arg_p2p_hide_my_port);
    command_line::add_arg(desc, arg_no_igd);
    command_line::add_arg(desc, arg_out_peers);
    command_line::add_arg(desc, arg_in_peers);
    command_line::add_arg(desc, arg_tos_flag);
    command_line::add_arg(desc, arg_limit_rate_up);
    command_line::add_arg(desc, arg_limit_rate_down);
    command_line::add_arg(desc, arg_limit_rate);
    command_line::add_arg(desc, arg_save_graph);
  }
  //-----------------------------------------------------------------------------------
  template<class t_payload_net_handler>
  bool node_server<t_payload_net_handler>::init_config()
  {
    //
    TRY_ENTRY();
    std::string state_file_path = m_config_folder + "/" + P2P_NET_DATA_FILENAME;
    std::ifstream p2p_data;
    p2p_data.open( state_file_path , std::ios_base::binary | std::ios_base::in);
    if(!p2p_data.fail())
    {
      try
      {
        // first try reading in portable mode
        boost::archive::portable_binary_iarchive a(p2p_data);
        a >> *this;
      }
      catch (...)
      {
        // if failed, try reading in unportable mode
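        // (Back up the old-format file first, since store_config() later rewrites it in the portable format.)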
        boost::filesystem::copy_file(state_file_path, state_file_path + ".unportable", boost::filesystem::copy_option::overwrite_if_exists);
        p2p_data.close();
        p2p_data.open( state_file_path , std::ios_base::binary | std::ios_base::in);
        if(!p2p_data.fail())
        {
          try
          {
            boost::archive::binary_iarchive a(p2p_data);
            a >> *this;
          }
          catch (const std::exception &e)
          {
            MWARNING("Failed to load p2p config file, falling back to default config");
            m_peerlist = peerlist_manager(); // it was probably half clobbered by the failed load
            make_default_config();
          }
        }
        else
        {
          make_default_config();
        }
      }
    }else
    {
      make_default_config();
    }

    // always recreate a new peer id
    make_default_peer_id();

    //at this moment we have hardcoded config
    m_config.m_net_config.handshake_interval = P2P_DEFAULT_HANDSHAKE_INTERVAL;
    m_config.m_net_config.packet_max_size = P2P_DEFAULT_PACKET_MAX_SIZE; //20 MB limit
    m_config.m_net_config.config_id = 0; // initial config
    m_config.m_net_config.connection_timeout = P2P_DEFAULT_CONNECTION_TIMEOUT;
    m_config.m_net_config.ping_connection_timeout = P2P_DEFAULT_PING_CONNECTION_TIMEOUT;
    m_config.m_net_config.send_peerlist_sz = P2P_DEFAULT_PEERS_IN_HANDSHAKE;
    m_config.m_support_flags = P2P_SUPPORT_FLAGS;

    m_first_connection_maker_call = true;
    CATCH_ENTRY_L0("node_server::init_config", false);
    return true;
  }
  //-----------------------------------------------------------------------------------
  template<class t_payload_net_handler>
  void node_server<t_payload_net_handler>::for_each_connection(std::function<bool(typename t_payload_net_handler::connection_context&, peerid_type, uint32_t)> f)
  {
    m_net_server.get_config_object().foreach_connection([&](p2p_connection_context& cntx){
      return f(cntx, cntx.peer_id, cntx.support_flags);
    });
  }
  //-----------------------------------------------------------------------------------
  template<class t_payload_net_handler>
  bool node_server<t_payload_net_handler>::for_connection(const boost::uuids::uuid &connection_id, std::function<bool(typename t_payload_net_handler::connection_context&, peerid_type, uint32_t)> f)
  {
    return m_net_server.get_config_object().for_connection(connection_id, [&](p2p_connection_context& cntx){
      return f(cntx, cntx.peer_id, cntx.support_flags);
    });
  }
  //-----------------------------------------------------------------------------------
  template<class t_payload_net_handler>
  bool node_server<t_payload_net_handler>::is_remote_host_allowed(const epee::net_utils::network_address &address)
  {
    CRITICAL_REGION_LOCAL(m_blocked_hosts_lock);
    auto it = m_blocked_hosts.find(address.host_str());
    if(it == m_blocked_hosts.end())
      return true;
    if(time(nullptr) >= it->second)
    {
      m_blocked_hosts.erase(it);
      MCLOG_CYAN(el::Level::Info, "global", "Host " << address.host_str() << " unblocked.");
      return true;
    }
    return false;
  }
  //-----------------------------------------------------------------------------------
  template<class t_payload_net_handler>
  bool node_server<t_payload_net_handler>::make_default_peer_id()
  {
    m_config.m_peer_id = crypto::rand<uint64_t>();
    return true;
  }
  //-----------------------------------------------------------------------------------
  template<class t_payload_net_handler>
  bool node_server<t_payload_net_handler>::make_default_config()
  {
    return make_default_peer_id();
  }
  //-----------------------------------------------------------------------------------
  template<class t_payload_net_handler>
  bool node_server<t_payload_net_handler>::block_host(const epee::net_utils::network_address &addr, time_t seconds)
  {
    CRITICAL_REGION_LOCAL(m_blocked_hosts_lock);
    m_blocked_hosts[addr.host_str()] = time(nullptr) + seconds;

    // drop any connection to that IP
    std::list<boost::uuids::uuid> conns;
    m_net_server.get_config_object().foreach_connection([&](const p2p_connection_context& cntxt)
    {
      if (cntxt.m_remote_address.is_same_host(addr))
      {
        conns.push_back(cntxt.m_connection_id);
      }
      return true;
    });
    for (const auto &c: conns)
      m_net_server.get_config_object().close(c);

    MCLOG_CYAN(el::Level::Info, "global", "Host " << addr.host_str() << " blocked.");
    return true;
  }
  //-----------------------------------------------------------------------------------
  template<class t_payload_net_handler>
  bool node_server<t_payload_net_handler>::unblock_host(const epee::net_utils::network_address &address)
  {
    CRITICAL_REGION_LOCAL(m_blocked_hosts_lock);
    auto i = m_blocked_hosts.find(address.host_str());
    if (i == m_blocked_hosts.end())
      return false;
    m_blocked_hosts.erase(i);
    MCLOG_CYAN(el::Level::Info, "global", "Host " << address.host_str() << " unblocked.");
    return true;
  }
  //-----------------------------------------------------------------------------------
  template<class t_payload_net_handler>
  bool node_server<t_payload_net_handler>::add_host_fail(const epee::net_utils::network_address &address)
  {
    CRITICAL_REGION_LOCAL(m_host_fails_score_lock);
    uint64_t fails = ++m_host_fails_score[address.host_str()];
    MDEBUG("Host " << address.host_str() << " fail score=" << fails);
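    // Once the score exceeds the threshold, block the host for the default ban time and reset the
    // score to half the threshold, so a host that keeps failing after the ban expires is re-blocked
    // sooner than a fresh one.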
    if(fails > P2P_IP_FAILS_BEFORE_BLOCK)
    {
      auto it = m_host_fails_score.find(address.host_str());
      CHECK_AND_ASSERT_MES(it != m_host_fails_score.end(), false, "internal error");
      it->second = P2P_IP_FAILS_BEFORE_BLOCK/2;
      block_host(address);
    }
    return true;
  }
  //-----------------------------------------------------------------------------------
  template<class t_payload_net_handler>
  bool node_server<t_payload_net_handler>::parse_peer_from_string(epee::net_utils::network_address& pe, const std::string& node_addr, uint16_t default_port)
  {
    return epee::net_utils::create_network_address(pe, node_addr, default_port);
  }
  //-----------------------------------------------------------------------------------
  template<class t_payload_net_handler>
  bool node_server<t_payload_net_handler>::handle_command_line(
      const boost::program_options::variables_map& vm
    )
  {
    bool testnet = command_line::get_arg(vm, cryptonote::arg_testnet_on);
    bool stagenet = command_line::get_arg(vm, cryptonote::arg_stagenet_on);
    m_nettype = testnet ? cryptonote::TESTNET : stagenet ? cryptonote::STAGENET : cryptonote::MAINNET;

    m_bind_ip = command_line::get_arg(vm, arg_p2p_bind_ip);
    m_port = command_line::get_arg(vm, arg_p2p_bind_port);
    m_external_port = command_line::get_arg(vm, arg_p2p_external_port);
    m_allow_local_ip = command_line::get_arg(vm, arg_p2p_allow_local_ip);
    m_no_igd = command_line::get_arg(vm, arg_no_igd);
    m_offline = command_line::get_arg(vm, cryptonote::arg_offline);

    if (command_line::has_arg(vm, arg_p2p_add_peer))
    {
      std::vector<std::string> perrs = command_line::get_arg(vm, arg_p2p_add_peer);
      for(const std::string& pr_str: perrs)
      {
        nodetool::peerlist_entry pe = AUTO_VAL_INIT(pe);
        pe.id = crypto::rand<uint64_t>();
        const uint16_t default_port = testnet ? ::config::testnet::P2P_DEFAULT_PORT : stagenet ? ::config::stagenet::P2P_DEFAULT_PORT : ::config::P2P_DEFAULT_PORT;
        bool r = parse_peer_from_string(pe.adr, pr_str, default_port);
        CHECK_AND_ASSERT_MES(r, false, "Failed to parse address from string: " << pr_str);
        m_command_line_peers.push_back(pe);
      }
    }

    if(command_line::has_arg(vm, arg_save_graph))
    {
      set_save_graph(true);
    }

    if (command_line::has_arg(vm,arg_p2p_add_exclusive_node))
    {
      if (!parse_peers_and_add_to_container(vm, arg_p2p_add_exclusive_node, m_exclusive_peers))
        return false;
    }

    if (command_line::has_arg(vm, arg_p2p_add_priority_node))
    {
      if (!parse_peers_and_add_to_container(vm, arg_p2p_add_priority_node, m_priority_peers))
        return false;
    }

    if (command_line::has_arg(vm, arg_p2p_seed_node))
    {
      if (!parse_peers_and_add_to_container(vm, arg_p2p_seed_node, m_seed_nodes))
        return false;
    }

    if(command_line::has_arg(vm, arg_p2p_hide_my_port))
      m_hide_my_port = true;

    if ( !set_max_out_peers(vm, command_line::get_arg(vm, arg_out_peers) ) )
      return false;

    if ( !set_max_in_peers(vm, command_line::get_arg(vm, arg_in_peers) ) )
      return false;

    if ( !set_tos_flag(vm, command_line::get_arg(vm, arg_tos_flag) ) )
      return false;

    if ( !set_rate_up_limit(vm, command_line::get_arg(vm, arg_limit_rate_up) ) )
      return false;

    if ( !set_rate_down_limit(vm, command_line::get_arg(vm, arg_limit_rate_down) ) )
      return false;

    if ( !set_rate_limit(vm, command_line::get_arg(vm, arg_limit_rate) ) )
      return false;

    return true;
  }
  //-----------------------------------------------------------------------------------
  inline void append_net_address(
      std::vector<epee::net_utils::network_address> & seed_nodes
    , std::string const & addr
    )
  {
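    // Split "host:port", resolve the host with a blocking TCP resolver, and append every IPv4
    // result to seed_nodes; IPv6 results are not supported and abort with an exception.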
    using namespace boost::asio;

    size_t pos = addr.find_last_of(':');
    CHECK_AND_ASSERT_MES_NO_RET(std::string::npos != pos && addr.length() - 1 != pos && 0 != pos, "Failed to parse seed address from string: '" << addr << '\'');
    std::string host = addr.substr(0, pos);
    std::string port = addr.substr(pos + 1);

    io_service io_srv;
    ip::tcp::resolver resolver(io_srv);
    ip::tcp::resolver::query query(host, port, boost::asio::ip::tcp::resolver::query::canonical_name);
    boost::system::error_code ec;
    ip::tcp::resolver::iterator i = resolver.resolve(query, ec);
    CHECK_AND_ASSERT_MES_NO_RET(!ec, "Failed to resolve host name '" << host << "': " << ec.message() << ':' << ec.value());

    ip::tcp::resolver::iterator iend;
    for (; i != iend; ++i)
    {
      ip::tcp::endpoint endpoint = *i;
      if (endpoint.address().is_v4())
      {
        epee::net_utils::network_address na{epee::net_utils::ipv4_network_address{boost::asio::detail::socket_ops::host_to_network_long(endpoint.address().to_v4().to_ulong()), endpoint.port()}};
        seed_nodes.push_back(na);
        MINFO("Added seed node: " << na.str());
      }
      else
      {
        MWARNING("IPv6 unsupported, skip '" << host << "' -> " << endpoint.address().to_v6().to_string(ec));
        throw std::runtime_error("IPv6 unsupported");
      }
    }
  }

  //-----------------------------------------------------------------------------------
  template<class t_payload_net_handler>
  std::set<std::string> node_server<t_payload_net_handler>::get_seed_nodes(cryptonote::network_type nettype) const
  {
    std::set<std::string> full_addrs;
    if (nettype == cryptonote::TESTNET)
    {
      full_addrs.insert("212.83.175.67:28080");
      full_addrs.insert("5.9.100.248:28080");
      full_addrs.insert("163.172.182.165:28080");
      full_addrs.insert("195.154.123.123:28080");
      full_addrs.insert("212.83.172.165:28080");
    }
    else if (nettype == cryptonote::STAGENET)
    {
      full_addrs.insert("162.210.173.150:38080");
      full_addrs.insert("162.210.173.151:38080");
    }
    else
    {
      full_addrs.insert("107.152.130.98:18080");
      full_addrs.insert("212.83.175.67:18080");
      full_addrs.insert("5.9.100.248:18080");
      full_addrs.insert("163.172.182.165:18080");
      full_addrs.insert("161.67.132.39:18080");
      full_addrs.insert("198.74.231.92:18080");
      full_addrs.insert("195.154.123.123:18080");
      full_addrs.insert("212.83.172.165:18080");
    }
    return full_addrs;
  }

  //-----------------------------------------------------------------------------------
  template<class t_payload_net_handler>
  bool node_server<t_payload_net_handler>::init(const boost::program_options::variables_map& vm)
  {
    std::set<std::string> full_addrs;

    bool res = handle_command_line(vm);
    CHECK_AND_ASSERT_MES(res, false, "Failed to handle command line");

    m_fallback_seed_nodes_added = false;
    if (m_nettype == cryptonote::TESTNET)
    {
      memcpy(&m_network_id, &::config::testnet::NETWORK_ID, 16);
      full_addrs = get_seed_nodes(cryptonote::TESTNET);
    }
    else if (m_nettype == cryptonote::STAGENET)
    {
      memcpy(&m_network_id, &::config::stagenet::NETWORK_ID, 16);
      full_addrs = get_seed_nodes(cryptonote::STAGENET);
    }
    else
    {
      memcpy(&m_network_id, &::config::NETWORK_ID, 16);
      if (m_exclusive_peers.empty())
      {
        // for each hostname in the seed nodes list, attempt to DNS resolve and
        // add the result addresses as seed nodes
        // TODO: at some point add IPv6 support, but that won't be relevant
        // for some time yet.

        std::vector<std::vector<std::string>> dns_results;
        dns_results.resize(m_seed_nodes_list.size());

        std::list<boost::thread> dns_threads;
        uint64_t result_index = 0;
        for (const std::string& addr_str : m_seed_nodes_list)
        {
          boost::thread th = boost::thread([=, &dns_results, &addr_str]
          {
            MDEBUG("dns_threads[" << result_index << "] created for: " << addr_str);
            // TODO: care about dnssec avail/valid
            bool avail, valid;
            std::vector<std::string> addr_list;

            try
            {
              addr_list = tools::DNSResolver::instance().get_ipv4(addr_str, avail, valid);
              MDEBUG("dns_threads[" << result_index << "] DNS resolve done");
              boost::this_thread::interruption_point();
            }
            catch(const boost::thread_interrupted&)
            {
              // thread interruption request
              // even if we now have results, finish thread without setting
              // result variables, which are now out of scope in main thread
              MWARNING("dns_threads[" << result_index << "] interrupted");
              return;
            }

            MINFO("dns_threads[" << result_index << "] addr_str: " << addr_str << " number of results: " << addr_list.size());
            dns_results[result_index] = addr_list;
          });

          dns_threads.push_back(std::move(th));
          ++result_index;
        }

        MDEBUG("dns_threads created, now waiting for completion or timeout of " << CRYPTONOTE_DNS_TIMEOUT_MS << "ms");
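        // All lookups share one deadline; any thread that misses it is interrupted and its
        // result slot stays empty, so it is simply skipped when building the seed set below.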
        boost::chrono::system_clock::time_point deadline = boost::chrono::system_clock::now() + boost::chrono::milliseconds(CRYPTONOTE_DNS_TIMEOUT_MS);
        uint64_t i = 0;
        for (boost::thread& th : dns_threads)
        {
          if (! th.try_join_until(deadline))
          {
            MWARNING("dns_threads[" << i << "] timed out, sending interrupt");
            th.interrupt();
          }
          ++i;
        }

        i = 0;
        for (const auto& result : dns_results)
        {
          MDEBUG("DNS lookup for " << m_seed_nodes_list[i] << ": " << result.size() << " results");
          // if no results for node, thread's lookup likely timed out
          if (result.size())
          {
            for (const auto& addr_string : result)
              full_addrs.insert(addr_string + ":" + std::to_string(m_nettype == cryptonote::TESTNET ? ::config::testnet::P2P_DEFAULT_PORT : m_nettype == cryptonote::STAGENET ? ::config::stagenet::P2P_DEFAULT_PORT : ::config::P2P_DEFAULT_PORT));
          }
          ++i;
        }

        // append the fallback nodes if we have too few seed nodes to start with
        if (full_addrs.size() < MIN_WANTED_SEED_NODES)
        {
          if (full_addrs.empty())
            MINFO("DNS seed node lookup either timed out or failed, falling back to defaults");
          else
            MINFO("Not enough DNS seed nodes found, using fallback defaults too");

          for (const auto &peer: get_seed_nodes(cryptonote::MAINNET))
            full_addrs.insert(peer);
          m_fallback_seed_nodes_added = true;
        }
      }
    }

    for (const auto& full_addr : full_addrs)
    {
      MDEBUG("Seed node: " << full_addr);
      append_net_address(m_seed_nodes, full_addr);
    }
    MDEBUG("Number of seed nodes: " << m_seed_nodes.size());

    m_config_folder = command_line::get_arg(vm, cryptonote::arg_data_dir);

    if ((m_nettype == cryptonote::MAINNET && m_port != std::to_string(::config::P2P_DEFAULT_PORT))
        || (m_nettype == cryptonote::TESTNET && m_port != std::to_string(::config::testnet::P2P_DEFAULT_PORT))
        || (m_nettype == cryptonote::STAGENET && m_port != std::to_string(::config::stagenet::P2P_DEFAULT_PORT))) {
      m_config_folder = m_config_folder + "/" + m_port;
    }

    res = init_config();
    CHECK_AND_ASSERT_MES(res, false, "Failed to init config.");

    res = m_peerlist.init(m_allow_local_ip);
    CHECK_AND_ASSERT_MES(res, false, "Failed to init peerlist.");


    for(auto& p: m_command_line_peers)
      m_peerlist.append_with_peer_white(p);

    // only set this if we are really sure that we have an externally visible ip
    m_have_address = true;
    m_ip_address = 0;
    m_last_stat_request_time = 0;

    //configure self
    m_net_server.set_threads_prefix("P2P");
    m_net_server.get_config_object().set_handler(this);
    m_net_server.get_config_object().m_invoke_timeout = P2P_DEFAULT_INVOKE_TIMEOUT;
    m_net_server.set_connection_filter(this);

    // from here onwards, it's online stuff
    if (m_offline)
      return res;

    //try to bind
    MINFO("Binding on " << m_bind_ip << ":" << m_port);
    res = m_net_server.init_server(m_port, m_bind_ip);
    CHECK_AND_ASSERT_MES(res, false, "Failed to bind server");

    m_listening_port = m_net_server.get_binded_port();
    MLOG_GREEN(el::Level::Info, "Net service bound to " << m_bind_ip << ":" << m_listening_port);
    if(m_external_port)
      MDEBUG("External port defined as " << m_external_port);

    // add UPnP port mapping
    if(!m_no_igd)
      add_upnp_port_mapping(m_listening_port);

    return res;
  }
  //-----------------------------------------------------------------------------------
  template<class t_payload_net_handler>
  typename node_server<t_payload_net_handler>::payload_net_handler& node_server<t_payload_net_handler>::get_payload_object()
  {
    return m_payload_handler;
  }
  //-----------------------------------------------------------------------------------
  template<class t_payload_net_handler>
  bool node_server<t_payload_net_handler>::run()
  {
    // creating thread to log number of connections
    mPeersLoggerThread.reset(new boost::thread([&]()
    {
      _note("Thread monitor number of peers - start");
      while (!is_closing && !m_net_server.is_stop_signal_sent())
      { // main loop of thread
        //number_of_peers = m_net_server.get_config_object().get_connections_count();
        unsigned int number_of_in_peers = 0;
        unsigned int number_of_out_peers = 0;
        m_net_server.get_config_object().foreach_connection([&](const p2p_connection_context& cntxt)
        {
          if (cntxt.m_is_income)
          {
            ++number_of_in_peers;
          }
          else
          {
            ++number_of_out_peers;
          }
          return true;
        }); // lambda

        m_current_number_of_in_peers = number_of_in_peers;
        m_current_number_of_out_peers = number_of_out_peers;

        boost::this_thread::sleep_for(boost::chrono::seconds(1));
      } // main loop of thread
      _note("Thread monitor number of peers - done");
    })); // lambda

    //here you can set worker threads count
    int thrds_count = 10;

    m_net_server.add_idle_handler(boost::bind(&node_server<t_payload_net_handler>::idle_worker, this), 1000);
    m_net_server.add_idle_handler(boost::bind(&t_payload_net_handler::on_idle, &m_payload_handler), 1000);

    boost::thread::attributes attrs;
    attrs.set_stack_size(THREAD_STACK_SIZE);

    //go to loop
    MINFO("Run net_service loop( " << thrds_count << " threads)...");
    if(!m_net_server.run_server(thrds_count, true, attrs))
    {
      LOG_ERROR("Failed to run net tcp server!");
    }

    MINFO("net_service loop stopped.");
    return true;
  }

  //-----------------------------------------------------------------------------------
  template<class t_payload_net_handler>
  uint64_t node_server<t_payload_net_handler>::get_connections_count()
  {
    return m_net_server.get_config_object().get_connections_count();
  }
  //-----------------------------------------------------------------------------------
  template<class t_payload_net_handler>
  bool node_server<t_payload_net_handler>::deinit()
  {
    kill();
    m_peerlist.deinit();
    m_net_server.deinit_server();
    // remove UPnP port mapping
    if(!m_no_igd)
      delete_upnp_port_mapping(m_listening_port);
    return store_config();
  }
  //-----------------------------------------------------------------------------------
  template<class t_payload_net_handler>
  bool node_server<t_payload_net_handler>::store_config()
  {

    TRY_ENTRY();
    if (!tools::create_directories_if_necessary(m_config_folder))
    {
      MWARNING("Failed to create data directory: " << m_config_folder);
      return false;
    }

    std::string state_file_path = m_config_folder + "/" + P2P_NET_DATA_FILENAME;
    std::ofstream p2p_data;
    p2p_data.open( state_file_path , std::ios_base::binary | std::ios_base::out| std::ios::trunc);
    if(p2p_data.fail())
    {
      MWARNING("Failed to save config to file " << state_file_path);
      return false;
    };

    boost::archive::portable_binary_oarchive a(p2p_data);
    a << *this;
    return true;
    CATCH_ENTRY_L0("blockchain_storage::save", false);

    return true;
  }
  //-----------------------------------------------------------------------------------
  template<class t_payload_net_handler>
  bool node_server<t_payload_net_handler>::send_stop_signal()
  {
    MDEBUG("[node] sending stop signal");
    m_net_server.send_stop_signal();
    MDEBUG("[node] Stop signal sent");

    std::list<boost::uuids::uuid> connection_ids;
    m_net_server.get_config_object().foreach_connection([&](const p2p_connection_context& cntxt) {
      connection_ids.push_back(cntxt.m_connection_id);
      return true;
    });
    for (const auto &connection_id: connection_ids)
      m_net_server.get_config_object().close(connection_id);

    m_payload_handler.stop();

    return true;
  }
  //-----------------------------------------------------------------------------------


  template<class t_payload_net_handler>
  bool node_server<t_payload_net_handler>::do_handshake_with_peer(peerid_type& pi, p2p_connection_context& context_, bool just_take_peerlist)
  {
    typename COMMAND_HANDSHAKE::request arg;
    typename COMMAND_HANDSHAKE::response rsp;
    get_local_node_data(arg.node_data);
    m_payload_handler.get_payload_sync_data(arg.payload_data);

    epee::simple_event ev;
    std::atomic<bool> hsh_result(false);
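
    // The handshake is invoked asynchronously: the lambda below runs on a network thread and
    // raises `ev` when it finishes (or fails), while this thread blocks on ev.wait() further down.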
    bool r = epee::net_utils::async_invoke_remote_command2<typename COMMAND_HANDSHAKE::response>(context_.m_connection_id, COMMAND_HANDSHAKE::ID, arg, m_net_server.get_config_object(),
      [this, &pi, &ev, &hsh_result, &just_take_peerlist](int code, const typename COMMAND_HANDSHAKE::response& rsp, p2p_connection_context& context)
    {
      epee::misc_utils::auto_scope_leave_caller scope_exit_handler = epee::misc_utils::create_scope_leave_handler([&](){ev.raise();});

      if(code < 0)
      {
        LOG_WARNING_CC(context, "COMMAND_HANDSHAKE invoke failed. (" << code << ", " << epee::levin::get_err_descr(code) << ")");
        return;
      }

      if(rsp.node_data.network_id != m_network_id)
      {
        LOG_WARNING_CC(context, "COMMAND_HANDSHAKE Failed, wrong network! (" << epee::string_tools::get_str_from_guid_a(rsp.node_data.network_id) << "), closing connection.");
        return;
      }

      if(!handle_remote_peerlist(rsp.local_peerlist_new, rsp.node_data.local_time, context))
      {
        LOG_WARNING_CC(context, "COMMAND_HANDSHAKE: failed to handle_remote_peerlist(...), closing connection.");
        add_host_fail(context.m_remote_address);
        return;
      }
      hsh_result = true;
      if(!just_take_peerlist)
      {
        if(!m_payload_handler.process_payload_sync_data(rsp.payload_data, context, true))
        {
          LOG_WARNING_CC(context, "COMMAND_HANDSHAKE invoked, but process_payload_sync_data returned false, dropping connection.");
          hsh_result = false;
          return;
        }

        pi = context.peer_id = rsp.node_data.peer_id;
        m_peerlist.set_peer_just_seen(rsp.node_data.peer_id, context.m_remote_address);

        if(rsp.node_data.peer_id == m_config.m_peer_id)
        {
          LOG_DEBUG_CC(context, "Connection to self detected, dropping connection");
          hsh_result = false;
          return;
        }
        LOG_DEBUG_CC(context, " COMMAND_HANDSHAKE INVOKED OK");
      }else
      {
        LOG_DEBUG_CC(context, " COMMAND_HANDSHAKE(AND CLOSE) INVOKED OK");
      }
    }, P2P_DEFAULT_HANDSHAKE_INVOKE_TIMEOUT);

    if(r)
    {
      ev.wait();
    }

    if(!hsh_result)
    {
      LOG_WARNING_CC(context_, "COMMAND_HANDSHAKE Failed");
      m_net_server.get_config_object().close(context_.m_connection_id);
    }
    else
    {
      try_get_support_flags(context_, [](p2p_connection_context& flags_context, const uint32_t& support_flags)
      {
        flags_context.support_flags = support_flags;
      });
    }

    return hsh_result;
  }
  //-----------------------------------------------------------------------------------
  template<class t_payload_net_handler>
  bool node_server<t_payload_net_handler>::do_peer_timed_sync(const epee::net_utils::connection_context_base& context_, peerid_type peer_id)
  {
    typename COMMAND_TIMED_SYNC::request arg = AUTO_VAL_INIT(arg);
    m_payload_handler.get_payload_sync_data(arg.payload_data);

    bool r = epee::net_utils::async_invoke_remote_command2<typename COMMAND_TIMED_SYNC::response>(context_.m_connection_id, COMMAND_TIMED_SYNC::ID, arg, m_net_server.get_config_object(),
      [this](int code, const typename COMMAND_TIMED_SYNC::response& rsp, p2p_connection_context& context)
    {
      context.m_in_timedsync = false;
      if(code < 0)
      {
        LOG_WARNING_CC(context, "COMMAND_TIMED_SYNC invoke failed. (" << code << ", " << epee::levin::get_err_descr(code) << ")");
        return;
      }

      if(!handle_remote_peerlist(rsp.local_peerlist_new, rsp.local_time, context))
      {
        LOG_WARNING_CC(context, "COMMAND_TIMED_SYNC: failed to handle_remote_peerlist(...), closing connection.");
        m_net_server.get_config_object().close(context.m_connection_id );
        add_host_fail(context.m_remote_address);
      }
      if(!context.m_is_income)
        m_peerlist.set_peer_just_seen(context.peer_id, context.m_remote_address);
      m_payload_handler.process_payload_sync_data(rsp.payload_data, context, false);
    });

    if(!r)
    {
      LOG_WARNING_CC(context_, "COMMAND_TIMED_SYNC Failed");
      return false;
    }
    return true;
  }
  //-----------------------------------------------------------------------------------
  template<class t_payload_net_handler>
  size_t node_server<t_payload_net_handler>::get_random_index_with_fixed_probability(size_t max_index)
  {
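    // Map a uniformly random x in [0, max_index] through x^3 / max_index^2: the result still lies
    // in [0, max_index] but is heavily biased toward small values, so entries near the front of
    // the list are picked far more often than those near the end.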
    //divide by zero workaround
    if(!max_index)
      return 0;

    size_t x = crypto::rand<size_t>()%(max_index+1);
    size_t res = (x*x*x)/(max_index*max_index); //parabola \/
    MDEBUG("Random connection index=" << res << "(x="<< x << ", max_index=" << max_index << ")");
    return res;
  }
  //-----------------------------------------------------------------------------------
  template<class t_payload_net_handler>
  bool node_server<t_payload_net_handler>::is_peer_used(const peerlist_entry& peer)
  {

    if(m_config.m_peer_id == peer.id)
      return true;//don't make connections to ourselves

    bool used = false;
    m_net_server.get_config_object().foreach_connection([&](const p2p_connection_context& cntxt)
    {
      if(cntxt.peer_id == peer.id || (!cntxt.m_is_income && peer.adr == cntxt.m_remote_address))
      {
        used = true;
        return false;//stop enumerating
      }
      return true;
    });

    return used;
  }
  //-----------------------------------------------------------------------------------
  template<class t_payload_net_handler>
  bool node_server<t_payload_net_handler>::is_peer_used(const anchor_peerlist_entry& peer)
  {
    if(m_config.m_peer_id == peer.id) {
      return true;//don't make connections to ourselves
    }

    bool used = false;

    m_net_server.get_config_object().foreach_connection([&](const p2p_connection_context& cntxt)
    {
      if(cntxt.peer_id == peer.id || (!cntxt.m_is_income && peer.adr == cntxt.m_remote_address))
      {
        used = true;

        return false;//stop enumerating
      }

      return true;
    });

    return used;
  }
  //-----------------------------------------------------------------------------------
  template<class t_payload_net_handler>
  bool node_server<t_payload_net_handler>::is_addr_connected(const epee::net_utils::network_address& peer)
  {
    bool connected = false;
    m_net_server.get_config_object().foreach_connection([&](const p2p_connection_context& cntxt)
    {
      if(!cntxt.m_is_income && peer == cntxt.m_remote_address)
      {
        connected = true;
        return false;//stop enumerating
      }
      return true;
    });

    return connected;
  }

#define LOG_PRINT_CC_PRIORITY_NODE(priority, con, msg) \
  do { \
    if (priority) {\
      LOG_INFO_CC(con, "[priority]" << msg); \
    } else {\
      LOG_INFO_CC(con, msg); \
    } \
  } while(0)

  template<class t_payload_net_handler>
  bool node_server<t_payload_net_handler>::try_to_connect_and_handshake_with_new_peer(const epee::net_utils::network_address& na, bool just_take_peerlist, uint64_t last_seen_stamp, PeerType peer_type, uint64_t first_seen_stamp)
  {
    if (m_current_number_of_out_peers == m_config.m_net_config.max_out_connection_count) // out peers limit
    {
      return false;
    }
    else if (m_current_number_of_out_peers > m_config.m_net_config.max_out_connection_count)
    {
      m_net_server.get_config_object().del_out_connections(1);
      m_current_number_of_out_peers --; // atomic variable, update time = 1s
      return false;
    }
    MDEBUG("Connecting to " << na.str() << "(peer_type=" << peer_type << ", last_seen: "
        << (last_seen_stamp ? epee::misc_utils::get_time_interval_string(time(NULL) - last_seen_stamp):"never")
        << ")...");

    CHECK_AND_ASSERT_MES(na.get_type_id() == epee::net_utils::ipv4_network_address::ID, false,
        "Only IPv4 addresses are supported here");
    const epee::net_utils::ipv4_network_address &ipv4 = na.as<const epee::net_utils::ipv4_network_address>();

    typename net_server::t_connection_context con = AUTO_VAL_INIT(con);
    bool res = m_net_server.connect(epee::string_tools::get_ip_string_from_int32(ipv4.ip()),
        epee::string_tools::num_to_string_fast(ipv4.port()),
        m_config.m_net_config.connection_timeout,
        con);

    if(!res)
    {
      bool is_priority = is_priority_node(na);
      LOG_PRINT_CC_PRIORITY_NODE(is_priority, con, "Connect failed to " << na.str()
        /*<< ", try " << try_count*/);
      //m_peerlist.set_peer_unreachable(pe);
      return false;
    }

    peerid_type pi = AUTO_VAL_INIT(pi);
    res = do_handshake_with_peer(pi, con, just_take_peerlist);

    if(!res)
    {
      bool is_priority = is_priority_node(na);
      LOG_PRINT_CC_PRIORITY_NODE(is_priority, con, "Failed to HANDSHAKE with peer "
        << na.str()
        /*<< ", try " << try_count*/);
      return false;
    }

    if(just_take_peerlist)
    {
      m_net_server.get_config_object().close(con.m_connection_id);
      LOG_DEBUG_CC(con, "CONNECTION HANDSHAKED OK AND CLOSED.");
      return true;
    }

    peerlist_entry pe_local = AUTO_VAL_INIT(pe_local);
    pe_local.adr = na;
    pe_local.id = pi;
    time_t last_seen;
    time(&last_seen);
    pe_local.last_seen = static_cast<int64_t>(last_seen);
    m_peerlist.append_with_peer_white(pe_local);
    //update last seen and push it to peerlist manager

    anchor_peerlist_entry ape = AUTO_VAL_INIT(ape);
    ape.adr = na;
    ape.id = pi;
    ape.first_seen = first_seen_stamp ? first_seen_stamp : time(nullptr);

    m_peerlist.append_with_peer_anchor(ape);

    LOG_DEBUG_CC(con, "CONNECTION HANDSHAKED OK.");
    return true;
  }

  template<class t_payload_net_handler>
  bool node_server<t_payload_net_handler>::check_connection_and_handshake_with_peer(const epee::net_utils::network_address& na, uint64_t last_seen_stamp)
  {
    LOG_PRINT_L1("Connecting to " << na.str() << "(last_seen: "
        << (last_seen_stamp ? epee::misc_utils::get_time_interval_string(time(NULL) - last_seen_stamp):"never")
        << ")...");

    CHECK_AND_ASSERT_MES(na.get_type_id() == epee::net_utils::ipv4_network_address::ID, false,
        "Only IPv4 addresses are supported here");
    const epee::net_utils::ipv4_network_address &ipv4 = na.as<epee::net_utils::ipv4_network_address>();

    typename net_server::t_connection_context con = AUTO_VAL_INIT(con);
    bool res = m_net_server.connect(epee::string_tools::get_ip_string_from_int32(ipv4.ip()),
        epee::string_tools::num_to_string_fast(ipv4.port()),
        m_config.m_net_config.connection_timeout,
        con);

    if (!res) {
      bool is_priority = is_priority_node(na);

      LOG_PRINT_CC_PRIORITY_NODE(is_priority, con, "Connect failed to " << na.str());

      return false;
    }

    peerid_type pi = AUTO_VAL_INIT(pi);
    res = do_handshake_with_peer(pi, con, true);

    if (!res) {
      bool is_priority = is_priority_node(na);

      LOG_PRINT_CC_PRIORITY_NODE(is_priority, con, "Failed to HANDSHAKE with peer " << na.str());

      return false;
    }

    m_net_server.get_config_object().close(con.m_connection_id);

    LOG_DEBUG_CC(con, "CONNECTION HANDSHAKED OK AND CLOSED.");

    return true;
  }

#undef LOG_PRINT_CC_PRIORITY_NODE

  //-----------------------------------------------------------------------------------
  template<class t_payload_net_handler>
  bool node_server<t_payload_net_handler>::is_addr_recently_failed(const epee::net_utils::network_address& addr)
  {
    CRITICAL_REGION_LOCAL(m_conn_fails_cache_lock);
    auto it = m_conn_fails_cache.find(addr);
    if(it == m_conn_fails_cache.end())
      return false;

    if(time(NULL) - it->second > P2P_FAILED_ADDR_FORGET_SECONDS)
      return false;
    else
      return true;
  }
  //-----------------------------------------------------------------------------------
  template<class t_payload_net_handler>
  bool node_server<t_payload_net_handler>::make_new_connection_from_anchor_peerlist(const std::vector<anchor_peerlist_entry>& anchor_peerlist)
  {
    for (const auto& pe: anchor_peerlist) {
      _note("Considering connecting (out) to peer: " << peerid_type(pe.id) << " " << pe.adr.str());

      if(is_peer_used(pe)) {
        _note("Peer is used");
        continue;
      }

      if(!is_remote_host_allowed(pe.adr)) {
        continue;
      }

      if(is_addr_recently_failed(pe.adr)) {
        continue;
      }

      MDEBUG("Selected peer: " << peerid_to_string(pe.id) << " " << pe.adr.str()
          << "[peer_type=" << anchor
          << "] first_seen: " << epee::misc_utils::get_time_interval_string(time(NULL) - pe.first_seen));

      if(!try_to_connect_and_handshake_with_new_peer(pe.adr, false, 0, anchor, pe.first_seen)) {
        _note("Handshake failed");
        continue;
      }

      return true;
    }

    return false;
  }
  //-----------------------------------------------------------------------------------
  template<class t_payload_net_handler>
  bool node_server<t_payload_net_handler>::make_new_connection_from_peerlist(bool use_white_list)
  {
    size_t local_peers_count = use_white_list ? m_peerlist.get_white_peers_count():m_peerlist.get_gray_peers_count();
    if(!local_peers_count)
      return false;//no peers

    size_t max_random_index = std::min<uint64_t>(local_peers_count -1, 20);

    std::set<size_t> tried_peers;

    size_t try_count = 0;
    size_t rand_count = 0;
    while(rand_count < (max_random_index+1)*3 && try_count < 10 && !m_net_server.is_stop_signal_sent())
    {
      ++rand_count;
      size_t random_index;

      if (use_white_list) {
        local_peers_count = m_peerlist.get_white_peers_count();
        if (!local_peers_count)
          return false;
        max_random_index = std::min<uint64_t>(local_peers_count -1, 20);
        random_index = get_random_index_with_fixed_probability(max_random_index);
      } else {
        local_peers_count = m_peerlist.get_gray_peers_count();
        if (!local_peers_count)
          return false;
        random_index = crypto::rand<size_t>() % local_peers_count;
      }

      CHECK_AND_ASSERT_MES(random_index < local_peers_count, false, "random_starter_index < peers_local.size() failed!!");

      if(tried_peers.count(random_index))
        continue;

      tried_peers.insert(random_index);
      peerlist_entry pe = AUTO_VAL_INIT(pe);
      bool r = use_white_list ? m_peerlist.get_white_peer_by_index(pe, random_index):m_peerlist.get_gray_peer_by_index(pe, random_index);
      CHECK_AND_ASSERT_MES(r, false, "Failed to get random peer from peerlist(white:" << use_white_list << ")");

      ++try_count;

      _note("Considering connecting (out) to peer: " << peerid_to_string(pe.id) << " " << pe.adr.str());

      if(is_peer_used(pe)) {
        _note("Peer is used");
        continue;
      }

      if(!is_remote_host_allowed(pe.adr))
        continue;

      if(is_addr_recently_failed(pe.adr))
        continue;

      MDEBUG("Selected peer: " << peerid_to_string(pe.id) << " " << pe.adr.str()
          << "[peer_list=" << (use_white_list ? white : gray)
          << "] last_seen: " << (pe.last_seen ? epee::misc_utils::get_time_interval_string(time(NULL) - pe.last_seen) : "never"));

      if(!try_to_connect_and_handshake_with_new_peer(pe.adr, false, pe.last_seen, use_white_list ? white : gray)) {
        _note("Handshake failed");
        continue;
      }

      return true;
    }
    return false;
  }
  //-----------------------------------------------------------------------------------
  template<class t_payload_net_handler>
  bool node_server<t_payload_net_handler>::connect_to_seed()
  {
    if (m_seed_nodes.empty() || m_offline || !m_exclusive_peers.empty())
      return true;

    size_t try_count = 0;
    size_t current_index = crypto::rand<size_t>()%m_seed_nodes.size();
    while(true)
    {
      if(m_net_server.is_stop_signal_sent())
        return false;

      if(try_to_connect_and_handshake_with_new_peer(m_seed_nodes[current_index], true))
        break;
      if(++try_count > m_seed_nodes.size())
      {
        if (!m_fallback_seed_nodes_added)
        {
          MWARNING("Failed to connect to any of seed peers, trying fallback seeds");
          current_index = m_seed_nodes.size();
          for (const auto &peer: get_seed_nodes(m_nettype))
          {
            MDEBUG("Fallback seed node: " << peer);
            append_net_address(m_seed_nodes, peer);
          }
          m_fallback_seed_nodes_added = true;
          if (current_index == m_seed_nodes.size())
          {
            MWARNING("No fallback seeds, continuing without seeds");
            break;
          }
          // continue for another few cycles
        }
        else
        {
          MWARNING("Failed to connect to any of seed peers, continuing without seeds");
          break;
        }
      }
      if(++current_index >= m_seed_nodes.size())
        current_index = 0;
    }
    return true;
  }
  //-----------------------------------------------------------------------------------
  template<class t_payload_net_handler>
  bool node_server<t_payload_net_handler>::connections_maker()
  {
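    // Connection strategy: exclusive peers, if configured, replace everything else; otherwise
    // make sure priority peers are connected, bootstrap from seeds when the white list is empty,
    // and then fill the remaining outgoing slots from the anchor, white and gray lists.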
    if (m_offline) return true;
    if (!connect_to_peerlist(m_exclusive_peers)) return false;

    if (!m_exclusive_peers.empty()) return true;

    size_t start_conn_count = get_outgoing_connections_count();
    if(!m_peerlist.get_white_peers_count() && m_seed_nodes.size())
    {
      if (!connect_to_seed())
        return false;
    }

    if (!connect_to_peerlist(m_priority_peers)) return false;

    size_t expected_white_connections = (m_config.m_net_config.max_out_connection_count*P2P_DEFAULT_WHITELIST_CONNECTIONS_PERCENT)/100;

    size_t conn_count = get_outgoing_connections_count();
    if(conn_count < m_config.m_net_config.max_out_connection_count)
    {
      if(conn_count < expected_white_connections)
      {
        //start from anchor list
        if(!make_expected_connections_count(anchor, P2P_DEFAULT_ANCHOR_CONNECTIONS_COUNT))
          return false;
        //then do white list
        if(!make_expected_connections_count(white, expected_white_connections))
          return false;
        //then do grey list
        if(!make_expected_connections_count(gray, m_config.m_net_config.max_out_connection_count))
          return false;
      }else
      {
        //start from grey list
        if(!make_expected_connections_count(gray, m_config.m_net_config.max_out_connection_count))
          return false;
        //and then do white list
        if(!make_expected_connections_count(white, m_config.m_net_config.max_out_connection_count))
          return false;
      }
    }

    if (start_conn_count == get_outgoing_connections_count() && start_conn_count < m_config.m_net_config.max_out_connection_count)
    {
      MINFO("Failed to connect to any, trying seeds");
      if (!connect_to_seed())
        return false;
    }

    return true;
  }
  //-----------------------------------------------------------------------------------
  template<class t_payload_net_handler>
  bool node_server<t_payload_net_handler>::make_expected_connections_count(PeerType peer_type, size_t expected_connections)
  {
    if (m_offline)
      return true;

    std::vector<anchor_peerlist_entry> apl;

    if (peer_type == anchor) {
      m_peerlist.get_and_empty_anchor_peerlist(apl);
    }

    size_t conn_count = get_outgoing_connections_count();
    // add new outgoing connections from the selected peer list until the expected count is reached
    while(conn_count < expected_connections)
    {
      if(m_net_server.is_stop_signal_sent())
        return false;

      if (peer_type == anchor && !make_new_connection_from_anchor_peerlist(apl)) {
        break;
      }

      if (peer_type == white && !make_new_connection_from_peerlist(true)) {
        break;
      }

      if (peer_type == gray && !make_new_connection_from_peerlist(false)) {
        break;
      }

      conn_count = get_outgoing_connections_count();
    }
    return true;
  }

  //-----------------------------------------------------------------------------------
  template<class t_payload_net_handler>
  size_t node_server<t_payload_net_handler>::get_outgoing_connections_count()
  {
    size_t count = 0;
    m_net_server.get_config_object().foreach_connection([&](const p2p_connection_context& cntxt)
    {
      if(!cntxt.m_is_income)
        ++count;
      return true;
    });

    return count;
  }
  //-----------------------------------------------------------------------------------
  template<class t_payload_net_handler>
  size_t node_server<t_payload_net_handler>::get_incoming_connections_count()
  {
    size_t count = 0;
    m_net_server.get_config_object().foreach_connection([&](const p2p_connection_context& cntxt)
    {
      if(cntxt.m_is_income)
        ++count;
      return true;
    });

    return count;
  }
  //-----------------------------------------------------------------------------------
  template<class t_payload_net_handler>
  bool node_server<t_payload_net_handler>::idle_worker()
  {
    m_peer_handshake_idle_maker_interval.do_call(boost::bind(&node_server<t_payload_net_handler>::peer_sync_idle_maker, this));
    m_connections_maker_interval.do_call(boost::bind(&node_server<t_payload_net_handler>::connections_maker, this));
    m_gray_peerlist_housekeeping_interval.do_call(boost::bind(&node_server<t_payload_net_handler>::gray_peerlist_housekeeping, this));
    m_peerlist_store_interval.do_call(boost::bind(&node_server<t_payload_net_handler>::store_config, this));
    m_incoming_connections_interval.do_call(boost::bind(&node_server<t_payload_net_handler>::check_incoming_connections, this));
    return true;
  }
  //-----------------------------------------------------------------------------------
  template<class t_payload_net_handler>
  bool node_server<t_payload_net_handler>::check_incoming_connections()
  {
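    // Called periodically from idle_worker(): if the node is online, not hiding its port, and has
    // no incoming connections at all, warn that the P2P port is probably not reachable from outside.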
    if (m_offline || m_hide_my_port)
      return true;
    if (get_incoming_connections_count() == 0)
    {
      const el::Level level = el::Level::Warning;
      MCLOG_RED(level, "global", "No incoming connections - check firewalls/routers allow port " << get_this_peer_port());
    }
    return true;
  }
  //-----------------------------------------------------------------------------------
  template<class t_payload_net_handler>
  bool node_server<t_payload_net_handler>::peer_sync_idle_maker()
  {
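    // Snapshot every handshaked connection that is not already in a timed sync, then run
    // do_peer_timed_sync() on each to exchange fresh peerlists and payload sync data.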
|
|
MDEBUG("STARTED PEERLIST IDLE HANDSHAKE");
|
|
typedef std::list<std::pair<epee::net_utils::connection_context_base, peerid_type> > local_connects_type;
|
|
local_connects_type cncts;
|
|
m_net_server.get_config_object().foreach_connection([&](p2p_connection_context& cntxt)
|
|
{
|
|
if(cntxt.peer_id && !cntxt.m_in_timedsync)
|
|
{
|
|
cntxt.m_in_timedsync = true;
|
|
cncts.push_back(local_connects_type::value_type(cntxt, cntxt.peer_id));//do idle sync only with handshaked connections
|
|
}
|
|
return true;
|
|
});
|
|
|
|
std::for_each(cncts.begin(), cncts.end(), [&](const typename local_connects_type::value_type& vl){do_peer_timed_sync(vl.first, vl.second);});
|
|
|
|
MDEBUG("FINISHED PEERLIST IDLE HANDSHAKE");
|
|
return true;
|
|
}
|
|
//-----------------------------------------------------------------------------------
|
|
template<class t_payload_net_handler>
|
|
bool node_server<t_payload_net_handler>::fix_time_delta(std::list<peerlist_entry>& local_peerlist, time_t local_time, int64_t& delta)
|
|
{
|
|
//fix time delta
|
|
time_t now = 0;
|
|
time(&now);
|
|
delta = now - local_time;
|
|
|
|
for(peerlist_entry& be: local_peerlist)
|
|
{
|
|
if(be.last_seen > local_time)
|
|
{
|
|
MWARNING("FOUND FUTURE peerlist for entry " << be.adr.str() << " last_seen: " << be.last_seen << ", local_time(on remote node):" << local_time);
|
|
return false;
|
|
}
|
|
be.last_seen += delta;
|
|
}
|
|
return true;
|
|
}
|
|
//-----------------------------------------------------------------------------------
|
|
template<class t_payload_net_handler>
|
|
bool node_server<t_payload_net_handler>::handle_remote_peerlist(const std::list<peerlist_entry>& peerlist, time_t local_time, const epee::net_utils::connection_context_base& context)
|
|
{
|
|
int64_t delta = 0;
|
|
std::list<peerlist_entry> peerlist_ = peerlist;
|
|
if(!fix_time_delta(peerlist_, local_time, delta))
|
|
return false;
|
|
LOG_DEBUG_CC(context, "REMOTE PEERLIST: TIME_DELTA: " << delta << ", remote peerlist size=" << peerlist_.size());
|
|
LOG_DEBUG_CC(context, "REMOTE PEERLIST: " << print_peerlist_to_string(peerlist_));
|
|
return m_peerlist.merge_peerlist(peerlist_);
|
|
}
|
|
//-----------------------------------------------------------------------------------
|
|
template<class t_payload_net_handler>
|
|
bool node_server<t_payload_net_handler>::get_local_node_data(basic_node_data& node_data)
|
|
{
|
|
time_t local_time;
|
|
time(&local_time);
|
|
node_data.local_time = local_time;
|
|
node_data.peer_id = m_config.m_peer_id;
|
|
if(!m_hide_my_port)
|
|
node_data.my_port = m_external_port ? m_external_port : m_listening_port;
|
|
else
|
|
node_data.my_port = 0;
|
|
node_data.network_id = m_network_id;
|
|
return true;
|
|
}
|
|
//-----------------------------------------------------------------------------------
|
|
#ifdef ALLOW_DEBUG_COMMANDS
|
|
template<class t_payload_net_handler>
|
|
bool node_server<t_payload_net_handler>::check_trust(const proof_of_trust& tr)
|
|
{
|
|
uint64_t local_time = time(NULL);
|
|
uint64_t time_delata = local_time > tr.time ? local_time - tr.time: tr.time - local_time;
|
|
if(time_delata > 24*60*60 )
|
|
{
|
|
MWARNING("check_trust failed to check time conditions, local_time=" << local_time << ", proof_time=" << tr.time);
|
|
return false;
|
|
}
|
|
if(m_last_stat_request_time >= tr.time )
|
|
{
|
|
MWARNING("check_trust failed to check time conditions, last_stat_request_time=" << m_last_stat_request_time << ", proof_time=" << tr.time);
|
|
return false;
|
|
}
|
|
if(m_config.m_peer_id != tr.peer_id)
|
|
{
|
|
MWARNING("check_trust failed: peer_id mismatch (passed " << tr.peer_id << ", expected " << m_config.m_peer_id<< ")");
|
|
return false;
|
|
}
|
|
crypto::public_key pk = AUTO_VAL_INIT(pk);
|
|
epee::string_tools::hex_to_pod(::config::P2P_REMOTE_DEBUG_TRUSTED_PUB_KEY, pk);
|
|
crypto::hash h = get_proof_of_trust_hash(tr);
|
|
if(!crypto::check_signature(h, pk, tr.sign))
|
|
{
|
|
MWARNING("check_trust failed: sign check failed");
|
|
return false;
|
|
}
|
|
//update last request time
|
|
m_last_stat_request_time = tr.time;
|
|
return true;
|
|
}
|
|
//-----------------------------------------------------------------------------------
|
|
template<class t_payload_net_handler>
|
|
int node_server<t_payload_net_handler>::handle_get_stat_info(int command, typename COMMAND_REQUEST_STAT_INFO::request& arg, typename COMMAND_REQUEST_STAT_INFO::response& rsp, p2p_connection_context& context)
|
|
{
|
|
if(!check_trust(arg.tr))
|
|
{
|
|
drop_connection(context);
|
|
return 1;
|
|
}
|
|
rsp.connections_count = m_net_server.get_config_object().get_connections_count();
|
|
rsp.incoming_connections_count = rsp.connections_count - get_outgoing_connections_count();
|
|
rsp.version = MONERO_VERSION_FULL;
|
|
rsp.os_version = tools::get_os_version_string();
|
|
m_payload_handler.get_stat_info(rsp.payload_info);
|
|
return 1;
|
|
}
|
|
//-----------------------------------------------------------------------------------
|
|
  template<class t_payload_net_handler>
  int node_server<t_payload_net_handler>::handle_get_network_state(int command, COMMAND_REQUEST_NETWORK_STATE::request& arg, COMMAND_REQUEST_NETWORK_STATE::response& rsp, p2p_connection_context& context)
  {
    if(!check_trust(arg.tr))
    {
      drop_connection(context);
      return 1;
    }
    m_net_server.get_config_object().foreach_connection([&](const p2p_connection_context& cntxt)
    {
      connection_entry ce;
      ce.adr = cntxt.m_remote_address;
      ce.id = cntxt.peer_id;
      ce.is_income = cntxt.m_is_income;
      rsp.connections_list.push_back(ce);
      return true;
    });

    m_peerlist.get_peerlist_full(rsp.local_peerlist_gray, rsp.local_peerlist_white);
    rsp.my_id = m_config.m_peer_id;
    rsp.local_time = time(NULL);
    return 1;
  }
  //-----------------------------------------------------------------------------------
  template<class t_payload_net_handler>
  int node_server<t_payload_net_handler>::handle_get_peer_id(int command, COMMAND_REQUEST_PEER_ID::request& arg, COMMAND_REQUEST_PEER_ID::response& rsp, p2p_connection_context& context)
  {
    rsp.my_id = m_config.m_peer_id;
    return 1;
  }
#endif
  //-----------------------------------------------------------------------------------
  template<class t_payload_net_handler>
  int node_server<t_payload_net_handler>::handle_get_support_flags(int command, COMMAND_REQUEST_SUPPORT_FLAGS::request& arg, COMMAND_REQUEST_SUPPORT_FLAGS::response& rsp, p2p_connection_context& context)
  {
    rsp.support_flags = m_config.m_support_flags;
    return 1;
  }
  //-----------------------------------------------------------------------------------
  template<class t_payload_net_handler>
  void node_server<t_payload_net_handler>::request_callback(const epee::net_utils::connection_context_base& context)
  {
    m_net_server.get_config_object().request_callback(context.m_connection_id);
  }
  //-----------------------------------------------------------------------------------
  template<class t_payload_net_handler>
  bool node_server<t_payload_net_handler>::relay_notify_to_list(int command, const std::string& data_buff, const std::list<boost::uuids::uuid> &connections)
  {
    for(const auto& c_id: connections)
    {
      m_net_server.get_config_object().notify(command, data_buff, c_id);
    }
    return true;
  }
  //-----------------------------------------------------------------------------------
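  // Sends a notify command to every handshaked connection except the one the data
  // originated from, delegating the actual send to relay_notify_to_list().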
  template<class t_payload_net_handler>
  bool node_server<t_payload_net_handler>::relay_notify_to_all(int command, const std::string& data_buff, const epee::net_utils::connection_context_base& context)
  {
    std::list<boost::uuids::uuid> connections;
    m_net_server.get_config_object().foreach_connection([&](const p2p_connection_context& cntxt)
    {
      if(cntxt.peer_id && context.m_connection_id != cntxt.m_connection_id)
        connections.push_back(cntxt.m_connection_id);
      return true;
    });
    return relay_notify_to_list(command, data_buff, connections);
  }
  //-----------------------------------------------------------------------------------
  template<class t_payload_net_handler>
  void node_server<t_payload_net_handler>::callback(p2p_connection_context& context)
  {
    m_payload_handler.on_callback(context);
  }
  //-----------------------------------------------------------------------------------
  template<class t_payload_net_handler>
  bool node_server<t_payload_net_handler>::invoke_notify_to_peer(int command, const std::string& req_buff, const epee::net_utils::connection_context_base& context)
  {
    int res = m_net_server.get_config_object().notify(command, req_buff, context.m_connection_id);
    return res > 0;
  }
  //-----------------------------------------------------------------------------------
  template<class t_payload_net_handler>
  bool node_server<t_payload_net_handler>::invoke_command_to_peer(int command, const std::string& req_buff, std::string& resp_buff, const epee::net_utils::connection_context_base& context)
  {
    int res = m_net_server.get_config_object().invoke(command, req_buff, resp_buff, context.m_connection_id);
    return res > 0;
  }
  //-----------------------------------------------------------------------------------
  template<class t_payload_net_handler>
  bool node_server<t_payload_net_handler>::drop_connection(const epee::net_utils::connection_context_base& context)
  {
    m_net_server.get_config_object().close(context.m_connection_id);
    return true;
  }
  //-----------------------------------------------------------------------------------
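  // Back-pings a peer that advertised a listening port: connects to the advertised
  // address, invokes COMMAND_PING and, if the returned peer id matches, calls the
  // supplied callback so the caller can add the peer to its peerlist.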
  template<class t_payload_net_handler> template<class t_callback>
  bool node_server<t_payload_net_handler>::try_ping(basic_node_data& node_data, p2p_connection_context& context, const t_callback &cb)
  {
    if(!node_data.my_port)
      return false;

    CHECK_AND_ASSERT_MES(context.m_remote_address.get_type_id() == epee::net_utils::ipv4_network_address::ID, false,
        "Only IPv4 addresses are supported here");

    const epee::net_utils::network_address na = context.m_remote_address;
    uint32_t actual_ip = na.as<const epee::net_utils::ipv4_network_address>().ip();
    if(!m_peerlist.is_host_allowed(context.m_remote_address))
      return false;
    std::string ip = epee::string_tools::get_ip_string_from_int32(actual_ip);
    std::string port = epee::string_tools::num_to_string_fast(node_data.my_port);
    epee::net_utils::network_address address{epee::net_utils::ipv4_network_address(actual_ip, node_data.my_port)};
    peerid_type pr = node_data.peer_id;
    bool r = m_net_server.connect_async(ip, port, m_config.m_net_config.ping_connection_timeout, [cb, /*context,*/ address, pr, this](
      const typename net_server::t_connection_context& ping_context,
      const boost::system::error_code& ec)->bool
    {
      if(ec)
      {
        LOG_WARNING_CC(ping_context, "back ping connect failed to " << address.str());
        return false;
      }
      COMMAND_PING::request req;
      COMMAND_PING::response rsp;
      //vc2010 workaround
      /*std::string ip_ = ip;
      std::string port_=port;
      peerid_type pr_ = pr;
      auto cb_ = cb;*/

      // GCC 5.1.0 gives error with second use of uint64_t (peerid_type) variable.
      peerid_type pr_ = pr;

      bool inv_call_res = epee::net_utils::async_invoke_remote_command2<COMMAND_PING::response>(ping_context.m_connection_id, COMMAND_PING::ID, req, m_net_server.get_config_object(),
        [=](int code, const COMMAND_PING::response& rsp, p2p_connection_context& context)
      {
        if(code <= 0)
        {
          LOG_WARNING_CC(ping_context, "Failed to invoke COMMAND_PING to " << address.str() << " (" << code << ", " << epee::levin::get_err_descr(code) << ")");
          return;
        }

        if(rsp.status != PING_OK_RESPONSE_STATUS_TEXT || pr != rsp.peer_id)
        {
          LOG_WARNING_CC(ping_context, "back ping invoke wrong response \"" << rsp.status << "\" from " << address.str() << ", hsh_peer_id=" << pr_ << ", rsp.peer_id=" << rsp.peer_id);
          m_net_server.get_config_object().close(ping_context.m_connection_id);
          return;
        }
        m_net_server.get_config_object().close(ping_context.m_connection_id);
        cb();
      });

      if(!inv_call_res)
      {
        LOG_WARNING_CC(ping_context, "back ping invoke failed to " << address.str());
        m_net_server.get_config_object().close(ping_context.m_connection_id);
        return false;
      }
      return true;
    });
    if(!r)
    {
      LOG_WARNING_CC(context, "Failed to call connect_async, network error.");
    }
    return r;
  }
  //-----------------------------------------------------------------------------------
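  // Asynchronously queries a peer's support flags and passes them to the supplied
  // callback; a failed invoke is only logged.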
  template<class t_payload_net_handler>
  bool node_server<t_payload_net_handler>::try_get_support_flags(const p2p_connection_context& context, std::function<void(p2p_connection_context&, const uint32_t&)> f)
  {
    COMMAND_REQUEST_SUPPORT_FLAGS::request support_flags_request;
    bool r = epee::net_utils::async_invoke_remote_command2<typename COMMAND_REQUEST_SUPPORT_FLAGS::response>
    (
      context.m_connection_id,
      COMMAND_REQUEST_SUPPORT_FLAGS::ID,
      support_flags_request,
      m_net_server.get_config_object(),
      [=](int code, const typename COMMAND_REQUEST_SUPPORT_FLAGS::response& rsp, p2p_connection_context& context_)
      {
        if(code < 0)
        {
          LOG_WARNING_CC(context_, "COMMAND_REQUEST_SUPPORT_FLAGS invoke failed. (" << code << ", " << epee::levin::get_err_descr(code) << ")");
          return;
        }

        f(context_, rsp.support_flags);
      },
      P2P_DEFAULT_HANDSHAKE_INVOKE_TIMEOUT
    );

    return r;
  }
  //-----------------------------------------------------------------------------------
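  // Periodic sync from a peer: validates its payload sync data (dropping the connection
  // on failure) and replies with our local time, peerlist head and payload sync data.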
  template<class t_payload_net_handler>
  int node_server<t_payload_net_handler>::handle_timed_sync(int command, typename COMMAND_TIMED_SYNC::request& arg, typename COMMAND_TIMED_SYNC::response& rsp, p2p_connection_context& context)
  {
    if(!m_payload_handler.process_payload_sync_data(arg.payload_data, context, false))
    {
      LOG_WARNING_CC(context, "Failed to process_payload_sync_data(), dropping connection");
      drop_connection(context);
      return 1;
    }

    //fill response
    rsp.local_time = time(NULL);
    m_peerlist.get_peerlist_head(rsp.local_peerlist_new);
    m_payload_handler.get_payload_sync_data(rsp.payload_data);
    LOG_DEBUG_CC(context, "COMMAND_TIMED_SYNC");
    return 1;
  }
  //-----------------------------------------------------------------------------------
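  // Handshake on an incoming connection: verifies the network id, rejects handshakes on
  // outgoing or already-handshaked connections, enforces the incoming-connection and
  // per-host limits, processes the peer's payload sync data, back-pings the advertised
  // port before white-listing the peer, queries its support flags, and finally fills the
  // response with our peerlist head, node data and payload sync data.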
  template<class t_payload_net_handler>
  int node_server<t_payload_net_handler>::handle_handshake(int command, typename COMMAND_HANDSHAKE::request& arg, typename COMMAND_HANDSHAKE::response& rsp, p2p_connection_context& context)
  {
    if(arg.node_data.network_id != m_network_id)
    {

      LOG_INFO_CC(context, "WRONG NETWORK AGENT CONNECTED! id=" << epee::string_tools::get_str_from_guid_a(arg.node_data.network_id));
      drop_connection(context);
      add_host_fail(context.m_remote_address);
      return 1;
    }

    if(!context.m_is_income)
    {
      LOG_WARNING_CC(context, "COMMAND_HANDSHAKE came, but not from an incoming connection");
      drop_connection(context);
      add_host_fail(context.m_remote_address);
      return 1;
    }

    if(context.peer_id)
    {
      LOG_WARNING_CC(context, "COMMAND_HANDSHAKE came, but it seems the connection already has an associated peer_id (double COMMAND_HANDSHAKE?)");
      drop_connection(context);
      return 1;
    }

    if (m_current_number_of_in_peers >= m_config.m_net_config.max_in_connection_count) // in peers limit
    {
      LOG_WARNING_CC(context, "COMMAND_HANDSHAKE came, but we already have the maximum number of incoming connections, so dropping this one.");
      drop_connection(context);
      return 1;
    }

    if(!m_payload_handler.process_payload_sync_data(arg.payload_data, context, true))
    {
      LOG_WARNING_CC(context, "COMMAND_HANDSHAKE came, but process_payload_sync_data returned false, dropping connection.");
      drop_connection(context);
      return 1;
    }

    if(has_too_many_connections(context.m_remote_address))
    {
      LOG_PRINT_CCONTEXT_L1("CONNECTION FROM " << context.m_remote_address.host_str() << " REFUSED, too many connections from the same address");
      drop_connection(context);
      return 1;
    }

    //associate peer_id with this connection
    context.peer_id = arg.node_data.peer_id;
    context.m_in_timedsync = false;

    if(arg.node_data.peer_id != m_config.m_peer_id && arg.node_data.my_port)
    {
      peerid_type peer_id_l = arg.node_data.peer_id;
      uint32_t port_l = arg.node_data.my_port;
      //try ping to be sure that we can add this peer to peer_list
      try_ping(arg.node_data, context, [peer_id_l, port_l, context, this]()
      {
        CHECK_AND_ASSERT_MES(context.m_remote_address.get_type_id() == epee::net_utils::ipv4_network_address::ID, void(),
            "Only IPv4 addresses are supported here");
        //called only(!) if the ping succeeded; update local peerlist
        peerlist_entry pe;
        const epee::net_utils::network_address na = context.m_remote_address;
        pe.adr = epee::net_utils::ipv4_network_address(na.as<epee::net_utils::ipv4_network_address>().ip(), port_l);
        time_t last_seen;
        time(&last_seen);
        pe.last_seen = static_cast<int64_t>(last_seen);
        pe.id = peer_id_l;
        this->m_peerlist.append_with_peer_white(pe);
        LOG_DEBUG_CC(context, "PING SUCCESS " << context.m_remote_address.host_str() << ":" << port_l);
      });
    }

    try_get_support_flags(context, [](p2p_connection_context& flags_context, const uint32_t& support_flags)
    {
      flags_context.support_flags = support_flags;
    });

    //fill response
    m_peerlist.get_peerlist_head(rsp.local_peerlist_new);
    get_local_node_data(rsp.node_data);
    m_payload_handler.get_payload_sync_data(rsp.payload_data);
    LOG_DEBUG_CC(context, "COMMAND_HANDSHAKE");
    return 1;
  }
  //-----------------------------------------------------------------------------------
  template<class t_payload_net_handler>
  int node_server<t_payload_net_handler>::handle_ping(int command, COMMAND_PING::request& arg, COMMAND_PING::response& rsp, p2p_connection_context& context)
  {
    LOG_DEBUG_CC(context, "COMMAND_PING");
    rsp.status = PING_OK_RESPONSE_STATUS_TEXT;
    rsp.peer_id = m_config.m_peer_id;
    return 1;
  }
  //-----------------------------------------------------------------------------------
  template<class t_payload_net_handler>
  bool node_server<t_payload_net_handler>::log_peerlist()
  {
    std::list<peerlist_entry> pl_white;
    std::list<peerlist_entry> pl_gray;
    m_peerlist.get_peerlist_full(pl_gray, pl_white);
    MINFO(ENDL << "Peerlist white:" << ENDL << print_peerlist_to_string(pl_white) << ENDL << "Peerlist gray:" << ENDL << print_peerlist_to_string(pl_gray) );
    return true;
  }
  //-----------------------------------------------------------------------------------
  template<class t_payload_net_handler>
  bool node_server<t_payload_net_handler>::log_connections()
  {
    MINFO("Connections: \r\n" << print_connections_container() );
    return true;
  }
  //-----------------------------------------------------------------------------------
  template<class t_payload_net_handler>
  std::string node_server<t_payload_net_handler>::print_connections_container()
  {

    std::stringstream ss;
    m_net_server.get_config_object().foreach_connection([&](const p2p_connection_context& cntxt)
    {
      ss << cntxt.m_remote_address.str()
        << " \t\tpeer_id " << cntxt.peer_id
        << " \t\tconn_id " << epee::string_tools::get_str_from_guid_a(cntxt.m_connection_id) << (cntxt.m_is_income ? " INC":" OUT")
        << std::endl;
      return true;
    });
    std::string s = ss.str();
    return s;
  }
  //-----------------------------------------------------------------------------------
  template<class t_payload_net_handler>
  void node_server<t_payload_net_handler>::on_connection_new(p2p_connection_context& context)
  {
    MINFO("["<< epee::net_utils::print_connection_context(context) << "] NEW CONNECTION");
  }
  //-----------------------------------------------------------------------------------
  template<class t_payload_net_handler>
  void node_server<t_payload_net_handler>::on_connection_close(p2p_connection_context& context)
  {
    if (!m_net_server.is_stop_signal_sent() && !context.m_is_income) {
      epee::net_utils::network_address na = AUTO_VAL_INIT(na);
      na = context.m_remote_address;

      m_peerlist.remove_from_peer_anchor(na);
    }

    m_payload_handler.on_connection_close(context);

    MINFO("["<< epee::net_utils::print_connection_context(context) << "] CLOSE CONNECTION");
  }

  template<class t_payload_net_handler>
  bool node_server<t_payload_net_handler>::is_priority_node(const epee::net_utils::network_address& na)
  {
    return (std::find(m_priority_peers.begin(), m_priority_peers.end(), na) != m_priority_peers.end()) || (std::find(m_exclusive_peers.begin(), m_exclusive_peers.end(), na) != m_exclusive_peers.end());
  }

  template<class t_payload_net_handler> template <class Container>
  bool node_server<t_payload_net_handler>::connect_to_peerlist(const Container& peers)
  {
    for(const epee::net_utils::network_address& na: peers)
    {
      if(m_net_server.is_stop_signal_sent())
        return false;

      if(is_addr_connected(na))
        continue;

      try_to_connect_and_handshake_with_new_peer(na);
    }

    return true;
  }

  template<class t_payload_net_handler> template <class Container>
  bool node_server<t_payload_net_handler>::parse_peers_and_add_to_container(const boost::program_options::variables_map& vm, const command_line::arg_descriptor<std::vector<std::string> > & arg, Container& container)
  {
    std::vector<std::string> perrs = command_line::get_arg(vm, arg);

    for(const std::string& pr_str: perrs)
    {
      epee::net_utils::network_address na = AUTO_VAL_INIT(na);
      const uint16_t default_port = m_nettype == cryptonote::TESTNET ? ::config::testnet::P2P_DEFAULT_PORT : m_nettype == cryptonote::STAGENET ? ::config::stagenet::P2P_DEFAULT_PORT : ::config::P2P_DEFAULT_PORT;
      bool r = parse_peer_from_string(na, pr_str, default_port);
      CHECK_AND_ASSERT_MES(r, false, "Failed to parse address from string: " << pr_str);
      container.push_back(na);
    }

    return true;
  }

  template<class t_payload_net_handler>
  bool node_server<t_payload_net_handler>::set_max_out_peers(const boost::program_options::variables_map& vm, int64_t max)
  {
    if(max == -1) {
      m_config.m_net_config.max_out_connection_count = P2P_DEFAULT_CONNECTIONS_COUNT;
      return true;
    }
    m_config.m_net_config.max_out_connection_count = max;
    return true;
  }

  template<class t_payload_net_handler>
  bool node_server<t_payload_net_handler>::set_max_in_peers(const boost::program_options::variables_map& vm, int64_t max)
  {
    if(max == -1) {
      m_config.m_net_config.max_in_connection_count = -1;
      return true;
    }
    m_config.m_net_config.max_in_connection_count = max;
    return true;
  }

  template<class t_payload_net_handler>
  void node_server<t_payload_net_handler>::delete_out_connections(size_t count)
  {
    m_net_server.get_config_object().del_out_connections(count);
  }

  template<class t_payload_net_handler>
  void node_server<t_payload_net_handler>::delete_in_connections(size_t count)
  {
    m_net_server.get_config_object().del_in_connections(count);
  }

  template<class t_payload_net_handler>
  bool node_server<t_payload_net_handler>::set_tos_flag(const boost::program_options::variables_map& vm, int flag)
  {
    if(flag==-1){
      return true;
    }
    epee::net_utils::connection<epee::levin::async_protocol_handler<p2p_connection_context> >::set_tos_flag(flag);
    _dbg1("Set ToS flag " << flag);
    return true;
  }

  template<class t_payload_net_handler>
  bool node_server<t_payload_net_handler>::set_rate_up_limit(const boost::program_options::variables_map& vm, int64_t limit)
  {
    this->islimitup=true;

    if (limit==-1) {
      limit=default_limit_up;
      this->islimitup=false;
    }

    epee::net_utils::connection<epee::levin::async_protocol_handler<p2p_connection_context> >::set_rate_up_limit( limit );
    MINFO("Set limit-up to " << limit << " kB/s");
    return true;
  }

  template<class t_payload_net_handler>
  bool node_server<t_payload_net_handler>::set_rate_down_limit(const boost::program_options::variables_map& vm, int64_t limit)
  {
    this->islimitdown=true;
    if(limit==-1) {
      limit=default_limit_down;
      this->islimitdown=false;
    }
    epee::net_utils::connection<epee::levin::async_protocol_handler<p2p_connection_context> >::set_rate_down_limit( limit );
    MINFO("Set limit-down to " << limit << " kB/s");
    return true;
  }

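  // Applies a single limit value to both directions (falling back to the defaults when
  // -1 is given), skipping any direction that was already limited explicitly via
  // set_rate_up_limit()/set_rate_down_limit().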
  template<class t_payload_net_handler>
  bool node_server<t_payload_net_handler>::set_rate_limit(const boost::program_options::variables_map& vm, int64_t limit)
  {
    int64_t limit_up = 0;
    int64_t limit_down = 0;

    if(limit == -1)
    {
      limit_up = default_limit_up;
      limit_down = default_limit_down;
    }
    else
    {
      limit_up = limit;
      limit_down = limit;
    }
    if(!this->islimitup) {
      epee::net_utils::connection<epee::levin::async_protocol_handler<p2p_connection_context> >::set_rate_up_limit(limit_up);
      MINFO("Set limit-up to " << limit_up << " kB/s");
    }
    if(!this->islimitdown) {
      epee::net_utils::connection<epee::levin::async_protocol_handler<p2p_connection_context> >::set_rate_down_limit(limit_down);
      MINFO("Set limit-down to " << limit_down << " kB/s");
    }

    return true;
  }

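  // Returns true when the given host already has more than max_connections (currently 1)
  // incoming connections, so a new incoming connection from it should be refused.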
  template<class t_payload_net_handler>
  bool node_server<t_payload_net_handler>::has_too_many_connections(const epee::net_utils::network_address &address)
  {
    const size_t max_connections = 1;
    size_t count = 0;

    m_net_server.get_config_object().foreach_connection([&](const p2p_connection_context& cntxt)
    {
      if (cntxt.m_is_income && cntxt.m_remote_address.is_same_host(address)) {
        count++;

        if (count > max_connections) {
          return false;
        }
      }

      return true;
    });

    return count > max_connections;
  }

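  // Picks a random peer from the gray peerlist and probes it with a handshake:
  // unreachable peers are evicted from the gray list, reachable ones are marked as just
  // seen, which promotes them to the white list.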
  template<class t_payload_net_handler>
  bool node_server<t_payload_net_handler>::gray_peerlist_housekeeping()
  {
    if (m_offline) return true;
    if (!m_exclusive_peers.empty()) return true;

    peerlist_entry pe = AUTO_VAL_INIT(pe);

    if (m_net_server.is_stop_signal_sent())
      return false;

    if (!m_peerlist.get_random_gray_peer(pe)) {
      return false;
    }

    bool success = check_connection_and_handshake_with_peer(pe.adr, pe.last_seen);

    if (!success) {
      m_peerlist.remove_from_peer_gray(pe);

      LOG_PRINT_L2("PEER EVICTED FROM GRAY PEER LIST IP address: " << pe.adr.host_str() << " Peer ID: " << peerid_type(pe.id));

      return true;
    }

    m_peerlist.set_peer_just_seen(pe.id, pe.adr);

    LOG_PRINT_L2("PEER PROMOTED TO WHITE PEER LIST IP address: " << pe.adr.host_str() << " Peer ID: " << peerid_type(pe.id));

    return true;
  }

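  // Asks a UPnP IGD to forward the given TCP port to us: discovers devices, removes any
  // stale mapping for the port, then adds a fresh mapping described as CRYPTONOTE_NAME.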
  template<class t_payload_net_handler>
  void node_server<t_payload_net_handler>::add_upnp_port_mapping(uint32_t port)
  {
    MDEBUG("Attempting to add IGD port mapping.");
    int result;
#if MINIUPNPC_API_VERSION > 13
    // default according to miniupnpc.h
    unsigned char ttl = 2;
    UPNPDev* deviceList = upnpDiscover(1000, NULL, NULL, 0, 0, ttl, &result);
#else
    UPNPDev* deviceList = upnpDiscover(1000, NULL, NULL, 0, 0, &result);
#endif
    UPNPUrls urls;
    IGDdatas igdData;
    char lanAddress[64];
    result = UPNP_GetValidIGD(deviceList, &urls, &igdData, lanAddress, sizeof lanAddress);
    freeUPNPDevlist(deviceList);
    if (result != 0) {
      if (result == 1) {
        std::ostringstream portString;
        portString << port;

        // Delete the port mapping before we create it, just in case we have dangling port mapping from the daemon not being shut down correctly
        UPNP_DeletePortMapping(urls.controlURL, igdData.first.servicetype, portString.str().c_str(), "TCP", 0);

        int portMappingResult;
        portMappingResult = UPNP_AddPortMapping(urls.controlURL, igdData.first.servicetype, portString.str().c_str(), portString.str().c_str(), lanAddress, CRYPTONOTE_NAME, "TCP", 0, "0");
        if (portMappingResult != 0) {
          LOG_ERROR("UPNP_AddPortMapping failed, error: " << strupnperror(portMappingResult));
        } else {
          MLOG_GREEN(el::Level::Info, "Added IGD port mapping.");
        }
      } else if (result == 2) {
        MWARNING("IGD was found but reported as not connected.");
      } else if (result == 3) {
        MWARNING("UPnP device was found but not recognized as IGD.");
      } else {
        MWARNING("UPNP_GetValidIGD returned an unknown result code.");
      }

      FreeUPNPUrls(&urls);
    } else {
      MINFO("No IGD was found.");
    }
  }

  template<class t_payload_net_handler>
  void node_server<t_payload_net_handler>::delete_upnp_port_mapping(uint32_t port)
  {
    MDEBUG("Attempting to delete IGD port mapping.");
    int result;
#if MINIUPNPC_API_VERSION > 13
    // default according to miniupnpc.h
    unsigned char ttl = 2;
    UPNPDev* deviceList = upnpDiscover(1000, NULL, NULL, 0, 0, ttl, &result);
#else
    UPNPDev* deviceList = upnpDiscover(1000, NULL, NULL, 0, 0, &result);
#endif
    UPNPUrls urls;
    IGDdatas igdData;
    char lanAddress[64];
    result = UPNP_GetValidIGD(deviceList, &urls, &igdData, lanAddress, sizeof lanAddress);
    freeUPNPDevlist(deviceList);
    if (result != 0) {
      if (result == 1) {
        std::ostringstream portString;
        portString << port;

        int portMappingResult;
        portMappingResult = UPNP_DeletePortMapping(urls.controlURL, igdData.first.servicetype, portString.str().c_str(), "TCP", 0);
        if (portMappingResult != 0) {
          LOG_ERROR("UPNP_DeletePortMapping failed, error: " << strupnperror(portMappingResult));
        } else {
          MLOG_GREEN(el::Level::Info, "Deleted IGD port mapping.");
        }
      } else if (result == 2) {
        MWARNING("IGD was found but reported as not connected.");
      } else if (result == 3) {
        MWARNING("UPnP device was found but not recognized as IGD.");
      } else {
        MWARNING("UPNP_GetValidIGD returned an unknown result code.");
      }

      FreeUPNPUrls(&urls);
    } else {
      MINFO("No IGD was found.");
    }
  }
}