Merge mining RPC: added merge_mining_get_job

This commit is contained in:
SChernykh 2023-11-05 17:59:32 +01:00
parent 75bb046f22
commit 40b2c2a858
4 changed files with 163 additions and 20 deletions

View file

@ -55,28 +55,34 @@ Field|Description
-|-
`chain_id`|A unique 32-byte hex-encoded value that identifies this merge mined chain.
Example response 1: `{"jsonrpc":"2.0","id":"0","result":{"chain_id":"f89175d2ce8ce92eaa062eea5c433d0d70f89f5e1554c066dc27943e8cfc37b0"}}`
Example response 1: `{"jsonrpc":"2.0","id":"0","result":{"chain_id":"0f28c4960d96647e77e7ab6d13b85bd16c7ca56f45df802cdc763a5e5c0c7863"}}`
Example response 2: `{"jsonrpc":"2.0","id":"0","error":"something went wrong"}`
### merge_mining_get_job
Example request: `{"jsonrpc":"2.0","id":"0","method":"merge_mining_get_job","params":{"address":"MERGE_MINED_CHAIN_ADDRESS","aux_hash":"f6952d6eef555ddd87aca66e56b91530222d6e318414816f3ba7cf5bf694bf0f","height":3000000,"prev_id":"ad505b0be8a49b89273e307106fa42133cbd804456724c5e7635bd953215d92a"}}`
Request: a JSON containing these fields:
Field|Description
-|-
`height`|Monero height
`prev_id`|Hash of the previous Monero block
`address`|A wallet address on the merge mined chain
`aux_hash`|A 32-byte hex-encoded hash of the merge mining job that is currently being used
`height`|Monero height
`prev_id`|Hash of the previous Monero block
Response: a JSON containing these fields:
Field|Description
-|-
`result`|`OK` or an error message
`aux_blob`|A hex-encoded blob of data. Merge mined chain defines the contents of this blob. It's opaque to P2Pool and will not be changed by it.
`aux_hash`|A 32-byte hex-encoded hash of the `aux_blob`. Merge mined chain defines how exactly this hash is calculated. It's opaque to P2Pool.
`aux_diff`|Mining difficulty (decimal number).
`aux_hash`|A 32-byte hex-encoded hash of the `aux_blob`. Merge mined chain defines how exactly this hash is calculated. It's opaque to P2Pool.
If `aux_hash` is the same as in the request, all other fields will be ignored by P2Pool, so they don't have to be included in the response. Moreover, `{"result":"OK"}` response will be interpreted as a response having the same `aux_hash` as in the request. This enables efficient polling.
If `aux_hash` is the same as in the request, all other fields will be ignored by P2Pool, so they don't have to be included in the response. Moreover, an empty response will be interpreted as a response having the same `aux_hash` as in the request. This enables efficient polling.
Example response 1: `{"jsonrpc":"2.0","id":"0","result":{"aux_blob":"4c6f72656d20697073756d","aux_diff":123456,"aux_hash":"f6952d6eef555ddd87aca66e56b91530222d6e318414816f3ba7cf5bf694bf0f"}}`
Example response 2: `{"jsonrpc":"2.0","id":"0","result":{}}`
### merge_mining_submit_solution

View file

@ -27,12 +27,15 @@ LOG_CATEGORY(MergeMiningClient)
namespace p2pool {
MergeMiningClient::MergeMiningClient(p2pool* pool, const std::string& host)
MergeMiningClient::MergeMiningClient(p2pool* pool, const std::string& host, const std::string& address)
: m_host(host)
, m_port(80)
, m_auxAddress(address)
, m_pool(pool)
, m_loop{}
, m_loopThread{}
, m_timer{}
, m_getJobRunning(false)
, m_shutdownAsync{}
{
const size_t k = host.find_last_of(':');
@ -63,6 +66,13 @@ MergeMiningClient::MergeMiningClient(p2pool* pool, const std::string& host)
}
m_shutdownAsync.data = this;
err = uv_timer_init(&m_loop, &m_timer);
if (err) {
LOGERR(1, "failed to create timer, error " << uv_err_name(err));
uv_loop_close(&m_loop);
throw std::exception();
}
err = uv_thread_create(&m_loopThread, loop, this);
if (err) {
LOGERR(1, "failed to start event loop thread, error " << uv_err_name(err));
@ -81,19 +91,30 @@ MergeMiningClient::~MergeMiningClient()
LOGINFO(1, "stopped");
}
void MergeMiningClient::on_timer()
{
MinerData data = m_pool->miner_data();
merge_mining_get_job(data.height, data.prev_id, m_auxAddress, m_auxHash);
}
void MergeMiningClient::merge_mining_get_chain_id()
{
constexpr char req[] = "{\"jsonrpc\":\"2.0\",\"id\":\"0\",\"method\":\"merge_mining_get_chain_id\"}";
JSONRPCRequest::call(m_host, m_port, req, std::string(), m_pool->params().m_socks5Proxy,
[this](const char* data, size_t size, double) {
parse_merge_mining_get_chain_id(data, size);
if (parse_merge_mining_get_chain_id(data, size)) {
const int err = uv_timer_start(&m_timer, on_timer, 0, 500);
if (err) {
LOGERR(1, "failed to start timer, error " << uv_err_name(err));
}
}
},
[](const char* data, size_t size, double) {
if (size > 0) {
LOGERR(1, "couldn't get merge mining id, error " << log::const_buf(data, size));
}
});
}, &m_loop);
}
bool MergeMiningClient::parse_merge_mining_get_chain_id(const char* data, size_t size)
@ -123,22 +144,94 @@ bool MergeMiningClient::parse_merge_mining_get_chain_id(const char* data, size_t
const auto& chain_id = result["chain_id"];
if (!chain_id.IsString() || (chain_id.GetStringLength() != HASH_SIZE * 2)) {
if (!chain_id.IsString() || !from_hex(chain_id.GetString(), chain_id.GetStringLength(), m_chainID)) {
return err("invalid chain_id");
}
const char* s = chain_id.GetString();
hash id;
for (uint32_t i = 0; i < HASH_SIZE; ++i) {
uint8_t d[2];
if (!from_hex(s[i * 2], d[0]) || !from_hex(s[i * 2 + 1], d[1])) {
return err("chain_id is not hex-encoded");
}
id.h[i] = (d[0] << 4) | d[1];
return true;
}
m_chainID = id;
// Sends a merge_mining_get_job JSON-RPC request to the merge mined chain.
// aux_hash is the aux job currently in use: the server may answer with an
// empty/unchanged result when it has no newer job (handled by
// parse_merge_mining_get_job).
void MergeMiningClient::merge_mining_get_job(uint64_t height, const hash& prev_id, const std::string& address, const hash& aux_hash)
{
	// Allow only one get_job request in flight; the flag is cleared in the
	// second (cleanup/error) callback below once the request finishes.
	if (m_getJobRunning) {
		return;
	}

	m_getJobRunning = true;

	char buf[log::Stream::BUF_SIZE + 1];
	log::Stream s(buf);
	// Build the JSON-RPC request body. The trailing "\0" appears intended to
	// null-terminate buf for JSONRPCRequest::call — TODO confirm log::Stream
	// writes string literals including the embedded NUL.
	s << "{\"jsonrpc\":\"2.0\",\"id\":\"0\",\"method\":\"merge_mining_get_job\",\"params\":{"
		<< "\"address\":\"" << address << '"'
		<< ",\"aux_hash\":\"" << aux_hash << '"'
		<< ",\"height\":" << height
		<< ",\"prev_id\":\"" << prev_id << '"'
		<< "}}\0";

	JSONRPCRequest::call(m_host, m_port, buf, std::string(), m_pool->params().m_socks5Proxy,
		[this](const char* data, size_t size, double) {
			// Success path: parse (and possibly adopt) the returned aux job
			parse_merge_mining_get_job(data, size);
		},
		[this](const char* data, size_t size, double) {
			// Non-empty data here is an error message from the RPC layer
			if (size > 0) {
				LOGERR(1, "couldn't get merge mining job, error " << log::const_buf(data, size));
			}
			// Request finished (either way) — allow the next poll to fire
			m_getJobRunning = false;
		}, &m_loop);
}
// Parses a merge_mining_get_job RPC response and, if it carries a new job,
// updates the cached aux job state (m_auxBlob, m_auxHash, m_auxDiff).
// Returns false (after logging a warning) on a malformed response,
// true otherwise — including the "no change" cases.
bool MergeMiningClient::parse_merge_mining_get_job(const char* data, size_t size)
{
	auto err = [](const char* msg) {
		LOGWARN(1, "merge_mining_get_job RPC call failed: " << msg);
		return false;
	};

	rapidjson::Document doc;

	if (doc.Parse(data, size).HasParseError() || !doc.IsObject()) {
		return err("parsing failed");
	}

	if (doc.HasMember("error")) {
		return err(doc["error"].IsString() ? doc["error"].GetString() : "an unknown error occurred");
	}

	// rapidjson's operator[] asserts on a missing member (UB in release
	// builds), so check that "result" exists before accessing it.
	if (!doc.HasMember("result")) {
		return err("couldn't parse result");
	}

	const auto& result = doc["result"];
	if (!result.IsObject()) {
		return err("couldn't parse result");
	}

	// A result without aux_hash means "job unchanged" — keep the current one
	if (!result.HasMember("aux_hash")) {
		return true;
	}

	const auto& aux_hash = result["aux_hash"];

	hash h;
	if (!aux_hash.IsString() || !from_hex(aux_hash.GetString(), aux_hash.GetStringLength(), h)) {
		return err("invalid aux_hash");
	}

	// Same job as the one we already have — nothing to update
	if (h == m_auxHash) {
		return true;
	}

	if (!result.HasMember("aux_blob") || !result["aux_blob"].IsString()) {
		return err("invalid aux_blob");
	}

	if (!result.HasMember("aux_diff") || !result["aux_diff"].IsUint64()) {
		return err("invalid aux_diff");
	}

	m_auxBlob = result["aux_blob"].GetString();
	m_auxHash = h;
	m_auxDiff.lo = result["aux_diff"].GetUint64();
	m_auxDiff.hi = 0;

	return true;
}
@ -161,4 +254,10 @@ void MergeMiningClient::loop(void* data)
LOGINFO(1, "event loop stopped");
}
// Shutdown hook (runs on the event loop thread via m_shutdownAsync):
// stops the polling timer and closes its handle so the loop can drain
// and exit cleanly.
void MergeMiningClient::on_shutdown()
{
	uv_timer_stop(&m_timer);
	// uv_close is asynchronous; the loop finishes closing the handle
	// before uv_run returns.
	uv_close(reinterpret_cast<uv_handle_t*>(&m_timer), nullptr);
}
} // namespace p2pool

View file

@ -26,18 +26,29 @@ class p2pool;
class MergeMiningClient
{
public:
MergeMiningClient(p2pool* pool, const std::string& host);
MergeMiningClient(p2pool* pool, const std::string& host, const std::string& address);
~MergeMiningClient();
private:
static void loop(void* data);
static void on_timer(uv_timer_t* timer) { reinterpret_cast<MergeMiningClient*>(timer->data)->on_timer(); }
void on_timer();
void merge_mining_get_chain_id();
bool parse_merge_mining_get_chain_id(const char* data, size_t size);
void merge_mining_get_job(uint64_t height, const hash& prev_id, const std::string& address, const hash& aux_hash);
bool parse_merge_mining_get_job(const char* data, size_t size);
std::string m_host;
uint32_t m_port;
std::string m_auxAddress;
std::string m_auxBlob;
hash m_auxHash;
difficulty_type m_auxDiff;
hash m_chainID;
p2pool* m_pool;
@ -45,15 +56,23 @@ private:
uv_loop_t m_loop;
uv_thread_t m_loopThread;
uv_timer_t m_timer;
bool m_getJobRunning;
uv_async_t m_shutdownAsync;
static void on_shutdown(uv_async_t* async)
{
MergeMiningClient* client = reinterpret_cast<MergeMiningClient*>(async->data);
client->on_shutdown();
uv_close(reinterpret_cast<uv_handle_t*>(&client->m_shutdownAsync), nullptr);
delete GetLoopUserData(&client->m_loop, false);
}
void on_shutdown();
};
} // namespace p2pool

View file

@ -99,6 +99,25 @@ static FORCEINLINE bool from_hex(char c, T& out_value) {
return false;
}
// Decodes a 2*HASH_SIZE character hex string into a hash.
// On any malformed input (wrong length or a non-hex character),
// h is left untouched and false is returned.
static FORCEINLINE bool from_hex(const char* s, size_t len, hash& h) {
	if (len != HASH_SIZE * 2) {
		return false;
	}

	hash tmp;

	for (uint32_t i = 0; i < HASH_SIZE; ++i) {
		uint8_t hi, lo;
		if (!from_hex(s[i * 2], hi) || !from_hex(s[i * 2 + 1], lo)) {
			return false;
		}
		tmp.h[i] = static_cast<uint8_t>((hi << 4) | lo);
	}

	h = tmp;
	return true;
}
template<typename T, bool is_signed> struct is_negative_helper {};
template<typename T> struct is_negative_helper<T, false> { static FORCEINLINE bool value(T) { return false; } };
template<typename T> struct is_negative_helper<T, true> { static FORCEINLINE bool value(T x) { return (x < 0); } };