Make option "dataset_host" available only for RandomX.
parent f8d1488e33
commit c235145121

7 changed files with 31 additions and 29 deletions
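Taken together, the hunks below rename the CUDA thread's dataset flag from m_dataset_host (a bool) to m_datasetHost (an int32_t tri-state where -1 means "not configured"), write it back to JSON only when it was explicitly set, keep it as a const bool inside the RandomX runner, and give CudaLib::deviceInfo() a defaulted dataset_host parameter so the non-RandomX call sites can omit it.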
@@ -59,21 +59,21 @@ xmrig::CudaThread::CudaThread(const rapidjson::Value &value)
     m_affinity = Json::getUint64(value, kAffinity, m_affinity);
 
     if (Json::getValue(value, kDatasetHost).IsInt()) {
-        m_dataset_host = Json::getInt(value, kDatasetHost) != 0;
+        m_datasetHost = Json::getInt(value, kDatasetHost, m_datasetHost) != 0;
     }
     else {
-        m_dataset_host = Json::getBool(value, kDatasetHost);
+        m_datasetHost = Json::getBool(value, kDatasetHost);
     }
 }
 
 
 xmrig::CudaThread::CudaThread(uint32_t index, nvid_ctx *ctx) :
     m_blocks(CudaLib::deviceInt(ctx, CudaLib::DeviceBlocks)),
+    m_datasetHost(CudaLib::deviceInt(ctx, CudaLib::DeviceDatasetHost)),
     m_threads(CudaLib::deviceInt(ctx, CudaLib::DeviceThreads)),
     m_index(index),
     m_bfactor(CudaLib::deviceUint(ctx, CudaLib::DeviceBFactor)),
-    m_bsleep(CudaLib::deviceUint(ctx, CudaLib::DeviceBSleep)),
-    m_dataset_host(CudaLib::deviceInt(ctx, CudaLib::DeviceDatasetHost) != 0)
+    m_bsleep(CudaLib::deviceUint(ctx, CudaLib::DeviceBSleep))
 {
 
 }
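For context on the first hunk: the config constructor accepts "dataset_host" either as a boolean or as an integer. Below is a minimal, standalone sketch of that dual-type parsing written directly against rapidjson; xmrig's own Json helpers are not used, and parseDatasetHost is an illustrative name, not part of the codebase.

    // Illustrative sketch, not xmrig code: read "dataset_host" from a JSON object,
    // accepting either a boolean or an integer, and keep a caller-supplied default
    // (-1 = "not configured") when the key is missing or has an unexpected type.
    #include <cstdint>
    #include "rapidjson/document.h"

    static int32_t parseDatasetHost(const rapidjson::Value &obj, int32_t fallback = -1)
    {
        const auto member = obj.FindMember("dataset_host");
        if (member == obj.MemberEnd()) {
            return fallback;
        }

        if (member->value.IsInt()) {
            return member->value.GetInt() != 0 ? 1 : 0;   // any non-zero integer enables it
        }

        if (member->value.IsBool()) {
            return member->value.GetBool() ? 1 : 0;
        }

        return fallback;
    }

With the default of -1, this reproduces the tri-state the commit stores in m_datasetHost.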
@@ -81,13 +81,13 @@ xmrig::CudaThread::CudaThread(uint32_t index, nvid_ctx *ctx) :
 
 bool xmrig::CudaThread::isEqual(const CudaThread &other) const
 {
-    return m_blocks == other.m_blocks &&
-           m_threads == other.m_threads &&
-           m_affinity == other.m_affinity &&
-           m_index == other.m_index &&
-           m_bfactor == other.m_bfactor &&
-           m_bsleep == other.m_bsleep &&
-           m_dataset_host == other.m_dataset_host;
+    return m_blocks == other.m_blocks &&
+           m_threads == other.m_threads &&
+           m_affinity == other.m_affinity &&
+           m_index == other.m_index &&
+           m_bfactor == other.m_bfactor &&
+           m_bsleep == other.m_bsleep &&
+           m_datasetHost == other.m_datasetHost;
 }
 
 
@@ -104,7 +104,10 @@ rapidjson::Value xmrig::CudaThread::toJSON(rapidjson::Document &doc) const
     out.AddMember(StringRef(kBFactor), bfactor(), allocator);
     out.AddMember(StringRef(kBSleep), bsleep(), allocator);
     out.AddMember(StringRef(kAffinity), affinity(), allocator);
-    out.AddMember(StringRef(kDatasetHost), dataset_host(), allocator);
+
+    if (m_datasetHost >= 0) {
+        out.AddMember(StringRef(kDatasetHost), m_datasetHost > 0, allocator);
+    }
 
     return out;
 }
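The toJSON() hunk above now writes "dataset_host" only when the option was explicitly configured (m_datasetHost >= 0). A hedged, standalone sketch of that conditional emission against plain rapidjson; addDatasetHost is an illustrative name, not an xmrig function.

    // Illustrative sketch, not xmrig code: append "dataset_host" to a JSON object
    // only when the tri-state value is set; -1 means the user never configured it.
    #include <cstdint>
    #include "rapidjson/document.h"

    static void addDatasetHost(rapidjson::Value &out, int32_t datasetHost,
                               rapidjson::Document::AllocatorType &allocator)
    {
        if (datasetHost >= 0) {
            // Emitted as a boolean, matching the bool form accepted on input.
            out.AddMember(rapidjson::StringRef("dataset_host"), datasetHost > 0, allocator);
        }
    }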
@@ -47,10 +47,10 @@ public:
     inline int32_t bfactor() const { return static_cast<int32_t>(m_bfactor); }
     inline int32_t blocks() const { return m_blocks; }
     inline int32_t bsleep() const { return static_cast<int32_t>(m_bsleep); }
+    inline int32_t datasetHost() const { return m_datasetHost; }
     inline int32_t threads() const { return m_threads; }
     inline int64_t affinity() const { return m_affinity; }
     inline uint32_t index() const { return m_index; }
-    inline uint32_t dataset_host() const { return m_dataset_host; }
 
     inline bool operator!=(const CudaThread &other) const { return !isEqual(other); }
     inline bool operator==(const CudaThread &other) const { return isEqual(other); }
@@ -59,11 +59,11 @@ public:
     rapidjson::Value toJSON(rapidjson::Document &doc) const;
 
 private:
-    int32_t m_blocks = 0;
-    int32_t m_threads = 0;
-    int64_t m_affinity = -1;
-    uint32_t m_index = 0;
-    bool m_dataset_host = false;
+    int32_t m_blocks = 0;
+    int32_t m_datasetHost = -1;
+    int32_t m_threads = 0;
+    int64_t m_affinity = -1;
+    uint32_t m_index = 0;
 
 #   ifdef _WIN32
     uint32_t m_bfactor = 6;
@@ -47,7 +47,7 @@ xmrig::CudaBaseRunner::~CudaBaseRunner()
 bool xmrig::CudaBaseRunner::init()
 {
     m_ctx = CudaLib::alloc(m_data.thread.index(), m_data.thread.bfactor(), m_data.thread.bsleep());
-    if (CudaLib::deviceInfo(m_ctx, m_data.thread.blocks(), m_data.thread.threads(), m_data.algorithm, m_data.thread.dataset_host() ? 1 : 0) != 0) {
+    if (CudaLib::deviceInfo(m_ctx, m_data.thread.blocks(), m_data.thread.threads(), m_data.algorithm, m_data.thread.datasetHost()) != 0) {
         return false;
     }
 
@@ -31,7 +31,8 @@
 #include "crypto/rx/RxDataset.h"
 
 
-xmrig::CudaRxRunner::CudaRxRunner(size_t index, const CudaLaunchData &data) : CudaBaseRunner(index, data)
+xmrig::CudaRxRunner::CudaRxRunner(size_t index, const CudaLaunchData &data) : CudaBaseRunner(index, data),
+    m_datasetHost(data.thread.datasetHost() > 0)
 {
     m_intensity = m_data.thread.threads() * m_data.thread.blocks();
     const size_t scratchpads_size = m_intensity * m_data.algorithm.l3();
@@ -42,8 +43,6 @@ xmrig::CudaRxRunner::CudaRxRunner(size_t index, const CudaLaunchData &data) : Cu
     }
 
     m_intensity -= m_intensity % 32;
-
-    m_dataset_host = m_data.thread.dataset_host();
 }
 
 
@@ -61,7 +60,7 @@ bool xmrig::CudaRxRunner::set(const Job &job, uint8_t *blob)
     }
 
     auto dataset = Rx::dataset(job, 0);
-    m_ready = callWrapper(CudaLib::rxPrepare(m_ctx, dataset->raw(), dataset->size(false), m_dataset_host, m_intensity));
+    m_ready = callWrapper(CudaLib::rxPrepare(m_ctx, dataset->raw(), dataset->size(false), m_datasetHost, m_intensity));
 
     return m_ready;
 }
@@ -44,9 +44,9 @@ protected:
     bool set(const Job &job, uint8_t *blob) override;
 
 private:
-    bool m_ready = false;
-    size_t m_intensity = 0;
-    bool m_dataset_host = false;
+    bool m_ready = false;
+    const bool m_datasetHost = false;
+    size_t m_intensity = 0;
 };
 
 
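In the runner header above, the flag becomes a const member, which is why the earlier hunks move its initialization from the constructor body into the member-initializer list. A minimal sketch of that pattern with illustrative names (RunnerSketch is not an xmrig class):

    // Illustrative sketch, not xmrig code: a const data member cannot be assigned
    // in the constructor body, so it is initialized once in the initializer list.
    #include <cstdint>

    class RunnerSketch
    {
    public:
        explicit RunnerSketch(int32_t datasetHost) :
            m_datasetHost(datasetHost > 0)   // collapse the tri-state to a bool up front
        {
            // m_datasetHost = true;         // would not compile: the member is const
        }

    private:
        const bool m_datasetHost;
    };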
@@ -41,7 +41,7 @@ xmrig::CudaDevice::CudaDevice(uint32_t index, int32_t bfactor, int32_t bsleep) :
     m_index(index)
 {
     auto ctx = CudaLib::alloc(index, bfactor, bsleep);
-    if (CudaLib::deviceInfo(ctx, 0, 0, Algorithm::INVALID, 0) != 0) {
+    if (CudaLib::deviceInfo(ctx, 0, 0, Algorithm::INVALID) != 0) {
         CudaLib::release(ctx);
 
         return;
@@ -107,7 +107,7 @@ uint32_t xmrig::CudaDevice::smx() const
 
 void xmrig::CudaDevice::generate(const Algorithm &algorithm, CudaThreads &threads) const
 {
-    if (CudaLib::deviceInfo(m_ctx, -1, -1, algorithm, 0) != 0) {
+    if (CudaLib::deviceInfo(m_ctx, -1, -1, algorithm) != 0) {
         return;
     }
 
@@ -80,7 +80,7 @@ public:
     static const char *deviceName(nvid_ctx *ctx) noexcept;
     static const char *lastError(nvid_ctx *ctx) noexcept;
     static const char *pluginVersion() noexcept;
-    static int deviceInfo(nvid_ctx *ctx, int32_t blocks, int32_t threads, const Algorithm &algorithm, int32_t dataset_host) noexcept;
+    static int deviceInfo(nvid_ctx *ctx, int32_t blocks, int32_t threads, const Algorithm &algorithm, int32_t dataset_host = -1) noexcept;
     static int32_t deviceInt(nvid_ctx *ctx, DeviceProperty property) noexcept;
     static nvid_ctx *alloc(uint32_t id, int32_t bfactor, int32_t bsleep) noexcept;
     static std::string version(uint32_t version);
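Finally, giving CudaLib::deviceInfo() a defaulted dataset_host parameter of -1 is what lets the non-RandomX call sites in CudaDevice drop the argument entirely. A small sketch of the same idea with an illustrative function name (queryDevice is not part of the plugin API):

    // Illustrative sketch, not xmrig code: a trailing parameter defaulted to -1
    // ("not applicable") keeps existing callers unchanged, while RandomX-aware
    // callers can still pass 0 (dataset on the GPU) or 1 (dataset on the host).
    #include <cstdint>

    static int queryDevice(int32_t blocks, int32_t threads, int32_t dataset_host = -1)
    {
        (void)blocks;
        (void)threads;
        return dataset_host;   // echoes -1 when the caller left the option unset
    }

    static void exampleCallers()
    {
        queryDevice(0, 0);        // non-RandomX path: option not passed at all
        queryDevice(64, 32, 1);   // RandomX path: host dataset explicitly requested
    }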