Merge branch 'feature-cuda' into evo

XMRig 2019-10-28 13:29:15 +07:00
commit ab7f0cb4cc
46 changed files with 2820 additions and 65 deletions

View file

@@ -15,6 +15,7 @@ option(WITH_TLS "Enable OpenSSL support" ON)
option(WITH_ASM "Enable ASM PoW implementations" ON)
option(WITH_EMBEDDED_CONFIG "Enable internal embedded JSON config" OFF)
option(WITH_OPENCL "Enable OpenCL backend" ON)
option(WITH_CUDA "Enable CUDA backend" ON)
option(WITH_STRICT_CACHE "Enable strict checks for OpenCL cache" ON)
option(WITH_INTERLEAVE_DEBUG_LOG "Enable debug log for threads interleave" OFF)
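Note: WITH_CUDA defaults to ON, so the new backend is compiled in unless it is switched off at configure time (for example with -DWITH_CUDA=OFF on the cmake command line); in that case the cuda.cmake stanza further down in this diff clears HEADERS_BACKEND_CUDA and SOURCES_BACKEND_CUDA.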

View file

@@ -1,5 +1,6 @@
include (src/backend/cpu/cpu.cmake)
include (src/backend/opencl/opencl.cmake)
include (src/backend/cuda/cuda.cmake)
include (src/backend/common/common.cmake)
@@ -7,10 +8,12 @@ set(HEADERS_BACKEND
"${HEADERS_BACKEND_COMMON}"
"${HEADERS_BACKEND_CPU}"
"${HEADERS_BACKEND_OPENCL}"
"${HEADERS_BACKEND_CUDA}"
)
set(SOURCES_BACKEND
"${SOURCES_BACKEND_COMMON}"
"${SOURCES_BACKEND_CPU}"
"${SOURCES_BACKEND_OPENCL}"
"${SOURCES_BACKEND_CUDA}"
)

View file

@@ -38,6 +38,12 @@ const char *ocl_tag();
#endif
#ifdef XMRIG_FEATURE_CUDA
const char *cuda_tag();
#endif
#ifdef XMRIG_ALGO_RANDOMX
const char *rx_tag();
#endif

View file

@@ -34,6 +34,11 @@
#endif
#ifdef XMRIG_FEATURE_CUDA
# include "backend/cuda/CudaThreads.h"
#endif
namespace xmrig {
@@ -167,4 +172,8 @@ template class Threads<CpuThreads>;
template class Threads<OclThreads>;
#endif
#ifdef XMRIG_FEATURE_CUDA
template class Threads<CudaThreads>;
#endif
} // namespace xmrig

View file

@@ -37,6 +37,11 @@
#endif
#ifdef XMRIG_FEATURE_CUDA
# include "backend/cuda/CudaWorker.h"
#endif
namespace xmrig {
@@ -217,4 +222,16 @@ template class Workers<OclLaunchData>;
#endif
#ifdef XMRIG_FEATURE_CUDA
template<>
xmrig::IWorker *xmrig::Workers<CudaLaunchData>::create(Thread<CudaLaunchData> *handle)
{
return new CudaWorker(handle->id(), handle->config());
}
template class Workers<CudaLaunchData>;
#endif
} // namespace xmrig

View file

@@ -37,6 +37,11 @@
#endif
#ifdef XMRIG_FEATURE_CUDA
# include "backend/cuda/CudaLaunchData.h"
#endif
namespace xmrig {
@@ -80,6 +85,13 @@ extern template class Workers<OclLaunchData>;
#endif
#ifdef XMRIG_FEATURE_CUDA
template<>
IWorker *Workers<CudaLaunchData>::create(Thread<CudaLaunchData> *handle);
extern template class Workers<CudaLaunchData>;
#endif
} // namespace xmrig

View file

@@ -0,0 +1,421 @@
/* XMRig
* Copyright 2010 Jeff Garzik <jgarzik@pobox.com>
* Copyright 2012-2014 pooler <pooler@litecoinpool.org>
* Copyright 2014 Lucas Jones <https://github.com/lucasjones>
* Copyright 2014-2016 Wolf9466 <https://github.com/OhGodAPet>
* Copyright 2016 Jay D Dee <jayddee246@gmail.com>
* Copyright 2017-2018 XMR-Stak <https://github.com/fireice-uk>, <https://github.com/psychocrypt>
* Copyright 2018-2019 SChernykh <https://github.com/SChernykh>
* Copyright 2016-2019 XMRig <https://github.com/xmrig>, <support@xmrig.com>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mutex>
#include <string>
#include "backend/cuda/CudaBackend.h"
#include "backend/common/Hashrate.h"
#include "backend/common/interfaces/IWorker.h"
#include "backend/common/Tags.h"
#include "backend/common/Workers.h"
#include "backend/cuda/CudaConfig.h"
#include "backend/cuda/CudaThreads.h"
#include "backend/cuda/CudaWorker.h"
#include "backend/cuda/wrappers/CudaDevice.h"
#include "backend/cuda/wrappers/CudaLib.h"
#include "base/io/log/Log.h"
#include "base/net/stratum/Job.h"
#include "base/tools/Chrono.h"
#include "base/tools/String.h"
#include "core/config/Config.h"
#include "core/Controller.h"
#include "rapidjson/document.h"
#ifdef XMRIG_FEATURE_API
# include "base/api/interfaces/IApiRequest.h"
#endif
namespace xmrig {
extern template class Threads<CudaThreads>;
constexpr const size_t oneMiB = 1024u * 1024u;
static const char *tag = GREEN_BG_BOLD(WHITE_BOLD_S " nv ");
static const String kType = "cuda";
static std::mutex mutex;
static void printDisabled(const char *reason)
{
Log::print(GREEN_BOLD(" * ") WHITE_BOLD("%-13s") RED_BOLD("disabled") "%s", "CUDA", reason);
}
struct CudaLaunchStatus
{
public:
inline size_t threads() const { return m_threads; }
inline bool started(bool ready)
{
ready ? m_started++ : m_errors++;
return (m_started + m_errors) == m_threads;
}
inline void start(size_t threads)
{
m_started = 0;
m_errors = 0;
m_threads = threads;
m_ts = Chrono::steadyMSecs();
CudaWorker::ready = false;
}
inline void print() const
{
if (m_started == 0) {
LOG_ERR("%s " RED_BOLD("disabled") YELLOW(" (failed to start threads)"), tag);
return;
}
LOG_INFO("%s" GREEN_BOLD(" READY") " threads " "%s%zu/%zu" BLACK_BOLD(" (%" PRIu64 " ms)"),
tag,
m_errors == 0 ? CYAN_BOLD_S : YELLOW_BOLD_S,
m_started,
m_threads,
Chrono::steadyMSecs() - m_ts
);
}
private:
size_t m_errors = 0;
size_t m_started = 0;
size_t m_threads = 0;
uint64_t m_ts = 0;
};
class CudaBackendPrivate
{
public:
inline CudaBackendPrivate(Controller *controller) :
controller(controller)
{
init(controller->config()->cuda());
}
void init(const CudaConfig &cuda)
{
if (!cuda.isEnabled()) {
return printDisabled("");
}
if (!CudaLib::init(cuda.loader())) {
return printDisabled(RED_S " (failed to load CUDA plugin)");
}
const uint32_t runtimeVersion = CudaLib::runtimeVersion();
const uint32_t driverVersion = CudaLib::driverVersion();
if (!runtimeVersion || !driverVersion || !CudaLib::deviceCount()) {
return printDisabled(RED_S " (no devices)");
}
if (!devices.empty()) {
return;
}
Log::print(GREEN_BOLD(" * ") WHITE_BOLD("%-13s") WHITE_BOLD("%u.%u") "/" WHITE_BOLD("%u.%u") BLACK_BOLD("/%s"), "CUDA",
runtimeVersion / 1000, runtimeVersion % 100, driverVersion / 1000, driverVersion % 100, CudaLib::pluginVersion());
devices = CudaLib::devices(cuda.bfactor(), cuda.bsleep());
for (const CudaDevice &device : devices) {
Log::print(GREEN_BOLD(" * ") WHITE_BOLD("%-13s") CYAN_BOLD("#%zu") YELLOW(" %s") GREEN_BOLD(" %s ") WHITE_BOLD("%u/%u MHz") " smx:" WHITE_BOLD("%u") " arch:" WHITE_BOLD("%u%u") " mem:" CYAN("%zu/%zu") " MB",
"CUDA GPU",
device.index(),
device.topology().toString().data(),
device.name().data(),
device.clock(),
device.memoryClock(),
device.smx(),
device.computeCapability(true),
device.computeCapability(false),
device.freeMemSize() / oneMiB,
device.globalMemSize() / oneMiB);
}
}
inline void start(const Job &)
{
LOG_INFO("%s use profile " BLUE_BG(WHITE_BOLD_S " %s ") WHITE_BOLD_S " (" CYAN_BOLD("%zu") WHITE_BOLD(" threads)") " scratchpad " CYAN_BOLD("%zu KB"),
tag,
profileName.data(),
threads.size(),
algo.l3() / 1024
);
Log::print(WHITE_BOLD("| # | GPU | BUS ID | I | T | B | BF | BS | MEM | NAME"));
size_t i = 0;
for (const auto &data : threads) {
Log::print("|" CYAN_BOLD("%3zu") " |" CYAN_BOLD("%4u") " |" YELLOW(" %7s") " |" CYAN_BOLD("%5d") " |" CYAN_BOLD("%4d") " |"
CYAN_BOLD("%4d") " |" CYAN_BOLD("%3d") " |" CYAN_BOLD("%4d") " |" CYAN("%5zu") " | " GREEN("%s"),
i,
data.thread.index(),
data.device.topology().toString().data(),
data.thread.threads() * data.thread.blocks(),
data.thread.threads(),
data.thread.blocks(),
data.thread.bfactor(),
data.thread.bsleep(),
(data.thread.threads() * data.thread.blocks()) * algo.l3() / oneMiB,
data.device.name().data()
);
i++;
}
status.start(threads.size());
workers.start(threads);
}
Algorithm algo;
Controller *controller;
CudaLaunchStatus status;
std::vector<CudaDevice> devices;
std::vector<CudaLaunchData> threads;
String profileName;
Workers<CudaLaunchData> workers;
};
} // namespace xmrig
const char *xmrig::cuda_tag()
{
return tag;
}
xmrig::CudaBackend::CudaBackend(Controller *controller) :
d_ptr(new CudaBackendPrivate(controller))
{
d_ptr->workers.setBackend(this);
}
xmrig::CudaBackend::~CudaBackend()
{
delete d_ptr;
CudaLib::close();
}
bool xmrig::CudaBackend::isEnabled() const
{
return d_ptr->controller->config()->cuda().isEnabled() && CudaLib::isInitialized() && !d_ptr->devices.empty();
}
bool xmrig::CudaBackend::isEnabled(const Algorithm &algorithm) const
{
return !d_ptr->controller->config()->cuda().threads().get(algorithm).isEmpty();
}
const xmrig::Hashrate *xmrig::CudaBackend::hashrate() const
{
return d_ptr->workers.hashrate();
}
const xmrig::String &xmrig::CudaBackend::profileName() const
{
return d_ptr->profileName;
}
const xmrig::String &xmrig::CudaBackend::type() const
{
return kType;
}
void xmrig::CudaBackend::prepare(const Job &)
{
}
void xmrig::CudaBackend::printHashrate(bool details)
{
if (!details || !hashrate()) {
return;
}
char num[8 * 3] = { 0 };
Log::print(WHITE_BOLD_S "| CUDA # | AFFINITY | 10s H/s | 60s H/s | 15m H/s |");
size_t i = 0;
for (const auto &data : d_ptr->threads) {
Log::print("| %8zu | %8" PRId64 " | %7s | %7s | %7s |" CYAN_BOLD(" #%u") YELLOW(" %s") GREEN(" %s"),
i,
data.thread.affinity(),
Hashrate::format(hashrate()->calc(i, Hashrate::ShortInterval), num, sizeof num / 3),
Hashrate::format(hashrate()->calc(i, Hashrate::MediumInterval), num + 8, sizeof num / 3),
Hashrate::format(hashrate()->calc(i, Hashrate::LargeInterval), num + 8 * 2, sizeof num / 3),
data.device.index(),
data.device.topology().toString().data(),
data.device.name().data()
);
i++;
}
Log::print(WHITE_BOLD_S "| - | - | %7s | %7s | %7s |",
Hashrate::format(hashrate()->calc(Hashrate::ShortInterval), num, sizeof num / 3),
Hashrate::format(hashrate()->calc(Hashrate::MediumInterval), num + 8, sizeof num / 3),
Hashrate::format(hashrate()->calc(Hashrate::LargeInterval), num + 8 * 2, sizeof num / 3)
);
}
void xmrig::CudaBackend::setJob(const Job &job)
{
const auto &cuda = d_ptr->controller->config()->cuda();
if (cuda.isEnabled()) {
d_ptr->init(cuda);
}
if (!isEnabled()) {
return stop();
}
auto threads = cuda.get(d_ptr->controller->miner(), job.algorithm(), d_ptr->devices);
if (!d_ptr->threads.empty() && d_ptr->threads.size() == threads.size() && std::equal(d_ptr->threads.begin(), d_ptr->threads.end(), threads.begin())) {
return;
}
d_ptr->algo = job.algorithm();
d_ptr->profileName = cuda.threads().profileName(job.algorithm());
if (d_ptr->profileName.isNull() || threads.empty()) {
LOG_WARN("%s " RED_BOLD("disabled") YELLOW(" (no suitable configuration found)"), tag);
return stop();
}
stop();
d_ptr->threads = std::move(threads);
d_ptr->start(job);
}
void xmrig::CudaBackend::start(IWorker *worker, bool ready)
{
mutex.lock();
if (d_ptr->status.started(ready)) {
d_ptr->status.print();
CudaWorker::ready = true;
}
mutex.unlock();
if (ready) {
worker->start();
}
}
void xmrig::CudaBackend::stop()
{
if (d_ptr->threads.empty()) {
return;
}
const uint64_t ts = Chrono::steadyMSecs();
d_ptr->workers.stop();
d_ptr->threads.clear();
LOG_INFO("%s" YELLOW(" stopped") BLACK_BOLD(" (%" PRIu64 " ms)"), tag, Chrono::steadyMSecs() - ts);
}
void xmrig::CudaBackend::tick(uint64_t ticks)
{
d_ptr->workers.tick(ticks);
}
#ifdef XMRIG_FEATURE_API
rapidjson::Value xmrig::CudaBackend::toJSON(rapidjson::Document &doc) const
{
using namespace rapidjson;
auto &allocator = doc.GetAllocator();
Value out(kObjectType);
out.AddMember("type", type().toJSON(), allocator);
out.AddMember("enabled", isEnabled(), allocator);
out.AddMember("algo", d_ptr->algo.toJSON(), allocator);
out.AddMember("profile", profileName().toJSON(), allocator);
if (d_ptr->threads.empty() || !hashrate()) {
return out;
}
out.AddMember("hashrate", hashrate()->toJSON(doc), allocator);
Value threads(kArrayType);
size_t i = 0;
for (const auto &data : d_ptr->threads) {
Value thread = data.thread.toJSON(doc);
thread.AddMember("hashrate", hashrate()->toJSON(i, doc), allocator);
data.device.toJSON(thread, doc);
i++;
threads.PushBack(thread, allocator);
}
out.AddMember("threads", threads, allocator);
return out;
}
void xmrig::CudaBackend::handleRequest(IApiRequest &)
{
}
#endif
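Note on the banner printed by CudaBackendPrivate::init() above: CUDA encodes versions as major * 1000 + minor * 10, which is what the "/ 1000" and "% 100" pair unpacks; an illustrative runtime value of 10010 would therefore print as 10.10 alongside the driver version and CudaLib::pluginVersion().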

View file

@@ -0,0 +1,79 @@
/* XMRig
* Copyright 2010 Jeff Garzik <jgarzik@pobox.com>
* Copyright 2012-2014 pooler <pooler@litecoinpool.org>
* Copyright 2014 Lucas Jones <https://github.com/lucasjones>
* Copyright 2014-2016 Wolf9466 <https://github.com/OhGodAPet>
* Copyright 2016 Jay D Dee <jayddee246@gmail.com>
* Copyright 2017-2018 XMR-Stak <https://github.com/fireice-uk>, <https://github.com/psychocrypt>
* Copyright 2018-2019 SChernykh <https://github.com/SChernykh>
* Copyright 2016-2019 XMRig <https://github.com/xmrig>, <support@xmrig.com>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef XMRIG_CUDABACKEND_H
#define XMRIG_CUDABACKEND_H
#include <utility>
#include "backend/common/interfaces/IBackend.h"
#include "base/tools/Object.h"
namespace xmrig {
class Controller;
class CudaBackendPrivate;
class Miner;
class CudaBackend : public IBackend
{
public:
XMRIG_DISABLE_COPY_MOVE_DEFAULT(CudaBackend)
CudaBackend(Controller *controller);
~CudaBackend() override;
protected:
bool isEnabled() const override;
bool isEnabled(const Algorithm &algorithm) const override;
const Hashrate *hashrate() const override;
const String &profileName() const override;
const String &type() const override;
void prepare(const Job &nextJob) override;
void printHashrate(bool details) override;
void setJob(const Job &job) override;
void start(IWorker *worker, bool ready) override;
void stop() override;
void tick(uint64_t ticks) override;
# ifdef XMRIG_FEATURE_API
rapidjson::Value toJSON(rapidjson::Document &doc) const override;
void handleRequest(IApiRequest &request) override;
# endif
private:
CudaBackendPrivate *d_ptr;
};
} /* namespace xmrig */
#endif /* XMRIG_CUDABACKEND_H */

View file

@@ -0,0 +1,163 @@
/* XMRig
* Copyright 2010 Jeff Garzik <jgarzik@pobox.com>
* Copyright 2012-2014 pooler <pooler@litecoinpool.org>
* Copyright 2014 Lucas Jones <https://github.com/lucasjones>
* Copyright 2014-2016 Wolf9466 <https://github.com/OhGodAPet>
* Copyright 2016 Jay D Dee <jayddee246@gmail.com>
* Copyright 2017-2018 XMR-Stak <https://github.com/fireice-uk>, <https://github.com/psychocrypt>
* Copyright 2018-2019 SChernykh <https://github.com/SChernykh>
* Copyright 2016-2019 XMRig <https://github.com/xmrig>, <support@xmrig.com>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "backend/cuda/CudaConfig.h"
#include "backend/common/Tags.h"
#include "backend/cuda/CudaConfig_gen.h"
#include "backend/cuda/wrappers/CudaLib.h"
#include "base/io/json/Json.h"
#include "base/io/log/Log.h"
#include "rapidjson/document.h"
namespace xmrig {
static bool generated = false;
static const char *kDevicesHint = "devices-hint";
static const char *kEnabled = "enabled";
static const char *kLoader = "loader";
extern template class Threads<CudaThreads>;
}
rapidjson::Value xmrig::CudaConfig::toJSON(rapidjson::Document &doc) const
{
using namespace rapidjson;
auto &allocator = doc.GetAllocator();
Value obj(kObjectType);
obj.AddMember(StringRef(kEnabled), m_enabled, allocator);
obj.AddMember(StringRef(kLoader), m_loader.toJSON(), allocator);
m_threads.toJSON(obj, doc);
return obj;
}
std::vector<xmrig::CudaLaunchData> xmrig::CudaConfig::get(const Miner *miner, const Algorithm &algorithm, const std::vector<CudaDevice> &devices) const
{
std::vector<CudaLaunchData> out;
const auto &threads = m_threads.get(algorithm);
if (threads.isEmpty()) {
return out;
}
out.reserve(threads.count() * 2);
for (const auto &thread : threads.data()) {
if (thread.index() >= devices.size()) {
LOG_INFO("%s" YELLOW(" skip non-existing device with index ") YELLOW_BOLD("%u"), cuda_tag(), thread.index());
continue;
}
out.emplace_back(miner, algorithm, thread, devices[thread.index()]);
}
return out;
}
void xmrig::CudaConfig::read(const rapidjson::Value &value)
{
if (value.IsObject()) {
m_enabled = Json::getBool(value, kEnabled, m_enabled);
m_loader = Json::getString(value, kLoader);
setDevicesHint(Json::getString(value, kDevicesHint));
m_threads.read(value);
generate();
}
else if (value.IsBool()) {
m_enabled = value.GetBool();
generate();
}
else {
m_shouldSave = true;
generate();
}
}
void xmrig::CudaConfig::generate()
{
if (generated) {
return;
}
if (!isEnabled() || m_threads.has("*")) {
return;
}
if (!CudaLib::init(loader())) {
return;
}
if (!CudaLib::runtimeVersion() || !CudaLib::driverVersion() || !CudaLib::deviceCount()) {
return;
}
const auto devices = CudaLib::devices(bfactor(), bsleep());
if (devices.empty()) {
return;
}
size_t count = 0;
count += xmrig::generate<Algorithm::CN>(m_threads, devices);
count += xmrig::generate<Algorithm::CN_LITE>(m_threads, devices);
count += xmrig::generate<Algorithm::CN_HEAVY>(m_threads, devices);
count += xmrig::generate<Algorithm::CN_PICO>(m_threads, devices);
count += xmrig::generate<Algorithm::RANDOM_X>(m_threads, devices);
generated = true;
m_shouldSave = count > 0;
}
void xmrig::CudaConfig::setDevicesHint(const char *devicesHint)
{
if (devicesHint == nullptr) {
return;
}
const auto indexes = String(devicesHint).split(',');
m_devicesHint.reserve(indexes.size());
for (const auto &index : indexes) {
m_devicesHint.push_back(strtoul(index, nullptr, 10));
}
}
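Note: read() accepts either a plain boolean or an object for the cuda config section. In the object form the recognized keys are enabled, loader and devices-hint, plus the per-algorithm thread profiles handled by Threads<CudaThreads>. devices-hint is a comma-separated string of device indexes parsed with strtoul; an illustrative value such as "0,2" would hint the first and third GPUs.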

View file

@@ -0,0 +1,76 @@
/* XMRig
* Copyright 2010 Jeff Garzik <jgarzik@pobox.com>
* Copyright 2012-2014 pooler <pooler@litecoinpool.org>
* Copyright 2014 Lucas Jones <https://github.com/lucasjones>
* Copyright 2014-2016 Wolf9466 <https://github.com/OhGodAPet>
* Copyright 2016 Jay D Dee <jayddee246@gmail.com>
* Copyright 2017-2018 XMR-Stak <https://github.com/fireice-uk>, <https://github.com/psychocrypt>
* Copyright 2018-2019 SChernykh <https://github.com/SChernykh>
* Copyright 2016-2019 XMRig <https://github.com/xmrig>, <support@xmrig.com>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef XMRIG_CUDACONFIG_H
#define XMRIG_CUDACONFIG_H
#include "backend/cuda/CudaLaunchData.h"
#include "backend/common/Threads.h"
#include "backend/cuda/CudaThreads.h"
namespace xmrig {
class CudaConfig
{
public:
CudaConfig() = default;
rapidjson::Value toJSON(rapidjson::Document &doc) const;
std::vector<CudaLaunchData> get(const Miner *miner, const Algorithm &algorithm, const std::vector<CudaDevice> &devices) const;
void read(const rapidjson::Value &value);
inline bool isEnabled() const { return m_enabled; }
inline bool isShouldSave() const { return m_shouldSave; }
inline const String &loader() const { return m_loader; }
inline const Threads<CudaThreads> &threads() const { return m_threads; }
inline int32_t bfactor() const { return m_bfactor; }
inline int32_t bsleep() const { return m_bsleep; }
private:
void generate();
void setDevicesHint(const char *devicesHint);
bool m_enabled = false;
bool m_shouldSave = false;
std::vector<uint32_t> m_devicesHint;
String m_loader;
Threads<CudaThreads> m_threads;
# ifdef _WIN32
int32_t m_bfactor = 6;
int32_t m_bsleep = 25;
# else
int32_t m_bfactor = 0;
int32_t m_bsleep = 0;
# endif
};
} /* namespace xmrig */
#endif /* XMRIG_CUDACONFIG_H */

View file

@@ -0,0 +1,137 @@
/* XMRig
* Copyright 2010 Jeff Garzik <jgarzik@pobox.com>
* Copyright 2012-2014 pooler <pooler@litecoinpool.org>
* Copyright 2014 Lucas Jones <https://github.com/lucasjones>
* Copyright 2014-2016 Wolf9466 <https://github.com/OhGodAPet>
* Copyright 2016 Jay D Dee <jayddee246@gmail.com>
* Copyright 2017-2018 XMR-Stak <https://github.com/fireice-uk>, <https://github.com/psychocrypt>
* Copyright 2018-2019 SChernykh <https://github.com/SChernykh>
* Copyright 2016-2019 XMRig <https://github.com/xmrig>, <support@xmrig.com>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef XMRIG_CUDACONFIG_GEN_H
#define XMRIG_CUDACONFIG_GEN_H
#include "backend/common/Threads.h"
#include "backend/cuda/CudaThreads.h"
#include "backend/cuda/wrappers/CudaDevice.h"
#include <algorithm>
namespace xmrig {
static inline size_t generate(const char *key, Threads<CudaThreads> &threads, const Algorithm &algorithm, const std::vector<CudaDevice> &devices)
{
if (threads.isExist(algorithm) || threads.has(key)) {
return 0;
}
return threads.move(key, CudaThreads(devices, algorithm));
}
template<Algorithm::Family FAMILY>
static inline size_t generate(Threads<CudaThreads> &, const std::vector<CudaDevice> &) { return 0; }
template<>
size_t inline generate<Algorithm::CN>(Threads<CudaThreads> &threads, const std::vector<CudaDevice> &devices)
{
size_t count = 0;
count += generate("cn", threads, Algorithm::CN_1, devices);
count += generate("cn/2", threads, Algorithm::CN_2, devices);
if (!threads.isExist(Algorithm::CN_0)) {
threads.disable(Algorithm::CN_0);
count++;
}
# ifdef XMRIG_ALGO_CN_GPU
count += generate("cn/gpu", threads, Algorithm::CN_GPU, devices);
# endif
return count;
}
#ifdef XMRIG_ALGO_CN_LITE
template<>
size_t inline generate<Algorithm::CN_LITE>(Threads<CudaThreads> &threads, const std::vector<CudaDevice> &devices)
{
size_t count = generate("cn-lite", threads, Algorithm::CN_LITE_1, devices);
if (!threads.isExist(Algorithm::CN_LITE_0)) {
threads.disable(Algorithm::CN_LITE_0);
++count;
}
return count;
}
#endif
#ifdef XMRIG_ALGO_CN_HEAVY
template<>
size_t inline generate<Algorithm::CN_HEAVY>(Threads<CudaThreads> &threads, const std::vector<CudaDevice> &devices)
{
return generate("cn-heavy", threads, Algorithm::CN_HEAVY_0, devices);
}
#endif
#ifdef XMRIG_ALGO_CN_PICO
template<>
size_t inline generate<Algorithm::CN_PICO>(Threads<CudaThreads> &threads, const std::vector<CudaDevice> &devices)
{
return generate("cn-pico", threads, Algorithm::CN_PICO_0, devices);
}
#endif
#ifdef XMRIG_ALGO_RANDOMX
template<>
size_t inline generate<Algorithm::RANDOM_X>(Threads<CudaThreads> &threads, const std::vector<CudaDevice> &devices)
{
size_t count = 0;
auto rx = CudaThreads(devices, Algorithm::RX_0);
auto wow = CudaThreads(devices, Algorithm::RX_WOW);
auto arq = CudaThreads(devices, Algorithm::RX_ARQ);
if (!threads.isExist(Algorithm::RX_WOW) && wow != rx) {
count += threads.move("rx/wow", std::move(wow));
}
if (!threads.isExist(Algorithm::RX_ARQ) && arq != rx) {
count += threads.move("rx/arq", std::move(arq));
}
count += threads.move("rx", std::move(rx));
return count;
}
#endif
} /* namespace xmrig */
#endif /* XMRIG_CUDACONFIG_GEN_H */

View file

@@ -0,0 +1,51 @@
/* XMRig
* Copyright 2010 Jeff Garzik <jgarzik@pobox.com>
* Copyright 2012-2014 pooler <pooler@litecoinpool.org>
* Copyright 2014 Lucas Jones <https://github.com/lucasjones>
* Copyright 2014-2016 Wolf9466 <https://github.com/OhGodAPet>
* Copyright 2016 Jay D Dee <jayddee246@gmail.com>
* Copyright 2017-2018 XMR-Stak <https://github.com/fireice-uk>, <https://github.com/psychocrypt>
* Copyright 2018 Lee Clagett <https://github.com/vtnerd>
* Copyright 2018-2019 SChernykh <https://github.com/SChernykh>
* Copyright 2016-2019 XMRig <https://github.com/xmrig>, <support@xmrig.com>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "backend/cuda/CudaLaunchData.h"
#include "backend/common/Tags.h"
xmrig::CudaLaunchData::CudaLaunchData(const Miner *miner, const Algorithm &algorithm, const CudaThread &thread, const CudaDevice &device) :
algorithm(algorithm),
miner(miner),
device(device),
thread(thread)
{
}
bool xmrig::CudaLaunchData::isEqual(const CudaLaunchData &other) const
{
return (other.algorithm.family() == algorithm.family() &&
other.algorithm.l3() == algorithm.l3() &&
other.thread == thread);
}
const char *xmrig::CudaLaunchData::tag()
{
return cuda_tag();
}

View file

@@ -0,0 +1,66 @@
/* XMRig
* Copyright 2010 Jeff Garzik <jgarzik@pobox.com>
* Copyright 2012-2014 pooler <pooler@litecoinpool.org>
* Copyright 2014 Lucas Jones <https://github.com/lucasjones>
* Copyright 2014-2016 Wolf9466 <https://github.com/OhGodAPet>
* Copyright 2016 Jay D Dee <jayddee246@gmail.com>
* Copyright 2017-2018 XMR-Stak <https://github.com/fireice-uk>, <https://github.com/psychocrypt>
* Copyright 2018 Lee Clagett <https://github.com/vtnerd>
* Copyright 2018-2019 SChernykh <https://github.com/SChernykh>
* Copyright 2016-2019 XMRig <https://github.com/xmrig>, <support@xmrig.com>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef XMRIG_CUDALAUNCHDATA_H
#define XMRIG_CUDALAUNCHDATA_H
#include "backend/cuda/CudaThread.h"
#include "crypto/common/Algorithm.h"
#include "crypto/common/Nonce.h"
namespace xmrig {
class CudaDevice;
class Miner;
class CudaLaunchData
{
public:
CudaLaunchData(const Miner *miner, const Algorithm &algorithm, const CudaThread &thread, const CudaDevice &device);
bool isEqual(const CudaLaunchData &other) const;
inline constexpr static Nonce::Backend backend() { return Nonce::CUDA; }
inline bool operator!=(const CudaLaunchData &other) const { return !isEqual(other); }
inline bool operator==(const CudaLaunchData &other) const { return isEqual(other); }
static const char *tag();
const Algorithm algorithm;
const Miner *miner;
const CudaDevice &device;
const CudaThread thread;
};
} // namespace xmrig
#endif /* XMRIG_CUDALAUNCHDATA_H */

View file

@@ -0,0 +1,99 @@
/* XMRig
* Copyright 2010 Jeff Garzik <jgarzik@pobox.com>
* Copyright 2012-2014 pooler <pooler@litecoinpool.org>
* Copyright 2014 Lucas Jones <https://github.com/lucasjones>
* Copyright 2014-2016 Wolf9466 <https://github.com/OhGodAPet>
* Copyright 2016 Jay D Dee <jayddee246@gmail.com>
* Copyright 2017-2018 XMR-Stak <https://github.com/fireice-uk>, <https://github.com/psychocrypt>
* Copyright 2018-2019 SChernykh <https://github.com/SChernykh>
* Copyright 2016-2019 XMRig <https://github.com/xmrig>, <support@xmrig.com>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "backend/cuda/CudaThread.h"
#include "backend/cuda/wrappers/CudaLib.h"
#include "base/io/json/Json.h"
#include "rapidjson/document.h"
#include <algorithm>
namespace xmrig {
static const char *kAffinity = "affinity";
static const char *kBFactor = "bfactor";
static const char *kBlocks = "blocks";
static const char *kBSleep = "bsleep";
static const char *kIndex = "index";
static const char *kThreads = "threads";
} // namespace xmrig
xmrig::CudaThread::CudaThread(const rapidjson::Value &value)
{
if (!value.IsObject()) {
return;
}
m_index = Json::getUint(value, kIndex);
m_threads = Json::getInt(value, kThreads);
m_blocks = Json::getInt(value, kBlocks);
m_bfactor = std::min(Json::getUint(value, kBFactor, m_bfactor), 12u);
m_bsleep = Json::getUint(value, kBSleep, m_bsleep);
m_affinity = Json::getUint64(value, kAffinity, m_affinity);
}
xmrig::CudaThread::CudaThread(uint32_t index, nvid_ctx *ctx) :
m_blocks(CudaLib::deviceInt(ctx, CudaLib::DeviceBlocks)),
m_threads(CudaLib::deviceInt(ctx, CudaLib::DeviceThreads)),
m_index(index),
m_bfactor(CudaLib::deviceUint(ctx, CudaLib::DeviceBFactor)),
m_bsleep(CudaLib::deviceUint(ctx, CudaLib::DeviceBSleep))
{
}
bool xmrig::CudaThread::isEqual(const CudaThread &other) const
{
return m_blocks == other.m_blocks &&
m_threads == other.m_threads &&
m_affinity == other.m_affinity &&
m_index == other.m_index &&
m_bfactor == other.m_bfactor &&
m_bsleep == other.m_bsleep;
}
rapidjson::Value xmrig::CudaThread::toJSON(rapidjson::Document &doc) const
{
using namespace rapidjson;
auto &allocator = doc.GetAllocator();
Value out(kObjectType);
out.AddMember(StringRef(kIndex), index(), allocator);
out.AddMember(StringRef(kThreads), threads(), allocator);
out.AddMember(StringRef(kBlocks), blocks(), allocator);
out.AddMember(StringRef(kBFactor), bfactor(), allocator);
out.AddMember(StringRef(kBSleep), bsleep(), allocator);
out.AddMember(StringRef(kAffinity), affinity(), allocator);
return out;
}
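Note: when a thread is read back from JSON the bfactor value is clamped to 12 via std::min above, so an illustrative configured value of 15 would be stored as 12; blocks and threads have no such cap, and a thread only counts as valid when both are positive (see isValid() in CudaThread.h below).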

View file

@@ -0,0 +1,79 @@
/* XMRig
* Copyright 2010 Jeff Garzik <jgarzik@pobox.com>
* Copyright 2012-2014 pooler <pooler@litecoinpool.org>
* Copyright 2014 Lucas Jones <https://github.com/lucasjones>
* Copyright 2014-2016 Wolf9466 <https://github.com/OhGodAPet>
* Copyright 2016 Jay D Dee <jayddee246@gmail.com>
* Copyright 2017-2018 XMR-Stak <https://github.com/fireice-uk>, <https://github.com/psychocrypt>
* Copyright 2018-2019 SChernykh <https://github.com/SChernykh>
* Copyright 2016-2019 XMRig <https://github.com/xmrig>, <support@xmrig.com>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef XMRIG_CUDATHREAD_H
#define XMRIG_CUDATHREAD_H
using nvid_ctx = struct nvid_ctx;
#include "crypto/common/Algorithm.h"
#include "rapidjson/fwd.h"
namespace xmrig {
class CudaThread
{
public:
CudaThread() = delete;
CudaThread(const rapidjson::Value &value);
CudaThread(uint32_t index, nvid_ctx *ctx);
inline bool isValid() const { return m_blocks > 0 && m_threads > 0; }
inline int32_t bfactor() const { return static_cast<int32_t>(m_bfactor); }
inline int32_t blocks() const { return m_blocks; }
inline int32_t bsleep() const { return static_cast<int32_t>(m_bsleep); }
inline int32_t threads() const { return m_threads; }
inline int64_t affinity() const { return m_affinity; }
inline uint32_t index() const { return m_index; }
inline bool operator!=(const CudaThread &other) const { return !isEqual(other); }
inline bool operator==(const CudaThread &other) const { return isEqual(other); }
bool isEqual(const CudaThread &other) const;
rapidjson::Value toJSON(rapidjson::Document &doc) const;
private:
int32_t m_blocks = 0;
int32_t m_threads = 0;
int64_t m_affinity = -1;
uint32_t m_index = 0;
# ifdef _WIN32
uint32_t m_bfactor = 6;
uint32_t m_bsleep = 25;
# else
uint32_t m_bfactor = 0;
uint32_t m_bsleep = 0;
# endif
};
} /* namespace xmrig */
#endif /* XMRIG_CUDATHREAD_H */

View file

@@ -0,0 +1,79 @@
/* XMRig
* Copyright 2010 Jeff Garzik <jgarzik@pobox.com>
* Copyright 2012-2014 pooler <pooler@litecoinpool.org>
* Copyright 2014 Lucas Jones <https://github.com/lucasjones>
* Copyright 2014-2016 Wolf9466 <https://github.com/OhGodAPet>
* Copyright 2016 Jay D Dee <jayddee246@gmail.com>
* Copyright 2017-2018 XMR-Stak <https://github.com/fireice-uk>, <https://github.com/psychocrypt>
* Copyright 2018-2019 SChernykh <https://github.com/SChernykh>
* Copyright 2016-2019 XMRig <https://github.com/xmrig>, <support@xmrig.com>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "backend/cuda/CudaThreads.h"
#include "base/io/json/Json.h"
#include "rapidjson/document.h"
#include <algorithm>
xmrig::CudaThreads::CudaThreads(const rapidjson::Value &value)
{
if (value.IsArray()) {
for (auto &v : value.GetArray()) {
CudaThread thread(v);
if (thread.isValid()) {
add(std::move(thread));
}
}
}
}
xmrig::CudaThreads::CudaThreads(const std::vector<CudaDevice> &devices, const Algorithm &algorithm)
{
for (const auto &device : devices) {
device.generate(algorithm, *this);
}
}
bool xmrig::CudaThreads::isEqual(const CudaThreads &other) const
{
if (isEmpty() && other.isEmpty()) {
return true;
}
return count() == other.count() && std::equal(m_data.begin(), m_data.end(), other.m_data.begin());
}
rapidjson::Value xmrig::CudaThreads::toJSON(rapidjson::Document &doc) const
{
using namespace rapidjson;
auto &allocator = doc.GetAllocator();
Value out(kArrayType);
out.SetArray();
for (const CudaThread &thread : m_data) {
out.PushBack(thread.toJSON(doc), allocator);
}
return out;
}

View file

@@ -0,0 +1,66 @@
/* XMRig
* Copyright 2010 Jeff Garzik <jgarzik@pobox.com>
* Copyright 2012-2014 pooler <pooler@litecoinpool.org>
* Copyright 2014 Lucas Jones <https://github.com/lucasjones>
* Copyright 2014-2016 Wolf9466 <https://github.com/OhGodAPet>
* Copyright 2016 Jay D Dee <jayddee246@gmail.com>
* Copyright 2017-2018 XMR-Stak <https://github.com/fireice-uk>, <https://github.com/psychocrypt>
* Copyright 2018-2019 SChernykh <https://github.com/SChernykh>
* Copyright 2016-2019 XMRig <https://github.com/xmrig>, <support@xmrig.com>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef XMRIG_CUDATHREADS_H
#define XMRIG_CUDATHREADS_H
#include <vector>
#include "backend/cuda/CudaThread.h"
#include "backend/cuda/wrappers/CudaDevice.h"
namespace xmrig {
class CudaThreads
{
public:
CudaThreads() = default;
CudaThreads(const rapidjson::Value &value);
CudaThreads(const std::vector<CudaDevice> &devices, const Algorithm &algorithm);
inline bool isEmpty() const { return m_data.empty(); }
inline const std::vector<CudaThread> &data() const { return m_data; }
inline size_t count() const { return m_data.size(); }
inline void add(CudaThread &&thread) { m_data.push_back(thread); }
inline void reserve(size_t capacity) { m_data.reserve(capacity); }
inline bool operator!=(const CudaThreads &other) const { return !isEqual(other); }
inline bool operator==(const CudaThreads &other) const { return isEqual(other); }
bool isEqual(const CudaThreads &other) const;
rapidjson::Value toJSON(rapidjson::Document &doc) const;
private:
std::vector<CudaThread> m_data;
};
} /* namespace xmrig */
#endif /* XMRIG_CUDATHREADS_H */

View file

@@ -0,0 +1,171 @@
/* XMRig
* Copyright 2010 Jeff Garzik <jgarzik@pobox.com>
* Copyright 2012-2014 pooler <pooler@litecoinpool.org>
* Copyright 2014 Lucas Jones <https://github.com/lucasjones>
* Copyright 2014-2016 Wolf9466 <https://github.com/OhGodAPet>
* Copyright 2016 Jay D Dee <jayddee246@gmail.com>
* Copyright 2017-2018 XMR-Stak <https://github.com/fireice-uk>, <https://github.com/psychocrypt>
* Copyright 2018 Lee Clagett <https://github.com/vtnerd>
* Copyright 2018-2019 SChernykh <https://github.com/SChernykh>
* Copyright 2016-2019 XMRig <https://github.com/xmrig>, <support@xmrig.com>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "backend/cuda/CudaWorker.h"
#include "backend/common/Tags.h"
#include "backend/cuda/runners/CudaCnRunner.h"
#include "base/io/log/Log.h"
#include "base/tools/Chrono.h"
#include "core/Miner.h"
#include "crypto/common/Nonce.h"
#include "net/JobResults.h"
#ifdef XMRIG_ALGO_RANDOMX
# include "backend/cuda/runners/CudaRxRunner.h"
#endif
#include <cassert>
#include <thread>
namespace xmrig {
static constexpr uint32_t kReserveCount = 32768;
std::atomic<bool> CudaWorker::ready;
static inline bool isReady() { return !Nonce::isPaused() && CudaWorker::ready; }
static inline uint32_t roundSize(uint32_t intensity) { return kReserveCount / intensity + 1; }
} // namespace xmrig
xmrig::CudaWorker::CudaWorker(size_t id, const CudaLaunchData &data) :
Worker(id, data.thread.affinity(), -1),
m_algorithm(data.algorithm),
m_miner(data.miner)
{
switch (m_algorithm.family()) {
case Algorithm::RANDOM_X:
# ifdef XMRIG_ALGO_RANDOMX
m_runner = new CudaRxRunner(id, data);
# endif
break;
case Algorithm::ARGON2:
break;
default:
m_runner = new CudaCnRunner(id, data);
break;
}
if (!m_runner || !m_runner->init()) {
return;
}
}
xmrig::CudaWorker::~CudaWorker()
{
delete m_runner;
}
bool xmrig::CudaWorker::selfTest()
{
return m_runner != nullptr;
}
size_t xmrig::CudaWorker::intensity() const
{
return m_runner ? m_runner->intensity() : 0;
}
void xmrig::CudaWorker::start()
{
while (Nonce::sequence(Nonce::CUDA) > 0) {
if (!isReady()) {
do {
std::this_thread::sleep_for(std::chrono::milliseconds(200));
}
while (!isReady() && Nonce::sequence(Nonce::CUDA) > 0);
if (Nonce::sequence(Nonce::CUDA) == 0) {
break;
}
if (!consumeJob()) {
return;
}
}
while (!Nonce::isOutdated(Nonce::CUDA, m_job.sequence())) {
uint32_t foundNonce[10] = { 0 };
uint32_t foundCount = 0;
if (!m_runner->run(*m_job.nonce(), &foundCount, foundNonce)) {
return;
}
if (foundCount) {
JobResults::submit(m_job.currentJob(), foundNonce, foundCount);
}
const size_t batch_size = intensity();
m_job.nextRound(roundSize(batch_size), batch_size);
storeStats();
std::this_thread::yield();
}
if (!consumeJob()) {
return;
}
}
}
bool xmrig::CudaWorker::consumeJob()
{
if (Nonce::sequence(Nonce::CUDA) == 0) {
return false;
}
const size_t batch_size = intensity();
m_job.add(m_miner->job(), Nonce::sequence(Nonce::CUDA), roundSize(batch_size) * batch_size);
return m_runner->set(m_job.currentJob(), m_job.blob());
}
void xmrig::CudaWorker::storeStats()
{
if (!isReady()) {
return;
}
m_count += intensity();
Worker::storeStats();
}
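A small sketch of the arithmetic behind consumeJob() above: each GPU launch covers intensity() nonces, and the job reservation is roundSize(intensity) * intensity, i.e. the 32768-nonce kReserveCount rounded up to a whole number of launches. The intensity figure used here (40 blocks x 32 threads) is an assumed example, not a value taken from this commit.

#include <cstdint>
#include <cstdio>

int main() {
    constexpr uint32_t kReserveCount = 32768;                          // as defined in CudaWorker.cpp
    const uint32_t intensity = 40 * 32;                                // blocks * threads, assumed example
    const uint32_t rounds    = kReserveCount / intensity + 1;          // roundSize(intensity) -> 26
    std::printf("nonces reserved per job: %u\n", rounds * intensity);  // 26 * 1280 = 33280
    return 0;
}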

View file

@@ -0,0 +1,73 @@
/* XMRig
* Copyright 2010 Jeff Garzik <jgarzik@pobox.com>
* Copyright 2012-2014 pooler <pooler@litecoinpool.org>
* Copyright 2014 Lucas Jones <https://github.com/lucasjones>
* Copyright 2014-2016 Wolf9466 <https://github.com/OhGodAPet>
* Copyright 2016 Jay D Dee <jayddee246@gmail.com>
* Copyright 2017-2018 XMR-Stak <https://github.com/fireice-uk>, <https://github.com/psychocrypt>
* Copyright 2018 Lee Clagett <https://github.com/vtnerd>
* Copyright 2018-2019 SChernykh <https://github.com/SChernykh>
* Copyright 2016-2019 XMRig <https://github.com/xmrig>, <support@xmrig.com>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef XMRIG_CUDAWORKER_H
#define XMRIG_CUDAWORKER_H
#include "backend/common/Worker.h"
#include "backend/common/WorkerJob.h"
#include "backend/cuda/CudaLaunchData.h"
#include "base/tools/Object.h"
#include "net/JobResult.h"
namespace xmrig {
class ICudaRunner;
class CudaWorker : public Worker
{
public:
XMRIG_DISABLE_COPY_MOVE_DEFAULT(CudaWorker)
CudaWorker(size_t id, const CudaLaunchData &data);
~CudaWorker() override;
static std::atomic<bool> ready;
protected:
bool selfTest() override;
size_t intensity() const override;
void start() override;
private:
bool consumeJob();
void storeStats();
const Algorithm m_algorithm;
const Miner *m_miner;
ICudaRunner *m_runner = nullptr;
WorkerJob<1> m_job;
};
} // namespace xmrig
#endif /* XMRIG_CUDAWORKER_H */

View file

@@ -0,0 +1,38 @@
if (WITH_CUDA)
add_definitions(/DXMRIG_FEATURE_CUDA)
set(HEADERS_BACKEND_CUDA
src/backend/cuda/CudaBackend.h
src/backend/cuda/CudaConfig_gen.h
src/backend/cuda/CudaConfig.h
src/backend/cuda/CudaLaunchData.h
src/backend/cuda/CudaThread.h
src/backend/cuda/CudaThreads.h
src/backend/cuda/CudaWorker.h
src/backend/cuda/interfaces/ICudaRunner.h
src/backend/cuda/runners/CudaBaseRunner.h
src/backend/cuda/runners/CudaCnRunner.h
src/backend/cuda/runners/CudaRxRunner.h
src/backend/cuda/wrappers/CudaDevice.h
src/backend/cuda/wrappers/CudaLib.h
)
set(SOURCES_BACKEND_CUDA
src/backend/cuda/CudaBackend.cpp
src/backend/cuda/CudaConfig.cpp
src/backend/cuda/CudaLaunchData.cpp
src/backend/cuda/CudaThread.cpp
src/backend/cuda/CudaThreads.cpp
src/backend/cuda/CudaWorker.cpp
src/backend/cuda/runners/CudaBaseRunner.cpp
src/backend/cuda/runners/CudaCnRunner.cpp
src/backend/cuda/runners/CudaRxRunner.cpp
src/backend/cuda/wrappers/CudaDevice.cpp
src/backend/cuda/wrappers/CudaLib.cpp
)
else()
remove_definitions(/DXMRIG_FEATURE_CUDA)
set(HEADERS_BACKEND_CUDA "")
set(SOURCES_BACKEND_CUDA "")
endif()

View file

@@ -0,0 +1,71 @@
/* XMRig
* Copyright 2010 Jeff Garzik <jgarzik@pobox.com>
* Copyright 2012-2014 pooler <pooler@litecoinpool.org>
* Copyright 2014 Lucas Jones <https://github.com/lucasjones>
* Copyright 2014-2016 Wolf9466 <https://github.com/OhGodAPet>
* Copyright 2016 Jay D Dee <jayddee246@gmail.com>
* Copyright 2017-2018 XMR-Stak <https://github.com/fireice-uk>, <https://github.com/psychocrypt>
* Copyright 2018-2019 SChernykh <https://github.com/SChernykh>
* Copyright 2016-2019 XMRig <https://github.com/xmrig>, <support@xmrig.com>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef XMRIG_ICUDARUNNER_H
#define XMRIG_ICUDARUNNER_H
#include "base/tools/Object.h"
#include <cstdint>
namespace xmrig {
class Job;
class ICudaRunner
{
public:
XMRIG_DISABLE_COPY_MOVE(ICudaRunner)
ICudaRunner() = default;
virtual ~ICudaRunner() = default;
// virtual cl_context ctx() const = 0;
// virtual const Algorithm &algorithm() const = 0;
// virtual const char *buildOptions() const = 0;
// virtual const char *deviceKey() const = 0;
// virtual const char *source() const = 0;
// virtual const OclLaunchData &data() const = 0;
virtual size_t intensity() const = 0;
// virtual size_t threadId() const = 0;
// virtual uint32_t deviceIndex() const = 0;
// virtual void build() = 0;
virtual bool init() = 0;
virtual bool run(uint32_t startNonce, uint32_t *rescount, uint32_t *resnonce) = 0;
virtual bool set(const Job &job, uint8_t *blob) = 0;
protected:
// virtual size_t bufferSize() const = 0;
};
} /* namespace xmrig */
#endif // XMRIG_ICUDARUNNER_H

View file

@@ -0,0 +1,83 @@
/* XMRig
* Copyright 2010 Jeff Garzik <jgarzik@pobox.com>
* Copyright 2012-2014 pooler <pooler@litecoinpool.org>
* Copyright 2014 Lucas Jones <https://github.com/lucasjones>
* Copyright 2014-2016 Wolf9466 <https://github.com/OhGodAPet>
* Copyright 2016 Jay D Dee <jayddee246@gmail.com>
* Copyright 2017-2018 XMR-Stak <https://github.com/fireice-uk>, <https://github.com/psychocrypt>
* Copyright 2018-2019 SChernykh <https://github.com/SChernykh>
* Copyright 2016-2019 XMRig <https://github.com/xmrig>, <support@xmrig.com>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "backend/cuda/runners/CudaBaseRunner.h"
#include "backend/cuda/wrappers/CudaLib.h"
#include "backend/cuda/CudaLaunchData.h"
#include "backend/common/Tags.h"
#include "base/io/log/Log.h"
#include "base/net/stratum/Job.h"
xmrig::CudaBaseRunner::CudaBaseRunner(size_t id, const CudaLaunchData &data) :
m_data(data),
m_threadId(id)
{
}
xmrig::CudaBaseRunner::~CudaBaseRunner()
{
CudaLib::release(m_ctx);
}
bool xmrig::CudaBaseRunner::init()
{
m_ctx = CudaLib::alloc(m_data.thread.index(), m_data.thread.bfactor(), m_data.thread.bsleep());
if (CudaLib::deviceInfo(m_ctx, m_data.thread.blocks(), m_data.thread.threads(), m_data.algorithm) != 0) {
return false;
}
return callWrapper(CudaLib::deviceInit(m_ctx));
}
bool xmrig::CudaBaseRunner::set(const Job &job, uint8_t *blob)
{
m_height = job.height();
m_target = job.target();
return callWrapper(CudaLib::setJob(m_ctx, blob, job.size(), job.algorithm()));
}
size_t xmrig::CudaBaseRunner::intensity() const
{
return m_data.thread.threads() * m_data.thread.blocks();
}
bool xmrig::CudaBaseRunner::callWrapper(bool result) const
{
if (!result) {
const char *error = CudaLib::lastError(m_ctx);
if (error) {
LOG_ERR("%s" RED_S " thread " RED_BOLD("#%zu") RED_S " failed with error " RED_BOLD("%s"), cuda_tag(), m_threadId, error);
}
}
return result;
}
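Note: every CudaLib call that can fail is funnelled through callWrapper(), so a device-side failure becomes a tagged log line carrying the thread id and the text from CudaLib::lastError() instead of propagating further; intensity() here is simply threads * blocks from the launch data.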

View file

@@ -0,0 +1,68 @@
/* XMRig
* Copyright 2010 Jeff Garzik <jgarzik@pobox.com>
* Copyright 2012-2014 pooler <pooler@litecoinpool.org>
* Copyright 2014 Lucas Jones <https://github.com/lucasjones>
* Copyright 2014-2016 Wolf9466 <https://github.com/OhGodAPet>
* Copyright 2016 Jay D Dee <jayddee246@gmail.com>
* Copyright 2017-2018 XMR-Stak <https://github.com/fireice-uk>, <https://github.com/psychocrypt>
* Copyright 2018-2019 SChernykh <https://github.com/SChernykh>
* Copyright 2016-2019 XMRig <https://github.com/xmrig>, <support@xmrig.com>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef XMRIG_CUDABASERUNNER_H
#define XMRIG_CUDABASERUNNER_H
#include "backend/cuda/interfaces/ICudaRunner.h"
using nvid_ctx = struct nvid_ctx;
namespace xmrig {
class CudaLaunchData;
class CudaBaseRunner : public ICudaRunner
{
public:
XMRIG_DISABLE_COPY_MOVE_DEFAULT(CudaBaseRunner)
CudaBaseRunner(size_t id, const CudaLaunchData &data);
~CudaBaseRunner() override;
protected:
bool init() override;
bool set(const Job &job, uint8_t *blob) override;
size_t intensity() const override;
protected:
bool callWrapper(bool result) const;
const CudaLaunchData &m_data;
const size_t m_threadId;
nvid_ctx *m_ctx = nullptr;
uint64_t m_height = 0;
uint64_t m_target = 0;
};
} /* namespace xmrig */
#endif // XMRIG_CUDABASERUNNER_H

View file

@@ -0,0 +1,38 @@
/* XMRig
* Copyright 2010 Jeff Garzik <jgarzik@pobox.com>
* Copyright 2012-2014 pooler <pooler@litecoinpool.org>
* Copyright 2014 Lucas Jones <https://github.com/lucasjones>
* Copyright 2014-2016 Wolf9466 <https://github.com/OhGodAPet>
* Copyright 2016 Jay D Dee <jayddee246@gmail.com>
* Copyright 2017-2018 XMR-Stak <https://github.com/fireice-uk>, <https://github.com/psychocrypt>
* Copyright 2018-2019 SChernykh <https://github.com/SChernykh>
* Copyright 2016-2019 XMRig <https://github.com/xmrig>, <support@xmrig.com>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "backend/cuda/runners/CudaCnRunner.h"
#include "backend/cuda/wrappers/CudaLib.h"
xmrig::CudaCnRunner::CudaCnRunner(size_t index, const CudaLaunchData &data) : CudaBaseRunner(index, data)
{
}
bool xmrig::CudaCnRunner::run(uint32_t startNonce, uint32_t *rescount, uint32_t *resnonce)
{
return callWrapper(CudaLib::cnHash(m_ctx, startNonce, m_height, m_target, rescount, resnonce));
}

View file

@@ -0,0 +1,48 @@
/* XMRig
* Copyright 2010 Jeff Garzik <jgarzik@pobox.com>
* Copyright 2012-2014 pooler <pooler@litecoinpool.org>
* Copyright 2014 Lucas Jones <https://github.com/lucasjones>
* Copyright 2014-2016 Wolf9466 <https://github.com/OhGodAPet>
* Copyright 2016 Jay D Dee <jayddee246@gmail.com>
* Copyright 2017-2018 XMR-Stak <https://github.com/fireice-uk>, <https://github.com/psychocrypt>
* Copyright 2018-2019 SChernykh <https://github.com/SChernykh>
* Copyright 2016-2019 XMRig <https://github.com/xmrig>, <support@xmrig.com>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef XMRIG_CUDACNRUNNER_H
#define XMRIG_CUDACNRUNNER_H
#include "backend/cuda/runners/CudaBaseRunner.h"
namespace xmrig {
class CudaCnRunner : public CudaBaseRunner
{
public:
CudaCnRunner(size_t index, const CudaLaunchData &data);
protected:
bool run(uint32_t startNonce, uint32_t *rescount, uint32_t *resnonce) override;
};
} /* namespace xmrig */
#endif // XMRIG_CUDACNRUNNER_H

View file

@@ -0,0 +1,65 @@
/* XMRig
* Copyright 2010 Jeff Garzik <jgarzik@pobox.com>
* Copyright 2012-2014 pooler <pooler@litecoinpool.org>
* Copyright 2014 Lucas Jones <https://github.com/lucasjones>
* Copyright 2014-2016 Wolf9466 <https://github.com/OhGodAPet>
* Copyright 2016 Jay D Dee <jayddee246@gmail.com>
* Copyright 2017-2018 XMR-Stak <https://github.com/fireice-uk>, <https://github.com/psychocrypt>
* Copyright 2018-2019 SChernykh <https://github.com/SChernykh>
* Copyright 2016-2019 XMRig <https://github.com/xmrig>, <support@xmrig.com>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "backend/cuda/runners/CudaRxRunner.h"
#include "backend/cuda/CudaLaunchData.h"
#include "backend/cuda/wrappers/CudaLib.h"
#include "base/net/stratum/Job.h"
#include "crypto/rx/Rx.h"
#include "crypto/rx/RxDataset.h"
xmrig::CudaRxRunner::CudaRxRunner(size_t index, const CudaLaunchData &data) : CudaBaseRunner(index, data)
{
m_intensity = m_data.thread.threads() * m_data.thread.blocks();
const size_t scratchpads_size = m_intensity * m_data.algorithm.l3();
const size_t num_scratchpads = scratchpads_size / m_data.algorithm.l3();
if (m_intensity > num_scratchpads) {
m_intensity = num_scratchpads;
}
m_intensity -= m_intensity % 32;
}
bool xmrig::CudaRxRunner::run(uint32_t startNonce, uint32_t *rescount, uint32_t *resnonce)
{
return callWrapper(CudaLib::rxHash(m_ctx, startNonce, m_target, rescount, resnonce));
}
bool xmrig::CudaRxRunner::set(const Job &job, uint8_t *blob)
{
const bool rc = CudaBaseRunner::set(job, blob);
if (!rc || m_ready) {
return rc;
}
auto dataset = Rx::dataset(job, 0);
m_ready = callWrapper(CudaLib::rxPrepare(m_ctx, dataset->raw(), dataset->size(false), m_intensity));
return m_ready;
}
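
A note on the constructor above: num_scratchpads is computed by multiplying and then dividing by the same l3() value, so the clamp can never change m_intensity as written; the effective behaviour is simply to take threads() * blocks() and round it down to a multiple of 32. A small standalone sketch of that rounding, with example numbers chosen only for illustration:

// Hedged sketch of the intensity rounding used by CudaRxRunner:
// intensity = threads * blocks, rounded down to a multiple of 32.
#include <cstddef>
#include <cstdio>

static size_t roundedIntensity(size_t threads, size_t blocks)
{
    size_t intensity = threads * blocks;
    intensity -= intensity % 32;        // keep whole warps only
    return intensity;
}

int main()
{
    // Example values only; real values come from CudaLaunchData.
    std::printf("%zu\n", roundedIntensity(32, 30));  // 960 stays 960
    std::printf("%zu\n", roundedIntensity(50, 19));  // 950 becomes 928
    return 0;
}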

View file

@ -0,0 +1,55 @@
/* XMRig
* Copyright 2010 Jeff Garzik <jgarzik@pobox.com>
* Copyright 2012-2014 pooler <pooler@litecoinpool.org>
* Copyright 2014 Lucas Jones <https://github.com/lucasjones>
* Copyright 2014-2016 Wolf9466 <https://github.com/OhGodAPet>
* Copyright 2016 Jay D Dee <jayddee246@gmail.com>
* Copyright 2017-2018 XMR-Stak <https://github.com/fireice-uk>, <https://github.com/psychocrypt>
* Copyright 2018-2019 SChernykh <https://github.com/SChernykh>
* Copyright 2016-2019 XMRig <https://github.com/xmrig>, <support@xmrig.com>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef XMRIG_CUDARXRUNNER_H
#define XMRIG_CUDARXRUNNER_H
#include "backend/cuda/runners/CudaBaseRunner.h"
namespace xmrig {
class CudaRxRunner : public CudaBaseRunner
{
public:
CudaRxRunner(size_t index, const CudaLaunchData &data);
protected:
inline size_t intensity() const override { return m_intensity; }
bool run(uint32_t startNonce, uint32_t *rescount, uint32_t *resnonce) override;
bool set(const Job &job, uint8_t *blob) override;
private:
bool m_ready = false;
size_t m_intensity = 0;
};
} /* namespace xmrig */
#endif // XMRIG_CUDARXRUNNER_H

View file

@ -0,0 +1,129 @@
/* XMRig
* Copyright 2010 Jeff Garzik <jgarzik@pobox.com>
* Copyright 2012-2014 pooler <pooler@litecoinpool.org>
* Copyright 2014 Lucas Jones <https://github.com/lucasjones>
* Copyright 2014-2016 Wolf9466 <https://github.com/OhGodAPet>
* Copyright 2016 Jay D Dee <jayddee246@gmail.com>
* Copyright 2017-2018 XMR-Stak <https://github.com/fireice-uk>, <https://github.com/psychocrypt>
* Copyright 2018-2019 SChernykh <https://github.com/SChernykh>
* Copyright 2016-2019 XMRig <https://github.com/xmrig>, <support@xmrig.com>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "backend/cuda/wrappers/CudaDevice.h"
#include "backend/cuda/CudaThreads.h"
#include "backend/cuda/wrappers/CudaLib.h"
#include "base/io/log/Log.h"
#include "crypto/common/Algorithm.h"
#include "rapidjson/document.h"
#include <algorithm>
xmrig::CudaDevice::CudaDevice(uint32_t index, int32_t bfactor, int32_t bsleep) :
m_index(index)
{
auto ctx = CudaLib::alloc(index, bfactor, bsleep);
if (CudaLib::deviceInfo(ctx, 0, 0, Algorithm::INVALID) != 0) {
CudaLib::release(ctx);
return;
}
m_ctx = ctx;
m_name = CudaLib::deviceName(ctx);
m_topology = PciTopology(CudaLib::deviceUint(ctx, CudaLib::DevicePciBusID), CudaLib::deviceUint(ctx, CudaLib::DevicePciDeviceID), 0);
}
xmrig::CudaDevice::CudaDevice(CudaDevice &&other) noexcept :
m_index(other.m_index),
m_ctx(other.m_ctx),
m_topology(other.m_topology),
m_name(std::move(other.m_name))
{
other.m_ctx = nullptr;
}
xmrig::CudaDevice::~CudaDevice()
{
CudaLib::release(m_ctx);
}
size_t xmrig::CudaDevice::freeMemSize() const
{
return CudaLib::deviceUlong(m_ctx, CudaLib::DeviceMemoryFree);
}
size_t xmrig::CudaDevice::globalMemSize() const
{
return CudaLib::deviceUlong(m_ctx, CudaLib::DeviceMemoryTotal);
}
uint32_t xmrig::CudaDevice::clock() const
{
return CudaLib::deviceUint(m_ctx, CudaLib::DeviceClockRate) / 1000;
}
uint32_t xmrig::CudaDevice::computeCapability(bool major) const
{
return CudaLib::deviceUint(m_ctx, major ? CudaLib::DeviceArchMajor : CudaLib::DeviceArchMinor);
}
uint32_t xmrig::CudaDevice::memoryClock() const
{
return CudaLib::deviceUint(m_ctx, CudaLib::DeviceMemoryClockRate) / 1000;
}
uint32_t xmrig::CudaDevice::smx() const
{
return CudaLib::deviceUint(m_ctx, CudaLib::DeviceSmx);
}
void xmrig::CudaDevice::generate(const Algorithm &algorithm, CudaThreads &threads) const
{
if (CudaLib::deviceInfo(m_ctx, -1, -1, algorithm) != 0) {
return;
}
threads.add(CudaThread(m_index, m_ctx));
}
#ifdef XMRIG_FEATURE_API
void xmrig::CudaDevice::toJSON(rapidjson::Value &out, rapidjson::Document &doc) const
{
using namespace rapidjson;
auto &allocator = doc.GetAllocator();
out.AddMember("name", name().toJSON(doc), allocator);
out.AddMember("bus_id", topology().toString().toJSON(doc), allocator);
out.AddMember("smx", smx(), allocator);
out.AddMember("arch", arch(), allocator);
out.AddMember("global_mem", static_cast<uint64_t>(globalMemSize()), allocator);
out.AddMember("clock", clock(), allocator);
out.AddMember("memory_clock", memoryClock(), allocator);
}
#endif
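
Combined with CudaLib::devices(), the class above gives a straightforward enumeration path. A hedged sketch of how a caller could list the detected GPUs using only the accessors introduced in this commit; the bfactor/bsleep arguments are placeholder values, not defaults taken from this diff:

// Hedged sketch: enumerate CUDA devices via the wrappers added in this
// commit and print a few of the properties exposed by CudaDevice.
#include "backend/cuda/wrappers/CudaDevice.h"
#include "backend/cuda/wrappers/CudaLib.h"
#include <cstdio>

void printCudaDevices()
{
    if (!xmrig::CudaLib::init()) {      // loads the xmrig-cuda plugin
        std::printf("CUDA plugin not available: %s\n", xmrig::CudaLib::lastError());
        return;
    }

    // 6 / 25 are placeholder bfactor/bsleep values for illustration.
    for (const auto &device : xmrig::CudaLib::devices(6, 25)) {
        std::printf("#%u %s arch:%u smx:%u %u MHz mem:%zu/%zu MB\n",
                    device.index(),
                    device.name().data(),
                    device.arch(),
                    device.smx(),
                    device.clock(),
                    device.freeMemSize()   / (1024u * 1024u),
                    device.globalMemSize() / (1024u * 1024u));
    }
}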

View file

@ -0,0 +1,84 @@
/* XMRig
* Copyright 2010 Jeff Garzik <jgarzik@pobox.com>
* Copyright 2012-2014 pooler <pooler@litecoinpool.org>
* Copyright 2014 Lucas Jones <https://github.com/lucasjones>
* Copyright 2014-2016 Wolf9466 <https://github.com/OhGodAPet>
* Copyright 2016 Jay D Dee <jayddee246@gmail.com>
* Copyright 2017-2018 XMR-Stak <https://github.com/fireice-uk>, <https://github.com/psychocrypt>
* Copyright 2018-2019 SChernykh <https://github.com/SChernykh>
* Copyright 2016-2019 XMRig <https://github.com/xmrig>, <support@xmrig.com>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef XMRIG_CUDADEVICE_H
#define XMRIG_CUDADEVICE_H
#include "backend/common/misc/PciTopology.h"
#include "base/tools/String.h"
using nvid_ctx = struct nvid_ctx;
namespace xmrig {
class Algorithm;
class CudaThreads;
class CudaDevice
{
public:
CudaDevice() = delete;
CudaDevice(const CudaDevice &other) = delete;
CudaDevice(CudaDevice &&other) noexcept;
CudaDevice(uint32_t index, int32_t bfactor, int32_t bsleep);
~CudaDevice();
size_t freeMemSize() const;
size_t globalMemSize() const;
uint32_t clock() const;
uint32_t computeCapability(bool major = true) const;
uint32_t memoryClock() const;
uint32_t smx() const;
void generate(const Algorithm &algorithm, CudaThreads &threads) const;
inline bool isValid() const { return m_ctx != nullptr; }
inline const PciTopology &topology() const { return m_topology; }
inline const String &name() const { return m_name; }
inline uint32_t arch() const { return (computeCapability(true) * 10) + computeCapability(false); }
inline uint32_t index() const { return m_index; }
# ifdef XMRIG_FEATURE_API
void toJSON(rapidjson::Value &out, rapidjson::Document &doc) const;
# endif
CudaDevice &operator=(const CudaDevice &other) = delete;
CudaDevice &operator=(CudaDevice &&other) = delete;
private:
const uint32_t m_index = 0;
nvid_ctx *m_ctx = nullptr;
PciTopology m_topology;
String m_name;
};
} // namespace xmrig
#endif /* XMRIG_CUDADEVICE_H */

View file

@ -0,0 +1,311 @@
/* XMRig
* Copyright 2010 Jeff Garzik <jgarzik@pobox.com>
* Copyright 2012-2014 pooler <pooler@litecoinpool.org>
* Copyright 2014 Lucas Jones <https://github.com/lucasjones>
* Copyright 2014-2016 Wolf9466 <https://github.com/OhGodAPet>
* Copyright 2016 Jay D Dee <jayddee246@gmail.com>
* Copyright 2017-2018 XMR-Stak <https://github.com/fireice-uk>, <https://github.com/psychocrypt>
* Copyright 2018-2019 SChernykh <https://github.com/SChernykh>
* Copyright 2016-2019 XMRig <https://github.com/xmrig>, <support@xmrig.com>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <stdexcept>
#include <uv.h>
#include "backend/cuda/wrappers/CudaLib.h"
#include "base/io/log/Log.h"
namespace xmrig {
enum Version : uint32_t
{
ApiVersion,
DriverVersion,
RuntimeVersion
};
static uv_lib_t cudaLib;
static const char *kAlloc = "alloc";
static const char *kCnHash = "cnHash";
static const char *kDeviceCount = "deviceCount";
static const char *kDeviceInfo = "deviceInfo";
static const char *kDeviceInit = "deviceInit";
static const char *kDeviceInt = "deviceInt";
static const char *kDeviceName = "deviceName";
static const char *kDeviceUint = "deviceUint";
static const char *kDeviceUlong = "deviceUlong";
static const char *kInit = "init";
static const char *kLastError = "lastError";
static const char *kPluginVersion = "pluginVersion";
static const char *kRelease = "release";
static const char *kRxHash = "rxHash";
static const char *kRxPrepare = "rxPrepare";
static const char *kSetJob = "setJob";
static const char *kSymbolNotFound = "symbol not found";
static const char *kVersion = "version";
using alloc_t = nvid_ctx * (*)(uint32_t, int32_t, int32_t);
using cnHash_t = bool (*)(nvid_ctx *, uint32_t, uint64_t, uint64_t, uint32_t *, uint32_t *);
using deviceCount_t = uint32_t (*)();
using deviceInfo_t = int32_t (*)(nvid_ctx *, int32_t, int32_t, int32_t);
using deviceInit_t = bool (*)(nvid_ctx *);
using deviceInt_t = int32_t (*)(nvid_ctx *, CudaLib::DeviceProperty);
using deviceName_t = const char * (*)(nvid_ctx *);
using deviceUint_t = uint32_t (*)(nvid_ctx *, CudaLib::DeviceProperty);
using deviceUlong_t = uint64_t (*)(nvid_ctx *, CudaLib::DeviceProperty);
using init_t = void (*)();
using lastError_t = const char * (*)(nvid_ctx *);
using pluginVersion_t = const char * (*)();
using release_t = void (*)(nvid_ctx *);
using rxHash_t = bool (*)(nvid_ctx *, uint32_t, uint64_t, uint32_t *, uint32_t *);
using rxPrepare_t = bool (*)(nvid_ctx *, const void *, size_t, uint32_t);
using setJob_t = bool (*)(nvid_ctx *, const void *, size_t, int32_t);
using version_t = uint32_t (*)(Version);
static alloc_t pAlloc = nullptr;
static cnHash_t pCnHash = nullptr;
static deviceCount_t pDeviceCount = nullptr;
static deviceInfo_t pDeviceInfo = nullptr;
static deviceInit_t pDeviceInit = nullptr;
static deviceInt_t pDeviceInt = nullptr;
static deviceName_t pDeviceName = nullptr;
static deviceUint_t pDeviceUint = nullptr;
static deviceUlong_t pDeviceUlong = nullptr;
static init_t pInit = nullptr;
static lastError_t pLastError = nullptr;
static pluginVersion_t pPluginVersion = nullptr;
static release_t pRelease = nullptr;
static rxHash_t pRxHash = nullptr;
static rxPrepare_t pRxPrepare = nullptr;
static setJob_t pSetJob = nullptr;
static version_t pVersion = nullptr;
#define DLSYM(x) if (uv_dlsym(&cudaLib, k##x, reinterpret_cast<void**>(&p##x)) == -1) { throw std::runtime_error(kSymbolNotFound); }
bool CudaLib::m_initialized = false;
bool CudaLib::m_ready = false;
String CudaLib::m_loader;
} // namespace xmrig
bool xmrig::CudaLib::init(const char *fileName)
{
if (!m_initialized) {
m_loader = fileName == nullptr ? defaultLoader() : fileName;
m_ready = uv_dlopen(m_loader, &cudaLib) == 0 && load();
m_initialized = true;
}
return m_ready;
}
const char *xmrig::CudaLib::lastError() noexcept
{
return uv_dlerror(&cudaLib);
}
void xmrig::CudaLib::close()
{
uv_dlclose(&cudaLib);
}
bool xmrig::CudaLib::cnHash(nvid_ctx *ctx, uint32_t startNonce, uint64_t height, uint64_t target, uint32_t *rescount, uint32_t *resnonce)
{
return pCnHash(ctx, startNonce, height, target, rescount, resnonce);
}
bool xmrig::CudaLib::deviceInit(nvid_ctx *ctx) noexcept
{
return pDeviceInit(ctx);
}
bool xmrig::CudaLib::rxHash(nvid_ctx *ctx, uint32_t startNonce, uint64_t target, uint32_t *rescount, uint32_t *resnonce) noexcept
{
return pRxHash(ctx, startNonce, target, rescount, resnonce);
}
bool xmrig::CudaLib::rxPrepare(nvid_ctx *ctx, const void *dataset, size_t datasetSize, uint32_t batchSize) noexcept
{
return pRxPrepare(ctx, dataset, datasetSize, batchSize);
}
bool xmrig::CudaLib::setJob(nvid_ctx *ctx, const void *data, size_t size, const Algorithm &algorithm) noexcept
{
return pSetJob(ctx, data, size, algorithm);
}
const char *xmrig::CudaLib::deviceName(nvid_ctx *ctx) noexcept
{
return pDeviceName(ctx);
}
const char *xmrig::CudaLib::lastError(nvid_ctx *ctx) noexcept
{
return pLastError(ctx);
}
const char *xmrig::CudaLib::pluginVersion() noexcept
{
return pPluginVersion();
}
int xmrig::CudaLib::deviceInfo(nvid_ctx *ctx, int32_t blocks, int32_t threads, const Algorithm &algorithm) noexcept
{
return pDeviceInfo(ctx, blocks, threads, algorithm);
}
int32_t xmrig::CudaLib::deviceInt(nvid_ctx *ctx, DeviceProperty property) noexcept
{
return pDeviceInt(ctx, property);
}
nvid_ctx *xmrig::CudaLib::alloc(uint32_t id, int32_t bfactor, int32_t bsleep) noexcept
{
return pAlloc(id, bfactor, bsleep);
}
std::vector<xmrig::CudaDevice> xmrig::CudaLib::devices(int32_t bfactor, int32_t bsleep) noexcept
{
const uint32_t count = deviceCount();
if (!count) {
return {};
}
std::vector<CudaDevice> out;
out.reserve(count);
for (uint32_t i = 0; i < count; ++i) {
CudaDevice device(i, bfactor, bsleep);
if (device.isValid()) {
out.emplace_back(std::move(device));
}
}
return out;
}
uint32_t xmrig::CudaLib::deviceCount() noexcept
{
return pDeviceCount();
}
uint32_t xmrig::CudaLib::deviceUint(nvid_ctx *ctx, DeviceProperty property) noexcept
{
return pDeviceUint(ctx, property);
}
uint32_t xmrig::CudaLib::driverVersion() noexcept
{
return pVersion(DriverVersion);
}
uint32_t xmrig::CudaLib::runtimeVersion() noexcept
{
return pVersion(RuntimeVersion);
}
uint64_t xmrig::CudaLib::deviceUlong(nvid_ctx *ctx, DeviceProperty property) noexcept
{
return pDeviceUlong(ctx, property);
}
void xmrig::CudaLib::release(nvid_ctx *ctx) noexcept
{
pRelease(ctx);
}
bool xmrig::CudaLib::load()
{
if (uv_dlsym(&cudaLib, kVersion, reinterpret_cast<void**>(&pVersion)) == -1) {
return false;
}
if (pVersion(ApiVersion) != 1u) {
return false;
}
try {
DLSYM(Alloc);
DLSYM(CnHash);
DLSYM(DeviceCount);
DLSYM(DeviceInfo);
DLSYM(DeviceInit);
DLSYM(DeviceInt);
DLSYM(DeviceName);
DLSYM(DeviceUint);
DLSYM(DeviceUlong);
DLSYM(Init);
DLSYM(LastError);
DLSYM(PluginVersion);
DLSYM(Release);
DLSYM(RxHash);
DLSYM(RxPrepare);
DLSYM(SetJob);
DLSYM(Version);
} catch (std::exception &ex) {
return false;
}
pInit();
return true;
}
const char *xmrig::CudaLib::defaultLoader()
{
# if defined(__APPLE__)
return "/System/Library/Frameworks/OpenCL.framework/OpenCL"; // FIXME
# elif defined(_WIN32)
return "xmrig-cuda.dll";
# else
return "xmrig-cuda.so";
# endif
}
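
The loader above follows a common plugin pattern: dlopen the library, resolve a version entry point first, reject incompatible ABI versions, and only then resolve the full symbol table, with the DLSYM macro turning any missing symbol into an exception that load() converts into a clean false. A stripped-down sketch of the same pattern with libuv; the symbol names and the expected version value are placeholders, not the real plugin ABI:

// Hedged sketch of the plugin-loading pattern used by CudaLib: open a
// shared library with libuv, resolve a version symbol first, then the
// remaining symbols, failing cleanly if anything is missing.
#include <uv.h>
#include <stdexcept>

static uv_lib_t lib;

using version_fn = unsigned (*)(unsigned);
using hash_fn    = bool (*)(const void *, unsigned);

static version_fn pVersion = nullptr;
static hash_fn    pHash    = nullptr;

#define RESOLVE(name, ptr) \
    if (uv_dlsym(&lib, name, reinterpret_cast<void **>(&ptr)) == -1) { \
        throw std::runtime_error("symbol not found"); \
    }

bool loadPlugin(const char *path)
{
    if (uv_dlopen(path, &lib) != 0) {       // 0 means the library was opened
        return false;
    }

    try {
        RESOLVE("version", pVersion);       // placeholder symbol name
        if (pVersion(0) != 1u) {            // reject incompatible ABI versions
            return false;
        }
        RESOLVE("hash", pHash);             // placeholder symbol name
    } catch (const std::exception &) {
        return false;                       // a missing symbol aborts the load
    }

    return true;
}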

View file

@ -0,0 +1,105 @@
/* XMRig
* Copyright 2010 Jeff Garzik <jgarzik@pobox.com>
* Copyright 2012-2014 pooler <pooler@litecoinpool.org>
* Copyright 2014 Lucas Jones <https://github.com/lucasjones>
* Copyright 2014-2016 Wolf9466 <https://github.com/OhGodAPet>
* Copyright 2016 Jay D Dee <jayddee246@gmail.com>
* Copyright 2017-2018 XMR-Stak <https://github.com/fireice-uk>, <https://github.com/psychocrypt>
* Copyright 2018-2019 SChernykh <https://github.com/SChernykh>
* Copyright 2016-2019 XMRig <https://github.com/xmrig>, <support@xmrig.com>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef XMRIG_CUDALIB_H
#define XMRIG_CUDALIB_H
using nvid_ctx = struct nvid_ctx;
#include "backend/cuda/wrappers/CudaDevice.h"
#include "base/tools/String.h"
#include "crypto/common/Algorithm.h"
#include <vector>
namespace xmrig {
class CudaLib
{
public:
enum DeviceProperty : uint32_t
{
DeviceId,
DeviceAlgorithm,
DeviceArchMajor,
DeviceArchMinor,
DeviceSmx,
DeviceBlocks,
DeviceThreads,
DeviceBFactor,
DeviceBSleep,
DeviceClockRate,
DeviceMemoryClockRate,
DeviceMemoryTotal,
DeviceMemoryFree,
DevicePciBusID,
DevicePciDeviceID,
DevicePciDomainID
};
static bool init(const char *fileName = nullptr);
static const char *lastError() noexcept;
static void close();
static inline bool isInitialized() { return m_initialized; }
static inline const String &loader() { return m_loader; }
static bool cnHash(nvid_ctx *ctx, uint32_t startNonce, uint64_t height, uint64_t target, uint32_t *rescount, uint32_t *resnonce);
static bool deviceInit(nvid_ctx *ctx) noexcept;
static bool rxHash(nvid_ctx *ctx, uint32_t startNonce, uint64_t target, uint32_t *rescount, uint32_t *resnonce) noexcept;
static bool rxPrepare(nvid_ctx *ctx, const void *dataset, size_t datasetSize, uint32_t batchSize) noexcept;
static bool setJob(nvid_ctx *ctx, const void *data, size_t size, const Algorithm &algorithm) noexcept;
static const char *deviceName(nvid_ctx *ctx) noexcept;
static const char *lastError(nvid_ctx *ctx) noexcept;
static const char *pluginVersion() noexcept;
static int deviceInfo(nvid_ctx *ctx, int32_t blocks, int32_t threads, const Algorithm &algorithm) noexcept;
static int32_t deviceInt(nvid_ctx *ctx, DeviceProperty property) noexcept;
static nvid_ctx *alloc(uint32_t id, int32_t bfactor, int32_t bsleep) noexcept;
static std::vector<CudaDevice> devices(int32_t bfactor, int32_t bsleep) noexcept;
static uint32_t deviceCount() noexcept;
static uint32_t deviceUint(nvid_ctx *ctx, DeviceProperty property) noexcept;
static uint32_t driverVersion() noexcept;
static uint32_t runtimeVersion() noexcept;
static uint64_t deviceUlong(nvid_ctx *ctx, DeviceProperty property) noexcept;
static void release(nvid_ctx *ctx) noexcept;
private:
static bool load();
static const char *defaultLoader();
static bool m_initialized;
static bool m_ready;
static String m_loader;
};
} // namespace xmrig
#endif /* XMRIG_CUDALIB_H */

View file

@ -153,7 +153,8 @@ public:
Log::print(GREEN_BOLD(" * ") WHITE_BOLD("%-13s") CYAN_BOLD("#%zu ") WHITE_BOLD("%s") "/" WHITE_BOLD("%s"), "OPENCL", platform.index(), platform.name().data(), platform.version().data());
for (const OclDevice &device : devices) {
Log::print(GREEN_BOLD(" * ") WHITE_BOLD("%-13s") CYAN_BOLD("#%zu") YELLOW(" %s") " %s " WHITE_BOLD("%uMHz") " cu:" WHITE_BOLD("%u") " mem:" CYAN("%zu/%zu") " MB", "OPENCL GPU",
Log::print(GREEN_BOLD(" * ") WHITE_BOLD("%-13s") CYAN_BOLD("#%zu") YELLOW(" %s") " %s " WHITE_BOLD("%u MHz") " cu:" WHITE_BOLD("%u") " mem:" CYAN("%zu/%zu") " MB",
"OPENCL GPU",
device.index(),
device.topology().toString().data(),
device.printableName().data(),
@ -284,7 +285,7 @@ void xmrig::OclBackend::printHashrate(bool details)
Log::print(WHITE_BOLD_S "| OPENCL # | AFFINITY | 10s H/s | 60s H/s | 15m H/s |");
size_t i = 0;
for (const OclLaunchData &data : d_ptr->threads) {
for (const auto &data : d_ptr->threads) {
Log::print("| %8zu | %8" PRId64 " | %7s | %7s | %7s |" CYAN_BOLD(" #%u") YELLOW(" %s") " %s",
i,
data.affinity,
@ -309,7 +310,7 @@ void xmrig::OclBackend::printHashrate(bool details)
void xmrig::OclBackend::setJob(const Job &job)
{
const OclConfig &cl = d_ptr->controller->config()->cl();
const auto &cl = d_ptr->controller->config()->cl();
if (cl.isEnabled()) {
d_ptr->init(cl);
}
@ -318,7 +319,7 @@ void xmrig::OclBackend::setJob(const Job &job)
return stop();
}
std::vector<OclLaunchData> threads = cl.get(d_ptr->controller->miner(), job.algorithm(), d_ptr->platform, d_ptr->devices, tag);
auto threads = cl.get(d_ptr->controller->miner(), job.algorithm(), d_ptr->platform, d_ptr->devices);
if (!d_ptr->threads.empty() && d_ptr->threads.size() == threads.size() && std::equal(d_ptr->threads.begin(), d_ptr->threads.end(), threads.begin())) {
return;
}
@ -408,7 +409,7 @@ rapidjson::Value xmrig::OclBackend::toJSON(rapidjson::Document &doc) const
Value threads(kArrayType);
size_t i = 0;
for (const OclLaunchData &data : d_ptr->threads) {
for (const auto &data : d_ptr->threads) {
Value thread = data.thread.toJSON(doc);
thread.AddMember("affinity", data.affinity, allocator);
thread.AddMember("hashrate", hashrate()->toJSON(i, doc), allocator);

View file

@ -30,6 +30,7 @@
#include "backend/common/interfaces/IBackend.h"
#include "base/tools/Object.h"
namespace xmrig {
@ -43,16 +44,12 @@ class Miner;
class OclBackend : public IBackend
{
public:
OclBackend() = delete;
OclBackend(const OclBackend &other) = delete;
XMRIG_DISABLE_COPY_MOVE_DEFAULT(OclBackend)
OclBackend(Controller *controller);
OclBackend(OclBackend &&other) = delete;
~OclBackend() override;
OclBackend &operator=(const OclBackend &other) = delete;
OclBackend &operator=(OclBackend &&other) = delete;
protected:
bool isEnabled() const override;
bool isEnabled(const Algorithm &algorithm) const override;

View file

@ -24,6 +24,7 @@
#include "backend/opencl/OclConfig.h"
#include "backend/common/Tags.h"
#include "backend/opencl/OclConfig_gen.h"
#include "backend/opencl/wrappers/OclLib.h"
#include "base/io/json/Json.h"
@ -113,10 +114,10 @@ rapidjson::Value xmrig::OclConfig::toJSON(rapidjson::Document &doc) const
}
std::vector<xmrig::OclLaunchData> xmrig::OclConfig::get(const Miner *miner, const Algorithm &algorithm, const OclPlatform &platform, const std::vector<OclDevice> &devices, const char *tag) const
std::vector<xmrig::OclLaunchData> xmrig::OclConfig::get(const Miner *miner, const Algorithm &algorithm, const OclPlatform &platform, const std::vector<OclDevice> &devices) const
{
std::vector<OclLaunchData> out;
const OclThreads &threads = m_threads.get(algorithm);
const auto &threads = m_threads.get(algorithm);
if (threads.isEmpty()) {
return out;
@ -124,9 +125,9 @@ std::vector<xmrig::OclLaunchData> xmrig::OclConfig::get(const Miner *miner, cons
out.reserve(threads.count() * 2);
for (const OclThread &thread : threads.data()) {
for (const auto &thread : threads.data()) {
if (thread.index() >= devices.size()) {
LOG_INFO("%s" YELLOW(" skip non-existing device with index ") YELLOW_BOLD("%u"), tag, thread.index());
LOG_INFO("%s" YELLOW(" skip non-existing device with index ") YELLOW_BOLD("%u"), ocl_tag(), thread.index());
continue;
}

View file

@ -42,7 +42,7 @@ public:
OclPlatform platform() const;
rapidjson::Value toJSON(rapidjson::Document &doc) const;
std::vector<OclLaunchData> get(const Miner *miner, const Algorithm &algorithm, const OclPlatform &platform, const std::vector<OclDevice> &devices, const char *tag) const;
std::vector<OclLaunchData> get(const Miner *miner, const Algorithm &algorithm, const OclPlatform &platform, const std::vector<OclDevice> &devices) const;
void read(const rapidjson::Value &value);
inline bool isCacheEnabled() const { return m_cache; }

View file

@ -46,7 +46,7 @@ xmrig::OclThreads::OclThreads(const rapidjson::Value &value)
xmrig::OclThreads::OclThreads(const std::vector<OclDevice> &devices, const Algorithm &algorithm)
{
for (const OclDevice &device : devices) {
for (const auto &device : devices) {
device.generate(algorithm, *this);
}
}

View file

@ -30,6 +30,7 @@
#include "backend/common/Worker.h"
#include "backend/common/WorkerJob.h"
#include "backend/opencl/OclLaunchData.h"
#include "base/tools/Object.h"
#include "net/JobResult.h"
@ -42,16 +43,12 @@ class IOclRunner;
class OclWorker : public Worker
{
public:
OclWorker() = delete;
OclWorker(const OclWorker &other) = delete;
OclWorker(OclWorker &&other) = delete;
XMRIG_DISABLE_COPY_MOVE_DEFAULT(OclWorker)
OclWorker(size_t id, const OclLaunchData &data);
~OclWorker() override;
OclWorker &operator=(const OclWorker &other) = delete;
OclWorker &operator=(OclWorker &&other) = delete;
static std::atomic<bool> ready;
protected:

View file

@ -39,8 +39,8 @@ constexpr size_t oneGiB = 1024 * 1024 * 1024;
xmrig::OclBaseRunner::OclBaseRunner(size_t id, const OclLaunchData &data) :
m_algorithm(data.algorithm),
m_ctx(data.ctx),
m_algorithm(data.algorithm),
m_source(OclSource::get(data.algorithm)),
m_data(data),
m_align(OclLib::getUint(data.device.id(), CL_DEVICE_MEM_BASE_ADDR_ALIGN)),

View file

@ -70,21 +70,21 @@ protected:
void enqueueWriteBuffer(cl_mem buffer, cl_bool blocking_write, size_t offset, size_t size, const void *ptr);
void finalize(uint32_t *hashOutput);
Algorithm m_algorithm;
cl_command_queue m_queue = nullptr;
cl_context m_ctx;
cl_mem m_buffer = nullptr;
cl_mem m_input = nullptr;
cl_mem m_output = nullptr;
cl_program m_program = nullptr;
const Algorithm m_algorithm;
const char *m_source;
const OclLaunchData &m_data;
const size_t m_align;
const size_t m_threadId;
const uint32_t m_intensity;
size_t m_offset = 0;
std::string m_deviceKey;
std::string m_options;
uint32_t m_intensity;
};
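
The reshuffling in this header (and in the constructor's initializer list above) is about declaration order: C++ initializes non-static data members in the order they are declared, not in the order they are listed in the constructor, so making m_algorithm and m_intensity const also means the initializer list has to follow the declarations to avoid reorder warnings. A minimal illustration of the rule, with invented names:

// Hedged sketch illustrating why member/initializer order matters:
// members are constructed in declaration order, so the initializer list
// is written in the same order to avoid -Wreorder warnings.
#include <cstdio>

struct Runner {
    Runner(int algo, void *ctx) :
        m_ctx(ctx),            // initialized first: declared first
        m_algorithm(algo)      // then m_algorithm, matching declaration order
    {}

    void *m_ctx;               // declared (and therefore initialized) before m_algorithm
    const int m_algorithm;
};

int main()
{
    Runner r(7, nullptr);
    std::printf("%d\n", r.m_algorithm);
    return 0;
}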

View file

@ -26,14 +26,13 @@
#define XMRIG_OCLDEVICE_H
#include <vector>
#include "backend/common/misc/PciTopology.h"
#include "backend/opencl/wrappers/OclVendor.h"
#include "base/tools/String.h"
#include <algorithm>
#include <vector>
using cl_device_id = struct _cl_device_id *;
using cl_platform_id = struct _cl_platform_id *;

View file

@ -77,6 +77,7 @@ static const char *kRetainMemObject = "clRetainMemObject";
static const char *kRetainProgram = "clRetainProgram";
static const char *kSetKernelArg = "clSetKernelArg";
static const char *kSetMemObjectDestructorCallback = "clSetMemObjectDestructorCallback";
static const char *kSymbolNotFound = "symbol not found";
static const char *kUnloadPlatformCompiler = "clUnloadPlatformCompiler";
@ -156,7 +157,7 @@ static setKernelArg_t pSetKernelArg = nu
static setMemObjectDestructorCallback_t pSetMemObjectDestructorCallback = nullptr;
static unloadPlatformCompiler_t pUnloadPlatformCompiler = nullptr;
#define DLSYM(x) if (uv_dlsym(&oclLib, k##x, reinterpret_cast<void**>(&p##x)) == -1) { return false; }
#define DLSYM(x) if (uv_dlsym(&oclLib, k##x, reinterpret_cast<void**>(&p##x)) == -1) { throw std::runtime_error(kSymbolNotFound); }
namespace xmrig {
@ -210,6 +211,7 @@ void xmrig::OclLib::close()
bool xmrig::OclLib::load()
{
try {
DLSYM(CreateCommandQueue);
DLSYM(CreateContext);
DLSYM(BuildProgram);
@ -243,6 +245,9 @@ bool xmrig::OclLib::load()
DLSYM(CreateSubBuffer);
DLSYM(RetainProgram);
DLSYM(RetainMemObject);
} catch (std::exception &ex) {
return false;
}
# if defined(CL_VERSION_2_0)
uv_dlsym(&oclLib, kCreateCommandQueueWithProperties, reinterpret_cast<void**>(&pCreateCommandQueueWithProperties));

View file

@ -82,6 +82,7 @@ private:
#define WHITE_S CSI "0;37m" // another name for LT.GRAY
#define WHITE_BOLD_S CSI "1;37m" // actually white
#define GREEN_BG_BOLD_S CSI "42;1m"
#define BLUE_BG_S CSI "44m"
#define BLUE_BG_BOLD_S CSI "44;1m"
#define MAGENTA_BG_S CSI "45m"
@ -107,6 +108,7 @@ private:
#define WHITE(x) WHITE_S x CLEAR
#define WHITE_BOLD(x) WHITE_BOLD_S x CLEAR
#define GREEN_BG_BOLD(x) GREEN_BG_BOLD_S x CLEAR
#define BLUE_BG(x) BLUE_BG_S x CLEAR
#define BLUE_BG_BOLD(x) BLUE_BG_BOLD_S x CLEAR
#define MAGENTA_BG(x) MAGENTA_BG_S x CLEAR
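
The new BLUE_BG / BLUE_BG_BOLD macros compose exactly like the existing foreground ones: the *_S form is the raw CSI prefix and the function-like form wraps its argument between the prefix and CLEAR. A hedged usage sketch with Log::print(); the label text is illustrative only:

// Hedged usage sketch for the new background-colour macros; the label
// text is illustrative, not taken from this commit.
#include "base/io/log/Log.h"

void announceCudaBackend()
{
    xmrig::Log::print(BLUE_BG_BOLD(" nv ") WHITE_BOLD(" CUDA backend ready"));
}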

View file

@ -56,6 +56,11 @@
#endif
#ifdef XMRIG_FEATURE_CUDA
# include "backend/cuda/CudaBackend.h"
#endif
#ifdef XMRIG_ALGO_RANDOMX
# include "crypto/rx/RxConfig.h"
#endif
@ -270,12 +275,17 @@ xmrig::Miner::Miner(Controller *controller)
d_ptr->timer = new Timer(this);
d_ptr->backends.reserve(3);
d_ptr->backends.push_back(new CpuBackend(controller));
# ifdef XMRIG_FEATURE_OPENCL
d_ptr->backends.push_back(new OclBackend(controller));
# endif
# ifdef XMRIG_FEATURE_CUDA
d_ptr->backends.push_back(new CudaBackend(controller));
# endif
d_ptr->rebuild();
}

View file

@ -48,6 +48,11 @@
#endif
#ifdef XMRIG_FEATURE_CUDA
# include "backend/cuda/CudaConfig.h"
#endif
namespace xmrig {
static const char *kCPU = "cpu";
@ -60,6 +65,10 @@ static const char *kRandomX = "randomx";
static const char *kOcl = "opencl";
#endif
#ifdef XMRIG_FEATURE_CUDA
static const char *kCuda = "cuda";
#endif
class ConfigPrivate
{
@ -73,6 +82,10 @@ public:
# ifdef XMRIG_FEATURE_OPENCL
OclConfig cl;
# endif
# ifdef XMRIG_FEATURE_CUDA
CudaConfig cuda;
# endif
};
}
@ -104,6 +117,14 @@ const xmrig::OclConfig &xmrig::Config::cl() const
#endif
#ifdef XMRIG_FEATURE_CUDA
const xmrig::CudaConfig &xmrig::Config::cuda() const
{
return d_ptr->cuda;
}
#endif
#ifdef XMRIG_ALGO_RANDOMX
const xmrig::RxConfig &xmrig::Config::rx() const
{
@ -124,6 +145,12 @@ bool xmrig::Config::isShouldSave() const
}
# endif
# ifdef XMRIG_FEATURE_CUDA
if (cuda().isShouldSave()) {
return true;
}
# endif
return (m_upgrade || cpu().isShouldSave());
}
@ -146,6 +173,10 @@ bool xmrig::Config::read(const IJsonReader &reader, const char *fileName)
d_ptr->cl.read(reader.getValue(kOcl));
# endif
# ifdef XMRIG_FEATURE_CUDA
d_ptr->cuda.read(reader.getValue(kCuda));
# endif
return true;
}
@ -178,6 +209,10 @@ void xmrig::Config::getJSON(rapidjson::Document &doc) const
doc.AddMember(StringRef(kOcl), cl().toJSON(doc), allocator);
# endif
# ifdef XMRIG_FEATURE_CUDA
doc.AddMember(StringRef(kCuda), cuda().toJSON(doc), allocator);
# endif
doc.AddMember("donate-level", m_pools.donateLevel(), allocator);
doc.AddMember("donate-over-proxy", m_pools.proxyDonate(), allocator);
doc.AddMember("log-file", m_logFile.toJSON(), allocator);

View file

@ -39,9 +39,10 @@ namespace xmrig {
class ConfigPrivate;
class CudaConfig;
class IThread;
class RxConfig;
class OclConfig;
class RxConfig;
class Config : public BaseConfig
@ -58,6 +59,10 @@ public:
const OclConfig &cl() const;
# endif
# ifdef XMRIG_FEATURE_CUDA
const CudaConfig &cuda() const;
# endif
# ifdef XMRIG_ALGO_RANDOMX
const RxConfig &rx() const;
# endif

View file

@ -49,7 +49,7 @@ public:
constexpr inline bool isR() const { return ALGO == Algorithm::CN_R; }
constexpr inline size_t memory() const { static_assert(ALGO > Algorithm::INVALID && ALGO < Algorithm::RX_0, "invalid CRYPTONIGHT algorithm"); return CN_MEMORY; }
constexpr inline uint32_t iterations() const { static_assert(ALGO > Algorithm::INVALID && ALGO < Algorithm::RX_0, "invalid CRYPTONIGHT algorithm"); return CN_ITER; }
constexpr inline uint32_t mask() const { return ((memory() - 1) / 16) * 16; }
constexpr inline uint32_t mask() const { return static_cast<uint32_t>(((memory() - 1) / 16) * 16); }
inline static size_t memory(Algorithm::Id algo)
{