Added CudaWorker and CudaLaunchData.

Commit: bb2cc0deb7
Parent: d4a3024996
Author: XMRig
Date:   2019-10-26 17:37:54 +07:00
14 changed files with 567 additions and 28 deletions

src/backend/common/Workers.cpp

@@ -37,6 +37,11 @@
#endif
+#ifdef XMRIG_FEATURE_CUDA
+# include "backend/cuda/CudaWorker.h"
+#endif
namespace xmrig {
@@ -217,4 +222,16 @@ template class Workers<OclLaunchData>;
#endif
+#ifdef XMRIG_FEATURE_CUDA
+template<>
+xmrig::IWorker *xmrig::Workers<CudaLaunchData>::create(Thread<CudaLaunchData> *handle)
+{
+    return new CudaWorker(handle->id(), handle->config());
+}
+
+template class Workers<CudaLaunchData>;
+#endif
} // namespace xmrig

src/backend/common/Workers.h

@@ -37,6 +37,11 @@
#endif
+#ifdef XMRIG_FEATURE_CUDA
+# include "backend/cuda/CudaLaunchData.h"
+#endif
namespace xmrig {
@@ -80,6 +85,13 @@ extern template class Workers<OclLaunchData>;
#endif
+#ifdef XMRIG_FEATURE_CUDA
+template<>
+IWorker *Workers<CudaLaunchData>::create(Thread<CudaLaunchData> *handle);
+
+extern template class Workers<CudaLaunchData>;
+#endif
} // namespace xmrig
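The two hunks above use a standard C++ idiom: the header declares the specialization of the factory member together with an explicit instantiation declaration (extern template), and the source file supplies the definition plus the single explicit instantiation. Below is a minimal, compilable sketch of that idiom, collapsed into one file and using stand-in names rather than XMRig's real Workers/IWorker/Thread types.

// Illustrative sketch only: Workers, CudaLaunchDataLike and create() are simplified
// stand-ins for the real XMRig types, collapsed from a .h/.cpp pair into one file.
#include <cstdio>

template<typename T>
struct Workers {
    static const char *create();                        // per-backend factory hook
};

struct CudaLaunchDataLike {};                           // stand-in for CudaLaunchData

// Header side: declare the specialization and suppress implicit instantiation.
template<> const char *Workers<CudaLaunchDataLike>::create();
extern template struct Workers<CudaLaunchDataLike>;

// Source side: define the specialization and instantiate the template exactly once.
template<> const char *Workers<CudaLaunchDataLike>::create()
{
    return "CudaWorker";
}

template struct Workers<CudaLaunchDataLike>;

int main()
{
    std::puts(Workers<CudaLaunchDataLike>::create());   // prints "CudaWorker"
    return 0;
}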

src/backend/cuda/CudaBackend.cpp

@@ -34,6 +34,7 @@
#include "backend/common/Workers.h"
#include "backend/cuda/CudaConfig.h"
#include "backend/cuda/CudaThreads.h"
+#include "backend/cuda/CudaWorker.h"
#include "backend/cuda/wrappers/CudaDevice.h"
#include "backend/cuda/wrappers/CudaLib.h"
#include "base/io/log/Log.h"
@@ -57,7 +58,7 @@ extern template class Threads<CudaThreads>;
constexpr const size_t oneMiB = 1024u * 1024u;
-static const char *tag = MAGENTA_BG_BOLD(WHITE_BOLD_S " nv ");
+static const char *tag = GREEN_BG_BOLD(WHITE_BOLD_S " nv ");
static const String kType = "cuda";
static std::mutex mutex;
@@ -69,6 +70,51 @@ static void printDisabled(const char *reason)
}
+struct CudaLaunchStatus
+{
+public:
+    inline size_t threads() const { return m_threads; }
+
+    inline bool started(bool ready)
+    {
+        ready ? m_started++ : m_errors++;
+        return (m_started + m_errors) == m_threads;
+    }
+
+    inline void start(size_t threads)
+    {
+        m_started = 0;
+        m_errors  = 0;
+        m_threads = threads;
+        m_ts      = Chrono::steadyMSecs();
+        CudaWorker::ready = false;
+    }
+
+    inline void print() const
+    {
+        if (m_started == 0) {
+            LOG_ERR("%s " RED_BOLD("disabled") YELLOW(" (failed to start threads)"), tag);
+            return;
+        }
+
+        LOG_INFO("%s" GREEN_BOLD(" READY") " threads " "%s%zu/%zu" BLACK_BOLD(" (%" PRIu64 " ms)"),
+                 tag,
+                 m_errors == 0 ? CYAN_BOLD_S : YELLOW_BOLD_S,
+                 m_started,
+                 m_threads,
+                 Chrono::steadyMSecs() - m_ts
+        );
+    }
+
+private:
+    size_t m_errors   = 0;
+    size_t m_started  = 0;
+    size_t m_threads  = 0;
+    uint64_t m_ts     = 0;
+};
class CudaBackendPrivate
{
@@ -125,13 +171,46 @@ public:
    inline void start(const Job &job)
    {
+        LOG_INFO("%s use profile " BLUE_BG(WHITE_BOLD_S " %s ") WHITE_BOLD_S " (" CYAN_BOLD("%zu") WHITE_BOLD(" threads)") " scratchpad " CYAN_BOLD("%zu KB"),
+                 tag,
+                 profileName.data(),
+                 threads.size(),
+                 algo.l3() / 1024
+        );
+
+        Log::print(WHITE_BOLD("| # | GPU | BUS ID | I | T | B | BF | BS | MEM | NAME"));
+
+        size_t i = 0;
+        for (const auto &data : threads) {
+            Log::print("|" CYAN_BOLD("%3zu") " |" CYAN_BOLD("%4u") " |" YELLOW(" %7s") " |" CYAN_BOLD("%5d") " |" CYAN_BOLD("%4d") " |"
+                       CYAN_BOLD("%4d") " |" CYAN_BOLD("%3d") " |" CYAN_BOLD("%4d") " |" CYAN("%5zu") " | " GREEN("%s"),
+                       i,
+                       data.thread.index(),
+                       data.device.topology().toString().data(),
+                       data.thread.threads() * data.thread.blocks(),
+                       data.thread.threads(),
+                       data.thread.blocks(),
+                       data.thread.bfactor(),
+                       data.thread.bsleep(),
+                       (data.thread.threads() * data.thread.blocks()) * algo.l3() / oneMiB,
+                       data.device.name().data()
+            );
+
+            i++;
+        }
+
+        status.start(threads.size());
+        workers.start(threads);
    }
    Algorithm algo;
    Controller *controller;
+    CudaLaunchStatus status;
    std::vector<CudaDevice> devices;
+    std::vector<CudaLaunchData> threads;
    String profileName;
+    Workers<CudaLaunchData> workers;
};
@@ -147,6 +226,7 @@ const char *xmrig::cuda_tag()
xmrig::CudaBackend::CudaBackend(Controller *controller) :
    d_ptr(new CudaBackendPrivate(controller))
{
+    d_ptr->workers.setBackend(this);
}
@ -172,8 +252,7 @@ bool xmrig::CudaBackend::isEnabled(const Algorithm &algorithm) const
const xmrig::Hashrate *xmrig::CudaBackend::hashrate() const const xmrig::Hashrate *xmrig::CudaBackend::hashrate() const
{ {
return nullptr; return d_ptr->workers.hashrate();
// return d_ptr->workers.hashrate();
} }
@@ -204,21 +283,21 @@ void xmrig::CudaBackend::printHashrate(bool details)
    Log::print(WHITE_BOLD_S "| CUDA # | AFFINITY | 10s H/s | 60s H/s | 15m H/s |");

-    // size_t i = 0;
-    // for (const OclLaunchData &data : d_ptr->threads) {
-    // Log::print("| %8zu | %8" PRId64 " | %7s | %7s | %7s |" CYAN_BOLD(" #%u") YELLOW(" %s") " %s",
-    // i,
-    // data.affinity,
-    // Hashrate::format(hashrate()->calc(i, Hashrate::ShortInterval), num, sizeof num / 3),
-    // Hashrate::format(hashrate()->calc(i, Hashrate::MediumInterval), num + 8, sizeof num / 3),
-    // Hashrate::format(hashrate()->calc(i, Hashrate::LargeInterval), num + 8 * 2, sizeof num / 3),
-    // data.device.index(),
-    // data.device.topology().toString().data(),
-    // data.device.printableName().data()
-    // );
-    // i++;
-    // }
+    size_t i = 0;
+    for (const auto &data : d_ptr->threads) {
+        Log::print("| %8zu | %8" PRId64 " | %7s | %7s | %7s |" CYAN_BOLD(" #%u") YELLOW(" %s") " %s",
+                   i,
+                   data.thread.affinity(),
+                   Hashrate::format(hashrate()->calc(i, Hashrate::ShortInterval), num, sizeof num / 3),
+                   Hashrate::format(hashrate()->calc(i, Hashrate::MediumInterval), num + 8, sizeof num / 3),
+                   Hashrate::format(hashrate()->calc(i, Hashrate::LargeInterval), num + 8 * 2, sizeof num / 3),
+                   data.device.index(),
+                   data.device.topology().toString().data(),
+                   data.device.name().data()
+        );
+
+        i++;
+    }

    Log::print(WHITE_BOLD_S "| - | - | %7s | %7s | %7s |",
               Hashrate::format(hashrate()->calc(Hashrate::ShortInterval), num, sizeof num / 3),
@@ -230,21 +309,72 @@ void xmrig::CudaBackend::printHashrate(bool details)
void xmrig::CudaBackend::setJob(const Job &job)
{
+    const auto &cuda = d_ptr->controller->config()->cuda();
+    if (cuda.isEnabled()) {
+        d_ptr->init(cuda);
+    }
+
+    if (!isEnabled()) {
+        return stop();
+    }
+
+    auto threads = cuda.get(d_ptr->controller->miner(), job.algorithm(), d_ptr->devices);
+    if (!d_ptr->threads.empty() && d_ptr->threads.size() == threads.size() && std::equal(d_ptr->threads.begin(), d_ptr->threads.end(), threads.begin())) {
+        return;
+    }
+
+    d_ptr->algo = job.algorithm();
+    d_ptr->profileName = cuda.threads().profileName(job.algorithm());
+
+    if (d_ptr->profileName.isNull() || threads.empty()) {
+        LOG_WARN("%s " RED_BOLD("disabled") YELLOW(" (no suitable configuration found)"), tag);
+        return stop();
+    }
+
+    stop();
+
+    d_ptr->threads = std::move(threads);
+    d_ptr->start(job);
}
void xmrig::CudaBackend::start(IWorker *worker, bool ready)
{
+    mutex.lock();
+
+    if (d_ptr->status.started(ready)) {
+        d_ptr->status.print();
+        CudaWorker::ready = true;
+    }
+
+    mutex.unlock();
+
+    if (ready) {
+        worker->start();
+    }
}
void xmrig::CudaBackend::stop()
{
+    if (d_ptr->threads.empty()) {
+        return;
+    }
+
+    const uint64_t ts = Chrono::steadyMSecs();
+
+    d_ptr->workers.stop();
+    d_ptr->threads.clear();
+
+    LOG_INFO("%s" YELLOW(" stopped") BLACK_BOLD(" (%" PRIu64 " ms)"), tag, Chrono::steadyMSecs() - ts);
}
void xmrig::CudaBackend::tick(uint64_t ticks)
{
+    d_ptr->workers.tick(ticks);
}
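For orientation, a compilable model of the start-up handshake added above: every worker thread reports back through the backend, CudaLaunchStatus counts the reports under the shared mutex, and only when the last thread has reported is the global ready flag raised so the mining loops may run. All names below are simplified stand-ins, not the real IWorker/Workers machinery.

// Simplified model of the ready handshake (illustrative only; types are stand-ins).
#include <atomic>
#include <cstdio>
#include <mutex>
#include <thread>
#include <vector>

static std::atomic<bool> workerReady{false};   // models CudaWorker::ready
static std::mutex statusMutex;                 // models the file-local mutex above

struct Status {                                // models CudaLaunchStatus
    size_t started = 0, errors = 0, threads = 0;

    void start(size_t n) { started = errors = 0; threads = n; workerReady = false; }

    // true once every thread has reported either success or failure
    bool report(bool ok) { (ok ? started : errors)++; return started + errors == threads; }
};

static Status status;

static void backendStart(bool ok)              // models CudaBackend::start(worker, ready)
{
    std::lock_guard<std::mutex> lock(statusMutex);

    if (status.report(ok)) {
        std::printf("READY threads %zu/%zu\n", status.started, status.threads);
        workerReady = true;
    }
}

int main()
{
    const size_t n = 4;
    status.start(n);                           // models CudaLaunchStatus::start(threads.size())

    std::vector<std::thread> workers;
    for (size_t i = 0; i < n; ++i) {
        workers.emplace_back([] { backendStart(true); });
    }
    for (auto &t : workers) {
        t.join();
    }

    return workerReady ? 0 : 1;                // workers would only mine once this is true
}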

src/backend/cuda/CudaConfig.cpp

@@ -24,6 +24,7 @@
#include "backend/cuda/CudaConfig.h"
+#include "backend/common/Tags.h"
#include "backend/cuda/CudaConfig_gen.h"
#include "backend/cuda/wrappers/CudaLib.h"
#include "base/io/json/Json.h"
@@ -61,6 +62,30 @@ rapidjson::Value xmrig::CudaConfig::toJSON(rapidjson::Document &doc) const
}
+std::vector<xmrig::CudaLaunchData> xmrig::CudaConfig::get(const Miner *miner, const Algorithm &algorithm, const std::vector<CudaDevice> &devices) const
+{
+    std::vector<CudaLaunchData> out;
+    const auto &threads = m_threads.get(algorithm);
+
+    if (threads.isEmpty()) {
+        return out;
+    }
+
+    out.reserve(threads.count() * 2);
+
+    for (const auto &thread : threads.data()) {
+        if (thread.index() >= devices.size()) {
+            LOG_INFO("%s" YELLOW(" skip non-existing device with index ") YELLOW_BOLD("%u"), cuda_tag(), thread.index());
+            continue;
+        }
+
+        out.emplace_back(miner, algorithm, thread, devices[thread.index()]);
+    }
+
+    return out;
+}
void xmrig::CudaConfig::read(const rapidjson::Value &value)
{
    if (value.IsObject()) {

src/backend/cuda/CudaConfig.h

@@ -26,6 +26,7 @@
#define XMRIG_CUDACONFIG_H
+#include "backend/cuda/CudaLaunchData.h"
#include "backend/common/Threads.h"
#include "backend/cuda/CudaThreads.h"
@@ -39,6 +40,7 @@ public:
    CudaConfig() = default;

    rapidjson::Value toJSON(rapidjson::Document &doc) const;
+    std::vector<CudaLaunchData> get(const Miner *miner, const Algorithm &algorithm, const std::vector<CudaDevice> &devices) const;
    void read(const rapidjson::Value &value);

    inline bool isEnabled() const { return m_enabled; }

src/backend/cuda/CudaLaunchData.cpp

@@ -0,0 +1,50 @@
/* XMRig
* Copyright 2010 Jeff Garzik <jgarzik@pobox.com>
* Copyright 2012-2014 pooler <pooler@litecoinpool.org>
* Copyright 2014 Lucas Jones <https://github.com/lucasjones>
* Copyright 2014-2016 Wolf9466 <https://github.com/OhGodAPet>
* Copyright 2016 Jay D Dee <jayddee246@gmail.com>
* Copyright 2017-2018 XMR-Stak <https://github.com/fireice-uk>, <https://github.com/psychocrypt>
* Copyright 2018 Lee Clagett <https://github.com/vtnerd>
* Copyright 2018-2019 SChernykh <https://github.com/SChernykh>
* Copyright 2016-2019 XMRig <https://github.com/xmrig>, <support@xmrig.com>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "backend/cuda/CudaLaunchData.h"
#include "backend/common/Tags.h"
xmrig::CudaLaunchData::CudaLaunchData(const Miner *miner, const Algorithm &algorithm, const CudaThread &thread, const CudaDevice &device) :
    algorithm(algorithm),
    miner(miner),
    device(device),
    thread(thread)
{
}


bool xmrig::CudaLaunchData::isEqual(const CudaLaunchData &other) const
{
    return (other.algorithm == algorithm &&
            other.thread == thread);
}


const char *xmrig::CudaLaunchData::tag()
{
    return cuda_tag();
}

src/backend/cuda/CudaLaunchData.h

@@ -0,0 +1,66 @@
/* XMRig
* Copyright 2010 Jeff Garzik <jgarzik@pobox.com>
* Copyright 2012-2014 pooler <pooler@litecoinpool.org>
* Copyright 2014 Lucas Jones <https://github.com/lucasjones>
* Copyright 2014-2016 Wolf9466 <https://github.com/OhGodAPet>
* Copyright 2016 Jay D Dee <jayddee246@gmail.com>
* Copyright 2017-2018 XMR-Stak <https://github.com/fireice-uk>, <https://github.com/psychocrypt>
* Copyright 2018 Lee Clagett <https://github.com/vtnerd>
* Copyright 2018-2019 SChernykh <https://github.com/SChernykh>
* Copyright 2016-2019 XMRig <https://github.com/xmrig>, <support@xmrig.com>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef XMRIG_CUDALAUNCHDATA_H
#define XMRIG_CUDALAUNCHDATA_H
#include "backend/cuda/CudaThread.h"
#include "crypto/common/Algorithm.h"
#include "crypto/common/Nonce.h"
namespace xmrig {
class CudaDevice;
class Miner;
class CudaLaunchData
{
public:
    CudaLaunchData(const Miner *miner, const Algorithm &algorithm, const CudaThread &thread, const CudaDevice &device);

    bool isEqual(const CudaLaunchData &other) const;

    inline constexpr static Nonce::Backend backend() { return Nonce::CUDA; }

    inline bool operator!=(const CudaLaunchData &other) const { return !isEqual(other); }
    inline bool operator==(const CudaLaunchData &other) const { return isEqual(other); }

    static const char *tag();

    const Algorithm algorithm;
    const Miner *miner;
    const CudaDevice &device;
    const CudaThread thread;
};
} // namespace xmrig
#endif /* XMRIG_CUDALAUNCHDATA_H */
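The isEqual()/operator== pair above is what lets CudaBackend::setJob() (earlier in this commit) skip a worker restart when the freshly generated thread list matches the running one via std::equal. Below is a stand-alone illustration of that comparison; LaunchDataLike is a made-up stand-in for CudaLaunchData.

// Illustrative only: LaunchDataLike stands in for CudaLaunchData.
#include <algorithm>
#include <cstdio>
#include <vector>

struct LaunchDataLike {
    int index;
    int intensity;

    bool isEqual(const LaunchDataLike &other) const
    {
        return other.index == index && other.intensity == intensity;
    }

    bool operator==(const LaunchDataLike &other) const { return isEqual(other); }
};

int main()
{
    const std::vector<LaunchDataLike> running { {0, 2048}, {1, 2048} };
    const std::vector<LaunchDataLike> next    { {0, 2048}, {1, 2048} };

    // Same shape as the check in CudaBackend::setJob().
    const bool unchanged = !running.empty()
        && running.size() == next.size()
        && std::equal(running.begin(), running.end(), next.begin());

    std::puts(unchanged ? "same configuration, keep workers running"
                        : "configuration changed, restart workers");
    return 0;
}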

src/backend/cuda/CudaWorker.cpp

@@ -0,0 +1,164 @@
/* XMRig
* Copyright 2010 Jeff Garzik <jgarzik@pobox.com>
* Copyright 2012-2014 pooler <pooler@litecoinpool.org>
* Copyright 2014 Lucas Jones <https://github.com/lucasjones>
* Copyright 2014-2016 Wolf9466 <https://github.com/OhGodAPet>
* Copyright 2016 Jay D Dee <jayddee246@gmail.com>
* Copyright 2017-2018 XMR-Stak <https://github.com/fireice-uk>, <https://github.com/psychocrypt>
* Copyright 2018 Lee Clagett <https://github.com/vtnerd>
* Copyright 2018-2019 SChernykh <https://github.com/SChernykh>
* Copyright 2016-2019 XMRig <https://github.com/xmrig>, <support@xmrig.com>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "backend/cuda/CudaWorker.h"
#include "backend/common/Tags.h"
#include "base/io/log/Log.h"
#include "base/tools/Chrono.h"
#include "core/Miner.h"
#include "crypto/common/Nonce.h"
#include "net/JobResults.h"
#include <cassert>
#include <thread>
namespace xmrig {

static constexpr uint32_t kReserveCount = 32768;

std::atomic<bool> CudaWorker::ready;

static inline bool isReady() { return !Nonce::isPaused() && CudaWorker::ready; }
static inline uint32_t roundSize(uint32_t intensity) { return kReserveCount / intensity + 1; }

static inline void printError(size_t id, const char *error)
{
    LOG_ERR("%s" RED_S " thread " RED_BOLD("#%zu") RED_S " failed with error " RED_BOLD("%s"), cuda_tag(), id, error);
}

} // namespace xmrig
xmrig::CudaWorker::CudaWorker(size_t id, const CudaLaunchData &data) :
    Worker(id, data.thread.affinity(), -1),
    m_algorithm(data.algorithm),
    m_miner(data.miner),
    m_intensity(data.thread.threads() * data.thread.blocks())
{
}
xmrig::CudaWorker::~CudaWorker()
{
    // delete m_runner;
}
bool xmrig::CudaWorker::selfTest()
{
    return false; // FIXME
}


size_t xmrig::CudaWorker::intensity() const
{
    return 0; // FIXME
    // return m_runner ? m_runner->intensity() : 0;
}
void xmrig::CudaWorker::start()
{
    while (Nonce::sequence(Nonce::CUDA) > 0) {
        if (!isReady()) {
            do {
                std::this_thread::sleep_for(std::chrono::milliseconds(200));
            }
            while (!isReady() && Nonce::sequence(Nonce::CUDA) > 0);

            if (Nonce::sequence(Nonce::CUDA) == 0) {
                break;
            }

            if (!consumeJob()) {
                return;
            }
        }

        while (!Nonce::isOutdated(Nonce::CUDA, m_job.sequence())) {
            // try {
            // m_runner->run(*m_job.nonce(), results);
            // }
            // catch (std::exception &ex) {
            // printError(id(), ex.what());
            // return;
            // }
            // if (results[0xFF] > 0) {
            // JobResults::submit(m_job.currentJob(), results, results[0xFF]);
            // }

            m_job.nextRound(roundSize(m_intensity), m_intensity);

            storeStats();
            std::this_thread::yield();
        }

        if (!consumeJob()) {
            return;
        }
    }
}
bool xmrig::CudaWorker::consumeJob()
{
    if (Nonce::sequence(Nonce::CUDA) == 0) {
        return false;
    }

    m_job.add(m_miner->job(), Nonce::sequence(Nonce::CUDA), roundSize(m_intensity) * m_intensity);

    // try {
    // m_runner->set(m_job.currentJob(), m_job.blob());
    // }
    // catch (std::exception &ex) {
    // printError(id(), ex.what());
    // return false;
    // }

    return true;
}
void xmrig::CudaWorker::storeStats()
{
    if (!isReady()) {
        return;
    }

    m_count += m_intensity;

    Worker::storeStats();
}
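A stand-alone check of the nonce accounting used in this file: roundSize() picks enough rounds of intensity-many nonces to cover kReserveCount, and consumeJob() reserves roundSize(m_intensity) * m_intensity nonces per job. The constant and the helper are copied from above; the rest is illustrative scaffolding, not XMRig code.

// Numeric sanity check of the round-size arithmetic (illustrative scaffolding only).
#include <cassert>
#include <cinttypes>
#include <cstdint>
#include <cstdio>

static constexpr uint32_t kReserveCount = 32768;

static inline uint32_t roundSize(uint32_t intensity) { return kReserveCount / intensity + 1; }

int main()
{
    // e.g. 32 blocks * 64 threads, as computed from CudaThread::threads() * blocks()
    const uint32_t intensities[] = { 32 * 64, 1000, 40000 };

    for (const uint32_t intensity : intensities) {
        const uint32_t reserved = roundSize(intensity) * intensity;   // nonces reserved per job

        assert(reserved >= kReserveCount);             // never below the reserve target...
        assert(reserved - kReserveCount <= intensity); // ...and at most one extra round above it

        std::printf("intensity %6" PRIu32 " -> %" PRIu32 " nonces per job (%" PRIu32 " rounds)\n",
                    intensity, reserved, roundSize(intensity));
    }

    return 0;
}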

src/backend/cuda/CudaWorker.h

@@ -0,0 +1,70 @@
/* XMRig
* Copyright 2010 Jeff Garzik <jgarzik@pobox.com>
* Copyright 2012-2014 pooler <pooler@litecoinpool.org>
* Copyright 2014 Lucas Jones <https://github.com/lucasjones>
* Copyright 2014-2016 Wolf9466 <https://github.com/OhGodAPet>
* Copyright 2016 Jay D Dee <jayddee246@gmail.com>
* Copyright 2017-2018 XMR-Stak <https://github.com/fireice-uk>, <https://github.com/psychocrypt>
* Copyright 2018 Lee Clagett <https://github.com/vtnerd>
* Copyright 2018-2019 SChernykh <https://github.com/SChernykh>
* Copyright 2016-2019 XMRig <https://github.com/xmrig>, <support@xmrig.com>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef XMRIG_CUDAWORKER_H
#define XMRIG_CUDAWORKER_H
#include "backend/common/Worker.h"
#include "backend/common/WorkerJob.h"
#include "backend/cuda/CudaLaunchData.h"
#include "base/tools/Object.h"
#include "net/JobResult.h"
namespace xmrig {
class CudaWorker : public Worker
{
public:
    XMRIG_DISABLE_COPY_MOVE_DEFAULT(CudaWorker)

    CudaWorker(size_t id, const CudaLaunchData &data);
    ~CudaWorker() override;

    static std::atomic<bool> ready;

protected:
    bool selfTest() override;
    size_t intensity() const override;
    void start() override;

private:
    bool consumeJob();
    void storeStats();

    const Algorithm m_algorithm;
    const Miner *m_miner;
    const uint32_t m_intensity;
    WorkerJob<1> m_job;
};
} // namespace xmrig
#endif /* XMRIG_CUDAWORKER_H */

src/backend/cuda/cuda.cmake

@@ -5,8 +5,10 @@ if (WITH_CUDA)
        src/backend/cuda/CudaBackend.h
        src/backend/cuda/CudaConfig_gen.h
        src/backend/cuda/CudaConfig.h
+        src/backend/cuda/CudaLaunchData.h
        src/backend/cuda/CudaThread.h
        src/backend/cuda/CudaThreads.h
+        src/backend/cuda/CudaWorker.h
        src/backend/cuda/wrappers/CudaDevice.h
        src/backend/cuda/wrappers/CudaLib.h
    )
@@ -14,8 +16,10 @@ if (WITH_CUDA)
    set(SOURCES_BACKEND_CUDA
        src/backend/cuda/CudaBackend.cpp
        src/backend/cuda/CudaConfig.cpp
+        src/backend/cuda/CudaLaunchData.cpp
        src/backend/cuda/CudaThread.cpp
        src/backend/cuda/CudaThreads.cpp
+        src/backend/cuda/CudaWorker.cpp
        src/backend/cuda/wrappers/CudaDevice.cpp
        src/backend/cuda/wrappers/CudaLib.cpp
    )

src/backend/opencl/OclBackend.cpp

@@ -285,7 +285,7 @@ void xmrig::OclBackend::printHashrate(bool details)
    Log::print(WHITE_BOLD_S "| OPENCL # | AFFINITY | 10s H/s | 60s H/s | 15m H/s |");

    size_t i = 0;
-    for (const OclLaunchData &data : d_ptr->threads) {
+    for (const auto &data : d_ptr->threads) {
        Log::print("| %8zu | %8" PRId64 " | %7s | %7s | %7s |" CYAN_BOLD(" #%u") YELLOW(" %s") " %s",
                   i,
                   data.affinity,
@@ -310,7 +310,7 @@ void xmrig::OclBackend::printHashrate(bool details)
void xmrig::OclBackend::setJob(const Job &job)
{
-    const OclConfig &cl = d_ptr->controller->config()->cl();
+    const auto &cl = d_ptr->controller->config()->cl();
    if (cl.isEnabled()) {
        d_ptr->init(cl);
    }
@@ -319,7 +319,7 @@ void xmrig::OclBackend::setJob(const Job &job)
        return stop();
    }

-    std::vector<OclLaunchData> threads = cl.get(d_ptr->controller->miner(), job.algorithm(), d_ptr->platform, d_ptr->devices);
+    auto threads = cl.get(d_ptr->controller->miner(), job.algorithm(), d_ptr->platform, d_ptr->devices);
    if (!d_ptr->threads.empty() && d_ptr->threads.size() == threads.size() && std::equal(d_ptr->threads.begin(), d_ptr->threads.end(), threads.begin())) {
        return;
    }

src/backend/opencl/OclConfig.cpp

@@ -117,7 +117,7 @@ rapidjson::Value xmrig::OclConfig::toJSON(rapidjson::Document &doc) const
std::vector<xmrig::OclLaunchData> xmrig::OclConfig::get(const Miner *miner, const Algorithm &algorithm, const OclPlatform &platform, const std::vector<OclDevice> &devices) const
{
    std::vector<OclLaunchData> out;
-    const OclThreads &threads = m_threads.get(algorithm);
+    const auto &threads = m_threads.get(algorithm);

    if (threads.isEmpty()) {
        return out;
@@ -125,7 +125,7 @@ std::vector<xmrig::OclLaunchData> xmrig::OclConfig::get(const Miner *miner, cons
    out.reserve(threads.count() * 2);

-    for (const OclThread &thread : threads.data()) {
+    for (const auto &thread : threads.data()) {
        if (thread.index() >= devices.size()) {
            LOG_INFO("%s" YELLOW(" skip non-existing device with index ") YELLOW_BOLD("%u"), ocl_tag(), thread.index());
            continue;

src/backend/opencl/OclWorker.h

@@ -30,6 +30,7 @@
#include "backend/common/Worker.h"
#include "backend/common/WorkerJob.h"
#include "backend/opencl/OclLaunchData.h"
+#include "base/tools/Object.h"
#include "net/JobResult.h"
@@ -42,16 +43,12 @@ class IOclRunner;
class OclWorker : public Worker
{
public:
-    OclWorker() = delete;
-    OclWorker(const OclWorker &other) = delete;
-    OclWorker(OclWorker &&other) = delete;
+    XMRIG_DISABLE_COPY_MOVE_DEFAULT(OclWorker)

    OclWorker(size_t id, const OclLaunchData &data);
    ~OclWorker() override;

-    OclWorker &operator=(const OclWorker &other) = delete;
-    OclWorker &operator=(OclWorker &&other) = delete;

    static std::atomic<bool> ready;

protected:

src/base/io/log/Log.h

@@ -82,6 +82,7 @@ private:
#define WHITE_S CSI "0;37m" // another name for LT.GRAY
#define WHITE_BOLD_S CSI "1;37m" // actually white
+#define GREEN_BG_BOLD_S CSI "42;1m"
#define BLUE_BG_S CSI "44m"
#define BLUE_BG_BOLD_S CSI "44;1m"
#define MAGENTA_BG_S CSI "45m"
@@ -107,6 +108,7 @@ private:
#define WHITE(x) WHITE_S x CLEAR
#define WHITE_BOLD(x) WHITE_BOLD_S x CLEAR
+#define GREEN_BG_BOLD(x) GREEN_BG_BOLD_S x CLEAR
#define BLUE_BG(x) BLUE_BG_S x CLEAR
#define BLUE_BG_BOLD(x) BLUE_BG_BOLD_S x CLEAR
#define MAGENTA_BG(x) MAGENTA_BG_S x CLEAR
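To show how the new macro composes, and how the CUDA tag in CudaBackend.cpp changes from a magenta to a green background, here is a stand-alone expansion. The CSI and CLEAR values are assumptions based on their usual definitions in this header; only the GREEN_BG_BOLD pair is new in this commit.

// Illustrative expansion; CSI and CLEAR are assumed to match their definitions in Log.h.
#include <cstdio>

#define CSI              "\x1B["
#define CLEAR            CSI "0m"
#define WHITE_BOLD_S     CSI "1;37m"
#define GREEN_BG_BOLD_S  CSI "42;1m"               // added by this commit
#define GREEN_BG_BOLD(x) GREEN_BG_BOLD_S x CLEAR   // added by this commit

// The new CUDA backend tag: bold white " nv " on a bright green background.
static const char *tag = GREEN_BG_BOLD(WHITE_BOLD_S " nv ");

int main()
{
    std::printf("%s cuda backend ready\n", tag);   // ANSI escapes render on a VT-capable terminal
    return 0;
}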