xmrig (https://github.com/xmrig/xmrig.git)

commit 41abe17286 (parent a01b4d0566)

    #438 Fixed memory release.

4 changed files with 16 additions and 16 deletions
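The change matters for double-hash mode: Mem::allocate() sizes the scratchpad buffer as MONERO_MEMORY * (threads * ratio + 1), where ratio is 2 when doubleHash is set (except for cryptonight-lite), while the old Mem::release() recomputed the size as MONERO_MEMORY * (m_threads + 1). With double hash enabled, munlock() and munmap() were therefore given a smaller length than was actually mapped. The commit stores the allocated size in a new static member Mem::m_size and reuses it on release; the hunks below cover the static member definition, the class declaration, the POSIX allocate/release path, and the Windows allocate path. A minimal standalone sketch of the mismatch follows (illustration only, not xmrig code; MONERO_MEMORY is assumed here to be the 2 MiB CryptoNight scratchpad size):

// Sketch of the size mismatch this commit fixes (not xmrig code).
#include <cstddef>
#include <cstdio>

constexpr size_t MONERO_MEMORY = 2 * 1024 * 1024;   // assumption, see above

int main()
{
    const int  threads    = 4;
    const bool doubleHash = true;                    // cryptonight, not -lite
    const int  ratio      = doubleHash ? 2 : 1;

    // What allocate() maps (formula kept by the commit):
    const size_t allocated = MONERO_MEMORY * (threads * ratio + 1);   // 9 * 2 MiB

    // What the old release() passed to munlock()/munmap() (line removed below):
    const size_t freed     = MONERO_MEMORY * (threads + 1);           // 5 * 2 MiB

    std::printf("allocated %zu bytes, old release() only covered %zu bytes\n",
                allocated, freed);
    return 0;
}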
@@ -37,6 +37,7 @@ int Mem::m_algo = 0;
 int Mem::m_flags = 0;
 int Mem::m_threads = 0;
 size_t Mem::m_offset = 0;
+size_t Mem::m_size = 0;
 uint8_t *Mem::m_memory = nullptr;


@@ -62,6 +62,7 @@ private:
     static int m_flags;
     static int m_threads;
     static size_t m_offset;
+    static size_t m_size;
     VAR_ALIGN(16, static uint8_t *m_memory);

 # ifndef XMRIG_NO_AEON
@@ -47,8 +47,8 @@ bool Mem::allocate(int algo, int threads, bool doubleHash, bool enabled)
     m_threads = threads;
     m_doubleHash = doubleHash;

     const int ratio = (doubleHash && algo != xmrig::ALGO_CRYPTONIGHT_LITE) ? 2 : 1;
-    const size_t size = MONERO_MEMORY * (threads * ratio + 1);
+    m_size = MONERO_MEMORY * (threads * ratio + 1);

     if (!enabled) {
-        m_memory = static_cast<uint8_t*>(_mm_malloc(size, 16));
+        m_memory = static_cast<uint8_t*>(_mm_malloc(m_size, 16));
@@ -58,24 +58,24 @@ bool Mem::allocate(int algo, int threads, bool doubleHash, bool enabled)
     m_flags |= HugepagesAvailable;

 # if defined(__APPLE__)
-    m_memory = static_cast<uint8_t*>(mmap(0, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, VM_FLAGS_SUPERPAGE_SIZE_2MB, 0));
+    m_memory = static_cast<uint8_t*>(mmap(0, m_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, VM_FLAGS_SUPERPAGE_SIZE_2MB, 0));
 # elif defined(__FreeBSD__)
-    m_memory = static_cast<uint8_t*>(mmap(0, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_ALIGNED_SUPER | MAP_PREFAULT_READ, -1, 0));
+    m_memory = static_cast<uint8_t*>(mmap(0, m_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_ALIGNED_SUPER | MAP_PREFAULT_READ, -1, 0));
 # else
-    m_memory = static_cast<uint8_t*>(mmap(0, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB | MAP_POPULATE, 0, 0));
+    m_memory = static_cast<uint8_t*>(mmap(0, m_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB | MAP_POPULATE, 0, 0));
 # endif
     if (m_memory == MAP_FAILED) {
-        m_memory = static_cast<uint8_t*>(_mm_malloc(size, 16));
+        m_memory = static_cast<uint8_t*>(_mm_malloc(m_size, 16));
         return true;
     }

     m_flags |= HugepagesEnabled;

-    if (madvise(m_memory, size, MADV_RANDOM | MADV_WILLNEED) != 0) {
+    if (madvise(m_memory, m_size, MADV_RANDOM | MADV_WILLNEED) != 0) {
         LOG_ERR("madvise failed");
     }

-    if (mlock(m_memory, size) == 0) {
+    if (mlock(m_memory, m_size) == 0) {
         m_flags |= Lock;
     }

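On the Unix side, allocate() first tries a hugepage-backed mmap() (a 2 MB superpage on macOS, MAP_ALIGNED_SUPER on FreeBSD, MAP_HUGETLB elsewhere) and falls back to _mm_malloc() when the mapping fails, so the matching release must use the same length and the same mechanism. Below is a hedged, Linux-only sketch of that pattern using plain POSIX calls; the Scratchpad struct and function names are invented for illustration, and posix_memalign()/free() stand in for _mm_malloc()/_mm_free():

#include <sys/mman.h>
#include <cstdint>
#include <cstdio>
#include <cstdlib>

struct Scratchpad {
    uint8_t *ptr      = nullptr;
    size_t   size     = 0;
    bool     hugePage = false;   // remembers which allocator succeeded
};

// Try a hugepage-backed anonymous mapping first; fall back to 16-byte aligned
// ordinary pages when hugepages are unavailable.
static Scratchpad allocateScratchpad(size_t size)
{
    Scratchpad pad;
    pad.size = size;

    void *mem = mmap(nullptr, size, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB | MAP_POPULATE, -1, 0);
    if (mem != MAP_FAILED) {
        pad.ptr      = static_cast<uint8_t*>(mem);
        pad.hugePage = true;

        madvise(pad.ptr, size, MADV_WILLNEED);   // advisory, like the diff's madvise call
        mlock(pad.ptr, size);                    // best effort, mirrors the Lock flag
        return pad;
    }

    void *fallback = nullptr;
    if (posix_memalign(&fallback, 16, size) == 0) {
        pad.ptr = static_cast<uint8_t*>(fallback);
    }
    return pad;
}

// Release with the size that was actually mapped: the point of storing m_size.
static void releaseScratchpad(Scratchpad &pad)
{
    if (pad.hugePage) {
        munlock(pad.ptr, pad.size);
        munmap(pad.ptr, pad.size);
    }
    else {
        std::free(pad.ptr);
    }
    pad.ptr = nullptr;
}

int main()
{
    Scratchpad pad = allocateScratchpad(9 * 2 * 1024 * 1024);   // e.g. 4 threads, double hash
    std::printf("hugepages: %s\n", pad.hugePage ? "yes" : "no");
    releaseScratchpad(pad);
    return 0;
}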
@@ -85,14 +85,12 @@ bool Mem::allocate(int algo, int threads, bool doubleHash, bool enabled)

 void Mem::release()
 {
-    const int size = MONERO_MEMORY * (m_threads + 1);
-
     if (m_flags & HugepagesEnabled) {
         if (m_flags & Lock) {
-            munlock(m_memory, size);
+            munlock(m_memory, m_size);
         }

-        munmap(m_memory, size);
+        munmap(m_memory, m_size);
     }
     else {
         _mm_free(m_memory);

@@ -153,10 +153,10 @@ bool Mem::allocate(int algo, int threads, bool doubleHash, bool enabled)
     m_doubleHash = doubleHash;

     const int ratio = (doubleHash && algo != xmrig::ALGO_CRYPTONIGHT_LITE) ? 2 : 1;
-    const size_t size = MONERO_MEMORY * (threads * ratio + 1);
+    m_size = MONERO_MEMORY * (threads * ratio + 1);

     if (!enabled) {
-        m_memory = static_cast<uint8_t*>(_mm_malloc(size, 16));
+        m_memory = static_cast<uint8_t*>(_mm_malloc(m_size, 16));
         return true;
     }

@@ -164,9 +164,9 @@ bool Mem::allocate(int algo, int threads, bool doubleHash, bool enabled)
         m_flags |= HugepagesAvailable;
     }

-    m_memory = static_cast<uint8_t*>(VirtualAlloc(NULL, size, MEM_COMMIT | MEM_RESERVE | MEM_LARGE_PAGES, PAGE_READWRITE));
+    m_memory = static_cast<uint8_t*>(VirtualAlloc(NULL, m_size, MEM_COMMIT | MEM_RESERVE | MEM_LARGE_PAGES, PAGE_READWRITE));
     if (!m_memory) {
-        m_memory = static_cast<uint8_t*>(_mm_malloc(size, 16));
+        m_memory = static_cast<uint8_t*>(_mm_malloc(m_size, 16));
     }
     else {
         m_flags |= HugepagesEnabled;
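The Windows path has the same shape: VirtualAlloc() with MEM_LARGE_PAGES first (which only succeeds when the account holds the SeLockMemoryPrivilege), then _mm_malloc() as fallback, with the HugepagesEnabled flag recording which branch won. A matching release would then call VirtualFree() for the large-page mapping (size 0 with MEM_RELEASE) or _mm_free() for the fallback. A hedged Win32 sketch of that pairing, independent of the xmrig classes and with invented names:

#include <windows.h>
#include <malloc.h>      // _mm_malloc / _mm_free on MSVC
#include <xmmintrin.h>   // _mm_malloc / _mm_free on GCC/Clang
#include <cstdint>
#include <cstdio>

struct WinScratchpad {
    uint8_t *ptr        = nullptr;
    size_t   size       = 0;
    bool     largePages = false;
};

// Attempt a large-page commit first (needs SeLockMemoryPrivilege), otherwise
// fall back to a 16-byte aligned heap allocation, as the diff above does.
static WinScratchpad winAllocate(size_t size)
{
    WinScratchpad pad;
    pad.size = size;

    pad.ptr = static_cast<uint8_t*>(VirtualAlloc(nullptr, size,
                  MEM_COMMIT | MEM_RESERVE | MEM_LARGE_PAGES, PAGE_READWRITE));
    if (pad.ptr) {
        pad.largePages = true;
        return pad;
    }

    pad.ptr = static_cast<uint8_t*>(_mm_malloc(size, 16));
    return pad;
}

// Release through the allocator that was actually used.
static void winRelease(WinScratchpad &pad)
{
    if (pad.largePages) {
        VirtualFree(pad.ptr, 0, MEM_RELEASE);   // dwSize must be 0 with MEM_RELEASE
    }
    else {
        _mm_free(pad.ptr);
    }
    pad.ptr = nullptr;
}

int main()
{
    WinScratchpad pad = winAllocate(9 * 2 * 1024 * 1024);   // multiple of the 2 MB large-page size
    std::printf("large pages: %s\n", pad.largePages ? "yes" : "no");
    winRelease(pad);
    return 0;
}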