mirror of
https://github.com/monero-project/monero.git
synced 2024-12-23 12:09:54 +00:00
Merge pull request #5192
d0e07b3d
performance_tests: fix NetBSD build (moneromooo-monero)
7d88d8f2
discontinue use of alloca (moneromooo-monero)
This commit is contained in:
commit
c3de019f56
3 changed files with 6 additions and 16 deletions
|
@ -91,7 +91,7 @@ int spawn(const char *filename, const std::vector<std::string>& args, bool wait)
|
||||||
MINFO("Child exited with " << exitCode);
|
MINFO("Child exited with " << exitCode);
|
||||||
return static_cast<int>(exitCode);
|
return static_cast<int>(exitCode);
|
||||||
#else
|
#else
|
||||||
char **argv = (char**)alloca(sizeof(char*) * (args.size() + 1));
|
std::vector<char*> argv(args.size() + 1);
|
||||||
for (size_t n = 0; n < args.size(); ++n)
|
for (size_t n = 0; n < args.size(); ++n)
|
||||||
argv[n] = (char*)args[n].c_str();
|
argv[n] = (char*)args[n].c_str();
|
||||||
argv[args.size()] = NULL;
|
argv[args.size()] = NULL;
|
||||||
|
@ -109,7 +109,7 @@ int spawn(const char *filename, const std::vector<std::string>& args, bool wait)
|
||||||
tools::closefrom(3);
|
tools::closefrom(3);
|
||||||
close(0);
|
close(0);
|
||||||
char *envp[] = {NULL};
|
char *envp[] = {NULL};
|
||||||
execve(filename, argv, envp);
|
execve(filename, argv.data(), envp);
|
||||||
MERROR("Failed to execve: " << strerror(errno));
|
MERROR("Failed to execve: " << strerror(errno));
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
|
|
|
@ -34,15 +34,6 @@
|
||||||
|
|
||||||
#include "hash-ops.h"
|
#include "hash-ops.h"
|
||||||
|
|
||||||
#ifdef _MSC_VER
|
|
||||||
#include <malloc.h>
|
|
||||||
#elif !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__DragonFly__) \
|
|
||||||
&& !defined(__NetBSD__)
|
|
||||||
#include <alloca.h>
|
|
||||||
#else
|
|
||||||
#include <stdlib.h>
|
|
||||||
#endif
|
|
||||||
|
|
||||||
/***
|
/***
|
||||||
* Round to power of two, for count>=3 and for count being not too large (as reasonable for tree hash calculations)
|
* Round to power of two, for count>=3 and for count being not too large (as reasonable for tree hash calculations)
|
||||||
*/
|
*/
|
||||||
|
@ -91,9 +82,8 @@ void tree_hash(const char (*hashes)[HASH_SIZE], size_t count, char *root_hash) {
|
||||||
|
|
||||||
size_t cnt = tree_hash_cnt( count );
|
size_t cnt = tree_hash_cnt( count );
|
||||||
|
|
||||||
char (*ints)[HASH_SIZE];
|
char ints[cnt][HASH_SIZE];
|
||||||
size_t ints_size = cnt * HASH_SIZE;
|
memset(ints, 0 , sizeof(ints)); // zero out as extra protection for using uninitialized mem
|
||||||
ints = alloca(ints_size); memset( ints , 0 , ints_size); // allocate, and zero out as extra protection for using uninitialized mem
|
|
||||||
|
|
||||||
memcpy(ints, hashes, (2 * cnt - count) * HASH_SIZE);
|
memcpy(ints, hashes, (2 * cnt - count) * HASH_SIZE);
|
||||||
|
|
||||||
|
|
|
@ -40,7 +40,7 @@
|
||||||
|
|
||||||
void set_process_affinity(int core)
|
void set_process_affinity(int core)
|
||||||
{
|
{
|
||||||
#if defined (__APPLE__) || defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__DragonFly__) || defined(__sun)
|
#if defined (__APPLE__) || defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__DragonFly__) || defined(__NetBSD__) || defined(__sun)
|
||||||
return;
|
return;
|
||||||
#elif defined(BOOST_WINDOWS)
|
#elif defined(BOOST_WINDOWS)
|
||||||
DWORD_PTR mask = 1;
|
DWORD_PTR mask = 1;
|
||||||
|
@ -62,7 +62,7 @@ void set_process_affinity(int core)
|
||||||
|
|
||||||
void set_thread_high_priority()
|
void set_thread_high_priority()
|
||||||
{
|
{
|
||||||
#if defined(__APPLE__) || defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__DragonFly__) || defined(__sun)
|
#if defined(__APPLE__) || defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__DragonFly__) || defined(__NetBSD__) || defined(__sun)
|
||||||
return;
|
return;
|
||||||
#elif defined(BOOST_WINDOWS)
|
#elif defined(BOOST_WINDOWS)
|
||||||
::SetPriorityClass(::GetCurrentProcess(), HIGH_PRIORITY_CLASS);
|
::SetPriorityClass(::GetCurrentProcess(), HIGH_PRIORITY_CLASS);
|
||||||
|
|
Loading…
Reference in a new issue