2015-01-02 16:52:46 +00:00
|
|
|
// Copyright (c) 2014-2015, The Monero Project
|
2014-07-23 13:03:52 +00:00
|
|
|
//
|
|
|
|
// All rights reserved.
|
|
|
|
//
|
|
|
|
// Redistribution and use in source and binary forms, with or without modification, are
|
|
|
|
// permitted provided that the following conditions are met:
|
|
|
|
//
|
|
|
|
// 1. Redistributions of source code must retain the above copyright notice, this list of
|
|
|
|
// conditions and the following disclaimer.
|
|
|
|
//
|
|
|
|
// 2. Redistributions in binary form must reproduce the above copyright notice, this list
|
|
|
|
// of conditions and the following disclaimer in the documentation and/or other
|
|
|
|
// materials provided with the distribution.
|
|
|
|
//
|
|
|
|
// 3. Neither the name of the copyright holder nor the names of its contributors may be
|
|
|
|
// used to endorse or promote products derived from this software without specific
|
|
|
|
// prior written permission.
|
|
|
|
//
|
|
|
|
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
|
|
|
|
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
|
|
|
|
// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
|
|
|
|
// THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|
|
|
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
|
|
|
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
|
|
|
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
|
|
|
|
// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
|
|
|
|
// THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
|
|
//
|
|
|
|
// Parts of this file are originally copyright (c) 2012-2013 The Cryptonote developers
|
2014-03-03 22:07:58 +00:00
|
|
|
|
|
|
|
#include <assert.h>
|
|
|
|
#include <stddef.h>
|
|
|
|
#include <stdint.h>
|
|
|
|
#include <string.h>
|
|
|
|
|
|
|
|
#include "common/int-util.h"
|
|
|
|
#include "hash-ops.h"
|
|
|
|
#include "oaes_lib.h"
|
|
|
|
|
2015-04-13 23:38:33 +00:00
|
|
|
#if defined(__x86_64__)
|
2015-01-27 07:43:15 +00:00
|
|
|
// Optimised code below, uses x86-specific intrinsics, SSE2, AES-NI
|
|
|
|
// Fall back to more portable code is down at the bottom
|
|
|
|
|
2014-05-21 16:29:34 +00:00
|
|
|
#include <emmintrin.h>
|
|
|
|
|
2014-06-16 10:35:09 +00:00
|
|
|
#if defined(_MSC_VER)
|
2014-05-21 16:29:34 +00:00
|
|
|
#include <intrin.h>
|
2014-08-06 17:06:03 +00:00
|
|
|
#include <windows.h>
|
2014-05-21 16:29:34 +00:00
|
|
|
#define STATIC
|
|
|
|
#define INLINE __inline
|
|
|
|
#if !defined(RDATA_ALIGN16)
|
|
|
|
#define RDATA_ALIGN16 __declspec(align(16))
|
|
|
|
#endif
|
2014-08-06 17:07:36 +00:00
|
|
|
#elif defined(__MINGW32__)
|
|
|
|
#include <intrin.h>
|
|
|
|
#include <windows.h>
|
|
|
|
#define STATIC static
|
|
|
|
#define INLINE inline
|
|
|
|
#if !defined(RDATA_ALIGN16)
|
|
|
|
#define RDATA_ALIGN16 __attribute__ ((aligned(16)))
|
|
|
|
#endif
|
2014-05-21 16:29:34 +00:00
|
|
|
#else
|
|
|
|
#include <wmmintrin.h>
|
2014-06-16 10:35:09 +00:00
|
|
|
#include <sys/mman.h>
|
2014-05-21 16:29:34 +00:00
|
|
|
#define STATIC static
|
|
|
|
#define INLINE inline
|
|
|
|
#if !defined(RDATA_ALIGN16)
|
|
|
|
#define RDATA_ALIGN16 __attribute__ ((aligned(16)))
|
|
|
|
#endif
|
|
|
|
#endif
|
2014-03-03 22:07:58 +00:00
|
|
|
|
2014-06-16 10:35:09 +00:00
|
|
|
#if defined(__INTEL_COMPILER)
|
|
|
|
#define ASM __asm__
|
|
|
|
#elif !defined(_MSC_VER)
|
|
|
|
#define ASM __asm__
|
|
|
|
#else
|
|
|
|
#define ASM __asm
|
|
|
|
#endif
|
|
|
|
|
2014-05-21 16:29:34 +00:00
|
|
|
#define MEMORY (1 << 21) // 2MB scratchpad
|
2014-03-03 22:07:58 +00:00
|
|
|
#define ITER (1 << 20)
|
|
|
|
#define AES_BLOCK_SIZE 16
|
2014-05-21 16:29:34 +00:00
|
|
|
#define AES_KEY_SIZE 32
|
2014-03-03 22:07:58 +00:00
|
|
|
#define INIT_SIZE_BLK 8
|
|
|
|
#define INIT_SIZE_BYTE (INIT_SIZE_BLK * AES_BLOCK_SIZE)
|
2014-06-16 10:35:09 +00:00
|
|
|
#define TOTALBLOCKS (MEMORY / AES_BLOCK_SIZE)
|
2014-03-03 22:07:58 +00:00
|
|
|
|
2014-05-21 16:29:34 +00:00
|
|
|
#define U64(x) ((uint64_t *) (x))
|
|
|
|
#define R128(x) ((__m128i *) (x))
|
2014-06-16 10:35:09 +00:00
|
|
|
|
|
|
|
#define state_index(x) (((*((uint64_t *)x) >> 4) & (TOTALBLOCKS - 1)) << 4)
|
|
|
|
#if defined(_MSC_VER)
|
2014-06-17 04:13:05 +00:00
|
|
|
#if !defined(_WIN64)
|
|
|
|
#define __mul() lo = mul128(c[0], b[0], &hi);
|
|
|
|
#else
|
2014-06-16 10:35:09 +00:00
|
|
|
#define __mul() lo = _umul128(c[0], b[0], &hi);
|
2014-06-17 04:13:05 +00:00
|
|
|
#endif
|
2014-06-16 10:35:09 +00:00
|
|
|
#else
|
2014-06-17 04:13:05 +00:00
|
|
|
#if defined(__x86_64__)
|
2014-06-16 10:35:09 +00:00
|
|
|
#define __mul() ASM("mulq %3\n\t" : "=d"(hi), "=a"(lo) : "%a" (c[0]), "rm" (b[0]) : "cc");
|
2014-06-17 04:13:05 +00:00
|
|
|
#else
|
|
|
|
#define __mul() lo = mul128(c[0], b[0], &hi);
|
|
|
|
#endif
|
2014-06-16 10:35:09 +00:00
|
|
|
#endif
|
|
|
|
|
|
|
|
#define pre_aes() \
|
|
|
|
j = state_index(a); \
|
|
|
|
_c = _mm_load_si128(R128(&hp_state[j])); \
|
|
|
|
_a = _mm_load_si128(R128(a)); \
|
2014-10-05 17:47:13 +00:00
|
|
|
|
|
|
|
/*
|
2014-10-06 21:42:58 +00:00
|
|
|
* An SSE-optimized implementation of the second half of CryptoNight step 3.
|
2014-10-05 17:47:13 +00:00
|
|
|
* After using AES to mix a scratchpad value into _c (done by the caller),
|
|
|
|
* this macro xors it with _b and stores the result back to the same index (j) that it
|
|
|
|
* loaded the scratchpad value from. It then performs a second random memory
|
|
|
|
* read/write from the scratchpad, but this time mixes the values using a 64
|
|
|
|
* bit multiply.
|
|
|
|
* This code is based upon an optimized implementation by dga.
|
|
|
|
*/
|
2014-06-16 10:35:09 +00:00
|
|
|
#define post_aes() \
|
|
|
|
_mm_store_si128(R128(c), _c); \
|
|
|
|
_b = _mm_xor_si128(_b, _c); \
|
|
|
|
_mm_store_si128(R128(&hp_state[j]), _b); \
|
|
|
|
j = state_index(c); \
|
|
|
|
p = U64(&hp_state[j]); \
|
|
|
|
b[0] = p[0]; b[1] = p[1]; \
|
|
|
|
__mul(); \
|
|
|
|
a[0] += hi; a[1] += lo; \
|
|
|
|
p = U64(&hp_state[j]); \
|
|
|
|
p[0] = a[0]; p[1] = a[1]; \
|
|
|
|
a[0] ^= b[0]; a[1] ^= b[1]; \
|
|
|
|
_b = _c; \
|
|
|
|
|
|
|
|
#if defined(_MSC_VER)
|
|
|
|
#define THREADV __declspec(thread)
|
|
|
|
#else
|
|
|
|
#define THREADV __thread
|
|
|
|
#endif
|
2014-05-21 16:29:34 +00:00
|
|
|
|
2014-05-17 23:25:00 +00:00
|
|
|
extern int aesb_single_round(const uint8_t *in, uint8_t*out, const uint8_t *expandedKey);
|
|
|
|
extern int aesb_pseudo_round(const uint8_t *in, uint8_t *out, const uint8_t *expandedKey);
|
|
|
|
|
2014-05-21 16:29:34 +00:00
|
|
|
#pragma pack(push, 1)
|
|
|
|
union cn_slow_hash_state
|
|
|
|
{
|
|
|
|
union hash_state hs;
|
|
|
|
struct
|
|
|
|
{
|
|
|
|
uint8_t k[64];
|
|
|
|
uint8_t init[INIT_SIZE_BYTE];
|
|
|
|
};
|
|
|
|
};
|
|
|
|
#pragma pack(pop)
|
|
|
|
|
2014-06-16 10:35:09 +00:00
|
|
|
THREADV uint8_t *hp_state = NULL;
|
|
|
|
THREADV int hp_allocated = 0;
|
|
|
|
|
|
|
|
#if defined(_MSC_VER)
|
2014-05-21 16:29:34 +00:00
|
|
|
#define cpuid(info,x) __cpuidex(info,x,0)
|
|
|
|
#else
|
|
|
|
void cpuid(int CPUInfo[4], int InfoType)
|
|
|
|
{
|
2014-06-16 10:35:09 +00:00
|
|
|
ASM __volatile__
|
2014-05-21 16:29:34 +00:00
|
|
|
(
|
2014-06-16 10:35:09 +00:00
|
|
|
"cpuid":
|
2014-05-21 16:29:34 +00:00
|
|
|
"=a" (CPUInfo[0]),
|
|
|
|
"=b" (CPUInfo[1]),
|
|
|
|
"=c" (CPUInfo[2]),
|
|
|
|
"=d" (CPUInfo[3]) :
|
2014-06-16 10:35:09 +00:00
|
|
|
"a" (InfoType), "c" (0)
|
|
|
|
);
|
2014-05-21 16:29:34 +00:00
|
|
|
}
|
|
|
|
#endif
|
2014-03-03 22:07:58 +00:00
|
|
|
|
2014-10-05 17:47:13 +00:00
|
|
|
/**
|
|
|
|
* @brief a = (a xor b), where a and b point to 128 bit values
|
|
|
|
*/
|
|
|
|
|
2014-05-21 16:29:34 +00:00
|
|
|
/**
 * @brief a = (a xor b), where a and b point to 128 bit values
 */
STATIC INLINE void xor_blocks(uint8_t *a, const uint8_t *b)
{
    uint64_t *dst = U64(a);
    const uint64_t *src = U64(b);

    dst[0] ^= src[0];
    dst[1] ^= src[1];
}
|
|
|
|
|
2014-10-05 16:33:39 +00:00
|
|
|
/**
|
|
|
|
* @brief uses cpuid to determine if the CPU supports the AES instructions
|
|
|
|
* @return true if the CPU supports AES, false otherwise
|
|
|
|
*/
|
|
|
|
|
2014-05-21 16:29:34 +00:00
|
|
|
/**
 * @brief uses cpuid to determine if the CPU supports the AES instructions
 * @return nonzero if the CPU supports AES-NI, zero otherwise
 */
STATIC INLINE int check_aes_hw(void)
{
    static int supported = -1;  // cached across calls; -1 = not yet probed

    if(supported < 0)
    {
        int cpuid_results[4];
        cpuid(cpuid_results, 1);
        supported = cpuid_results[2] & (1 << 25);  // leaf 1, ECX bit 25 = AES-NI
    }
    return supported;
}
|
|
|
|
|
2014-06-16 10:35:09 +00:00
|
|
|
/**
 * @brief helper for AES-256 key expansion: folds the keygen-assist word *t2
 *        into the accumulating round key *t1
 *
 * Equivalent to the textbook unrolled sequence: three successive 4-byte
 * shifts of the running value, each xored into *t1, then xor with the
 * broadcast assist word.
 */
STATIC INLINE void aes_256_assist1(__m128i* t1, __m128i * t2)
{
    __m128i shifted;
    int round;

    *t2 = _mm_shuffle_epi32(*t2, 0xff);
    shifted = *t1;
    for(round = 0; round < 3; round++)
    {
        shifted = _mm_slli_si128(shifted, 0x04);
        *t1 = _mm_xor_si128(*t1, shifted);
    }
    *t1 = _mm_xor_si128(*t1, *t2);
}
|
|
|
|
|
|
|
|
/**
 * @brief helper for AES-256 key expansion: derives the odd-numbered round
 *        key *t3 from the even-numbered key *t1
 *
 * Uses AESKEYGENASSIST (rcon 0) on *t1, broadcasts the relevant word, then
 * applies the same three shift-and-xor folds as aes_256_assist1 to *t3.
 */
STATIC INLINE void aes_256_assist2(__m128i* t1, __m128i * t3)
{
    __m128i keyword, shifted;
    int round;

    shifted = _mm_aeskeygenassist_si128(*t1, 0x00);
    keyword = _mm_shuffle_epi32(shifted, 0xaa);
    shifted = *t3;
    for(round = 0; round < 3; round++)
    {
        shifted = _mm_slli_si128(shifted, 0x04);
        *t3 = _mm_xor_si128(*t3, shifted);
    }
    *t3 = _mm_xor_si128(*t3, keyword);
}
|
|
|
|
|
2014-10-05 16:33:39 +00:00
|
|
|
/**
|
|
|
|
* @brief expands 'key' into a form it can be used for AES encryption.
|
|
|
|
*
|
|
|
|
* This is an SSE-optimized implementation of AES key schedule generation. It
|
|
|
|
* expands the key into multiple round keys, each of which is used in one round
|
|
|
|
* of the AES encryption used to fill (and later, extract randomness from)
|
|
|
|
* the large 2MB buffer. Note that CryptoNight does not use a completely
|
|
|
|
* standard AES encryption for its buffer expansion, so do not copy this
|
2014-10-05 17:47:13 +00:00
|
|
|
* function outside of Monero without caution! This version uses the hardware
|
|
|
|
* AESKEYGENASSIST instruction to speed key generation, and thus requires
|
|
|
|
* CPU AES support.
|
|
|
|
* For more information about these functions, see page 19 of Intel's AES instructions
|
|
|
|
* white paper:
|
|
|
|
* http://www.intel.com/content/dam/www/public/us/en/documents/white-papers/aes-instructions-set-white-paper.pdf
|
2014-10-05 16:33:39 +00:00
|
|
|
*
|
|
|
|
* @param key the input 128 bit key
|
|
|
|
* @param expandedKey An output buffer to hold the generated key schedule
|
|
|
|
*/
|
|
|
|
|
2014-06-16 10:35:09 +00:00
|
|
|
STATIC INLINE void aes_expand_key(const uint8_t *key, uint8_t *expandedKey)
{
    __m128i *ek = R128(expandedKey);
    __m128i t1, t2, t3;

    t1 = _mm_loadu_si128(R128(key));
    t3 = _mm_loadu_si128(R128(key + 16));

    ek[0] = t1;
    ek[1] = t3;

    /* Each expansion round produces two more round keys. AESKEYGENASSIST
     * requires a compile-time immediate rcon, so the repetition is expressed
     * as a local macro rather than a runtime loop. */
#define EXPAND_ROUND(rcon, idx)                       \
    t2 = _mm_aeskeygenassist_si128(t3, rcon);         \
    aes_256_assist1(&t1, &t2);                        \
    ek[idx] = t1;                                     \
    aes_256_assist2(&t1, &t3);                        \
    ek[(idx) + 1] = t3;

    EXPAND_ROUND(0x01, 2)
    EXPAND_ROUND(0x02, 4)
    EXPAND_ROUND(0x04, 6)
    EXPAND_ROUND(0x08, 8)
#undef EXPAND_ROUND

    /* Final round key: only the assist1 half is needed. */
    t2 = _mm_aeskeygenassist_si128(t3, 0x10);
    aes_256_assist1(&t1, &t2);
    ek[10] = t1;
}
|
|
|
|
|
2014-10-06 14:15:21 +00:00
|
|
|
/**
|
2014-10-06 00:32:49 +00:00
|
|
|
* @brief a "pseudo" round of AES (similar to but slightly different from normal AES encryption)
|
2014-10-05 16:33:39 +00:00
|
|
|
*
|
|
|
|
* To fill its 2MB scratch buffer, CryptoNight uses a nonstandard implementation
|
|
|
|
* of AES encryption: It applies 10 rounds of the basic AES encryption operation
|
|
|
|
* to an input 128 bit chunk of data <in>. Unlike normal AES, however, this is
|
|
|
|
* all it does; it does not perform the initial AddRoundKey step (this is done
|
|
|
|
* in subsequent steps by aesenc_si128), and it does not use the simpler final round.
|
|
|
|
* Hence, this is a "pseudo" round - though the function actually implements 10 rounds together.
|
|
|
|
*
|
2014-10-05 17:47:13 +00:00
|
|
|
* Note that unlike aesb_pseudo_round, this function works on multiple data chunks.
|
|
|
|
*
|
2014-10-05 16:33:39 +00:00
|
|
|
* @param in a pointer to nblocks * 128 bits of data to be encrypted
|
|
|
|
* @param out a pointer to an nblocks * 128 bit buffer where the output will be stored
|
|
|
|
* @param expandedKey the expanded AES key
|
|
|
|
* @param nblocks the number of 128 blocks of data to be encrypted
|
|
|
|
*/
|
|
|
|
|
2014-06-16 10:35:09 +00:00
|
|
|
STATIC INLINE void aes_pseudo_round(const uint8_t *in, uint8_t *out,
|
|
|
|
const uint8_t *expandedKey, int nblocks)
|
|
|
|
{
|
|
|
|
__m128i *k = R128(expandedKey);
|
|
|
|
__m128i d;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
for(i = 0; i < nblocks; i++)
|
|
|
|
{
|
|
|
|
d = _mm_loadu_si128(R128(in + i * AES_BLOCK_SIZE));
|
|
|
|
d = _mm_aesenc_si128(d, *R128(&k[0]));
|
|
|
|
d = _mm_aesenc_si128(d, *R128(&k[1]));
|
|
|
|
d = _mm_aesenc_si128(d, *R128(&k[2]));
|
|
|
|
d = _mm_aesenc_si128(d, *R128(&k[3]));
|
|
|
|
d = _mm_aesenc_si128(d, *R128(&k[4]));
|
|
|
|
d = _mm_aesenc_si128(d, *R128(&k[5]));
|
|
|
|
d = _mm_aesenc_si128(d, *R128(&k[6]));
|
|
|
|
d = _mm_aesenc_si128(d, *R128(&k[7]));
|
|
|
|
d = _mm_aesenc_si128(d, *R128(&k[8]));
|
|
|
|
d = _mm_aesenc_si128(d, *R128(&k[9]));
|
|
|
|
_mm_storeu_si128((R128(out + i * AES_BLOCK_SIZE)), d);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-10-06 14:15:21 +00:00
|
|
|
/**
|
2014-10-05 17:47:13 +00:00
|
|
|
* @brief aes_pseudo_round that loads data from *in and xors it with *xor first
|
|
|
|
*
|
|
|
|
* This function performs the same operations as aes_pseudo_round, but before
|
|
|
|
* performing the encryption of each 128 bit block from <in>, it xors
|
|
|
|
* it with the corresponding block from <xor>.
|
|
|
|
*
|
|
|
|
* @param in a pointer to nblocks * 128 bits of data to be encrypted
|
|
|
|
* @param out a pointer to an nblocks * 128 bit buffer where the output will be stored
|
|
|
|
* @param expandedKey the expanded AES key
|
|
|
|
* @param xor a pointer to an nblocks * 128 bit buffer that is xored into in before encryption (in is left unmodified)
|
|
|
|
* @param nblocks the number of 128 blocks of data to be encrypted
|
|
|
|
*/
|
|
|
|
|
2014-06-16 10:35:09 +00:00
|
|
|
STATIC INLINE void aes_pseudo_round_xor(const uint8_t *in, uint8_t *out,
|
|
|
|
const uint8_t *expandedKey, const uint8_t *xor, int nblocks)
|
2014-05-21 16:29:34 +00:00
|
|
|
{
|
|
|
|
__m128i *k = R128(expandedKey);
|
2014-06-16 10:35:09 +00:00
|
|
|
__m128i *x = R128(xor);
|
2014-05-21 16:29:34 +00:00
|
|
|
__m128i d;
|
2014-06-16 10:35:09 +00:00
|
|
|
int i;
|
|
|
|
|
|
|
|
for(i = 0; i < nblocks; i++)
|
|
|
|
{
|
|
|
|
d = _mm_loadu_si128(R128(in + i * AES_BLOCK_SIZE));
|
|
|
|
d = _mm_xor_si128(d, *R128(x++));
|
|
|
|
d = _mm_aesenc_si128(d, *R128(&k[0]));
|
|
|
|
d = _mm_aesenc_si128(d, *R128(&k[1]));
|
|
|
|
d = _mm_aesenc_si128(d, *R128(&k[2]));
|
|
|
|
d = _mm_aesenc_si128(d, *R128(&k[3]));
|
|
|
|
d = _mm_aesenc_si128(d, *R128(&k[4]));
|
|
|
|
d = _mm_aesenc_si128(d, *R128(&k[5]));
|
|
|
|
d = _mm_aesenc_si128(d, *R128(&k[6]));
|
|
|
|
d = _mm_aesenc_si128(d, *R128(&k[7]));
|
|
|
|
d = _mm_aesenc_si128(d, *R128(&k[8]));
|
|
|
|
d = _mm_aesenc_si128(d, *R128(&k[9]));
|
|
|
|
_mm_storeu_si128((R128(out + i * AES_BLOCK_SIZE)), d);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-08-06 17:08:15 +00:00
|
|
|
#if defined(_MSC_VER) || defined(__MINGW32__)
|
2014-06-16 10:35:09 +00:00
|
|
|
/**
 * @brief enables (or disables) the SeLockMemoryPrivilege for a process,
 *        which is required for VirtualAlloc with MEM_LARGE_PAGES
 *
 * @param hProcess handle of the process whose token is adjusted
 * @param bEnable  TRUE to enable the privilege, FALSE to disable it
 * @return TRUE on success, FALSE on any failure
 */
BOOL SetLockPagesPrivilege(HANDLE hProcess, BOOL bEnable)
{
    struct
    {
        DWORD count;
        LUID_AND_ATTRIBUTES privilege[1];
    } info;

    HANDLE token;
    BOOL result = FALSE;

    if(!OpenProcessToken(hProcess, TOKEN_ADJUST_PRIVILEGES, &token))
        return FALSE;

    info.count = 1;
    info.privilege[0].Attributes = bEnable ? SE_PRIVILEGE_ENABLED : 0;

    /* AdjustTokenPrivileges can return TRUE yet set ERROR_NOT_ALL_ASSIGNED,
     * so GetLastError must also be checked (before any further API call). */
    if(LookupPrivilegeValue(NULL, SE_LOCK_MEMORY_NAME, &(info.privilege[0].Luid)) &&
       AdjustTokenPrivileges(token, FALSE, (PTOKEN_PRIVILEGES) &info, 0, NULL, NULL) &&
       GetLastError() == ERROR_SUCCESS)
    {
        result = TRUE;
    }

    /* BUGFIX: the original returned early on failure without closing the
     * token, leaking the handle. Close it on every path. */
    CloseHandle(token);

    return result;
}
|
|
|
|
#endif
|
|
|
|
|
2014-10-05 17:47:13 +00:00
|
|
|
/**
|
|
|
|
* @brief allocate the 2MB scratch buffer using OS support for huge pages, if available
|
|
|
|
*
|
|
|
|
* This function tries to allocate the 2MB scratch buffer using a single
|
|
|
|
* 2MB "huge page" (instead of the usual 4KB page sizes) to reduce TLB misses
|
|
|
|
* during the random accesses to the scratch buffer. This is one of the
|
|
|
|
* important speed optimizations needed to make CryptoNight faster.
|
|
|
|
*
|
|
|
|
* No parameters. Updates a thread-local pointer, hp_state, to point to
|
|
|
|
* the allocated buffer.
|
|
|
|
*/
|
|
|
|
|
2014-06-16 10:35:09 +00:00
|
|
|
void slow_hash_allocate_state(void)
|
|
|
|
{
|
|
|
|
if(hp_state != NULL)
|
|
|
|
return;
|
|
|
|
|
2014-08-06 17:08:15 +00:00
|
|
|
#if defined(_MSC_VER) || defined(__MINGW32__)
|
2014-06-16 10:35:09 +00:00
|
|
|
SetLockPagesPrivilege(GetCurrentProcess(), TRUE);
|
|
|
|
hp_state = (uint8_t *) VirtualAlloc(hp_state, MEMORY, MEM_LARGE_PAGES |
|
|
|
|
MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
|
2014-06-20 06:49:25 +00:00
|
|
|
#else
|
2014-09-10 12:01:39 +00:00
|
|
|
#if defined(__APPLE__) || defined(__FreeBSD__)
|
2014-06-20 06:49:25 +00:00
|
|
|
hp_state = mmap(0, MEMORY, PROT_READ | PROT_WRITE,
|
|
|
|
MAP_PRIVATE | MAP_ANON, 0, 0);
|
2014-06-16 10:35:09 +00:00
|
|
|
#else
|
|
|
|
hp_state = mmap(0, MEMORY, PROT_READ | PROT_WRITE,
|
|
|
|
MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, 0, 0);
|
2014-06-20 06:49:25 +00:00
|
|
|
#endif
|
2014-06-16 10:35:09 +00:00
|
|
|
if(hp_state == MAP_FAILED)
|
|
|
|
hp_state = NULL;
|
|
|
|
#endif
|
|
|
|
hp_allocated = 1;
|
|
|
|
if(hp_state == NULL)
|
|
|
|
{
|
|
|
|
hp_allocated = 0;
|
|
|
|
hp_state = (uint8_t *) malloc(MEMORY);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-10-05 17:47:13 +00:00
|
|
|
/**
|
|
|
|
*@brief frees the state allocated by slow_hash_allocate_state
|
|
|
|
*/
|
|
|
|
|
2014-06-16 10:35:09 +00:00
|
|
|
void slow_hash_free_state(void)
|
|
|
|
{
|
|
|
|
if(hp_state == NULL)
|
|
|
|
return;
|
|
|
|
|
|
|
|
if(!hp_allocated)
|
|
|
|
free(hp_state);
|
|
|
|
else
|
|
|
|
{
|
2014-08-06 17:08:15 +00:00
|
|
|
#if defined(_MSC_VER) || defined(__MINGW32__)
|
2014-06-16 10:35:09 +00:00
|
|
|
VirtualFree(hp_state, MEMORY, MEM_RELEASE);
|
|
|
|
#else
|
|
|
|
munmap(hp_state, MEMORY);
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
|
|
|
hp_state = NULL;
|
|
|
|
hp_allocated = 0;
|
2014-05-21 16:29:34 +00:00
|
|
|
}
|
|
|
|
|
2014-10-05 16:33:39 +00:00
|
|
|
/**
|
|
|
|
* @brief the hash function implementing CryptoNight, used for the Monero proof-of-work
|
|
|
|
*
|
|
|
|
* Computes the hash of <data> (which consists of <length> bytes), returning the
|
|
|
|
* hash in <hash>. The CryptoNight hash operates by first using Keccak 1600,
|
|
|
|
* the 1600 bit variant of the Keccak hash used in SHA-3, to create a 200 byte
|
|
|
|
* buffer of pseudorandom data by hashing the supplied data. It then uses this
|
|
|
|
* random data to fill a large 2MB buffer with pseudorandom data by iteratively
|
|
|
|
* encrypting it using 10 rounds of AES per entry. After this initialization,
|
|
|
|
* it executes 500,000 rounds of mixing through the random 2MB buffer using
|
|
|
|
* AES (typically provided in hardware on modern CPUs) and a 64 bit multiply.
|
|
|
|
* Finally, it re-mixes this large buffer back into
|
|
|
|
* the 200 byte "text" buffer, and then hashes this buffer using one of four
|
|
|
|
* pseudorandomly selected hash functions (Blake, Groestl, JH, or Skein)
|
|
|
|
* to populate the output.
|
|
|
|
*
|
|
|
|
* The 2MB buffer and choice of functions for mixing are designed to make the
|
|
|
|
* algorithm "CPU-friendly" (and thus, reduce the advantage of GPU, FPGA,
|
|
|
|
* or ASIC-based implementations): the functions used are fast on modern
|
|
|
|
* CPUs, and the 2MB size matches the typical amount of L3 cache available per
|
|
|
|
* core on 2013-era CPUs. When available, this implementation will use hardware
|
|
|
|
* AES support on x86 CPUs.
|
|
|
|
*
|
2014-10-05 17:47:13 +00:00
|
|
|
* A diagram of the inner loop of this function can be found at
|
|
|
|
* http://www.cs.cmu.edu/~dga/crypto/xmr/cryptonight.png
|
|
|
|
*
|
2014-10-05 16:33:39 +00:00
|
|
|
* @param data the data to hash
|
|
|
|
* @param length the length in bytes of the data
|
2014-10-05 17:47:13 +00:00
|
|
|
* @param hash a pointer to a buffer in which the final 256 bit hash will be stored
|
2014-10-05 16:33:39 +00:00
|
|
|
*/
|
|
|
|
|
2014-05-21 16:29:34 +00:00
|
|
|
void cn_slow_hash(const void *data, size_t length, char *hash)
|
|
|
|
{
|
2014-10-05 16:33:39 +00:00
|
|
|
RDATA_ALIGN16 uint8_t expandedKey[240]; /* These buffers are aligned to use later with SSE functions */
|
2014-05-21 16:29:34 +00:00
|
|
|
|
2014-06-16 10:35:09 +00:00
|
|
|
uint8_t text[INIT_SIZE_BYTE];
|
|
|
|
RDATA_ALIGN16 uint64_t a[2];
|
|
|
|
RDATA_ALIGN16 uint64_t b[2];
|
|
|
|
RDATA_ALIGN16 uint64_t c[2];
|
2014-05-21 16:29:34 +00:00
|
|
|
union cn_slow_hash_state state;
|
2014-06-16 10:35:09 +00:00
|
|
|
__m128i _a, _b, _c;
|
|
|
|
uint64_t hi, lo;
|
2014-03-03 22:07:58 +00:00
|
|
|
|
2014-05-21 16:29:34 +00:00
|
|
|
size_t i, j;
|
2014-06-16 10:35:09 +00:00
|
|
|
uint64_t *p = NULL;
|
2014-05-21 16:29:34 +00:00
|
|
|
oaes_ctx *aes_ctx;
|
|
|
|
int useAes = check_aes_hw();
|
2014-06-16 10:35:09 +00:00
|
|
|
|
2014-05-21 16:29:34 +00:00
|
|
|
static void (*const extra_hashes[4])(const void *, size_t, char *) =
|
|
|
|
{
|
|
|
|
hash_extra_blake, hash_extra_groestl, hash_extra_jh, hash_extra_skein
|
|
|
|
};
|
|
|
|
|
2014-06-16 10:35:09 +00:00
|
|
|
// this isn't supposed to happen, but guard against it for now.
|
|
|
|
if(hp_state == NULL)
|
|
|
|
slow_hash_allocate_state();
|
|
|
|
|
2014-10-05 16:33:39 +00:00
|
|
|
/* CryptoNight Step 1: Use Keccak1600 to initialize the 'state' (and 'text') buffers from the data. */
|
|
|
|
|
2014-05-21 16:29:34 +00:00
|
|
|
hash_process(&state.hs, data, length);
|
|
|
|
memcpy(text, state.init, INIT_SIZE_BYTE);
|
|
|
|
|
2014-10-06 21:44:23 +00:00
|
|
|
/* CryptoNight Step 2: Iteratively encrypt the results from Keccak to fill
|
2014-10-05 16:33:39 +00:00
|
|
|
* the 2MB large random access buffer.
|
|
|
|
*/
|
|
|
|
|
2014-05-21 16:29:34 +00:00
|
|
|
if(useAes)
|
|
|
|
{
|
2014-06-16 10:35:09 +00:00
|
|
|
aes_expand_key(state.hs.b, expandedKey);
|
2014-05-21 16:29:34 +00:00
|
|
|
for(i = 0; i < MEMORY / INIT_SIZE_BYTE; i++)
|
|
|
|
{
|
2014-06-16 10:35:09 +00:00
|
|
|
aes_pseudo_round(text, text, expandedKey, INIT_SIZE_BLK);
|
|
|
|
memcpy(&hp_state[i * INIT_SIZE_BYTE], text, INIT_SIZE_BYTE);
|
2014-05-21 16:29:34 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
2014-06-16 10:35:09 +00:00
|
|
|
aes_ctx = (oaes_ctx *) oaes_alloc();
|
|
|
|
oaes_key_import_data(aes_ctx, state.hs.b, AES_KEY_SIZE);
|
2014-05-21 16:29:34 +00:00
|
|
|
for(i = 0; i < MEMORY / INIT_SIZE_BYTE; i++)
|
|
|
|
{
|
|
|
|
for(j = 0; j < INIT_SIZE_BLK; j++)
|
2014-06-16 10:35:09 +00:00
|
|
|
aesb_pseudo_round(&text[AES_BLOCK_SIZE * j], &text[AES_BLOCK_SIZE * j], aes_ctx->key->exp_data);
|
2014-05-21 16:29:34 +00:00
|
|
|
|
2014-06-16 10:35:09 +00:00
|
|
|
memcpy(&hp_state[i * INIT_SIZE_BYTE], text, INIT_SIZE_BYTE);
|
2014-05-21 16:29:34 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
U64(a)[0] = U64(&state.k[0])[0] ^ U64(&state.k[32])[0];
|
|
|
|
U64(a)[1] = U64(&state.k[0])[1] ^ U64(&state.k[32])[1];
|
|
|
|
U64(b)[0] = U64(&state.k[16])[0] ^ U64(&state.k[48])[0];
|
|
|
|
U64(b)[1] = U64(&state.k[16])[1] ^ U64(&state.k[48])[1];
|
|
|
|
|
2014-10-05 16:33:39 +00:00
|
|
|
/* CryptoNight Step 3: Bounce randomly 1 million times through the mixing buffer,
|
|
|
|
* using 500,000 iterations of the following mixing function. Each execution
|
|
|
|
* performs two reads and writes from the mixing buffer.
|
|
|
|
*/
|
|
|
|
|
2014-06-16 10:35:09 +00:00
|
|
|
_b = _mm_load_si128(R128(b));
|
2014-10-05 18:14:59 +00:00
|
|
|
// Two independent versions, one with AES, one without, to ensure that
|
2014-10-05 17:47:13 +00:00
|
|
|
// the useAes test is only performed once, not every iteration.
|
2014-06-16 10:35:09 +00:00
|
|
|
if(useAes)
|
2014-05-21 16:29:34 +00:00
|
|
|
{
|
2014-06-16 10:35:09 +00:00
|
|
|
for(i = 0; i < ITER / 2; i++)
|
|
|
|
{
|
|
|
|
pre_aes();
|
|
|
|
_c = _mm_aesenc_si128(_c, _a);
|
|
|
|
post_aes();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
for(i = 0; i < ITER / 2; i++)
|
|
|
|
{
|
|
|
|
pre_aes();
|
|
|
|
aesb_single_round((uint8_t *) &_c, (uint8_t *) &_c, (uint8_t *) &_a);
|
|
|
|
post_aes();
|
|
|
|
}
|
2014-05-21 16:29:34 +00:00
|
|
|
}
|
|
|
|
|
2014-10-05 16:33:39 +00:00
|
|
|
/* CryptoNight Step 4: Sequentially pass through the mixing buffer and use 10 rounds
|
|
|
|
* of AES encryption to mix the random data back into the 'text' buffer. 'text'
|
|
|
|
* was originally created with the output of Keccak1600. */
|
|
|
|
|
2014-05-21 16:29:34 +00:00
|
|
|
memcpy(text, state.init, INIT_SIZE_BYTE);
|
|
|
|
if(useAes)
|
|
|
|
{
|
2014-06-16 10:35:09 +00:00
|
|
|
aes_expand_key(&state.hs.b[32], expandedKey);
|
2014-05-21 16:29:34 +00:00
|
|
|
for(i = 0; i < MEMORY / INIT_SIZE_BYTE; i++)
|
|
|
|
{
|
2014-06-16 10:35:09 +00:00
|
|
|
// add the xor to the pseudo round
|
|
|
|
aes_pseudo_round_xor(text, text, expandedKey, &hp_state[i * INIT_SIZE_BYTE], INIT_SIZE_BLK);
|
2014-05-21 16:29:34 +00:00
|
|
|
}
|
2014-03-03 22:07:58 +00:00
|
|
|
}
|
2014-05-21 16:29:34 +00:00
|
|
|
else
|
|
|
|
{
|
2014-06-16 10:35:09 +00:00
|
|
|
oaes_key_import_data(aes_ctx, &state.hs.b[32], AES_KEY_SIZE);
|
2014-05-21 16:29:34 +00:00
|
|
|
for(i = 0; i < MEMORY / INIT_SIZE_BYTE; i++)
|
|
|
|
{
|
|
|
|
for(j = 0; j < INIT_SIZE_BLK; j++)
|
|
|
|
{
|
2014-06-16 10:35:09 +00:00
|
|
|
xor_blocks(&text[j * AES_BLOCK_SIZE], &hp_state[i * INIT_SIZE_BYTE + j * AES_BLOCK_SIZE]);
|
|
|
|
aesb_pseudo_round(&text[AES_BLOCK_SIZE * j], &text[AES_BLOCK_SIZE * j], aes_ctx->key->exp_data);
|
2014-05-21 16:29:34 +00:00
|
|
|
}
|
|
|
|
}
|
2014-06-16 10:35:09 +00:00
|
|
|
oaes_free((OAES_CTX **) &aes_ctx);
|
2014-03-03 22:07:58 +00:00
|
|
|
}
|
2014-05-21 16:29:34 +00:00
|
|
|
|
2014-10-05 17:47:13 +00:00
|
|
|
/* CryptoNight Step 5: Apply Keccak to the state again, and then
|
|
|
|
* use the resulting data to select which of four finalizer
|
|
|
|
* hash functions to apply to the data (Blake, Groestl, JH, or Skein).
|
|
|
|
* Use this hash to squeeze the state array down
|
|
|
|
* to the final 256 bit hash output.
|
2014-10-05 16:33:39 +00:00
|
|
|
*/
|
|
|
|
|
2014-05-21 16:29:34 +00:00
|
|
|
memcpy(state.init, text, INIT_SIZE_BYTE);
|
|
|
|
hash_permutation(&state.hs);
|
|
|
|
extra_hashes[state.hs.b[0] & 3](&state, 200, hash);
|
2014-03-03 22:07:58 +00:00
|
|
|
}
|
2015-01-27 07:43:15 +00:00
|
|
|
|
** CHANGES ARE EXPERIMENTAL (FOR TESTING ONLY)
Blockchain:
1. Optim: Multi-thread long-hash computation when encountering groups of blocks.
2. Optim: Cache verified txs and return result from cache instead of re-checking whenever possible.
3. Optim: Preload output-keys when encountering groups of blocks. Sort by amount and global-index before bulk querying database and multi-thread when possible.
4. Optim: Disable double spend check on block verification, double spend is already detected when trying to add blocks.
5. Optim: Multi-thread signature computation whenever possible.
6. Patch: Disable locking (recursive mutex) on called functions from check_tx_inputs which causes slowdowns (only seems to happen on ubuntu/VMs??? Reason: TBD)
7. Optim: Removed looped full-tx hash computation when retrieving transactions from pool (???).
8. Optim: Cache difficulty/timestamps (735 blocks) for next-difficulty calculations so that only 2 db reads per new block is needed when a new block arrives (instead of 1470 reads).
Berkeley-DB:
1. Fix: 32-bit data errors causing wrong output global indices and failure to send blocks to peers (etc).
2. Fix: Unable to pop blocks on reorganize due to transaction errors.
3. Patch: Large number of transaction aborts when running multi-threaded bulk queries.
4. Patch: Insufficient locks error when running full sync.
5. Patch: Incorrect db stats when returning from an immediate exit from "pop block" operation.
6. Optim: Add bulk queries to get output global indices.
7. Optim: Modified output_keys table to store public_key+unlock_time+height for single transaction lookup (vs 3)
8. Optim: Used output_keys table retrieve public_keys instead of going through output_amounts->output_txs+output_indices->txs->output:public_key
9. Optim: Added thread-safe buffers used when multi-threading bulk queries.
10. Optim: Added support for nosync/write_nosync options for improved performance (*see --db-sync-mode option for details)
11. Mod: Added checkpoint thread and auto-remove-logs option.
12. *Now usable on 32-bit systems like RPI2.
LMDB:
1. Optim: Added custom comparison for 256-bit key tables (minor speed-up, TBD: get actual effect)
2. Optim: Modified output_keys table to store public_key+unlock_time+height for single transaction lookup (vs 3)
3. Optim: Used output_keys table retrieve public_keys instead of going through output_amounts->output_txs+output_indices->txs->output:public_key
4. Optim: Added support for sync/writemap options for improved performance (*see --db-sync-mode option for details)
5. Mod: Auto resize to +1GB instead of multiplier x1.5
ETC:
1. Minor optimizations for slow-hash for ARM (RPI2). Incomplete.
2. Fix: 32-bit saturation bug when computing next difficulty on large blocks.
[PENDING ISSUES]
1. Berkeley db has a very slow "pop-block" operation. This is very noticeable on the RPI2 as it sometimes takes > 10 MINUTES to pop a block during reorganization.
This does not happen very often however, most reorgs seem to take a few seconds but it possibly depends on the number of outputs present. TBD.
2. Berkeley db, possible bug "unable to allocate memory". TBD.
[NEW OPTIONS] (*Currently all enabled for testing purposes)
1. --fast-block-sync arg=[0:1] (default: 1)
a. 0 = Compute long hash per block (may take a while depending on CPU)
b. 1 = Skip long-hash and verify blocks based on embedded known good block hashes (faster, minimal CPU dependence)
2. --db-sync-mode arg=[[safe|fast|fastest]:[sync|async]:[nblocks_per_sync]] (default: fastest:async:1000)
a. safe = fdatasync/fsync (or equivalent) per stored block. Very slow, but safest option to protect against power-out/crash conditions.
b. fast/fastest = Enables asynchronous fdatasync/fsync (or equivalent). Useful for battery operated devices or STABLE systems with UPS and/or systems with battery backed write cache/solid state cache.
Fast - Write meta-data but defer data flush.
Fastest - Defer meta-data and data flush.
Sync - Flush data after nblocks_per_sync and wait.
Async - Flush data after nblocks_per_sync but do not wait for the operation to finish.
3. --prep-blocks-threads arg=[n] (default: 4 or system max threads, whichever is lower)
Max number of threads to use when computing long-hash in groups.
4. --show-time-stats arg=[0:1] (default: 1)
Show benchmark related time stats.
5. --db-auto-remove-logs arg=[0:1] (default: 1)
For berkeley-db only. Auto remove logs if enabled.
**Note: lmdb and berkeley-db have changes to the tables and are not compatible with official git head version.
At the moment, you need a full resync to use this optimized version.
[PERFORMANCE COMPARISON]
**Some figures are approximations only.
Using a baseline machine of an i7-2600K+SSD+(with full pow computation):
1. The optimized lmdb/blockchain core can process blocks up to 585K for ~1.25 hours + download time, so it usually takes 2.5 hours to sync the full chain.
2. The current head with memory can process blocks up to 585K for ~4.2 hours + download time, so it usually takes 5.5 hours to sync the full chain.
3. The current head with lmdb can process blocks up to 585K for ~32 hours + download time and usually takes 36 hours to sync the full chain.
Average processing times (with full pow computation):
lmdb-optimized:
1. tx_ave = 2.5 ms / tx
2. block_ave = 5.87 ms / block
memory-official-repo:
1. tx_ave = 8.85 ms / tx
2. block_ave = 19.68 ms / block
lmdb-official-repo (0f4a036437fd41a5498ee5e74e2422ea6177aa3e)
1. tx_ave = 47.8 ms / tx
2. block_ave = 64.2 ms / block
**Note: The following data denotes processing times only (does not include p2p download time)
lmdb-optimized processing times (with full pow computation):
1. Desktop, Quad-core / 8-threads 2600k (8Mb) - 1.25 hours processing time (--db-sync-mode=fastest:async:1000).
2. Laptop, Dual-core / 4-threads U4200 (3Mb) - 4.90 hours processing time (--db-sync-mode=fastest:async:1000).
3. Embedded, Quad-core / 4-threads Z3735F (2x1Mb) - 12.0 hours processing time (--db-sync-mode=fastest:async:1000).
lmdb-optimized processing times (with per-block-checkpoint)
1. Desktop, Quad-core / 8-threads 2600k (8Mb) - 10 minutes processing time (--db-sync-mode=fastest:async:1000).
berkeley-db optimized processing times (with full pow computation)
1. Desktop, Quad-core / 8-threads 2600k (8Mb) - 1.8 hours processing time (--db-sync-mode=fastest:async:1000).
2. RPI2. Improved from estimated 3 months(???) into 2.5 days (*Need 2AMP supply + Clock:1Ghz + [usb+ssd] to achieve this speed) (--db-sync-mode=fastest:async:1000).
berkeley-db optimized processing times (with per-block-checkpoint)
1. RPI2. 12-15 hours (*Need 2AMP supply + Clock:1Ghz + [usb+ssd] to achieve this speed) (--db-sync-mode=fastest:async:1000).
2015-07-10 20:09:32 +00:00
|
|
|
#elif defined(__arm__)
|
|
|
|
// ND: Some minor optimizations for ARM7 (Raspberry Pi 2); the effect seems to be ~40-50% faster.
|
|
|
|
// Needs more work.
|
|
|
|
/* No-op on the ARM path: there is no separately allocated hashing state.
   Present only to keep the interface of the upgraded slow-hash.c. */
void slow_hash_allocate_state(void)
{
    /* intentionally empty */
}
|
|
|
|
|
|
|
|
/* No-op counterpart of slow_hash_allocate_state() on the ARM path. */
void slow_hash_free_state(void)
{
    /* intentionally empty */
}
|
|
|
|
|
|
|
|
/* Final-hash dispatch table: cn_slow_hash() selects one of these four
   functions using the low 2 bits of the first state byte. */
static void (*const extra_hashes[4])(const void *, size_t, char *) = {
    hash_extra_blake, hash_extra_groestl, hash_extra_jh, hash_extra_skein
};
|
|
|
|
|
|
|
|
/* CryptoNight algorithm parameters (scratchpad size, iteration count,
   AES geometry). */
#define MEMORY (1 << 21) /* 2 MiB */
#define ITER (1 << 20)
#define AES_BLOCK_SIZE 16
#define AES_KEY_SIZE 32 /*16*/
#define INIT_SIZE_BLK 8
#define INIT_SIZE_BYTE (INIT_SIZE_BLK * AES_BLOCK_SIZE)

/* Compiler abstraction: 16-byte alignment and inlining hints on GCC,
   harmless no-ops elsewhere. */
#if defined(__GNUC__)
#define RDATA_ALIGN16 __attribute__ ((aligned(16)))
#define STATIC static
#define INLINE inline
#else
#define RDATA_ALIGN16
#define STATIC static
#define INLINE
#endif

/* View a byte pointer as a uint64_t pointer (callers must ensure suitable
   alignment). */
#define U64(x) ((uint64_t *) (x))

/* Software AES rounds; included as a .c so they can be inlined here
   (no hardware AES on ARM7). */
#include "aesb.c"
|
|
|
|
|
|
|
|
/* 64x64 -> 128-bit unsigned multiply for ARMv7 built from four 32x32 UMULLs
   with carry propagation via ADDS/ADCS/ADC.
   a, b: 64-bit operands as uint32_t[2] (low word first);
   h, l: receive the high and low 64 bits of the product, respectively.
   Note the asm reuses the input registers as scratch, hence the earlyclobber
   ("=&r") outputs and the "cc" clobber. */
STATIC INLINE void ___mul128(uint32_t *a, uint32_t *b, uint32_t *h, uint32_t *l)
{
    // ND: 64x64 multiplication for ARM7
    __asm__ __volatile__
    (
    //                                      lo    hi
    "umull %[r0], %[r1], %[b], %[d]\n\t"  //  bd [r0 = bd.lo]
    "umull %[r2], %[r3], %[b], %[c]\n\t"  //  bc
    "umull %[b],  %[c],  %[a], %[c]\n\t"  //  ac
    "adds  %[r1], %[r1], %[r2]\n\t"       // r1 = bd.hi + bc.lo
    "adcs  %[r2], %[r3], %[b]\n\t"        // r2 = ac.lo + bc.hi + carry
    "adc   %[r3], %[c],  #0\n\t"          // r3 = ac.hi + carry
    "umull %[b],  %[a],  %[a], %[d]\n\t"  //  ad
    "adds  %[r1], %[r1], %[b]\n\t"        // r1 = bd.hi + bc.lo + ad.lo
    "adcs  %[r2], %[r2], %[a]\n\t"        // r2 = ac.lo + bc.hi + ad.hi + carry
    "adc   %[r3], %[r3], #0\n\t"          // r3 = ac.hi + carry
    : [r0]"=&r"(l[0]), [r1]"=&r"(l[1]), [r2]"=&r"(h[0]), [r3]"=&r"(h[1])
    : [a]"r"(a[1]), [b]"r"(a[0]), [c]"r"(b[1]), [d]"r"(b[0])
    : "cc"
    );
}
|
|
|
|
|
|
|
|
/* CryptoNight "mul": multiply the low 8 bytes of a and b; the HIGH 64 bits
   of the product are written to res[0..7] and the LOW 64 bits to res[8..15]
   (high-first ordering matches the portable mul() below).
   NOTE(review): casts away const on a/b; ___mul128 only reads through them. */
STATIC INLINE void mul(const uint8_t* a, const uint8_t* b, uint8_t* res)
{
    ___mul128((uint32_t *) a, (uint32_t *) b, (uint32_t *) (res + 0), (uint32_t *) (res + 8));
}
|
|
|
|
|
|
|
|
/* a += b, treating each 16-byte block as two independent uint64_t lanes
   (no carry propagates between the two halves). */
STATIC INLINE void sum_half_blocks(uint8_t* a, const uint8_t* b)
{
    uint64_t *dst = (uint64_t *) a;
    const uint64_t *src = (const uint64_t *) b;
    uint64_t lo = dst[0] + src[0];
    uint64_t hi = dst[1] + src[1];
    dst[0] = lo;
    dst[1] = hi;
}
|
|
|
|
|
|
|
|
/* Exchange the contents of two 16-byte blocks via a pair of temporaries. */
STATIC INLINE void swap_blocks(uint8_t *a, uint8_t *b)
{
    uint64_t *pa = (uint64_t *) a;
    uint64_t *pb = (uint64_t *) b;
    uint64_t t0 = pa[0];
    uint64_t t1 = pa[1];
    pa[0] = pb[0];
    pa[1] = pb[1];
    pb[0] = t0;
    pb[1] = t1;
}
|
|
|
|
|
|
|
|
/* a ^= b over one 16-byte block, as two 64-bit XORs. */
STATIC INLINE void xor_blocks(uint8_t* a, const uint8_t* b)
{
    uint64_t *dst = (uint64_t *) a;
    const uint64_t *src = (const uint64_t *) b;
    dst[0] ^= src[0];
    dst[1] ^= src[1];
}
|
|
|
|
|
|
|
|
#pragma pack(push, 1)
/* The 200-byte hash state overlaid with the two views CryptoNight needs:
   k    - first 64 bytes, XORed in halves to form the AES working blocks a/b;
   init - next 128 bytes, the seed for (and final digest of) the scratchpad
          expansion/re-absorption passes.
   Packed so the overlay offsets are exact. */
union cn_slow_hash_state
{
    union hash_state hs;
    struct
    {
        uint8_t k[64];
        uint8_t init[INIT_SIZE_BYTE];
    };
};
#pragma pack(pop)
|
|
|
|
|
|
|
|
void cn_slow_hash(const void *data, size_t length, char *hash)
|
|
|
|
{
|
|
|
|
uint8_t long_state[MEMORY];
|
|
|
|
uint8_t text[INIT_SIZE_BYTE];
|
|
|
|
uint8_t a[AES_BLOCK_SIZE];
|
|
|
|
uint8_t b[AES_BLOCK_SIZE];
|
|
|
|
uint8_t d[AES_BLOCK_SIZE];
|
|
|
|
uint8_t aes_key[AES_KEY_SIZE];
|
|
|
|
RDATA_ALIGN16 uint8_t expandedKey[256];
|
|
|
|
|
|
|
|
union cn_slow_hash_state state;
|
|
|
|
|
|
|
|
size_t i, j;
|
|
|
|
uint8_t *p = NULL;
|
|
|
|
oaes_ctx *aes_ctx;
|
|
|
|
static void (*const extra_hashes[4])(const void *, size_t, char *) =
|
|
|
|
{
|
|
|
|
hash_extra_blake, hash_extra_groestl, hash_extra_jh, hash_extra_skein
|
|
|
|
};
|
|
|
|
|
|
|
|
hash_process(&state.hs, data, length);
|
|
|
|
memcpy(text, state.init, INIT_SIZE_BYTE);
|
|
|
|
|
|
|
|
aes_ctx = (oaes_ctx *) oaes_alloc();
|
|
|
|
oaes_key_import_data(aes_ctx, state.hs.b, AES_KEY_SIZE);
|
|
|
|
|
|
|
|
// use aligned data
|
|
|
|
memcpy(expandedKey, aes_ctx->key->exp_data, aes_ctx->key->exp_data_len);
|
|
|
|
for(i = 0; i < MEMORY / INIT_SIZE_BYTE; i++)
|
|
|
|
{
|
|
|
|
for(j = 0; j < INIT_SIZE_BLK; j++)
|
|
|
|
aesb_pseudo_round(&text[AES_BLOCK_SIZE * j], &text[AES_BLOCK_SIZE * j], expandedKey);
|
|
|
|
memcpy(&long_state[i * INIT_SIZE_BYTE], text, INIT_SIZE_BYTE);
|
|
|
|
}
|
|
|
|
|
|
|
|
U64(a)[0] = U64(&state.k[0])[0] ^ U64(&state.k[32])[0];
|
|
|
|
U64(a)[1] = U64(&state.k[0])[1] ^ U64(&state.k[32])[1];
|
|
|
|
U64(b)[0] = U64(&state.k[16])[0] ^ U64(&state.k[48])[0];
|
|
|
|
U64(b)[1] = U64(&state.k[16])[1] ^ U64(&state.k[48])[1];
|
|
|
|
|
|
|
|
for(i = 0; i < ITER / 2; i++)
|
|
|
|
{
|
|
|
|
#define MASK ((uint32_t)(((MEMORY / AES_BLOCK_SIZE) - 1) << 4))
|
|
|
|
#define state_index(x) ((*(uint32_t *) x) & MASK)
|
|
|
|
|
|
|
|
// Iteration 1
|
|
|
|
p = &long_state[state_index(a)];
|
|
|
|
aesb_single_round(p, p, a);
|
|
|
|
|
|
|
|
xor_blocks(b, p);
|
|
|
|
swap_blocks(b, p);
|
|
|
|
swap_blocks(a, b);
|
|
|
|
|
|
|
|
// Iteration 2
|
|
|
|
p = &long_state[state_index(a)];
|
|
|
|
|
|
|
|
mul(a, p, d);
|
|
|
|
sum_half_blocks(b, d);
|
|
|
|
swap_blocks(b, p);
|
|
|
|
xor_blocks(b, p);
|
|
|
|
swap_blocks(a, b);
|
|
|
|
}
|
|
|
|
|
|
|
|
memcpy(text, state.init, INIT_SIZE_BYTE);
|
|
|
|
oaes_key_import_data(aes_ctx, &state.hs.b[32], AES_KEY_SIZE);
|
|
|
|
memcpy(expandedKey, aes_ctx->key->exp_data, aes_ctx->key->exp_data_len);
|
|
|
|
for(i = 0; i < MEMORY / INIT_SIZE_BYTE; i++)
|
|
|
|
{
|
|
|
|
for(j = 0; j < INIT_SIZE_BLK; j++)
|
|
|
|
{
|
|
|
|
xor_blocks(&text[j * AES_BLOCK_SIZE], &long_state[i * INIT_SIZE_BYTE + j * AES_BLOCK_SIZE]);
|
|
|
|
aesb_pseudo_round(&text[AES_BLOCK_SIZE * j], &text[AES_BLOCK_SIZE * j], expandedKey);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
oaes_free((OAES_CTX **) &aes_ctx);
|
|
|
|
memcpy(state.init, text, INIT_SIZE_BYTE);
|
|
|
|
hash_permutation(&state.hs);
|
|
|
|
extra_hashes[state.hs.b[0] & 3](&state, 200, hash);
|
|
|
|
}
|
|
|
|
|
2015-01-27 07:43:15 +00:00
|
|
|
#else
|
|
|
|
// Portable implementation as a fallback
|
|
|
|
|
2015-01-28 09:32:52 +00:00
|
|
|
/* Interface-compatibility stub: the portable fallback keeps all working
   state on the stack, so nothing needs allocating. */
void slow_hash_allocate_state(void)
{
    /* nothing to do */
}
|
|
|
|
|
|
|
|
/* Interface-compatibility stub matching slow_hash_allocate_state(). */
void slow_hash_free_state(void)
{
    /* nothing to do */
}
|
|
|
|
|
2015-01-27 07:43:15 +00:00
|
|
|
/* Final-hash dispatch table: cn_slow_hash() selects one of these four
   functions using the low 2 bits of the first state byte. */
static void (*const extra_hashes[4])(const void *, size_t, char *) = {
    hash_extra_blake, hash_extra_groestl, hash_extra_jh, hash_extra_skein
};
|
|
|
|
|
|
|
|
/* CryptoNight algorithm parameters (scratchpad size, iteration count,
   AES geometry). */
#define MEMORY (1 << 21) /* 2 MiB */
#define ITER (1 << 20)
#define AES_BLOCK_SIZE 16
#define AES_KEY_SIZE 32 /*16*/
#define INIT_SIZE_BLK 8
#define INIT_SIZE_BYTE (INIT_SIZE_BLK * AES_BLOCK_SIZE)

/* Software AES primitives implemented in aesb.c. */
extern int aesb_single_round(const uint8_t *in, uint8_t*out, const uint8_t *expandedKey);
extern int aesb_pseudo_round(const uint8_t *in, uint8_t *out, const uint8_t *expandedKey);
|
|
|
|
|
|
|
|
/* Map the first 8 bytes of block `a` to a scratchpad block index in
   [0, count); count must be a power of two for the mask to be valid. */
static size_t e2i(const uint8_t* a, size_t count) { return (*((uint64_t*)a) / AES_BLOCK_SIZE) & (count - 1); }
|
|
|
|
|
|
|
|
/* CryptoNight "mul": 64x64 -> 128-bit multiply of the low quadwords of a
   and b.  res[0..7] receives the HIGH 64 bits and res[8..15] the LOW 64
   bits (high-first ordering, matching the ARM mul() above).  SWAP64LE
   converts to/from little-endian so the result is host-independent. */
static void mul(const uint8_t* a, const uint8_t* b, uint8_t* res) {
  uint64_t a0, b0;
  uint64_t hi, lo;

  a0 = SWAP64LE(((uint64_t*)a)[0]);
  b0 = SWAP64LE(((uint64_t*)b)[0]);
  lo = mul128(a0, b0, &hi);
  ((uint64_t*)res)[0] = SWAP64LE(hi);
  ((uint64_t*)res)[1] = SWAP64LE(lo);
}
|
|
|
|
|
|
|
|
/* a += b, treating each 16-byte block as two independent little-endian
   uint64_t halves (no carry propagates between them).  SWAP64LE keeps the
   arithmetic endian-safe. */
static void sum_half_blocks(uint8_t* a, const uint8_t* b) {
  uint64_t lo = SWAP64LE(((uint64_t*)a)[0]) + SWAP64LE(((uint64_t*)b)[0]);
  uint64_t hi = SWAP64LE(((uint64_t*)a)[1]) + SWAP64LE(((uint64_t*)b)[1]);
  ((uint64_t*)a)[0] = SWAP64LE(lo);
  ((uint64_t*)a)[1] = SWAP64LE(hi);
}
|
|
|
|
/* View a byte pointer as a uint64_t pointer (callers must ensure suitable
   alignment). */
#define U64(x) ((uint64_t *) (x))
|
|
|
|
|
|
|
|
static void copy_block(uint8_t* dst, const uint8_t* src) {
|
|
|
|
memcpy(dst, src, AES_BLOCK_SIZE);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Exchange the contents of two 16-byte blocks via a pair of temporaries. */
static void swap_blocks(uint8_t *a, uint8_t *b){
  uint64_t *pa = (uint64_t *) a;
  uint64_t *pb = (uint64_t *) b;
  uint64_t t0 = pa[0];
  uint64_t t1 = pa[1];
  pa[0] = pb[0];
  pa[1] = pb[1];
  pb[0] = t0;
  pb[1] = t1;
}
|
|
|
|
|
|
|
|
static void xor_blocks(uint8_t* a, const uint8_t* b) {
|
|
|
|
size_t i;
|
|
|
|
for (i = 0; i < AES_BLOCK_SIZE; i++) {
|
|
|
|
a[i] ^= b[i];
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
#pragma pack(push, 1)
/* The 200-byte hash state overlaid with the two views CryptoNight needs:
   k    - first 64 bytes, XORed in halves to form the AES working blocks a/b;
   init - next 128 bytes, the seed for (and final digest of) the scratchpad
          expansion/re-absorption passes.
   Packed so the overlay offsets are exact. */
union cn_slow_hash_state {
  union hash_state hs;
  struct {
    uint8_t k[64];
    uint8_t init[INIT_SIZE_BYTE];
  };
};
#pragma pack(pop)
|
|
|
|
|
|
|
|
/* CryptoNight slow hash - portable, endian-safe fallback (software AES).
 *
 * data/length: input message to hash.
 * hash:        output buffer for the final digest.
 *
 * Phases: absorb input into `state` -> expand state.init into a 2 MiB
 * scratchpad -> ITER/2 memory-hard mixing iterations -> re-absorb the
 * scratchpad -> permute state and dispatch to one of the four extra hashes.
 */
void cn_slow_hash(const void *data, size_t length, char *hash) {
  /* NOTE(review): 2 MiB scratchpad on the stack - callers need a
     sufficiently large stack. */
  uint8_t long_state[MEMORY];
  union cn_slow_hash_state state;
  uint8_t text[INIT_SIZE_BYTE];
  uint8_t a[AES_BLOCK_SIZE];
  uint8_t b[AES_BLOCK_SIZE];
  uint8_t c[AES_BLOCK_SIZE];
  uint8_t d[AES_BLOCK_SIZE];
  size_t i, j;
  uint8_t aes_key[AES_KEY_SIZE];
  oaes_ctx *aes_ctx;

  /* Absorb the input; state.init seeds the scratchpad. */
  hash_process(&state.hs, data, length);
  memcpy(text, state.init, INIT_SIZE_BYTE);
  memcpy(aes_key, state.hs.b, AES_KEY_SIZE);
  aes_ctx = (oaes_ctx *) oaes_alloc();

  /* Phase 1: fill the scratchpad with chained AES pseudo-rounds,
     INIT_SIZE_BYTE bytes at a time. */
  oaes_key_import_data(aes_ctx, aes_key, AES_KEY_SIZE);
  for (i = 0; i < MEMORY / INIT_SIZE_BYTE; i++) {
    for (j = 0; j < INIT_SIZE_BLK; j++) {
      aesb_pseudo_round(&text[AES_BLOCK_SIZE * j], &text[AES_BLOCK_SIZE * j], aes_ctx->key->exp_data);
    }
    memcpy(&long_state[i * INIT_SIZE_BYTE], text, INIT_SIZE_BYTE);
  }

  /* Working blocks: a = k[0..15] ^ k[32..47], b = k[16..31] ^ k[48..63]. */
  for (i = 0; i < 16; i++) {
    a[i] = state.k[ i] ^ state.k[32 + i];
    b[i] = state.k[16 + i] ^ state.k[48 + i];
  }

  /* Phase 2: memory-hard main loop. */
  for (i = 0; i < ITER / 2; i++) {
    /* Dependency chain: address -> read value ------+
     * written value <-+ hard function (AES or MUL) <+
     * next address  <-+
     */
    /* Iteration 1 */
    j = e2i(a, MEMORY / AES_BLOCK_SIZE);
    copy_block(c, &long_state[j * AES_BLOCK_SIZE]);
    aesb_single_round(c, c, a);
    xor_blocks(b, c);
    swap_blocks(b, c);
    copy_block(&long_state[j * AES_BLOCK_SIZE], c);
    /* The address derives from a, which is untouched above. */
    assert(j == e2i(a, MEMORY / AES_BLOCK_SIZE));
    swap_blocks(a, b);
    /* Iteration 2 */
    j = e2i(a, MEMORY / AES_BLOCK_SIZE);
    copy_block(c, &long_state[j * AES_BLOCK_SIZE]);
    mul(a, c, d);
    sum_half_blocks(b, d);
    swap_blocks(b, c);
    xor_blocks(b, c);
    copy_block(&long_state[j * AES_BLOCK_SIZE], c);
    assert(j == e2i(a, MEMORY / AES_BLOCK_SIZE));
    swap_blocks(a, b);
  }

  /* Phase 3: re-absorb the scratchpad with the second half of the key. */
  memcpy(text, state.init, INIT_SIZE_BYTE);
  oaes_key_import_data(aes_ctx, &state.hs.b[32], AES_KEY_SIZE);
  for (i = 0; i < MEMORY / INIT_SIZE_BYTE; i++) {
    for (j = 0; j < INIT_SIZE_BLK; j++) {
      xor_blocks(&text[j * AES_BLOCK_SIZE], &long_state[i * INIT_SIZE_BYTE + j * AES_BLOCK_SIZE]);
      aesb_pseudo_round(&text[AES_BLOCK_SIZE * j], &text[AES_BLOCK_SIZE * j], aes_ctx->key->exp_data);
    }
  }
  memcpy(state.init, text, INIT_SIZE_BYTE);
  hash_permutation(&state.hs);
  /*memcpy(hash, &state, 32);*/
  /* Dispatch on the low 2 bits of the first state byte. */
  extra_hashes[state.hs.b[0] & 3](&state, 200, hash);
  oaes_free((OAES_CTX **) &aes_ctx);
}
|
|
|
|
|
2015-04-05 15:29:54 +00:00
|
|
|
#endif
|