Restored all cn/2 based algorithms, except cn/r.

This commit is contained in:
XMRig 2019-09-02 19:42:00 +07:00
parent 235cda1051
commit 9b6ab55936
5 changed files with 1228 additions and 1676 deletions

View file

@ -57,7 +57,7 @@ xmrig::OclThread::OclThread(const rapidjson::Value &value)
m_intensity = Json::getUint(value, kIntensity); m_intensity = Json::getUint(value, kIntensity);
m_worksize = Json::getUint(value, kWorksize); m_worksize = Json::getUint(value, kWorksize);
m_affinity = Json::getInt64(value, kAffinity, -1); m_affinity = Json::getInt64(value, kAffinity, -1);
m_memChunk = std::max(Json::getUint(value, kMemChunk, m_memChunk), 18u); m_memChunk = std::min(Json::getUint(value, kMemChunk, m_memChunk), 18u);
m_compMode = Json::getBool(value, kCompMode, m_compMode); m_compMode = Json::getBool(value, kCompMode, m_compMode);
setUnrollFactor(Json::getUint(value, kUnroll, m_unrollFactor)); setUnrollFactor(Json::getUint(value, kUnroll, m_unrollFactor));

View file

@ -503,9 +503,15 @@ __kernel void cn1(__global ulong *input, __global uint4 *Scratchpad, __global ul
c = AES_Round(AES0, AES1, AES2, AES3, c, ((uint4 *)a)[0]); c = AES_Round(AES0, AES1, AES2, AES3, c, ((uint4 *)a)[0]);
{ {
# if (ALGO == ALGO_CN_RWZ)
const ulong2 chunk1 = as_ulong2(SCRATCHPAD_CHUNK(3));
const ulong2 chunk2 = as_ulong2(SCRATCHPAD_CHUNK(2));
const ulong2 chunk3 = as_ulong2(SCRATCHPAD_CHUNK(1));
# else
const ulong2 chunk1 = as_ulong2(SCRATCHPAD_CHUNK(1)); const ulong2 chunk1 = as_ulong2(SCRATCHPAD_CHUNK(1));
const ulong2 chunk2 = as_ulong2(SCRATCHPAD_CHUNK(2)); const ulong2 chunk2 = as_ulong2(SCRATCHPAD_CHUNK(2));
const ulong2 chunk3 = as_ulong2(SCRATCHPAD_CHUNK(3)); const ulong2 chunk3 = as_ulong2(SCRATCHPAD_CHUNK(3));
# endif
SCRATCHPAD_CHUNK(1) = as_uint4(chunk3 + bx1); SCRATCHPAD_CHUNK(1) = as_uint4(chunk3 + bx1);
SCRATCHPAD_CHUNK(2) = as_uint4(chunk1 + bx0); SCRATCHPAD_CHUNK(2) = as_uint4(chunk1 + bx0);
@ -544,9 +550,15 @@ __kernel void cn1(__global ulong *input, __global uint4 *Scratchpad, __global ul
t ^= chunk2; t ^= chunk2;
const ulong2 chunk3 = as_ulong2(SCRATCHPAD_CHUNK(3)); const ulong2 chunk3 = as_ulong2(SCRATCHPAD_CHUNK(3));
# if (ALGO == ALGO_CN_RWZ)
SCRATCHPAD_CHUNK(1) = as_uint4(chunk1 + bx1);
SCRATCHPAD_CHUNK(2) = as_uint4(chunk3 + bx0);
SCRATCHPAD_CHUNK(3) = as_uint4(chunk2 + ((ulong2 *)a)[0]);
# else
SCRATCHPAD_CHUNK(1) = as_uint4(chunk3 + bx1); SCRATCHPAD_CHUNK(1) = as_uint4(chunk3 + bx1);
SCRATCHPAD_CHUNK(2) = as_uint4(chunk1 + bx0); SCRATCHPAD_CHUNK(2) = as_uint4(chunk1 + bx0);
SCRATCHPAD_CHUNK(3) = as_uint4(chunk2 + ((ulong2 *)a)[0]); SCRATCHPAD_CHUNK(3) = as_uint4(chunk2 + ((ulong2 *)a)[0]);
# endif
} }
a[1] += t.s1; a[1] += t.s1;

View file

@ -1,480 +0,0 @@
R"===(
// cn1_v2_rwz: memory-hard main loop of CryptoNight variant 2, "rwz" flavour.
// One work-item processes one hash. Runs 0x60000 iterations (see loop below)
// and — unlike cn1_v2_zls/cn1_v2_double — loads the shuffle chunks in
// reversed order (3,2,1) and stores the second shuffle with chunk1/chunk3
// swapped. Requires helpers from elsewhere in this program: getIdx, AES0_C,
// AES_Round, fast_div_v2, fast_sqrt_v2, and the MEMORY/MASK/WORKSIZE/
// STRIDED_INDEX/MEM_CHUNK/COMP_MODE/CN_UNROLL build-time defines.
__attribute__((reqd_work_group_size(WORKSIZE, 1, 1)))
__kernel void cn1_v2_rwz(__global uint4 *Scratchpad, __global ulong *states, uint variant, __global ulong *input, uint Threads)
{
# if (ALGO == CRYPTONIGHT)
ulong a[2], b[4];
// Work-group-local AES T-tables; AES1..AES3 are byte-rotations of AES0.
__local uint AES0[256], AES1[256], AES2[256], AES3[256];
const ulong gIdx = getIdx();
// Cooperative fill of the 256-entry tables, strided across the work-group.
for(int i = get_local_id(0); i < 256; i += WORKSIZE)
{
const uint tmp = AES0_C[i];
AES0[i] = tmp;
AES1[i] = rotate(tmp, 8U);
AES2[i] = rotate(tmp, 16U);
AES3[i] = rotate(tmp, 24U);
}
barrier(CLK_LOCAL_MEM_FENCE);
# if (COMP_MODE == 1)
// do not use early return here
if (gIdx < Threads)
# endif
{
// Each hash owns a 25-ulong Keccak state; stride to this thread's state.
states += 25 * gIdx;
# if defined(__NV_CL_C_VERSION)
// NOTE(review): 0x40000 >> 2 = 0x10000 uint4 elements per thread, which
// differs from the (MEMORY >> 4) used on the generic path and from the
// constants used by the sibling kernels below — confirm this matches the
// rwz scratchpad size before reuse.
Scratchpad += gIdx * (0x40000 >> 2);
# else
# if (STRIDED_INDEX == 0)
Scratchpad += gIdx * (MEMORY >> 4);
# elif (STRIDED_INDEX == 1)
Scratchpad += gIdx;
# elif (STRIDED_INDEX == 2)
Scratchpad += get_group_id(0) * (MEMORY >> 4) * WORKSIZE + MEM_CHUNK * get_local_id(0);
# endif
# endif
// Derive the a/b working registers from the Keccak state words.
a[0] = states[0] ^ states[4];
a[1] = states[1] ^ states[5];
b[0] = states[2] ^ states[6];
b[1] = states[3] ^ states[7];
b[2] = states[8] ^ states[10];
b[3] = states[9] ^ states[11];
}
ulong2 bx0 = ((ulong2 *)b)[0];
ulong2 bx1 = ((ulong2 *)b)[1];
mem_fence(CLK_LOCAL_MEM_FENCE);
# ifdef __NV_CL_C_VERSION
// NVIDIA path: stage one 64-byte scratchpad line in local memory and do the
// four 16-byte chunk accesses against that copy (idx1 is the offset within
// the line, idx the line's global offset).
__local uint16 scratchpad_line_buf[WORKSIZE];
__local uint16* scratchpad_line = scratchpad_line_buf + get_local_id(0);
# define SCRATCHPAD_CHUNK(N) (*(__local uint4*)((__local uchar*)(scratchpad_line) + (idx1 ^ (N << 4))))
# else
# if (STRIDED_INDEX == 0)
# define SCRATCHPAD_CHUNK(N) (*(__global uint4*)((__global uchar*)(Scratchpad) + (idx ^ (N << 4))))
# elif (STRIDED_INDEX == 1)
# define SCRATCHPAD_CHUNK(N) (*(__global uint4*)((__global uchar*)(Scratchpad) + mul24(as_uint(idx ^ (N << 4)), Threads)))
# elif (STRIDED_INDEX == 2)
# define SCRATCHPAD_CHUNK(N) (*(__global uint4*)((__global uchar*)(Scratchpad) + (((idx ^ (N << 4)) % (MEM_CHUNK << 4)) + ((idx ^ (N << 4)) / (MEM_CHUNK << 4)) * WORKSIZE * (MEM_CHUNK << 4))))
# endif
# endif
# if (COMP_MODE == 1)
// do not use early return here
if (gIdx < Threads)
# endif
{
// Variant-2 integer-math state (fed by fast_div_v2 / fast_sqrt_v2 below).
uint2 division_result = as_uint2(states[12]);
uint sqrt_result = as_uint2(states[13]).s0;
#pragma unroll CN_UNROLL
// rwz iteration count: 0x60000 (3/4 of the base 0x80000).
for(int i = 0; i < 0x60000; ++i)
{
# ifdef __NV_CL_C_VERSION
uint idx = a[0] & 0x1FFFC0;
uint idx1 = a[0] & 0x30;
*scratchpad_line = *(__global uint16*)((__global uchar*)(Scratchpad) + idx);
# else
uint idx = a[0] & MASK;
# endif
uint4 c = SCRATCHPAD_CHUNK(0);
c = AES_Round(AES0, AES1, AES2, AES3, c, ((uint4 *)a)[0]);
{
// First shuffle — rwz loads the chunks in reversed order (3,2,1)
// relative to the other v2 kernels.
const ulong2 chunk1 = as_ulong2(SCRATCHPAD_CHUNK(3));
const ulong2 chunk2 = as_ulong2(SCRATCHPAD_CHUNK(2));
const ulong2 chunk3 = as_ulong2(SCRATCHPAD_CHUNK(1));
SCRATCHPAD_CHUNK(1) = as_uint4(chunk3 + bx1);
SCRATCHPAD_CHUNK(2) = as_uint4(chunk1 + bx0);
SCRATCHPAD_CHUNK(3) = as_uint4(chunk2 + ((ulong2 *)a)[0]);
}
SCRATCHPAD_CHUNK(0) = as_uint4(bx0) ^ c;
# ifdef __NV_CL_C_VERSION
// Flush the staged line, then stage the line addressed by c.
*(__global uint16*)((__global uchar*)(Scratchpad) + idx) = *scratchpad_line;
idx = as_ulong2(c).s0 & 0x1FFFC0;
idx1 = as_ulong2(c).s0 & 0x30;
*scratchpad_line = *(__global uint16*)((__global uchar*)(Scratchpad) + idx);
# else
idx = as_ulong2(c).s0 & MASK;
# endif
uint4 tmp = SCRATCHPAD_CHUNK(0);
{
// Variant-2 division/sqrt tweak: mix previous results into tmp,
// then compute the next division and sqrt from c.
tmp.s0 ^= division_result.s0;
tmp.s1 ^= division_result.s1 ^ sqrt_result;
division_result = fast_div_v2(as_ulong2(c).s1, (c.s0 + (sqrt_result << 1)) | 0x80000001UL);
sqrt_result = fast_sqrt_v2(as_ulong2(c).s0 + as_ulong(division_result));
}
// 64x64 -> 128-bit multiply: t.s0 = high half, t.s1 = low half.
ulong2 t;
t.s0 = mul_hi(as_ulong2(c).s0, as_ulong2(tmp).s0);
t.s1 = as_ulong2(c).s0 * as_ulong2(tmp).s0;
{
// Second shuffle — rwz stores chunk1/chunk3 swapped relative to
// cn1_v2_zls / cn1_v2_double.
const ulong2 chunk1 = as_ulong2(SCRATCHPAD_CHUNK(1)) ^ t;
const ulong2 chunk2 = as_ulong2(SCRATCHPAD_CHUNK(2));
t ^= chunk2;
const ulong2 chunk3 = as_ulong2(SCRATCHPAD_CHUNK(3));
SCRATCHPAD_CHUNK(1) = as_uint4(chunk1 + bx1);
SCRATCHPAD_CHUNK(2) = as_uint4(chunk3 + bx0);
SCRATCHPAD_CHUNK(3) = as_uint4(chunk2 + ((ulong2 *)a)[0]);
}
a[1] += t.s1;
a[0] += t.s0;
SCRATCHPAD_CHUNK(0) = ((uint4 *)a)[0];
# ifdef __NV_CL_C_VERSION
*(__global uint16*)((__global uchar*)(Scratchpad) + idx) = *scratchpad_line;
# endif
((uint4 *)a)[0] ^= tmp;
// Rotate the b registers for the next iteration.
bx1 = bx0;
bx0 = as_ulong2(c);
}
# undef SCRATCHPAD_CHUNK
}
mem_fence(CLK_GLOBAL_MEM_FENCE);
# endif
}
)==="
R"===(
// cn1_v2_zls: memory-hard main loop of CryptoNight variant 2, "zls" flavour.
// One work-item processes one hash. Identical in structure to cn1_v2_rwz but
// uses the standard (1,2,3) chunk-shuffle order in both shuffle sections, and
// runs 0x60000 iterations. Requires helpers defined elsewhere in this
// program: getIdx, AES0_C, AES_Round, fast_div_v2, fast_sqrt_v2, plus the
// MEMORY/MASK/WORKSIZE/STRIDED_INDEX/MEM_CHUNK/COMP_MODE/CN_UNROLL defines.
__attribute__((reqd_work_group_size(WORKSIZE, 1, 1)))
__kernel void cn1_v2_zls(__global uint4 *Scratchpad, __global ulong *states, uint variant, __global ulong *input, uint Threads)
{
# if (ALGO == CRYPTONIGHT)
ulong a[2], b[4];
// Work-group-local AES T-tables; AES1..AES3 are byte-rotations of AES0.
__local uint AES0[256], AES1[256], AES2[256], AES3[256];
const ulong gIdx = getIdx();
// Cooperative fill of the 256-entry tables, strided across the work-group.
for(int i = get_local_id(0); i < 256; i += WORKSIZE)
{
const uint tmp = AES0_C[i];
AES0[i] = tmp;
AES1[i] = rotate(tmp, 8U);
AES2[i] = rotate(tmp, 16U);
AES3[i] = rotate(tmp, 24U);
}
barrier(CLK_LOCAL_MEM_FENCE);
# if (COMP_MODE == 1)
// do not use early return here
if (gIdx < Threads)
# endif
{
// Each hash owns a 25-ulong Keccak state; stride to this thread's state.
states += 25 * gIdx;
# if defined(__NV_CL_C_VERSION)
// NOTE(review): 0x60000 >> 2 = 0x18000 uint4 elements per thread; this
// differs from the generic (MEMORY >> 4) path and from the sibling
// kernels' constants — confirm it matches the zls scratchpad size.
Scratchpad += gIdx * (0x60000 >> 2);
# else
# if (STRIDED_INDEX == 0)
Scratchpad += gIdx * (MEMORY >> 4);
# elif (STRIDED_INDEX == 1)
Scratchpad += gIdx;
# elif (STRIDED_INDEX == 2)
Scratchpad += get_group_id(0) * (MEMORY >> 4) * WORKSIZE + MEM_CHUNK * get_local_id(0);
# endif
# endif
// Derive the a/b working registers from the Keccak state words.
a[0] = states[0] ^ states[4];
a[1] = states[1] ^ states[5];
b[0] = states[2] ^ states[6];
b[1] = states[3] ^ states[7];
b[2] = states[8] ^ states[10];
b[3] = states[9] ^ states[11];
}
ulong2 bx0 = ((ulong2 *)b)[0];
ulong2 bx1 = ((ulong2 *)b)[1];
mem_fence(CLK_LOCAL_MEM_FENCE);
# ifdef __NV_CL_C_VERSION
// NVIDIA path: stage one 64-byte scratchpad line in local memory and do the
// four 16-byte chunk accesses against that copy (idx1 is the offset within
// the line, idx the line's global offset).
__local uint16 scratchpad_line_buf[WORKSIZE];
__local uint16* scratchpad_line = scratchpad_line_buf + get_local_id(0);
# define SCRATCHPAD_CHUNK(N) (*(__local uint4*)((__local uchar*)(scratchpad_line) + (idx1 ^ (N << 4))))
# else
# if (STRIDED_INDEX == 0)
# define SCRATCHPAD_CHUNK(N) (*(__global uint4*)((__global uchar*)(Scratchpad) + (idx ^ (N << 4))))
# elif (STRIDED_INDEX == 1)
# define SCRATCHPAD_CHUNK(N) (*(__global uint4*)((__global uchar*)(Scratchpad) + mul24(as_uint(idx ^ (N << 4)), Threads)))
# elif (STRIDED_INDEX == 2)
# define SCRATCHPAD_CHUNK(N) (*(__global uint4*)((__global uchar*)(Scratchpad) + (((idx ^ (N << 4)) % (MEM_CHUNK << 4)) + ((idx ^ (N << 4)) / (MEM_CHUNK << 4)) * WORKSIZE * (MEM_CHUNK << 4))))
# endif
# endif
# if (COMP_MODE == 1)
// do not use early return here
if (gIdx < Threads)
# endif
{
// Variant-2 integer-math state (fed by fast_div_v2 / fast_sqrt_v2 below).
uint2 division_result = as_uint2(states[12]);
uint sqrt_result = as_uint2(states[13]).s0;
#pragma unroll CN_UNROLL
// zls iteration count: 0x60000 (3/4 of the base 0x80000).
for(int i = 0; i < 0x60000; ++i)
{
# ifdef __NV_CL_C_VERSION
uint idx = a[0] & 0x1FFFC0;
uint idx1 = a[0] & 0x30;
*scratchpad_line = *(__global uint16*)((__global uchar*)(Scratchpad) + idx);
# else
uint idx = a[0] & MASK;
# endif
uint4 c = SCRATCHPAD_CHUNK(0);
c = AES_Round(AES0, AES1, AES2, AES3, c, ((uint4 *)a)[0]);
{
// First shuffle — standard (1,2,3) load order.
const ulong2 chunk1 = as_ulong2(SCRATCHPAD_CHUNK(1));
const ulong2 chunk2 = as_ulong2(SCRATCHPAD_CHUNK(2));
const ulong2 chunk3 = as_ulong2(SCRATCHPAD_CHUNK(3));
SCRATCHPAD_CHUNK(1) = as_uint4(chunk3 + bx1);
SCRATCHPAD_CHUNK(2) = as_uint4(chunk1 + bx0);
SCRATCHPAD_CHUNK(3) = as_uint4(chunk2 + ((ulong2 *)a)[0]);
}
SCRATCHPAD_CHUNK(0) = as_uint4(bx0) ^ c;
# ifdef __NV_CL_C_VERSION
// Flush the staged line, then stage the line addressed by c.
*(__global uint16*)((__global uchar*)(Scratchpad) + idx) = *scratchpad_line;
idx = as_ulong2(c).s0 & 0x1FFFC0;
idx1 = as_ulong2(c).s0 & 0x30;
*scratchpad_line = *(__global uint16*)((__global uchar*)(Scratchpad) + idx);
# else
idx = as_ulong2(c).s0 & MASK;
# endif
uint4 tmp = SCRATCHPAD_CHUNK(0);
{
// Variant-2 division/sqrt tweak: mix previous results into tmp,
// then compute the next division and sqrt from c.
tmp.s0 ^= division_result.s0;
tmp.s1 ^= division_result.s1 ^ sqrt_result;
division_result = fast_div_v2(as_ulong2(c).s1, (c.s0 + (sqrt_result << 1)) | 0x80000001UL);
sqrt_result = fast_sqrt_v2(as_ulong2(c).s0 + as_ulong(division_result));
}
// 64x64 -> 128-bit multiply: t.s0 = high half, t.s1 = low half.
ulong2 t;
t.s0 = mul_hi(as_ulong2(c).s0, as_ulong2(tmp).s0);
t.s1 = as_ulong2(c).s0 * as_ulong2(tmp).s0;
{
// Second shuffle — standard store order (cf. cn1_v2_rwz, which
// swaps chunk1/chunk3 here).
const ulong2 chunk1 = as_ulong2(SCRATCHPAD_CHUNK(1)) ^ t;
const ulong2 chunk2 = as_ulong2(SCRATCHPAD_CHUNK(2));
t ^= chunk2;
const ulong2 chunk3 = as_ulong2(SCRATCHPAD_CHUNK(3));
SCRATCHPAD_CHUNK(1) = as_uint4(chunk3 + bx1);
SCRATCHPAD_CHUNK(2) = as_uint4(chunk1 + bx0);
SCRATCHPAD_CHUNK(3) = as_uint4(chunk2 + ((ulong2 *)a)[0]);
}
a[1] += t.s1;
a[0] += t.s0;
SCRATCHPAD_CHUNK(0) = ((uint4 *)a)[0];
# ifdef __NV_CL_C_VERSION
*(__global uint16*)((__global uchar*)(Scratchpad) + idx) = *scratchpad_line;
# endif
((uint4 *)a)[0] ^= tmp;
// Rotate the b registers for the next iteration.
bx1 = bx0;
bx0 = as_ulong2(c);
}
# undef SCRATCHPAD_CHUNK
}
mem_fence(CLK_GLOBAL_MEM_FENCE);
# endif
}
)==="
R"===(
// cn1_v2_double: memory-hard main loop of CryptoNight variant 2, "double"
// flavour. One work-item processes one hash. Same structure and standard
// (1,2,3) chunk-shuffle order as cn1_v2_zls, but runs 0x100000 iterations
// (2x the base 0x80000). Requires helpers defined elsewhere in this program:
// getIdx, AES0_C, AES_Round, fast_div_v2, fast_sqrt_v2, plus the
// MEMORY/MASK/WORKSIZE/STRIDED_INDEX/MEM_CHUNK/COMP_MODE/CN_UNROLL defines.
__attribute__((reqd_work_group_size(WORKSIZE, 1, 1)))
__kernel void cn1_v2_double(__global uint4 *Scratchpad, __global ulong *states, uint variant, __global ulong *input, uint Threads)
{
# if (ALGO == CRYPTONIGHT)
ulong a[2], b[4];
// Work-group-local AES T-tables; AES1..AES3 are byte-rotations of AES0.
__local uint AES0[256], AES1[256], AES2[256], AES3[256];
const ulong gIdx = getIdx();
// Cooperative fill of the 256-entry tables, strided across the work-group.
for(int i = get_local_id(0); i < 256; i += WORKSIZE)
{
const uint tmp = AES0_C[i];
AES0[i] = tmp;
AES1[i] = rotate(tmp, 8U);
AES2[i] = rotate(tmp, 16U);
AES3[i] = rotate(tmp, 24U);
}
barrier(CLK_LOCAL_MEM_FENCE);
# if (COMP_MODE == 1)
// do not use early return here
if (gIdx < Threads)
# endif
{
// Each hash owns a 25-ulong Keccak state; stride to this thread's state.
states += 25 * gIdx;
# if defined(__NV_CL_C_VERSION)
// NOTE(review): 0x100000 >> 2 = 0x40000 uint4 elements (4 MiB) per
// thread, while the NV index mask below is 0x1FFFC0 (2 MiB range) —
// these constants look inconsistent with each other and with the
// generic (MEMORY >> 4) path; confirm against the double scratchpad
// size before reuse.
Scratchpad += gIdx * (0x100000 >> 2);
# else
# if (STRIDED_INDEX == 0)
Scratchpad += gIdx * (MEMORY >> 4);
# elif (STRIDED_INDEX == 1)
Scratchpad += gIdx;
# elif (STRIDED_INDEX == 2)
Scratchpad += get_group_id(0) * (MEMORY >> 4) * WORKSIZE + MEM_CHUNK * get_local_id(0);
# endif
# endif
// Derive the a/b working registers from the Keccak state words.
a[0] = states[0] ^ states[4];
a[1] = states[1] ^ states[5];
b[0] = states[2] ^ states[6];
b[1] = states[3] ^ states[7];
b[2] = states[8] ^ states[10];
b[3] = states[9] ^ states[11];
}
ulong2 bx0 = ((ulong2 *)b)[0];
ulong2 bx1 = ((ulong2 *)b)[1];
mem_fence(CLK_LOCAL_MEM_FENCE);
# ifdef __NV_CL_C_VERSION
// NVIDIA path: stage one 64-byte scratchpad line in local memory and do the
// four 16-byte chunk accesses against that copy (idx1 is the offset within
// the line, idx the line's global offset).
__local uint16 scratchpad_line_buf[WORKSIZE];
__local uint16* scratchpad_line = scratchpad_line_buf + get_local_id(0);
# define SCRATCHPAD_CHUNK(N) (*(__local uint4*)((__local uchar*)(scratchpad_line) + (idx1 ^ (N << 4))))
# else
# if (STRIDED_INDEX == 0)
# define SCRATCHPAD_CHUNK(N) (*(__global uint4*)((__global uchar*)(Scratchpad) + (idx ^ (N << 4))))
# elif (STRIDED_INDEX == 1)
# define SCRATCHPAD_CHUNK(N) (*(__global uint4*)((__global uchar*)(Scratchpad) + mul24(as_uint(idx ^ (N << 4)), Threads)))
# elif (STRIDED_INDEX == 2)
# define SCRATCHPAD_CHUNK(N) (*(__global uint4*)((__global uchar*)(Scratchpad) + (((idx ^ (N << 4)) % (MEM_CHUNK << 4)) + ((idx ^ (N << 4)) / (MEM_CHUNK << 4)) * WORKSIZE * (MEM_CHUNK << 4))))
# endif
# endif
# if (COMP_MODE == 1)
// do not use early return here
if (gIdx < Threads)
# endif
{
// Variant-2 integer-math state (fed by fast_div_v2 / fast_sqrt_v2 below).
uint2 division_result = as_uint2(states[12]);
uint sqrt_result = as_uint2(states[13]).s0;
#pragma unroll CN_UNROLL
// double iteration count: 0x100000 (2x the base 0x80000).
for(int i = 0; i < 0x100000; ++i)
{
# ifdef __NV_CL_C_VERSION
uint idx = a[0] & 0x1FFFC0;
uint idx1 = a[0] & 0x30;
*scratchpad_line = *(__global uint16*)((__global uchar*)(Scratchpad) + idx);
# else
uint idx = a[0] & MASK;
# endif
uint4 c = SCRATCHPAD_CHUNK(0);
c = AES_Round(AES0, AES1, AES2, AES3, c, ((uint4 *)a)[0]);
{
// First shuffle — standard (1,2,3) load order.
const ulong2 chunk1 = as_ulong2(SCRATCHPAD_CHUNK(1));
const ulong2 chunk2 = as_ulong2(SCRATCHPAD_CHUNK(2));
const ulong2 chunk3 = as_ulong2(SCRATCHPAD_CHUNK(3));
SCRATCHPAD_CHUNK(1) = as_uint4(chunk3 + bx1);
SCRATCHPAD_CHUNK(2) = as_uint4(chunk1 + bx0);
SCRATCHPAD_CHUNK(3) = as_uint4(chunk2 + ((ulong2 *)a)[0]);
}
SCRATCHPAD_CHUNK(0) = as_uint4(bx0) ^ c;
# ifdef __NV_CL_C_VERSION
// Flush the staged line, then stage the line addressed by c.
*(__global uint16*)((__global uchar*)(Scratchpad) + idx) = *scratchpad_line;
idx = as_ulong2(c).s0 & 0x1FFFC0;
idx1 = as_ulong2(c).s0 & 0x30;
*scratchpad_line = *(__global uint16*)((__global uchar*)(Scratchpad) + idx);
# else
idx = as_ulong2(c).s0 & MASK;
# endif
uint4 tmp = SCRATCHPAD_CHUNK(0);
{
// Variant-2 division/sqrt tweak: mix previous results into tmp,
// then compute the next division and sqrt from c.
tmp.s0 ^= division_result.s0;
tmp.s1 ^= division_result.s1 ^ sqrt_result;
division_result = fast_div_v2(as_ulong2(c).s1, (c.s0 + (sqrt_result << 1)) | 0x80000001UL);
sqrt_result = fast_sqrt_v2(as_ulong2(c).s0 + as_ulong(division_result));
}
// 64x64 -> 128-bit multiply: t.s0 = high half, t.s1 = low half.
ulong2 t;
t.s0 = mul_hi(as_ulong2(c).s0, as_ulong2(tmp).s0);
t.s1 = as_ulong2(c).s0 * as_ulong2(tmp).s0;
{
// Second shuffle — standard store order (cf. cn1_v2_rwz, which
// swaps chunk1/chunk3 here).
const ulong2 chunk1 = as_ulong2(SCRATCHPAD_CHUNK(1)) ^ t;
const ulong2 chunk2 = as_ulong2(SCRATCHPAD_CHUNK(2));
t ^= chunk2;
const ulong2 chunk3 = as_ulong2(SCRATCHPAD_CHUNK(3));
SCRATCHPAD_CHUNK(1) = as_uint4(chunk3 + bx1);
SCRATCHPAD_CHUNK(2) = as_uint4(chunk1 + bx0);
SCRATCHPAD_CHUNK(3) = as_uint4(chunk2 + ((ulong2 *)a)[0]);
}
a[1] += t.s1;
a[0] += t.s0;
SCRATCHPAD_CHUNK(0) = ((uint4 *)a)[0];
# ifdef __NV_CL_C_VERSION
*(__global uint16*)((__global uchar*)(Scratchpad) + idx) = *scratchpad_line;
# endif
((uint4 *)a)[0] ^= tmp;
// Rotate the b registers for the next iteration.
bx1 = bx0;
bx0 = as_ulong2(c);
}
# undef SCRATCHPAD_CHUNK
}
mem_fence(CLK_GLOBAL_MEM_FENCE);
# endif
}
)==="

File diff suppressed because it is too large Load diff

View file

@ -2,6 +2,7 @@
* @author SChernykh * @author SChernykh
*/ */
#if (ALGO_BASE == ALGO_CN_2)
inline uint get_reciprocal(uint a) inline uint get_reciprocal(uint a)
{ {
const float a_hi = as_float((a >> 8) + ((126U + 31U) << 23)); const float a_hi = as_float((a >> 8) + ((126U + 31U) << 23));
@ -52,3 +53,4 @@ inline uint fast_sqrt_v2(const ulong n1)
return result; return result;
} }
#endif