From 5f113a47cfadda64242d340721d1cf1e6ed45d89 Mon Sep 17 00:00:00 2001
From: XMRig
Date: Sun, 3 Feb 2019 20:34:24 +0700
Subject: [PATCH] Fix typo.

---
 src/crypto/cn_gpu_avx.cpp   | 22 +++++++++++-----------
 src/crypto/cn_gpu_ssse3.cpp | 38 +++++++++++++++++++-------------------
 2 files changed, 30 insertions(+), 30 deletions(-)

diff --git a/src/crypto/cn_gpu_avx.cpp b/src/crypto/cn_gpu_avx.cpp
index a808e34ce..3dc7cacb7 100644
--- a/src/crypto/cn_gpu_avx.cpp
+++ b/src/crypto/cn_gpu_avx.cpp
@@ -88,7 +88,7 @@ inline void round_compute(const __m256& n0, const __m256& n1, const __m256& n2,
 
 // 112×4 = 448
 template
-inline __m256i double_comupte(const __m256& n0, const __m256& n1, const __m256& n2, const __m256& n3,
+inline __m256i double_compute(const __m256& n0, const __m256& n1, const __m256& n2, const __m256& n3,
     float lcnt, float hcnt, const __m256& rnd_c, __m256& sum)
 {
     __m256 c = _mm256_insertf128_ps(_mm256_castps128_ps256(_mm_set1_ps(lcnt)), _mm_set1_ps(hcnt), 1);
@@ -113,10 +113,10 @@ inline __m256i double_comupte(const __m256& n0, const __m256& n1, const __m256&
 }
 
 template
-inline void double_comupte_wrap(const __m256& n0, const __m256& n1, const __m256& n2, const __m256& n3,
+inline void double_compute_wrap(const __m256& n0, const __m256& n1, const __m256& n2, const __m256& n3,
     float lcnt, float hcnt, const __m256& rnd_c, __m256& sum, __m256i& out)
 {
-    __m256i r = double_comupte(n0, n1, n2, n3, lcnt, hcnt, rnd_c, sum);
+    __m256i r = double_compute(n0, n1, n2, n3, lcnt, hcnt, rnd_c, sum);
 
     if(rot != 0)
         r = _mm256_or_si256(_mm256_bslli_epi128(r, 16 - rot), _mm256_bsrli_epi128(r, rot));
@@ -151,10 +151,10 @@ void cn_gpu_inner_avx(const uint8_t* spad, uint8_t* lpad)
     n33 = _mm256_permute2f128_ps(n23, n23, 0x11);
 
     out = _mm256_setzero_si256();
-    double_comupte_wrap<0>(n01, n10, n22, n33, 1.3437500f, 1.4296875f, rc, suma, out);
-    double_comupte_wrap<1>(n01, n22, n33, n10, 1.2812500f, 1.3984375f, rc, suma, out);
-    double_comupte_wrap<2>(n01, n33, n10, n22, 1.3593750f, 1.3828125f, rc, sumb, out);
-    double_comupte_wrap<3>(n01, n33, n22, n10, 1.3671875f, 1.3046875f, rc, sumb, out);
+    double_compute_wrap<0>(n01, n10, n22, n33, 1.3437500f, 1.4296875f, rc, suma, out);
+    double_compute_wrap<1>(n01, n22, n33, n10, 1.2812500f, 1.3984375f, rc, suma, out);
+    double_compute_wrap<2>(n01, n33, n10, n22, 1.3593750f, 1.3828125f, rc, sumb, out);
+    double_compute_wrap<3>(n01, n33, n22, n10, 1.3671875f, 1.3046875f, rc, sumb, out);
     _mm256_store_si256(idx0, _mm256_xor_si256(v01, out));
     sum0 = _mm256_add_ps(suma, sumb);
     out2 = out;
@@ -165,10 +165,10 @@ void cn_gpu_inner_avx(const uint8_t* spad, uint8_t* lpad)
     n30 = _mm256_permute2f128_ps(n01, n23, 0x03);
 
     out = _mm256_setzero_si256();
-    double_comupte_wrap<0>(n23, n11, n02, n30, 1.4140625f, 1.3203125f, rc, suma, out);
-    double_comupte_wrap<1>(n23, n02, n30, n11, 1.2734375f, 1.3515625f, rc, suma, out);
-    double_comupte_wrap<2>(n23, n30, n11, n02, 1.2578125f, 1.3359375f, rc, sumb, out);
-    double_comupte_wrap<3>(n23, n30, n02, n11, 1.2890625f, 1.4609375f, rc, sumb, out);
+    double_compute_wrap<0>(n23, n11, n02, n30, 1.4140625f, 1.3203125f, rc, suma, out);
+    double_compute_wrap<1>(n23, n02, n30, n11, 1.2734375f, 1.3515625f, rc, suma, out);
+    double_compute_wrap<2>(n23, n30, n11, n02, 1.2578125f, 1.3359375f, rc, sumb, out);
+    double_compute_wrap<3>(n23, n30, n02, n11, 1.2890625f, 1.4609375f, rc, sumb, out);
     _mm256_store_si256(idx2, _mm256_xor_si256(v23, out));
     sum1 = _mm256_add_ps(suma, sumb);
 
diff --git a/src/crypto/cn_gpu_ssse3.cpp b/src/crypto/cn_gpu_ssse3.cpp
index d986752cb..ce3d19add 100644
--- a/src/crypto/cn_gpu_ssse3.cpp
+++ b/src/crypto/cn_gpu_ssse3.cpp
@@ -90,7 +90,7 @@ inline void round_compute(__m128 n0, __m128 n1, __m128 n2, __m128 n3, __m128 rnd
 
 // 112×4 = 448
 template
-inline __m128i single_comupte(__m128 n0, __m128 n1, __m128 n2, __m128 n3, float cnt, __m128 rnd_c, __m128& sum)
+inline __m128i single_compute(__m128 n0, __m128 n1, __m128 n2, __m128 n3, float cnt, __m128 rnd_c, __m128& sum)
 {
     __m128 c = _mm_set1_ps(cnt);
     __m128 r = _mm_setzero_ps();
@@ -114,9 +114,9 @@ inline __m128i single_comupte(__m128 n0, __m128 n1, __m128 n2, __m128 n3, floa
 }
 
 template
-inline void single_comupte_wrap(__m128 n0, __m128 n1, __m128 n2, __m128 n3, float cnt, __m128 rnd_c, __m128& sum, __m128i& out)
+inline void single_compute_wrap(__m128 n0, __m128 n1, __m128 n2, __m128 n3, float cnt, __m128 rnd_c, __m128& sum, __m128i& out)
 {
-    __m128i r = single_comupte(n0, n1, n2, n3, cnt, rnd_c, sum);
+    __m128i r = single_compute(n0, n1, n2, n3, cnt, rnd_c, sum);
     if(rot != 0)
         r = _mm_or_si128(_mm_slli_si128(r, 16 - rot), _mm_srli_si128(r, rot));
     out = _mm_xor_si128(out, r);
@@ -149,37 +149,37 @@ void cn_gpu_inner_ssse3(const uint8_t* spad, uint8_t* lpad)
     __m128i out, out2;
 
     out = _mm_setzero_si128();
-    single_comupte_wrap<0>(n0, n1, n2, n3, 1.3437500f, rc, suma, out);
-    single_comupte_wrap<1>(n0, n2, n3, n1, 1.2812500f, rc, suma, out);
-    single_comupte_wrap<2>(n0, n3, n1, n2, 1.3593750f, rc, sumb, out);
-    single_comupte_wrap<3>(n0, n3, n2, n1, 1.3671875f, rc, sumb, out);
+    single_compute_wrap<0>(n0, n1, n2, n3, 1.3437500f, rc, suma, out);
+    single_compute_wrap<1>(n0, n2, n3, n1, 1.2812500f, rc, suma, out);
+    single_compute_wrap<2>(n0, n3, n1, n2, 1.3593750f, rc, sumb, out);
+    single_compute_wrap<3>(n0, n3, n2, n1, 1.3671875f, rc, sumb, out);
     sum0 = _mm_add_ps(suma, sumb);
     _mm_store_si128(idx0, _mm_xor_si128(v0, out));
     out2 = out;
 
     out = _mm_setzero_si128();
-    single_comupte_wrap<0>(n1, n0, n2, n3, 1.4296875f, rc, suma, out);
-    single_comupte_wrap<1>(n1, n2, n3, n0, 1.3984375f, rc, suma, out);
-    single_comupte_wrap<2>(n1, n3, n0, n2, 1.3828125f, rc, sumb, out);
-    single_comupte_wrap<3>(n1, n3, n2, n0, 1.3046875f, rc, sumb, out);
+    single_compute_wrap<0>(n1, n0, n2, n3, 1.4296875f, rc, suma, out);
+    single_compute_wrap<1>(n1, n2, n3, n0, 1.3984375f, rc, suma, out);
+    single_compute_wrap<2>(n1, n3, n0, n2, 1.3828125f, rc, sumb, out);
+    single_compute_wrap<3>(n1, n3, n2, n0, 1.3046875f, rc, sumb, out);
     sum1 = _mm_add_ps(suma, sumb);
    _mm_store_si128(idx1, _mm_xor_si128(v1, out));
     out2 = _mm_xor_si128(out2, out);
 
     out = _mm_setzero_si128();
-    single_comupte_wrap<0>(n2, n1, n0, n3, 1.4140625f, rc, suma, out);
-    single_comupte_wrap<1>(n2, n0, n3, n1, 1.2734375f, rc, suma, out);
-    single_comupte_wrap<2>(n2, n3, n1, n0, 1.2578125f, rc, sumb, out);
-    single_comupte_wrap<3>(n2, n3, n0, n1, 1.2890625f, rc, sumb, out);
+    single_compute_wrap<0>(n2, n1, n0, n3, 1.4140625f, rc, suma, out);
+    single_compute_wrap<1>(n2, n0, n3, n1, 1.2734375f, rc, suma, out);
+    single_compute_wrap<2>(n2, n3, n1, n0, 1.2578125f, rc, sumb, out);
+    single_compute_wrap<3>(n2, n3, n0, n1, 1.2890625f, rc, sumb, out);
     sum2 = _mm_add_ps(suma, sumb);
     _mm_store_si128(idx2, _mm_xor_si128(v2, out));
     out2 = _mm_xor_si128(out2, out);
 
     out = _mm_setzero_si128();
-    single_comupte_wrap<0>(n3, n1, n2, n0, 1.3203125f, rc, suma, out);
-    single_comupte_wrap<1>(n3, n2, n0, n1, 1.3515625f, rc, suma, out);
-    single_comupte_wrap<2>(n3, n0, n1, n2, 1.3359375f, rc, sumb, out);
-    single_comupte_wrap<3>(n3, n0, n2, n1, 1.4609375f, rc, sumb, out);
+    single_compute_wrap<0>(n3, n1, n2, n0, 1.3203125f, rc, suma, out);
+    single_compute_wrap<1>(n3, n2, n0, n1, 1.3515625f, rc, suma, out);
+    single_compute_wrap<2>(n3, n0, n1, n2, 1.3359375f, rc, sumb, out);
+    single_compute_wrap<3>(n3, n0, n2, n1, 1.4609375f, rc, sumb, out);
     sum3 = _mm_add_ps(suma, sumb);
     _mm_store_si128(idx3, _mm_xor_si128(v3, out));
     out2 = _mm_xor_si128(out2, out);
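
The only behavioral content in the renamed helpers worth a note is the shift pair in single_compute_wrap (and its AVX counterpart): shifting the 128-bit result right by rot bytes and OR-ing it with the same value shifted left by 16 - rot bytes is a byte-wise rotation, and each of the four instantiations (<0> through <3>) rotates its contribution by a different byte count before XOR-folding it into out. Below is a minimal, self-contained sketch of just that step. It assumes an integer template parameter named rot (the template parameter lists are missing from the patch context above); the helper name xor_rotated and the driver are illustrative, not part of XMRig.

// Sketch of the byte-rotate-and-XOR step from single_compute_wrap.
// Assumes a template parameter `uint32_t rot`; xor_rotated is a
// hypothetical name for illustration only.
#include <cstdint>
#include <cstdio>
#include <emmintrin.h> // SSE2: _mm_slli_si128/_mm_srli_si128 shift by whole bytes

template <uint32_t rot>
inline void xor_rotated(__m128i r, __m128i& out)
{
    // (r >> 8*rot) | (r << 8*(16 - rot)) rotates the 128-bit value
    // right by `rot` bytes; rot == 0 needs no work.
    if (rot != 0)
        r = _mm_or_si128(_mm_slli_si128(r, 16 - rot), _mm_srli_si128(r, rot));
    out = _mm_xor_si128(out, r); // fold the rotated result into the accumulator
}

int main()
{
    __m128i out = _mm_setzero_si128();
    const __m128i r = _mm_set_epi8(15, 14, 13, 12, 11, 10, 9, 8,
                                   7, 6, 5, 4, 3, 2, 1, 0); // byte i holds i

    xor_rotated<3>(r, out); // rotate right by 3 bytes, XOR into zeroed out

    alignas(16) uint8_t b[16];
    _mm_store_si128(reinterpret_cast<__m128i*>(b), out);
    for (int i = 0; i < 16; ++i)
        printf("%02x ", b[i]); // prints 03 04 ... 0f 00 01 02
    printf("\n");
    return 0;
}

The XOR accumulation is why the shifts must form a rotation rather than a plain shift: no bytes of any instantiation's result are discarded, they just land in different lanes of out.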