diff --git a/src/3rdparty/libcpuid/asm-bits.c b/src/3rdparty/libcpuid/asm-bits.c index b8e32284..bfabd404 100644 --- a/src/3rdparty/libcpuid/asm-bits.c +++ b/src/3rdparty/libcpuid/asm-bits.c @@ -1,825 +1,836 @@ -/* - * Copyright 2008 Veselin Georgiev, - * anrieffNOSPAM @ mgail_DOT.com (convert to gmail) - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include "libcpuid.h" -#include "asm-bits.h" - -int cpuid_exists_by_eflags(void) -{ -#if defined(PLATFORM_X64) - return 1; /* CPUID is always present on the x86_64 */ -#elif defined(PLATFORM_X86) -# if defined(COMPILER_GCC) - int result; - __asm __volatile( - " pushfl\n" - " pop %%eax\n" - " mov %%eax, %%ecx\n" - " xor $0x200000, %%eax\n" - " push %%eax\n" - " popfl\n" - " pushfl\n" - " pop %%eax\n" - " xor %%ecx, %%eax\n" - " mov %%eax, %0\n" - " push %%ecx\n" - " popfl\n" - : "=m"(result) - : :"eax", "ecx", "memory"); - return (result != 0); -# elif defined(COMPILER_MICROSOFT) - int result; - __asm { - pushfd - pop eax - mov ecx, eax - xor eax, 0x200000 - push eax - popfd - pushfd - pop eax - xor eax, ecx - mov result, eax - push ecx - popfd - }; - return (result != 0); -# else - return 0; -# endif /* COMPILER_MICROSOFT */ -#else - return 0; -#endif /* PLATFORM_X86 */ -} - -#ifdef INLINE_ASM_SUPPORTED -/* - * with MSVC/AMD64, the exec_cpuid() and cpu_rdtsc() functions - * are implemented in separate .asm files. 
Otherwise, use inline assembly - */ -void exec_cpuid(uint32_t *regs) -{ -#ifdef COMPILER_GCC -# ifdef PLATFORM_X64 - __asm __volatile( - " mov %0, %%rdi\n" - - " push %%rbx\n" - " push %%rcx\n" - " push %%rdx\n" - - " mov (%%rdi), %%eax\n" - " mov 4(%%rdi), %%ebx\n" - " mov 8(%%rdi), %%ecx\n" - " mov 12(%%rdi), %%edx\n" - - " cpuid\n" - - " movl %%eax, (%%rdi)\n" - " movl %%ebx, 4(%%rdi)\n" - " movl %%ecx, 8(%%rdi)\n" - " movl %%edx, 12(%%rdi)\n" - " pop %%rdx\n" - " pop %%rcx\n" - " pop %%rbx\n" - : - :"m"(regs) - :"memory", "eax", "rdi" - ); -# else - __asm __volatile( - " mov %0, %%edi\n" - - " push %%ebx\n" - " push %%ecx\n" - " push %%edx\n" - - " mov (%%edi), %%eax\n" - " mov 4(%%edi), %%ebx\n" - " mov 8(%%edi), %%ecx\n" - " mov 12(%%edi), %%edx\n" - - " cpuid\n" - - " mov %%eax, (%%edi)\n" - " mov %%ebx, 4(%%edi)\n" - " mov %%ecx, 8(%%edi)\n" - " mov %%edx, 12(%%edi)\n" - " pop %%edx\n" - " pop %%ecx\n" - " pop %%ebx\n" - : - :"m"(regs) - :"memory", "eax", "edi" - ); -# endif /* COMPILER_GCC */ -#else -# ifdef COMPILER_MICROSOFT - __asm { - push ebx - push ecx - push edx - push edi - mov edi, regs - - mov eax, [edi] - mov ebx, [edi+4] - mov ecx, [edi+8] - mov edx, [edi+12] - - cpuid - - mov [edi], eax - mov [edi+4], ebx - mov [edi+8], ecx - mov [edi+12], edx - - pop edi - pop edx - pop ecx - pop ebx - } -# else -# error "Unsupported compiler" -# endif /* COMPILER_MICROSOFT */ -#endif -} -#endif /* INLINE_ASSEMBLY_SUPPORTED */ - -#ifdef INLINE_ASM_SUPPORTED -void cpu_rdtsc(uint64_t* result) -{ - uint32_t low_part, hi_part; -#ifdef COMPILER_GCC - __asm __volatile ( - " rdtsc\n" - " mov %%eax, %0\n" - " mov %%edx, %1\n" - :"=m"(low_part), "=m"(hi_part)::"memory", "eax", "edx" - ); -#else -# ifdef COMPILER_MICROSOFT - __asm { - rdtsc - mov low_part, eax - mov hi_part, edx - }; -# else -# error "Unsupported compiler" -# endif /* COMPILER_MICROSOFT */ -#endif /* COMPILER_GCC */ - *result = (uint64_t)low_part + (((uint64_t) hi_part) << 32); -} -#endif /* INLINE_ASM_SUPPORTED */ - -#ifdef INLINE_ASM_SUPPORTED -void busy_sse_loop(int cycles) -{ -#ifdef COMPILER_GCC -#ifndef __APPLE__ -# define XALIGN ".balign 16\n" -#else -# define XALIGN ".align 4\n" -#endif - __asm __volatile ( - " xorps %%xmm0, %%xmm0\n" - " xorps %%xmm1, %%xmm1\n" - " xorps %%xmm2, %%xmm2\n" - " xorps %%xmm3, %%xmm3\n" - " xorps %%xmm4, %%xmm4\n" - " xorps %%xmm5, %%xmm5\n" - " xorps %%xmm6, %%xmm6\n" - " xorps %%xmm7, %%xmm7\n" - XALIGN - /* ".bsLoop:\n" */ - "1:\n" - // 0: - " addps %%xmm1, %%xmm0\n" - " addps %%xmm2, %%xmm1\n" - " addps %%xmm3, %%xmm2\n" - " addps %%xmm4, %%xmm3\n" - " addps %%xmm5, %%xmm4\n" - " addps %%xmm6, %%xmm5\n" - " addps %%xmm7, %%xmm6\n" - " addps %%xmm0, %%xmm7\n" - // 1: - " addps %%xmm1, %%xmm0\n" - " addps %%xmm2, %%xmm1\n" - " addps %%xmm3, %%xmm2\n" - " addps %%xmm4, %%xmm3\n" - " addps %%xmm5, %%xmm4\n" - " addps %%xmm6, %%xmm5\n" - " addps %%xmm7, %%xmm6\n" - " addps %%xmm0, %%xmm7\n" - // 2: - " addps %%xmm1, %%xmm0\n" - " addps %%xmm2, %%xmm1\n" - " addps %%xmm3, %%xmm2\n" - " addps %%xmm4, %%xmm3\n" - " addps %%xmm5, %%xmm4\n" - " addps %%xmm6, %%xmm5\n" - " addps %%xmm7, %%xmm6\n" - " addps %%xmm0, %%xmm7\n" - // 3: - " addps %%xmm1, %%xmm0\n" - " addps %%xmm2, %%xmm1\n" - " addps %%xmm3, %%xmm2\n" - " addps %%xmm4, %%xmm3\n" - " addps %%xmm5, %%xmm4\n" - " addps %%xmm6, %%xmm5\n" - " addps %%xmm7, %%xmm6\n" - " addps %%xmm0, %%xmm7\n" - // 4: - " addps %%xmm1, %%xmm0\n" - " addps %%xmm2, %%xmm1\n" - " addps %%xmm3, %%xmm2\n" - " addps %%xmm4, %%xmm3\n" - " addps %%xmm5, 
%%xmm4\n" - " addps %%xmm6, %%xmm5\n" - " addps %%xmm7, %%xmm6\n" - " addps %%xmm0, %%xmm7\n" - // 5: - " addps %%xmm1, %%xmm0\n" - " addps %%xmm2, %%xmm1\n" - " addps %%xmm3, %%xmm2\n" - " addps %%xmm4, %%xmm3\n" - " addps %%xmm5, %%xmm4\n" - " addps %%xmm6, %%xmm5\n" - " addps %%xmm7, %%xmm6\n" - " addps %%xmm0, %%xmm7\n" - // 6: - " addps %%xmm1, %%xmm0\n" - " addps %%xmm2, %%xmm1\n" - " addps %%xmm3, %%xmm2\n" - " addps %%xmm4, %%xmm3\n" - " addps %%xmm5, %%xmm4\n" - " addps %%xmm6, %%xmm5\n" - " addps %%xmm7, %%xmm6\n" - " addps %%xmm0, %%xmm7\n" - // 7: - " addps %%xmm1, %%xmm0\n" - " addps %%xmm2, %%xmm1\n" - " addps %%xmm3, %%xmm2\n" - " addps %%xmm4, %%xmm3\n" - " addps %%xmm5, %%xmm4\n" - " addps %%xmm6, %%xmm5\n" - " addps %%xmm7, %%xmm6\n" - " addps %%xmm0, %%xmm7\n" - // 8: - " addps %%xmm1, %%xmm0\n" - " addps %%xmm2, %%xmm1\n" - " addps %%xmm3, %%xmm2\n" - " addps %%xmm4, %%xmm3\n" - " addps %%xmm5, %%xmm4\n" - " addps %%xmm6, %%xmm5\n" - " addps %%xmm7, %%xmm6\n" - " addps %%xmm0, %%xmm7\n" - // 9: - " addps %%xmm1, %%xmm0\n" - " addps %%xmm2, %%xmm1\n" - " addps %%xmm3, %%xmm2\n" - " addps %%xmm4, %%xmm3\n" - " addps %%xmm5, %%xmm4\n" - " addps %%xmm6, %%xmm5\n" - " addps %%xmm7, %%xmm6\n" - " addps %%xmm0, %%xmm7\n" - //10: - " addps %%xmm1, %%xmm0\n" - " addps %%xmm2, %%xmm1\n" - " addps %%xmm3, %%xmm2\n" - " addps %%xmm4, %%xmm3\n" - " addps %%xmm5, %%xmm4\n" - " addps %%xmm6, %%xmm5\n" - " addps %%xmm7, %%xmm6\n" - " addps %%xmm0, %%xmm7\n" - //11: - " addps %%xmm1, %%xmm0\n" - " addps %%xmm2, %%xmm1\n" - " addps %%xmm3, %%xmm2\n" - " addps %%xmm4, %%xmm3\n" - " addps %%xmm5, %%xmm4\n" - " addps %%xmm6, %%xmm5\n" - " addps %%xmm7, %%xmm6\n" - " addps %%xmm0, %%xmm7\n" - //12: - " addps %%xmm1, %%xmm0\n" - " addps %%xmm2, %%xmm1\n" - " addps %%xmm3, %%xmm2\n" - " addps %%xmm4, %%xmm3\n" - " addps %%xmm5, %%xmm4\n" - " addps %%xmm6, %%xmm5\n" - " addps %%xmm7, %%xmm6\n" - " addps %%xmm0, %%xmm7\n" - //13: - " addps %%xmm1, %%xmm0\n" - " addps %%xmm2, %%xmm1\n" - " addps %%xmm3, %%xmm2\n" - " addps %%xmm4, %%xmm3\n" - " addps %%xmm5, %%xmm4\n" - " addps %%xmm6, %%xmm5\n" - " addps %%xmm7, %%xmm6\n" - " addps %%xmm0, %%xmm7\n" - //14: - " addps %%xmm1, %%xmm0\n" - " addps %%xmm2, %%xmm1\n" - " addps %%xmm3, %%xmm2\n" - " addps %%xmm4, %%xmm3\n" - " addps %%xmm5, %%xmm4\n" - " addps %%xmm6, %%xmm5\n" - " addps %%xmm7, %%xmm6\n" - " addps %%xmm0, %%xmm7\n" - //15: - " addps %%xmm1, %%xmm0\n" - " addps %%xmm2, %%xmm1\n" - " addps %%xmm3, %%xmm2\n" - " addps %%xmm4, %%xmm3\n" - " addps %%xmm5, %%xmm4\n" - " addps %%xmm6, %%xmm5\n" - " addps %%xmm7, %%xmm6\n" - " addps %%xmm0, %%xmm7\n" - //16: - " addps %%xmm1, %%xmm0\n" - " addps %%xmm2, %%xmm1\n" - " addps %%xmm3, %%xmm2\n" - " addps %%xmm4, %%xmm3\n" - " addps %%xmm5, %%xmm4\n" - " addps %%xmm6, %%xmm5\n" - " addps %%xmm7, %%xmm6\n" - " addps %%xmm0, %%xmm7\n" - //17: - " addps %%xmm1, %%xmm0\n" - " addps %%xmm2, %%xmm1\n" - " addps %%xmm3, %%xmm2\n" - " addps %%xmm4, %%xmm3\n" - " addps %%xmm5, %%xmm4\n" - " addps %%xmm6, %%xmm5\n" - " addps %%xmm7, %%xmm6\n" - " addps %%xmm0, %%xmm7\n" - //18: - " addps %%xmm1, %%xmm0\n" - " addps %%xmm2, %%xmm1\n" - " addps %%xmm3, %%xmm2\n" - " addps %%xmm4, %%xmm3\n" - " addps %%xmm5, %%xmm4\n" - " addps %%xmm6, %%xmm5\n" - " addps %%xmm7, %%xmm6\n" - " addps %%xmm0, %%xmm7\n" - //19: - " addps %%xmm1, %%xmm0\n" - " addps %%xmm2, %%xmm1\n" - " addps %%xmm3, %%xmm2\n" - " addps %%xmm4, %%xmm3\n" - " addps %%xmm5, %%xmm4\n" - " addps %%xmm6, %%xmm5\n" - " addps %%xmm7, %%xmm6\n" - " 
addps %%xmm0, %%xmm7\n" - //20: - " addps %%xmm1, %%xmm0\n" - " addps %%xmm2, %%xmm1\n" - " addps %%xmm3, %%xmm2\n" - " addps %%xmm4, %%xmm3\n" - " addps %%xmm5, %%xmm4\n" - " addps %%xmm6, %%xmm5\n" - " addps %%xmm7, %%xmm6\n" - " addps %%xmm0, %%xmm7\n" - //21: - " addps %%xmm1, %%xmm0\n" - " addps %%xmm2, %%xmm1\n" - " addps %%xmm3, %%xmm2\n" - " addps %%xmm4, %%xmm3\n" - " addps %%xmm5, %%xmm4\n" - " addps %%xmm6, %%xmm5\n" - " addps %%xmm7, %%xmm6\n" - " addps %%xmm0, %%xmm7\n" - //22: - " addps %%xmm1, %%xmm0\n" - " addps %%xmm2, %%xmm1\n" - " addps %%xmm3, %%xmm2\n" - " addps %%xmm4, %%xmm3\n" - " addps %%xmm5, %%xmm4\n" - " addps %%xmm6, %%xmm5\n" - " addps %%xmm7, %%xmm6\n" - " addps %%xmm0, %%xmm7\n" - //23: - " addps %%xmm1, %%xmm0\n" - " addps %%xmm2, %%xmm1\n" - " addps %%xmm3, %%xmm2\n" - " addps %%xmm4, %%xmm3\n" - " addps %%xmm5, %%xmm4\n" - " addps %%xmm6, %%xmm5\n" - " addps %%xmm7, %%xmm6\n" - " addps %%xmm0, %%xmm7\n" - //24: - " addps %%xmm1, %%xmm0\n" - " addps %%xmm2, %%xmm1\n" - " addps %%xmm3, %%xmm2\n" - " addps %%xmm4, %%xmm3\n" - " addps %%xmm5, %%xmm4\n" - " addps %%xmm6, %%xmm5\n" - " addps %%xmm7, %%xmm6\n" - " addps %%xmm0, %%xmm7\n" - //25: - " addps %%xmm1, %%xmm0\n" - " addps %%xmm2, %%xmm1\n" - " addps %%xmm3, %%xmm2\n" - " addps %%xmm4, %%xmm3\n" - " addps %%xmm5, %%xmm4\n" - " addps %%xmm6, %%xmm5\n" - " addps %%xmm7, %%xmm6\n" - " addps %%xmm0, %%xmm7\n" - //26: - " addps %%xmm1, %%xmm0\n" - " addps %%xmm2, %%xmm1\n" - " addps %%xmm3, %%xmm2\n" - " addps %%xmm4, %%xmm3\n" - " addps %%xmm5, %%xmm4\n" - " addps %%xmm6, %%xmm5\n" - " addps %%xmm7, %%xmm6\n" - " addps %%xmm0, %%xmm7\n" - //27: - " addps %%xmm1, %%xmm0\n" - " addps %%xmm2, %%xmm1\n" - " addps %%xmm3, %%xmm2\n" - " addps %%xmm4, %%xmm3\n" - " addps %%xmm5, %%xmm4\n" - " addps %%xmm6, %%xmm5\n" - " addps %%xmm7, %%xmm6\n" - " addps %%xmm0, %%xmm7\n" - //28: - " addps %%xmm1, %%xmm0\n" - " addps %%xmm2, %%xmm1\n" - " addps %%xmm3, %%xmm2\n" - " addps %%xmm4, %%xmm3\n" - " addps %%xmm5, %%xmm4\n" - " addps %%xmm6, %%xmm5\n" - " addps %%xmm7, %%xmm6\n" - " addps %%xmm0, %%xmm7\n" - //29: - " addps %%xmm1, %%xmm0\n" - " addps %%xmm2, %%xmm1\n" - " addps %%xmm3, %%xmm2\n" - " addps %%xmm4, %%xmm3\n" - " addps %%xmm5, %%xmm4\n" - " addps %%xmm6, %%xmm5\n" - " addps %%xmm7, %%xmm6\n" - " addps %%xmm0, %%xmm7\n" - //30: - " addps %%xmm1, %%xmm0\n" - " addps %%xmm2, %%xmm1\n" - " addps %%xmm3, %%xmm2\n" - " addps %%xmm4, %%xmm3\n" - " addps %%xmm5, %%xmm4\n" - " addps %%xmm6, %%xmm5\n" - " addps %%xmm7, %%xmm6\n" - " addps %%xmm0, %%xmm7\n" - //31: - " addps %%xmm1, %%xmm0\n" - " addps %%xmm2, %%xmm1\n" - " addps %%xmm3, %%xmm2\n" - " addps %%xmm4, %%xmm3\n" - " addps %%xmm5, %%xmm4\n" - " addps %%xmm6, %%xmm5\n" - " addps %%xmm7, %%xmm6\n" - " addps %%xmm0, %%xmm7\n" - - " dec %%eax\n" - /* "jnz .bsLoop\n" */ - " jnz 1b\n" - ::"a"(cycles) - ); -#else -# ifdef COMPILER_MICROSOFT - __asm { - mov eax, cycles - xorps xmm0, xmm0 - xorps xmm1, xmm1 - xorps xmm2, xmm2 - xorps xmm3, xmm3 - xorps xmm4, xmm4 - xorps xmm5, xmm5 - xorps xmm6, xmm6 - xorps xmm7, xmm7 - //-- - align 16 -bsLoop: - // 0: - addps xmm0, xmm1 - addps xmm1, xmm2 - addps xmm2, xmm3 - addps xmm3, xmm4 - addps xmm4, xmm5 - addps xmm5, xmm6 - addps xmm6, xmm7 - addps xmm7, xmm0 - // 1: - addps xmm0, xmm1 - addps xmm1, xmm2 - addps xmm2, xmm3 - addps xmm3, xmm4 - addps xmm4, xmm5 - addps xmm5, xmm6 - addps xmm6, xmm7 - addps xmm7, xmm0 - // 2: - addps xmm0, xmm1 - addps xmm1, xmm2 - addps xmm2, xmm3 - addps xmm3, xmm4 - addps xmm4, xmm5 - 
addps xmm5, xmm6 - addps xmm6, xmm7 - addps xmm7, xmm0 - // 3: - addps xmm0, xmm1 - addps xmm1, xmm2 - addps xmm2, xmm3 - addps xmm3, xmm4 - addps xmm4, xmm5 - addps xmm5, xmm6 - addps xmm6, xmm7 - addps xmm7, xmm0 - // 4: - addps xmm0, xmm1 - addps xmm1, xmm2 - addps xmm2, xmm3 - addps xmm3, xmm4 - addps xmm4, xmm5 - addps xmm5, xmm6 - addps xmm6, xmm7 - addps xmm7, xmm0 - // 5: - addps xmm0, xmm1 - addps xmm1, xmm2 - addps xmm2, xmm3 - addps xmm3, xmm4 - addps xmm4, xmm5 - addps xmm5, xmm6 - addps xmm6, xmm7 - addps xmm7, xmm0 - // 6: - addps xmm0, xmm1 - addps xmm1, xmm2 - addps xmm2, xmm3 - addps xmm3, xmm4 - addps xmm4, xmm5 - addps xmm5, xmm6 - addps xmm6, xmm7 - addps xmm7, xmm0 - // 7: - addps xmm0, xmm1 - addps xmm1, xmm2 - addps xmm2, xmm3 - addps xmm3, xmm4 - addps xmm4, xmm5 - addps xmm5, xmm6 - addps xmm6, xmm7 - addps xmm7, xmm0 - // 8: - addps xmm0, xmm1 - addps xmm1, xmm2 - addps xmm2, xmm3 - addps xmm3, xmm4 - addps xmm4, xmm5 - addps xmm5, xmm6 - addps xmm6, xmm7 - addps xmm7, xmm0 - // 9: - addps xmm0, xmm1 - addps xmm1, xmm2 - addps xmm2, xmm3 - addps xmm3, xmm4 - addps xmm4, xmm5 - addps xmm5, xmm6 - addps xmm6, xmm7 - addps xmm7, xmm0 - // 10: - addps xmm0, xmm1 - addps xmm1, xmm2 - addps xmm2, xmm3 - addps xmm3, xmm4 - addps xmm4, xmm5 - addps xmm5, xmm6 - addps xmm6, xmm7 - addps xmm7, xmm0 - // 11: - addps xmm0, xmm1 - addps xmm1, xmm2 - addps xmm2, xmm3 - addps xmm3, xmm4 - addps xmm4, xmm5 - addps xmm5, xmm6 - addps xmm6, xmm7 - addps xmm7, xmm0 - // 12: - addps xmm0, xmm1 - addps xmm1, xmm2 - addps xmm2, xmm3 - addps xmm3, xmm4 - addps xmm4, xmm5 - addps xmm5, xmm6 - addps xmm6, xmm7 - addps xmm7, xmm0 - // 13: - addps xmm0, xmm1 - addps xmm1, xmm2 - addps xmm2, xmm3 - addps xmm3, xmm4 - addps xmm4, xmm5 - addps xmm5, xmm6 - addps xmm6, xmm7 - addps xmm7, xmm0 - // 14: - addps xmm0, xmm1 - addps xmm1, xmm2 - addps xmm2, xmm3 - addps xmm3, xmm4 - addps xmm4, xmm5 - addps xmm5, xmm6 - addps xmm6, xmm7 - addps xmm7, xmm0 - // 15: - addps xmm0, xmm1 - addps xmm1, xmm2 - addps xmm2, xmm3 - addps xmm3, xmm4 - addps xmm4, xmm5 - addps xmm5, xmm6 - addps xmm6, xmm7 - addps xmm7, xmm0 - // 16: - addps xmm0, xmm1 - addps xmm1, xmm2 - addps xmm2, xmm3 - addps xmm3, xmm4 - addps xmm4, xmm5 - addps xmm5, xmm6 - addps xmm6, xmm7 - addps xmm7, xmm0 - // 17: - addps xmm0, xmm1 - addps xmm1, xmm2 - addps xmm2, xmm3 - addps xmm3, xmm4 - addps xmm4, xmm5 - addps xmm5, xmm6 - addps xmm6, xmm7 - addps xmm7, xmm0 - // 18: - addps xmm0, xmm1 - addps xmm1, xmm2 - addps xmm2, xmm3 - addps xmm3, xmm4 - addps xmm4, xmm5 - addps xmm5, xmm6 - addps xmm6, xmm7 - addps xmm7, xmm0 - // 19: - addps xmm0, xmm1 - addps xmm1, xmm2 - addps xmm2, xmm3 - addps xmm3, xmm4 - addps xmm4, xmm5 - addps xmm5, xmm6 - addps xmm6, xmm7 - addps xmm7, xmm0 - // 20: - addps xmm0, xmm1 - addps xmm1, xmm2 - addps xmm2, xmm3 - addps xmm3, xmm4 - addps xmm4, xmm5 - addps xmm5, xmm6 - addps xmm6, xmm7 - addps xmm7, xmm0 - // 21: - addps xmm0, xmm1 - addps xmm1, xmm2 - addps xmm2, xmm3 - addps xmm3, xmm4 - addps xmm4, xmm5 - addps xmm5, xmm6 - addps xmm6, xmm7 - addps xmm7, xmm0 - // 22: - addps xmm0, xmm1 - addps xmm1, xmm2 - addps xmm2, xmm3 - addps xmm3, xmm4 - addps xmm4, xmm5 - addps xmm5, xmm6 - addps xmm6, xmm7 - addps xmm7, xmm0 - // 23: - addps xmm0, xmm1 - addps xmm1, xmm2 - addps xmm2, xmm3 - addps xmm3, xmm4 - addps xmm4, xmm5 - addps xmm5, xmm6 - addps xmm6, xmm7 - addps xmm7, xmm0 - // 24: - addps xmm0, xmm1 - addps xmm1, xmm2 - addps xmm2, xmm3 - addps xmm3, xmm4 - addps xmm4, xmm5 - addps xmm5, xmm6 - 
addps xmm6, xmm7 - addps xmm7, xmm0 - // 25: - addps xmm0, xmm1 - addps xmm1, xmm2 - addps xmm2, xmm3 - addps xmm3, xmm4 - addps xmm4, xmm5 - addps xmm5, xmm6 - addps xmm6, xmm7 - addps xmm7, xmm0 - // 26: - addps xmm0, xmm1 - addps xmm1, xmm2 - addps xmm2, xmm3 - addps xmm3, xmm4 - addps xmm4, xmm5 - addps xmm5, xmm6 - addps xmm6, xmm7 - addps xmm7, xmm0 - // 27: - addps xmm0, xmm1 - addps xmm1, xmm2 - addps xmm2, xmm3 - addps xmm3, xmm4 - addps xmm4, xmm5 - addps xmm5, xmm6 - addps xmm6, xmm7 - addps xmm7, xmm0 - // 28: - addps xmm0, xmm1 - addps xmm1, xmm2 - addps xmm2, xmm3 - addps xmm3, xmm4 - addps xmm4, xmm5 - addps xmm5, xmm6 - addps xmm6, xmm7 - addps xmm7, xmm0 - // 29: - addps xmm0, xmm1 - addps xmm1, xmm2 - addps xmm2, xmm3 - addps xmm3, xmm4 - addps xmm4, xmm5 - addps xmm5, xmm6 - addps xmm6, xmm7 - addps xmm7, xmm0 - // 30: - addps xmm0, xmm1 - addps xmm1, xmm2 - addps xmm2, xmm3 - addps xmm3, xmm4 - addps xmm4, xmm5 - addps xmm5, xmm6 - addps xmm6, xmm7 - addps xmm7, xmm0 - // 31: - addps xmm0, xmm1 - addps xmm1, xmm2 - addps xmm2, xmm3 - addps xmm3, xmm4 - addps xmm4, xmm5 - addps xmm5, xmm6 - addps xmm6, xmm7 - addps xmm7, xmm0 - //---------------------- - dec eax - jnz bsLoop - } -# else -# error "Unsupported compiler" -# endif /* COMPILER_MICROSOFT */ -#endif /* COMPILER_GCC */ -} -#endif /* INLINE_ASSEMBLY_SUPPORTED */ +/* + * Copyright 2008 Veselin Georgiev, + * anrieffNOSPAM @ mgail_DOT.com (convert to gmail) + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "libcpuid.h" +#include "asm-bits.h" + +int cpuid_exists_by_eflags(void) +{ +#if defined(PLATFORM_X64) + return 1; /* CPUID is always present on the x86_64 */ +#elif defined(PLATFORM_X86) +# if defined(COMPILER_GCC) || defined(COMPILER_CLANG) + int result; + __asm __volatile( + " pushfl\n" + " pop %%eax\n" + " mov %%eax, %%ecx\n" + " xor $0x200000, %%eax\n" + " push %%eax\n" + " popfl\n" + " pushfl\n" + " pop %%eax\n" + " xor %%ecx, %%eax\n" + " mov %%eax, %0\n" + " push %%ecx\n" + " popfl\n" + : "=m"(result) + : :"eax", "ecx", "memory"); + return (result != 0); +# elif defined(COMPILER_MICROSOFT) + int result; + __asm { + pushfd + pop eax + mov ecx, eax + xor eax, 0x200000 + push eax + popfd + pushfd + pop eax + xor eax, ecx + mov result, eax + push ecx + popfd + }; + return (result != 0); +# else + return 0; +# endif /* COMPILER_MICROSOFT */ +#elif defined(PLATFORM_ARM) + return 0; +#else + return 0; +#endif /* PLATFORM_X86 */ +} + +#ifdef INLINE_ASM_SUPPORTED +/* + * with MSVC/AMD64, the exec_cpuid() and cpu_rdtsc() functions + * are implemented in separate .asm files. Otherwise, use inline assembly + */ +void exec_cpuid(uint32_t *regs) +{ +# if defined(COMPILER_GCC) || defined(COMPILER_CLANG) +# ifdef PLATFORM_X64 + __asm __volatile( + " mov %0, %%rdi\n" + + " push %%rbx\n" + " push %%rcx\n" + " push %%rdx\n" + + " mov (%%rdi), %%eax\n" + " mov 4(%%rdi), %%ebx\n" + " mov 8(%%rdi), %%ecx\n" + " mov 12(%%rdi), %%edx\n" + + " cpuid\n" + + " movl %%eax, (%%rdi)\n" + " movl %%ebx, 4(%%rdi)\n" + " movl %%ecx, 8(%%rdi)\n" + " movl %%edx, 12(%%rdi)\n" + " pop %%rdx\n" + " pop %%rcx\n" + " pop %%rbx\n" + : + :"m"(regs) + :"memory", "eax", "rdi" + ); +# elif defined(PLATFORM_X86) + __asm __volatile( + " mov %0, %%edi\n" + + " push %%ebx\n" + " push %%ecx\n" + " push %%edx\n" + + " mov (%%edi), %%eax\n" + " mov 4(%%edi), %%ebx\n" + " mov 8(%%edi), %%ecx\n" + " mov 12(%%edi), %%edx\n" + + " cpuid\n" + + " mov %%eax, (%%edi)\n" + " mov %%ebx, 4(%%edi)\n" + " mov %%ecx, 8(%%edi)\n" + " mov %%edx, 12(%%edi)\n" + " pop %%edx\n" + " pop %%ecx\n" + " pop %%ebx\n" + : + :"m"(regs) + :"memory", "eax", "edi" + ); +# elif defined(PLATFORM_ARM) +# endif /* COMPILER_GCC */ +#else +# ifdef COMPILER_MICROSOFT + __asm { + push ebx + push ecx + push edx + push edi + mov edi, regs + + mov eax, [edi] + mov ebx, [edi+4] + mov ecx, [edi+8] + mov edx, [edi+12] + + cpuid + + mov [edi], eax + mov [edi+4], ebx + mov [edi+8], ecx + mov [edi+12], edx + + pop edi + pop edx + pop ecx + pop ebx + } +# else +# error "Unsupported compiler" +# endif /* COMPILER_MICROSOFT */ +#endif +} +#endif /* INLINE_ASSEMBLY_SUPPORTED */ + +#ifdef INLINE_ASM_SUPPORTED +void cpu_rdtsc(uint64_t* result) +{ + uint32_t low_part, hi_part; +#if defined(COMPILER_GCC) || defined(COMPILER_CLANG) +#ifdef PLATFORM_ARM + low_part = 0; + hi_part = 0; +#else + __asm __volatile ( + " rdtsc\n" + " mov %%eax, %0\n" + " mov %%edx, %1\n" + :"=m"(low_part), "=m"(hi_part)::"memory", "eax", "edx" + ); +#endif +#else +# ifdef COMPILER_MICROSOFT + __asm { + rdtsc + mov low_part, eax + mov hi_part, edx + }; +# else +# error "Unsupported compiler" +# endif /* COMPILER_MICROSOFT */ +#endif /* COMPILER_GCC */ + *result = (uint64_t)low_part + (((uint64_t) hi_part) << 32); +} +#endif /* INLINE_ASM_SUPPORTED */ + +#ifdef INLINE_ASM_SUPPORTED +void busy_sse_loop(int cycles) +{ +# if defined(COMPILER_GCC) || defined(COMPILER_CLANG) +#ifndef __APPLE__ +# define XALIGN ".balign 16\n" +#else +# define XALIGN ".align 4\n" +#endif +#ifdef PLATFORM_ARM +#else + 
__asm __volatile ( + " xorps %%xmm0, %%xmm0\n" + " xorps %%xmm1, %%xmm1\n" + " xorps %%xmm2, %%xmm2\n" + " xorps %%xmm3, %%xmm3\n" + " xorps %%xmm4, %%xmm4\n" + " xorps %%xmm5, %%xmm5\n" + " xorps %%xmm6, %%xmm6\n" + " xorps %%xmm7, %%xmm7\n" + XALIGN + /* ".bsLoop:\n" */ + "1:\n" + // 0: + " addps %%xmm1, %%xmm0\n" + " addps %%xmm2, %%xmm1\n" + " addps %%xmm3, %%xmm2\n" + " addps %%xmm4, %%xmm3\n" + " addps %%xmm5, %%xmm4\n" + " addps %%xmm6, %%xmm5\n" + " addps %%xmm7, %%xmm6\n" + " addps %%xmm0, %%xmm7\n" + // 1: + " addps %%xmm1, %%xmm0\n" + " addps %%xmm2, %%xmm1\n" + " addps %%xmm3, %%xmm2\n" + " addps %%xmm4, %%xmm3\n" + " addps %%xmm5, %%xmm4\n" + " addps %%xmm6, %%xmm5\n" + " addps %%xmm7, %%xmm6\n" + " addps %%xmm0, %%xmm7\n" + // 2: + " addps %%xmm1, %%xmm0\n" + " addps %%xmm2, %%xmm1\n" + " addps %%xmm3, %%xmm2\n" + " addps %%xmm4, %%xmm3\n" + " addps %%xmm5, %%xmm4\n" + " addps %%xmm6, %%xmm5\n" + " addps %%xmm7, %%xmm6\n" + " addps %%xmm0, %%xmm7\n" + // 3: + " addps %%xmm1, %%xmm0\n" + " addps %%xmm2, %%xmm1\n" + " addps %%xmm3, %%xmm2\n" + " addps %%xmm4, %%xmm3\n" + " addps %%xmm5, %%xmm4\n" + " addps %%xmm6, %%xmm5\n" + " addps %%xmm7, %%xmm6\n" + " addps %%xmm0, %%xmm7\n" + // 4: + " addps %%xmm1, %%xmm0\n" + " addps %%xmm2, %%xmm1\n" + " addps %%xmm3, %%xmm2\n" + " addps %%xmm4, %%xmm3\n" + " addps %%xmm5, %%xmm4\n" + " addps %%xmm6, %%xmm5\n" + " addps %%xmm7, %%xmm6\n" + " addps %%xmm0, %%xmm7\n" + // 5: + " addps %%xmm1, %%xmm0\n" + " addps %%xmm2, %%xmm1\n" + " addps %%xmm3, %%xmm2\n" + " addps %%xmm4, %%xmm3\n" + " addps %%xmm5, %%xmm4\n" + " addps %%xmm6, %%xmm5\n" + " addps %%xmm7, %%xmm6\n" + " addps %%xmm0, %%xmm7\n" + // 6: + " addps %%xmm1, %%xmm0\n" + " addps %%xmm2, %%xmm1\n" + " addps %%xmm3, %%xmm2\n" + " addps %%xmm4, %%xmm3\n" + " addps %%xmm5, %%xmm4\n" + " addps %%xmm6, %%xmm5\n" + " addps %%xmm7, %%xmm6\n" + " addps %%xmm0, %%xmm7\n" + // 7: + " addps %%xmm1, %%xmm0\n" + " addps %%xmm2, %%xmm1\n" + " addps %%xmm3, %%xmm2\n" + " addps %%xmm4, %%xmm3\n" + " addps %%xmm5, %%xmm4\n" + " addps %%xmm6, %%xmm5\n" + " addps %%xmm7, %%xmm6\n" + " addps %%xmm0, %%xmm7\n" + // 8: + " addps %%xmm1, %%xmm0\n" + " addps %%xmm2, %%xmm1\n" + " addps %%xmm3, %%xmm2\n" + " addps %%xmm4, %%xmm3\n" + " addps %%xmm5, %%xmm4\n" + " addps %%xmm6, %%xmm5\n" + " addps %%xmm7, %%xmm6\n" + " addps %%xmm0, %%xmm7\n" + // 9: + " addps %%xmm1, %%xmm0\n" + " addps %%xmm2, %%xmm1\n" + " addps %%xmm3, %%xmm2\n" + " addps %%xmm4, %%xmm3\n" + " addps %%xmm5, %%xmm4\n" + " addps %%xmm6, %%xmm5\n" + " addps %%xmm7, %%xmm6\n" + " addps %%xmm0, %%xmm7\n" + //10: + " addps %%xmm1, %%xmm0\n" + " addps %%xmm2, %%xmm1\n" + " addps %%xmm3, %%xmm2\n" + " addps %%xmm4, %%xmm3\n" + " addps %%xmm5, %%xmm4\n" + " addps %%xmm6, %%xmm5\n" + " addps %%xmm7, %%xmm6\n" + " addps %%xmm0, %%xmm7\n" + //11: + " addps %%xmm1, %%xmm0\n" + " addps %%xmm2, %%xmm1\n" + " addps %%xmm3, %%xmm2\n" + " addps %%xmm4, %%xmm3\n" + " addps %%xmm5, %%xmm4\n" + " addps %%xmm6, %%xmm5\n" + " addps %%xmm7, %%xmm6\n" + " addps %%xmm0, %%xmm7\n" + //12: + " addps %%xmm1, %%xmm0\n" + " addps %%xmm2, %%xmm1\n" + " addps %%xmm3, %%xmm2\n" + " addps %%xmm4, %%xmm3\n" + " addps %%xmm5, %%xmm4\n" + " addps %%xmm6, %%xmm5\n" + " addps %%xmm7, %%xmm6\n" + " addps %%xmm0, %%xmm7\n" + //13: + " addps %%xmm1, %%xmm0\n" + " addps %%xmm2, %%xmm1\n" + " addps %%xmm3, %%xmm2\n" + " addps %%xmm4, %%xmm3\n" + " addps %%xmm5, %%xmm4\n" + " addps %%xmm6, %%xmm5\n" + " addps %%xmm7, %%xmm6\n" + " addps %%xmm0, %%xmm7\n" + //14: + " addps 
%%xmm1, %%xmm0\n" + " addps %%xmm2, %%xmm1\n" + " addps %%xmm3, %%xmm2\n" + " addps %%xmm4, %%xmm3\n" + " addps %%xmm5, %%xmm4\n" + " addps %%xmm6, %%xmm5\n" + " addps %%xmm7, %%xmm6\n" + " addps %%xmm0, %%xmm7\n" + //15: + " addps %%xmm1, %%xmm0\n" + " addps %%xmm2, %%xmm1\n" + " addps %%xmm3, %%xmm2\n" + " addps %%xmm4, %%xmm3\n" + " addps %%xmm5, %%xmm4\n" + " addps %%xmm6, %%xmm5\n" + " addps %%xmm7, %%xmm6\n" + " addps %%xmm0, %%xmm7\n" + //16: + " addps %%xmm1, %%xmm0\n" + " addps %%xmm2, %%xmm1\n" + " addps %%xmm3, %%xmm2\n" + " addps %%xmm4, %%xmm3\n" + " addps %%xmm5, %%xmm4\n" + " addps %%xmm6, %%xmm5\n" + " addps %%xmm7, %%xmm6\n" + " addps %%xmm0, %%xmm7\n" + //17: + " addps %%xmm1, %%xmm0\n" + " addps %%xmm2, %%xmm1\n" + " addps %%xmm3, %%xmm2\n" + " addps %%xmm4, %%xmm3\n" + " addps %%xmm5, %%xmm4\n" + " addps %%xmm6, %%xmm5\n" + " addps %%xmm7, %%xmm6\n" + " addps %%xmm0, %%xmm7\n" + //18: + " addps %%xmm1, %%xmm0\n" + " addps %%xmm2, %%xmm1\n" + " addps %%xmm3, %%xmm2\n" + " addps %%xmm4, %%xmm3\n" + " addps %%xmm5, %%xmm4\n" + " addps %%xmm6, %%xmm5\n" + " addps %%xmm7, %%xmm6\n" + " addps %%xmm0, %%xmm7\n" + //19: + " addps %%xmm1, %%xmm0\n" + " addps %%xmm2, %%xmm1\n" + " addps %%xmm3, %%xmm2\n" + " addps %%xmm4, %%xmm3\n" + " addps %%xmm5, %%xmm4\n" + " addps %%xmm6, %%xmm5\n" + " addps %%xmm7, %%xmm6\n" + " addps %%xmm0, %%xmm7\n" + //20: + " addps %%xmm1, %%xmm0\n" + " addps %%xmm2, %%xmm1\n" + " addps %%xmm3, %%xmm2\n" + " addps %%xmm4, %%xmm3\n" + " addps %%xmm5, %%xmm4\n" + " addps %%xmm6, %%xmm5\n" + " addps %%xmm7, %%xmm6\n" + " addps %%xmm0, %%xmm7\n" + //21: + " addps %%xmm1, %%xmm0\n" + " addps %%xmm2, %%xmm1\n" + " addps %%xmm3, %%xmm2\n" + " addps %%xmm4, %%xmm3\n" + " addps %%xmm5, %%xmm4\n" + " addps %%xmm6, %%xmm5\n" + " addps %%xmm7, %%xmm6\n" + " addps %%xmm0, %%xmm7\n" + //22: + " addps %%xmm1, %%xmm0\n" + " addps %%xmm2, %%xmm1\n" + " addps %%xmm3, %%xmm2\n" + " addps %%xmm4, %%xmm3\n" + " addps %%xmm5, %%xmm4\n" + " addps %%xmm6, %%xmm5\n" + " addps %%xmm7, %%xmm6\n" + " addps %%xmm0, %%xmm7\n" + //23: + " addps %%xmm1, %%xmm0\n" + " addps %%xmm2, %%xmm1\n" + " addps %%xmm3, %%xmm2\n" + " addps %%xmm4, %%xmm3\n" + " addps %%xmm5, %%xmm4\n" + " addps %%xmm6, %%xmm5\n" + " addps %%xmm7, %%xmm6\n" + " addps %%xmm0, %%xmm7\n" + //24: + " addps %%xmm1, %%xmm0\n" + " addps %%xmm2, %%xmm1\n" + " addps %%xmm3, %%xmm2\n" + " addps %%xmm4, %%xmm3\n" + " addps %%xmm5, %%xmm4\n" + " addps %%xmm6, %%xmm5\n" + " addps %%xmm7, %%xmm6\n" + " addps %%xmm0, %%xmm7\n" + //25: + " addps %%xmm1, %%xmm0\n" + " addps %%xmm2, %%xmm1\n" + " addps %%xmm3, %%xmm2\n" + " addps %%xmm4, %%xmm3\n" + " addps %%xmm5, %%xmm4\n" + " addps %%xmm6, %%xmm5\n" + " addps %%xmm7, %%xmm6\n" + " addps %%xmm0, %%xmm7\n" + //26: + " addps %%xmm1, %%xmm0\n" + " addps %%xmm2, %%xmm1\n" + " addps %%xmm3, %%xmm2\n" + " addps %%xmm4, %%xmm3\n" + " addps %%xmm5, %%xmm4\n" + " addps %%xmm6, %%xmm5\n" + " addps %%xmm7, %%xmm6\n" + " addps %%xmm0, %%xmm7\n" + //27: + " addps %%xmm1, %%xmm0\n" + " addps %%xmm2, %%xmm1\n" + " addps %%xmm3, %%xmm2\n" + " addps %%xmm4, %%xmm3\n" + " addps %%xmm5, %%xmm4\n" + " addps %%xmm6, %%xmm5\n" + " addps %%xmm7, %%xmm6\n" + " addps %%xmm0, %%xmm7\n" + //28: + " addps %%xmm1, %%xmm0\n" + " addps %%xmm2, %%xmm1\n" + " addps %%xmm3, %%xmm2\n" + " addps %%xmm4, %%xmm3\n" + " addps %%xmm5, %%xmm4\n" + " addps %%xmm6, %%xmm5\n" + " addps %%xmm7, %%xmm6\n" + " addps %%xmm0, %%xmm7\n" + //29: + " addps %%xmm1, %%xmm0\n" + " addps %%xmm2, %%xmm1\n" + " addps %%xmm3, %%xmm2\n" 
+ " addps %%xmm4, %%xmm3\n" + " addps %%xmm5, %%xmm4\n" + " addps %%xmm6, %%xmm5\n" + " addps %%xmm7, %%xmm6\n" + " addps %%xmm0, %%xmm7\n" + //30: + " addps %%xmm1, %%xmm0\n" + " addps %%xmm2, %%xmm1\n" + " addps %%xmm3, %%xmm2\n" + " addps %%xmm4, %%xmm3\n" + " addps %%xmm5, %%xmm4\n" + " addps %%xmm6, %%xmm5\n" + " addps %%xmm7, %%xmm6\n" + " addps %%xmm0, %%xmm7\n" + //31: + " addps %%xmm1, %%xmm0\n" + " addps %%xmm2, %%xmm1\n" + " addps %%xmm3, %%xmm2\n" + " addps %%xmm4, %%xmm3\n" + " addps %%xmm5, %%xmm4\n" + " addps %%xmm6, %%xmm5\n" + " addps %%xmm7, %%xmm6\n" + " addps %%xmm0, %%xmm7\n" + + " dec %%eax\n" + /* "jnz .bsLoop\n" */ + " jnz 1b\n" + ::"a"(cycles) + ); +#endif +#else +# ifdef COMPILER_MICROSOFT + __asm { + mov eax, cycles + xorps xmm0, xmm0 + xorps xmm1, xmm1 + xorps xmm2, xmm2 + xorps xmm3, xmm3 + xorps xmm4, xmm4 + xorps xmm5, xmm5 + xorps xmm6, xmm6 + xorps xmm7, xmm7 + //-- + align 16 +bsLoop: + // 0: + addps xmm0, xmm1 + addps xmm1, xmm2 + addps xmm2, xmm3 + addps xmm3, xmm4 + addps xmm4, xmm5 + addps xmm5, xmm6 + addps xmm6, xmm7 + addps xmm7, xmm0 + // 1: + addps xmm0, xmm1 + addps xmm1, xmm2 + addps xmm2, xmm3 + addps xmm3, xmm4 + addps xmm4, xmm5 + addps xmm5, xmm6 + addps xmm6, xmm7 + addps xmm7, xmm0 + // 2: + addps xmm0, xmm1 + addps xmm1, xmm2 + addps xmm2, xmm3 + addps xmm3, xmm4 + addps xmm4, xmm5 + addps xmm5, xmm6 + addps xmm6, xmm7 + addps xmm7, xmm0 + // 3: + addps xmm0, xmm1 + addps xmm1, xmm2 + addps xmm2, xmm3 + addps xmm3, xmm4 + addps xmm4, xmm5 + addps xmm5, xmm6 + addps xmm6, xmm7 + addps xmm7, xmm0 + // 4: + addps xmm0, xmm1 + addps xmm1, xmm2 + addps xmm2, xmm3 + addps xmm3, xmm4 + addps xmm4, xmm5 + addps xmm5, xmm6 + addps xmm6, xmm7 + addps xmm7, xmm0 + // 5: + addps xmm0, xmm1 + addps xmm1, xmm2 + addps xmm2, xmm3 + addps xmm3, xmm4 + addps xmm4, xmm5 + addps xmm5, xmm6 + addps xmm6, xmm7 + addps xmm7, xmm0 + // 6: + addps xmm0, xmm1 + addps xmm1, xmm2 + addps xmm2, xmm3 + addps xmm3, xmm4 + addps xmm4, xmm5 + addps xmm5, xmm6 + addps xmm6, xmm7 + addps xmm7, xmm0 + // 7: + addps xmm0, xmm1 + addps xmm1, xmm2 + addps xmm2, xmm3 + addps xmm3, xmm4 + addps xmm4, xmm5 + addps xmm5, xmm6 + addps xmm6, xmm7 + addps xmm7, xmm0 + // 8: + addps xmm0, xmm1 + addps xmm1, xmm2 + addps xmm2, xmm3 + addps xmm3, xmm4 + addps xmm4, xmm5 + addps xmm5, xmm6 + addps xmm6, xmm7 + addps xmm7, xmm0 + // 9: + addps xmm0, xmm1 + addps xmm1, xmm2 + addps xmm2, xmm3 + addps xmm3, xmm4 + addps xmm4, xmm5 + addps xmm5, xmm6 + addps xmm6, xmm7 + addps xmm7, xmm0 + // 10: + addps xmm0, xmm1 + addps xmm1, xmm2 + addps xmm2, xmm3 + addps xmm3, xmm4 + addps xmm4, xmm5 + addps xmm5, xmm6 + addps xmm6, xmm7 + addps xmm7, xmm0 + // 11: + addps xmm0, xmm1 + addps xmm1, xmm2 + addps xmm2, xmm3 + addps xmm3, xmm4 + addps xmm4, xmm5 + addps xmm5, xmm6 + addps xmm6, xmm7 + addps xmm7, xmm0 + // 12: + addps xmm0, xmm1 + addps xmm1, xmm2 + addps xmm2, xmm3 + addps xmm3, xmm4 + addps xmm4, xmm5 + addps xmm5, xmm6 + addps xmm6, xmm7 + addps xmm7, xmm0 + // 13: + addps xmm0, xmm1 + addps xmm1, xmm2 + addps xmm2, xmm3 + addps xmm3, xmm4 + addps xmm4, xmm5 + addps xmm5, xmm6 + addps xmm6, xmm7 + addps xmm7, xmm0 + // 14: + addps xmm0, xmm1 + addps xmm1, xmm2 + addps xmm2, xmm3 + addps xmm3, xmm4 + addps xmm4, xmm5 + addps xmm5, xmm6 + addps xmm6, xmm7 + addps xmm7, xmm0 + // 15: + addps xmm0, xmm1 + addps xmm1, xmm2 + addps xmm2, xmm3 + addps xmm3, xmm4 + addps xmm4, xmm5 + addps xmm5, xmm6 + addps xmm6, xmm7 + addps xmm7, xmm0 + // 16: + addps xmm0, xmm1 + addps xmm1, xmm2 + addps 
xmm2, xmm3 + addps xmm3, xmm4 + addps xmm4, xmm5 + addps xmm5, xmm6 + addps xmm6, xmm7 + addps xmm7, xmm0 + // 17: + addps xmm0, xmm1 + addps xmm1, xmm2 + addps xmm2, xmm3 + addps xmm3, xmm4 + addps xmm4, xmm5 + addps xmm5, xmm6 + addps xmm6, xmm7 + addps xmm7, xmm0 + // 18: + addps xmm0, xmm1 + addps xmm1, xmm2 + addps xmm2, xmm3 + addps xmm3, xmm4 + addps xmm4, xmm5 + addps xmm5, xmm6 + addps xmm6, xmm7 + addps xmm7, xmm0 + // 19: + addps xmm0, xmm1 + addps xmm1, xmm2 + addps xmm2, xmm3 + addps xmm3, xmm4 + addps xmm4, xmm5 + addps xmm5, xmm6 + addps xmm6, xmm7 + addps xmm7, xmm0 + // 20: + addps xmm0, xmm1 + addps xmm1, xmm2 + addps xmm2, xmm3 + addps xmm3, xmm4 + addps xmm4, xmm5 + addps xmm5, xmm6 + addps xmm6, xmm7 + addps xmm7, xmm0 + // 21: + addps xmm0, xmm1 + addps xmm1, xmm2 + addps xmm2, xmm3 + addps xmm3, xmm4 + addps xmm4, xmm5 + addps xmm5, xmm6 + addps xmm6, xmm7 + addps xmm7, xmm0 + // 22: + addps xmm0, xmm1 + addps xmm1, xmm2 + addps xmm2, xmm3 + addps xmm3, xmm4 + addps xmm4, xmm5 + addps xmm5, xmm6 + addps xmm6, xmm7 + addps xmm7, xmm0 + // 23: + addps xmm0, xmm1 + addps xmm1, xmm2 + addps xmm2, xmm3 + addps xmm3, xmm4 + addps xmm4, xmm5 + addps xmm5, xmm6 + addps xmm6, xmm7 + addps xmm7, xmm0 + // 24: + addps xmm0, xmm1 + addps xmm1, xmm2 + addps xmm2, xmm3 + addps xmm3, xmm4 + addps xmm4, xmm5 + addps xmm5, xmm6 + addps xmm6, xmm7 + addps xmm7, xmm0 + // 25: + addps xmm0, xmm1 + addps xmm1, xmm2 + addps xmm2, xmm3 + addps xmm3, xmm4 + addps xmm4, xmm5 + addps xmm5, xmm6 + addps xmm6, xmm7 + addps xmm7, xmm0 + // 26: + addps xmm0, xmm1 + addps xmm1, xmm2 + addps xmm2, xmm3 + addps xmm3, xmm4 + addps xmm4, xmm5 + addps xmm5, xmm6 + addps xmm6, xmm7 + addps xmm7, xmm0 + // 27: + addps xmm0, xmm1 + addps xmm1, xmm2 + addps xmm2, xmm3 + addps xmm3, xmm4 + addps xmm4, xmm5 + addps xmm5, xmm6 + addps xmm6, xmm7 + addps xmm7, xmm0 + // 28: + addps xmm0, xmm1 + addps xmm1, xmm2 + addps xmm2, xmm3 + addps xmm3, xmm4 + addps xmm4, xmm5 + addps xmm5, xmm6 + addps xmm6, xmm7 + addps xmm7, xmm0 + // 29: + addps xmm0, xmm1 + addps xmm1, xmm2 + addps xmm2, xmm3 + addps xmm3, xmm4 + addps xmm4, xmm5 + addps xmm5, xmm6 + addps xmm6, xmm7 + addps xmm7, xmm0 + // 30: + addps xmm0, xmm1 + addps xmm1, xmm2 + addps xmm2, xmm3 + addps xmm3, xmm4 + addps xmm4, xmm5 + addps xmm5, xmm6 + addps xmm6, xmm7 + addps xmm7, xmm0 + // 31: + addps xmm0, xmm1 + addps xmm1, xmm2 + addps xmm2, xmm3 + addps xmm3, xmm4 + addps xmm4, xmm5 + addps xmm5, xmm6 + addps xmm6, xmm7 + addps xmm7, xmm0 + //---------------------- + dec eax + jnz bsLoop + } +# else +# error "Unsupported compiler" +# endif /* COMPILER_MICROSOFT */ +#endif /* COMPILER_GCC */ +} +#endif /* INLINE_ASSEMBLY_SUPPORTED */ \ No newline at end of file diff --git a/src/3rdparty/libcpuid/asm-bits.h b/src/3rdparty/libcpuid/asm-bits.h index 3a03e11c..9049e2fe 100644 --- a/src/3rdparty/libcpuid/asm-bits.h +++ b/src/3rdparty/libcpuid/asm-bits.h @@ -1,53 +1,71 @@ -/* - * Copyright 2008 Veselin Georgiev, - * anrieffNOSPAM @ mgail_DOT.com (convert to gmail) - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. 
Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ -#ifndef __ASM_BITS_H__ -#define __ASM_BITS_H__ -#include "libcpuid.h" - -/* Determine Compiler: */ -#if defined(_MSC_VER) -# define COMPILER_MICROSOFT -#elif defined(__GNUC__) -# define COMPILER_GCC -#endif - -/* Determine Platform */ -#if defined(__x86_64__) || defined(_M_AMD64) -# define PLATFORM_X64 -#elif defined(__i386__) || defined(_M_IX86) -# define PLATFORM_X86 -#endif - -/* Under Windows/AMD64 with MSVC, inline assembly isn't supported */ -#if (defined(COMPILER_GCC) && defined(PLATFORM_X64)) || defined(PLATFORM_X86) -# define INLINE_ASM_SUPPORTED -#endif - -int cpuid_exists_by_eflags(void); -void exec_cpuid(uint32_t *regs); -void busy_sse_loop(int cycles); - -#endif /* __ASM_BITS_H__ */ +/* + * Copyright 2008 Veselin Georgiev, + * anrieffNOSPAM @ mgail_DOT.com (convert to gmail) + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ +#ifndef __ASM_BITS_H__ +#define __ASM_BITS_H__ +#include "libcpuid.h" + +/* Determine Compiler: */ +#if defined(_MSC_VER) +#if !defined(COMPILER_MICROSOFT) +# define COMPILER_MICROSOFT +#endif +#elif defined(__GNUC__) +#if !defined(COMPILER_GCC) +# define COMPILER_GCC +#endif +#elif defined(__clang__) +#if !defined(COMPILER_CLANG) +# define COMPILER_CLANG +#endif +#endif + +/* Determine Platform */ +#if defined(__x86_64__) || defined(_M_AMD64) +#if !defined(PLATFORM_X64) +# define PLATFORM_X64 +#endif +#elif defined(__i386__) || defined(_M_IX86) +#if !defined(PLATFORM_X86) +# define PLATFORM_X86 +#endif +#elif defined(__ARMEL__) +#if !defined(PLATFORM_ARM) +# define PLATFORM_ARM +#endif +#endif + +/* Under Windows/AMD64 with MSVC, inline assembly isn't supported */ +#if (((defined(COMPILER_GCC) || defined(COMPILER_CLANG))) && \ + (defined(PLATFORM_X64) || defined(PLATFORM_X86) || defined(PLATFORM_ARM))) || \ + (defined(COMPILER_MICROSOFT) && defined(PLATFORM_X86)) +# define INLINE_ASM_SUPPORTED +#endif + +int cpuid_exists_by_eflags(void); +void exec_cpuid(uint32_t *regs); +void busy_sse_loop(int cycles); + +#endif /* __ASM_BITS_H__ */ diff --git a/src/3rdparty/libcpuid/cpuid_main.c b/src/3rdparty/libcpuid/cpuid_main.c index f22c7dd6..61cb638d 100644 --- a/src/3rdparty/libcpuid/cpuid_main.c +++ b/src/3rdparty/libcpuid/cpuid_main.c @@ -221,42 +221,42 @@ static void load_features_common(struct cpu_raw_data_t* raw, struct cpu_id_t* da static cpu_vendor_t cpuid_vendor_identify(const uint32_t *raw_vendor, char *vendor_str) { - int i; - cpu_vendor_t vendor = VENDOR_UNKNOWN; - const struct { cpu_vendor_t vendor; char match[16]; } - matchtable[NUM_CPU_VENDORS] = { - /* source: http://www.sandpile.org/ia32/cpuid.htm */ - { VENDOR_INTEL , "GenuineIntel" }, - { VENDOR_AMD , "AuthenticAMD" }, - { VENDOR_CYRIX , "CyrixInstead" }, - { VENDOR_NEXGEN , "NexGenDriven" }, - { VENDOR_TRANSMETA , "GenuineTMx86" }, - { VENDOR_UMC , "UMC UMC UMC " }, - { VENDOR_CENTAUR , "CentaurHauls" }, - { VENDOR_RISE , "RiseRiseRise" }, - { VENDOR_SIS , "SiS SiS SiS " }, - { VENDOR_NSC , "Geode by NSC" }, - }; + int i; + cpu_vendor_t vendor = VENDOR_UNKNOWN; + const struct { cpu_vendor_t vendor; char match[16]; } + matchtable[NUM_CPU_VENDORS] = { + /* source: http://www.sandpile.org/ia32/cpuid.htm */ + { VENDOR_INTEL , "GenuineIntel" }, + { VENDOR_AMD , "AuthenticAMD" }, + { VENDOR_CYRIX , "CyrixInstead" }, + { VENDOR_NEXGEN , "NexGenDriven" }, + { VENDOR_TRANSMETA , "GenuineTMx86" }, + { VENDOR_UMC , "UMC UMC UMC " }, + { VENDOR_CENTAUR , "CentaurHauls" }, + { VENDOR_RISE , "RiseRiseRise" }, + { VENDOR_SIS , "SiS SiS SiS " }, + { VENDOR_NSC , "Geode by NSC" }, + }; - memcpy(vendor_str + 0, &raw_vendor[1], 4); - memcpy(vendor_str + 4, &raw_vendor[3], 4); - memcpy(vendor_str + 8, &raw_vendor[2], 4); - vendor_str[12] = 0; + memcpy(vendor_str + 0, &raw_vendor[1], 4); + memcpy(vendor_str + 4, &raw_vendor[3], 4); + memcpy(vendor_str + 8, &raw_vendor[2], 4); + vendor_str[12] = 0; - /* Determine vendor: */ - for (i = 0; i < NUM_CPU_VENDORS; i++) - if (!strcmp(vendor_str, matchtable[i].match)) { - vendor = matchtable[i].vendor; - break; - } - return vendor; + /* Determine vendor: */ + for (i = 0; i < NUM_CPU_VENDORS; i++) + if (!strcmp(vendor_str, matchtable[i].match)) { + vendor = matchtable[i].vendor; + break; + } + return vendor; } static int cpuid_basic_identify(struct cpu_raw_data_t* raw, struct cpu_id_t* data) { int i, j, basic, xmodel, xfamily, ext; char brandstr[64] = {0}; - data->vendor = 
cpuid_vendor_identify(raw->basic_cpuid[0], data->vendor_str); + data->vendor = cpuid_vendor_identify(raw->basic_cpuid[0], data->vendor_str); if (data->vendor == VENDOR_UNKNOWN) return set_error(ERR_CPU_UNKN); diff --git a/src/3rdparty/libcpuid/libcpuid.h b/src/3rdparty/libcpuid/libcpuid.h index c44990c3..847e5a4a 100644 --- a/src/3rdparty/libcpuid/libcpuid.h +++ b/src/3rdparty/libcpuid/libcpuid.h @@ -60,7 +60,7 @@ */ /** @mainpage A simple libcpuid introduction - * + * * LibCPUID provides CPU identification and access to the CPUID and RDTSC * instructions on the x86. *

@@ -82,6 +82,7 @@ */ /** @defgroup libcpuid LibCPUID + * @brief LibCPUID provides CPU identification @{ */ /* Include some integer type specifications: */ @@ -535,23 +536,23 @@ typedef enum { * @brief Describes common library error codes */ typedef enum { - ERR_OK = 0, /*!< "No error" */ - ERR_NO_CPUID = -1, /*!< "CPUID instruction is not supported" */ - ERR_NO_RDTSC = -2, /*!< "RDTSC instruction is not supported" */ - ERR_NO_MEM = -3, /*!< "Memory allocation failed" */ - ERR_OPEN = -4, /*!< "File open operation failed" */ - ERR_BADFMT = -5, /*!< "Bad file format" */ - ERR_NOT_IMP = -6, /*!< "Not implemented" */ - ERR_CPU_UNKN = -7, /*!< "Unsupported processor" */ - ERR_NO_RDMSR = -8, /*!< "RDMSR instruction is not supported" */ - ERR_NO_DRIVER= -9, /*!< "RDMSR driver error (generic)" */ - ERR_NO_PERMS = -10, /*!< "No permissions to install RDMSR driver" */ - ERR_EXTRACT = -11, /*!< "Cannot extract RDMSR driver (read only media?)" */ - ERR_HANDLE = -12, /*!< "Bad handle" */ - ERR_INVMSR = -13, /*!< "Invalid MSR" */ - ERR_INVCNB = -14, /*!< "Invalid core number" */ - ERR_HANDLE_R = -15, /*!< "Error on handle read" */ - ERR_INVRANGE = -16, /*!< "Invalid given range" */ + ERR_OK = 0, /*!< No error */ + ERR_NO_CPUID = -1, /*!< CPUID instruction is not supported */ + ERR_NO_RDTSC = -2, /*!< RDTSC instruction is not supported */ + ERR_NO_MEM = -3, /*!< Memory allocation failed */ + ERR_OPEN = -4, /*!< File open operation failed */ + ERR_BADFMT = -5, /*!< Bad file format */ + ERR_NOT_IMP = -6, /*!< Not implemented */ + ERR_CPU_UNKN = -7, /*!< Unsupported processor */ + ERR_NO_RDMSR = -8, /*!< RDMSR instruction is not supported */ + ERR_NO_DRIVER= -9, /*!< RDMSR driver error (generic) */ + ERR_NO_PERMS = -10, /*!< No permissions to install RDMSR driver */ + ERR_EXTRACT = -11, /*!< Cannot extract RDMSR driver (read only media?) */ + ERR_HANDLE = -12, /*!< Bad handle */ + ERR_INVMSR = -13, /*!< Invalid MSR */ + ERR_INVCNB = -14, /*!< Invalid core number */ + ERR_HANDLE_R = -15, /*!< Error on handle read */ + ERR_INVRANGE = -16, /*!< Invalid given range */ } cpu_error_t; /** @@ -668,7 +669,7 @@ struct cpu_epc_t cpuid_get_epc(int index, const struct cpu_raw_data_t* raw); const char* cpuid_lib_version(void); #ifdef __cplusplus -}; /* extern "C" */ +} /* extern "C" */ #endif diff --git a/src/3rdparty/libcpuid/libcpuid_internal.h b/src/3rdparty/libcpuid/libcpuid_internal.h index 038aa209..64804616 100644 --- a/src/3rdparty/libcpuid/libcpuid_internal.h +++ b/src/3rdparty/libcpuid/libcpuid_internal.h @@ -75,8 +75,9 @@ enum _intel_bits_t { _3 = LBIT( 14 ), _5 = LBIT( 15 ), _7 = LBIT( 16 ), - XEON_ = LBIT( 17 ), - ATOM_ = LBIT( 18 ), + _9 = LBIT( 17 ), + XEON_ = LBIT( 18 ), + ATOM_ = LBIT( 19 ), }; typedef enum _intel_bits_t intel_bits_t; diff --git a/src/3rdparty/libcpuid/libcpuid_types.h b/src/3rdparty/libcpuid/libcpuid_types.h index 9e897275..0e667aa6 100644 --- a/src/3rdparty/libcpuid/libcpuid_types.h +++ b/src/3rdparty/libcpuid/libcpuid_types.h @@ -32,6 +32,32 @@ #ifndef __LIBCPUID_TYPES_H__ #define __LIBCPUID_TYPES_H__ -#include +#if !defined(_MSC_VER) || _MSC_VER >= 1600 +# include +#else +/* we have to provide our own: */ +# if !defined(__int32_t_defined) +typedef int int32_t; +# endif + +# if !defined(__uint32_t_defined) +typedef unsigned uint32_t; +# endif + +typedef signed char int8_t; +typedef unsigned char uint8_t; +typedef signed short int16_t; +typedef unsigned short uint16_t; +#if (defined _MSC_VER) && (_MSC_VER <= 1300) + /* MSVC 6.0: no long longs ... 
*/ + typedef signed __int64 int64_t; + typedef unsigned __int64 uint64_t; +#else + /* all other sane compilers: */ + typedef signed long long int64_t; + typedef unsigned long long uint64_t; +#endif + +#endif #endif /* __LIBCPUID_TYPES_H__ */ diff --git a/src/3rdparty/libcpuid/recog_amd.c b/src/3rdparty/libcpuid/recog_amd.c index 352d733b..4726f633 100644 --- a/src/3rdparty/libcpuid/recog_amd.c +++ b/src/3rdparty/libcpuid/recog_amd.c @@ -49,6 +49,10 @@ enum _amd_model_codes_t { _1400, _1500, _1600, + _1900, + _2400, + _2500, + _2700, }; static void load_amd_features(struct cpu_raw_data_t* raw, struct cpu_id_t* data) diff --git a/src/3rdparty/libcpuid/recog_intel.c b/src/3rdparty/libcpuid/recog_intel.c index 5467c19f..397d750e 100644 --- a/src/3rdparty/libcpuid/recog_intel.c +++ b/src/3rdparty/libcpuid/recog_intel.c @@ -376,7 +376,7 @@ static intel_code_and_bits_t get_brand_code_and_bits(struct cpu_id_t* data) bits |= bit_matchtable[i].bit; } - if ((i = match_pattern(bs, "Core(TM) [im][357]")) != 0) { + if ((i = match_pattern(bs, "Core(TM) [im][3579]")) != 0) { bits |= CORE_; i--; switch (bs[i + 9]) { @@ -387,6 +387,7 @@ static intel_code_and_bits_t get_brand_code_and_bits(struct cpu_id_t* data) case '3': bits |= _3; break; case '5': bits |= _5; break; case '7': bits |= _7; break; + case '9': bits |= _9; break; } } for (i = 0; i < COUNT_OF(matchtable); i++)
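
Note on the last two hunks (libcpuid_internal.h / recog_intel.c): they extend the Intel brand-string classification to Core i9 parts. The bracket pattern "Core(TM) [im][357]" becomes "Core(TM) [im][3579]", a new _9 bit is inserted into intel_bits_t (shifting XEON_ and ATOM_ up by one), and the digit switch gains a matching case '9'. The sketch below is a minimal, self-contained illustration of how a bracket pattern of that shape picks the series letter and tier digit out of a brand string; match_simple() is a deliberately simplified, hypothetical stand-in for libcpuid's internal match_pattern(), and the brand string is a made-up example, so treat this as a demo under those assumptions rather than the library's actual matcher.

#include <stdio.h>

/* Hypothetical, simplified stand-in for libcpuid's match_pattern():
 * "[xyz]" accepts any single one of the bracketed characters, every other
 * pattern character must match literally.  Returns the 1-based position of
 * the first match in s, or 0 if there is none -- the same convention the
 * patched recog_intel.c relies on (i = match_pattern(...); i--; ...). */
static int match_simple(const char *s, const char *p)
{
    int start;
    for (start = 0; s[start] != '\0'; start++) {
        int si = start, pi = 0, ok = 1;
        while (p[pi] != '\0' && ok) {
            if (p[pi] == '[') {               /* character class */
                int hit = 0;
                pi++;
                while (p[pi] != '\0' && p[pi] != ']') {
                    if (s[si] != '\0' && s[si] == p[pi])
                        hit = 1;
                    pi++;
                }
                if (p[pi] == ']')
                    pi++;
                if (hit)
                    si++;
                else
                    ok = 0;
            } else {                          /* literal character */
                if (s[si] == p[pi]) {
                    si++;
                    pi++;
                } else {
                    ok = 0;
                }
            }
        }
        if (ok && p[pi] == '\0')
            return start + 1;                 /* 1-based, 0 means "no match" */
    }
    return 0;
}

int main(void)
{
    /* made-up brand string for an i9-class part */
    const char *bs = "Intel(R) Core(TM) i9-9900K CPU @ 3.60GHz";
    int i = match_simple(bs, "Core(TM) [im][3579]");

    if (i != 0) {
        i--;                                  /* back to a 0-based offset, as in recog_intel.c */
        /* "Core(TM) " is 9 characters long, so bs[i + 9] is the 'i'/'m'
         * letter and bs[i + 10] is the tier digit ('3', '5', '7' or now '9'). */
        printf("matched at offset %d: series '%c', tier '%c'\n", i, bs[i + 9], bs[i + 10]);
    } else {
        printf("brand string does not look like a Core iN/mN part\n");
    }
    return 0;
}

With the old pattern "Core(TM) [im][357]" the string above would simply not match, so an i9 brand string never reached the CORE_/tier bit assignments; adding '9' to both the pattern and the case label is what closes that gap.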