FFmpeg coverage


Directory: ../../../ffmpeg/
File: src/libavutil/x86/cpu.c
Date: 2024-11-20 23:03:26
Exec Total Coverage
Lines: 69 95 72.6%
Functions: 2 2 100.0%
Branches: 45 118 38.1%

Line Branch Exec Source
1 /*
2 * CPU detection code, extracted from mmx.h
3 * (c)1997-99 by H. Dietz and R. Fisher
4 * Converted to C and improved by Fabrice Bellard.
5 *
6 * This file is part of FFmpeg.
7 *
8 * FFmpeg is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2.1 of the License, or (at your option) any later version.
12 *
13 * FFmpeg is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with FFmpeg; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21 */
22
23 #include <stdlib.h>
24 #include <string.h>
25
26 #include "libavutil/x86/asm.h"
27 #include "libavutil/x86/cpu.h"
28 #include "libavutil/cpu.h"
29 #include "libavutil/cpu_internal.h"
30
#if HAVE_X86ASM

/* Use the external-assembly helpers: they take the output registers
 * by address, so the macro adapts the call-by-value style used below. */
#define cpuid(index, eax, ebx, ecx, edx) \
    ff_cpu_cpuid(index, &eax, &ebx, &ecx, &edx)

#define xgetbv(index, eax, edx) \
    ff_cpu_xgetbv(index, &eax, &edx)

#elif HAVE_INLINE_ASM

/* ebx saving is necessary for PIC. gcc seems unable to see it alone */
#define cpuid(index, eax, ebx, ecx, edx) \
    __asm__ volatile ( \
        "mov %%"FF_REG_b", %%"FF_REG_S" \n\t" \
        "cpuid \n\t" \
        "xchg %%"FF_REG_b", %%"FF_REG_S \
        : "=a" (eax), "=S" (ebx), "=c" (ecx), "=d" (edx) \
        : "0" (index), "2"(0))

/* XGETBV is emitted as raw opcode bytes so that old assemblers that do
 * not know the mnemonic can still build this file. */
#define xgetbv(index, eax, edx) \
    __asm__ (".byte 0x0f, 0x01, 0xd0" : "=a"(eax), "=d"(edx) : "c" (index))

/* Read the 32-bit EFLAGS register into x (32-bit builds only: pushfl). */
#define get_eflags(x) \
    __asm__ volatile ("pushfl \n" \
                      "pop %0 \n" \
                      : "=r"(x))

/* Write x back into the EFLAGS register. */
#define set_eflags(x) \
    __asm__ volatile ("push %0 \n" \
                      "popfl \n" \
                      :: "r"(x))

#endif /* HAVE_INLINE_ASM */
64
#if ARCH_X86_64

/* Every x86-64 CPU has CPUID, so no runtime probe is needed. */
#define cpuid_test() 1

#elif HAVE_X86ASM

#define cpuid_test ff_cpu_cpuid_test

#elif HAVE_INLINE_ASM

/* Probe for CPUID availability on 32-bit x86.
 * Returns non-zero when the CPUID instruction is supported. */
static int cpuid_test(void)
{
    x86_reg a, c;

    /* Check if CPUID is supported by attempting to toggle the ID bit in
     * the EFLAGS register. */
    get_eflags(a);
    set_eflags(a ^ 0x200000);
    get_eflags(c);

    return a != c;
}
#endif
88
/* Function to test if multimedia instructions are supported... */
int ff_get_cpu_flags_x86(void)
{
    int rval = 0;

#ifdef cpuid

    int eax, ebx, ecx, edx;
    int max_std_level, max_ext_level, std_caps = 0, ext_caps = 0;
    int family = 0, model = 0;
    union { int i[3]; char c[12]; } vendor;
    int xcr0_lo = 0, xcr0_hi = 0;

    if (!cpuid_test())
        return 0; /* CPUID not supported */

    /* Leaf 0: highest standard leaf plus the 12-byte vendor string,
     * which CPUID returns in EBX:EDX:ECX order. */
    cpuid(0, max_std_level, vendor.i[0], vendor.i[2], vendor.i[1]);

    if (max_std_level >= 1) {
        cpuid(1, eax, ebx, ecx, std_caps);
        /* Combine base and extended family/model fields. */
        family = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);
        model  = ((eax >> 4) & 0xf) + ((eax >> 12) & 0xf0);
        if (std_caps & (1 << 15))
            rval |= AV_CPU_FLAG_CMOV;
        if (std_caps & (1 << 23))
            rval |= AV_CPU_FLAG_MMX;
        if (std_caps & (1 << 25))
            rval |= AV_CPU_FLAG_MMXEXT;
#if HAVE_SSE
        if (std_caps & (1 << 25))
            rval |= AV_CPU_FLAG_SSE;
        if (std_caps & (1 << 26))
            rval |= AV_CPU_FLAG_SSE2;
        if (ecx & 1)
            rval |= AV_CPU_FLAG_SSE3;
        if (ecx & 0x00000200 )
            rval |= AV_CPU_FLAG_SSSE3;
        if (ecx & 0x00080000 )
            rval |= AV_CPU_FLAG_SSE4;
        if (ecx & 0x00100000 )
            rval |= AV_CPU_FLAG_SSE42;
        if (ecx & 0x02000000 )
            rval |= AV_CPU_FLAG_AESNI;
#if HAVE_AVX
        /* Check OSXSAVE and AVX bits */
        if ((ecx & 0x18000000) == 0x18000000) {
            /* Check for OS support: XCR0 must enable XMM and YMM state. */
            xgetbv(0, xcr0_lo, xcr0_hi);
            if ((xcr0_lo & 0x6) == 0x6) {
                rval |= AV_CPU_FLAG_AVX;
                if (ecx & 0x00001000)
                    rval |= AV_CPU_FLAG_FMA3;
            }
        }
#endif /* HAVE_AVX */
#endif /* HAVE_SSE */
    }
    if (max_std_level >= 7) {
        cpuid(7, eax, ebx, ecx, edx);
#if HAVE_AVX2
        if ((rval & AV_CPU_FLAG_AVX) && (ebx & 0x00000020))
            rval |= AV_CPU_FLAG_AVX2;
#if HAVE_AVX512 /* F, CD, BW, DQ, VL */
        if ((xcr0_lo & 0xe0) == 0xe0) { /* OPMASK/ZMM state */
            if ((rval & AV_CPU_FLAG_AVX2) && (ebx & 0xd0030000) == 0xd0030000) {
                rval |= AV_CPU_FLAG_AVX512;
#if HAVE_AVX512ICL
                if ((ebx & 0xd0200000) == 0xd0200000 && (ecx & 0x5f42) == 0x5f42)
                    rval |= AV_CPU_FLAG_AVX512ICL;
#endif /* HAVE_AVX512ICL */
            }
        }
#endif /* HAVE_AVX512 */
#endif /* HAVE_AVX2 */
        /* BMI1/2 don't need OS support */
        if (ebx & 0x00000008) {
            rval |= AV_CPU_FLAG_BMI1;
            if (ebx & 0x00000100)
                rval |= AV_CPU_FLAG_BMI2;
        }
    }

    /* Extended leaves: query the highest supported one first. */
    cpuid(0x80000000, max_ext_level, ebx, ecx, edx);

    if (max_ext_level >= 0x80000001) {
        cpuid(0x80000001, eax, ebx, ecx, ext_caps);
        if (ext_caps & (1U << 31))
            rval |= AV_CPU_FLAG_3DNOW;
        if (ext_caps & (1 << 30))
            rval |= AV_CPU_FLAG_3DNOWEXT;
        if (ext_caps & (1 << 23))
            rval |= AV_CPU_FLAG_MMX;
        if (ext_caps & (1 << 22))
            rval |= AV_CPU_FLAG_MMXEXT;

        if (!strncmp(vendor.c, "AuthenticAMD", 12)) {
            /* Allow for selectively disabling SSE2 functions on AMD processors
               with SSE2 support but not SSE4a. This includes Athlon64, some
               Opteron, and some Sempron processors. MMX, SSE, or 3DNow! are faster
               than SSE2 often enough to utilize this special-case flag.
               AV_CPU_FLAG_SSE2 and AV_CPU_FLAG_SSE2SLOW are both set in this case
               so that SSE2 is used unless explicitly disabled by checking
               AV_CPU_FLAG_SSE2SLOW. */
            if (rval & AV_CPU_FLAG_SSE2 && !(ecx & 0x00000040))
                rval |= AV_CPU_FLAG_SSE2SLOW;

            /* Similar to the above but for AVX functions on AMD processors.
               This is necessary only for functions using YMM registers on Bulldozer
               and Jaguar based CPUs as they lack 256-bit execution units. SSE/AVX
               functions using XMM registers are always faster on them.
               AV_CPU_FLAG_AVX and AV_CPU_FLAG_AVXSLOW are both set so that AVX is
               used unless explicitly disabled by checking AV_CPU_FLAG_AVXSLOW. */
            if ((family == 0x15 || family == 0x16) && (rval & AV_CPU_FLAG_AVX))
                rval |= AV_CPU_FLAG_AVXSLOW;

            /* Zen 3 and earlier have slow gather */
            if ((family <= 0x19) && (rval & AV_CPU_FLAG_AVX2))
                rval |= AV_CPU_FLAG_SLOW_GATHER;
        }

        /* XOP and FMA4 use the AVX instruction coding scheme, so they can't be
         * used unless the OS has AVX support. */
        if (rval & AV_CPU_FLAG_AVX) {
            if (ecx & 0x00000800)
                rval |= AV_CPU_FLAG_XOP;
            if (ecx & 0x00010000)
                rval |= AV_CPU_FLAG_FMA4;
        }
    }

    if (!strncmp(vendor.c, "GenuineIntel", 12)) {
        if (family == 6 && (model == 9 || model == 13 || model == 14)) {
            /* 6/9 (pentium-m "banias"), 6/13 (pentium-m "dothan"), and
             * 6/14 (core1 "yonah") theoretically support sse2, but it's
             * usually slower than mmx, so let's just pretend they don't.
             * AV_CPU_FLAG_SSE2 is disabled and AV_CPU_FLAG_SSE2SLOW is
             * enabled so that SSE2 is not used unless explicitly enabled
             * by checking AV_CPU_FLAG_SSE2SLOW. The same situation
             * applies for AV_CPU_FLAG_SSE3 and AV_CPU_FLAG_SSE3SLOW. */
            if (rval & AV_CPU_FLAG_SSE2)
                rval ^= AV_CPU_FLAG_SSE2SLOW | AV_CPU_FLAG_SSE2;
            if (rval & AV_CPU_FLAG_SSE3)
                rval ^= AV_CPU_FLAG_SSE3SLOW | AV_CPU_FLAG_SSE3;
        }
        /* The Atom processor has SSSE3 support, which is useful in many cases,
         * but sometimes the SSSE3 version is slower than the SSE2 equivalent
         * on the Atom, but is generally faster on other processors supporting
         * SSSE3. This flag allows for selectively disabling certain SSSE3
         * functions on the Atom. */
        if (family == 6 && model == 28)
            rval |= AV_CPU_FLAG_ATOM;

        /* Conroe has a slow shuffle unit. Check the model number to ensure not
         * to include crippled low-end Penryns and Nehalems that lack SSE4. */
        if ((rval & AV_CPU_FLAG_SSSE3) && !(rval & AV_CPU_FLAG_SSE4) &&
            family == 6 && model < 23)
            rval |= AV_CPU_FLAG_SSSE3SLOW;

        /* Haswell has slow gather */
        if ((rval & AV_CPU_FLAG_AVX2) && family == 6 && model < 70)
            rval |= AV_CPU_FLAG_SLOW_GATHER;
    }

#endif /* cpuid */

    return rval;
}
256
257 330742 size_t ff_get_cpu_max_align_x86(void)
258 {
259 330742 int flags = av_get_cpu_flags();
260
261
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 330742 times.
330742 if (flags & AV_CPU_FLAG_AVX512)
262 return 64;
263
2/2
✓ Branch 0 taken 29348 times.
✓ Branch 1 taken 301394 times.
330742 if (flags & (AV_CPU_FLAG_AVX2 |
264 AV_CPU_FLAG_AVX |
265 AV_CPU_FLAG_XOP |
266 AV_CPU_FLAG_FMA4 |
267 AV_CPU_FLAG_FMA3 |
268 AV_CPU_FLAG_AVXSLOW))
269 29348 return 32;
270
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 301394 times.
301394 if (flags & (AV_CPU_FLAG_AESNI |
271 AV_CPU_FLAG_SSE42 |
272 AV_CPU_FLAG_SSE4 |
273 AV_CPU_FLAG_SSSE3 |
274 AV_CPU_FLAG_SSE3 |
275 AV_CPU_FLAG_SSE2 |
276 AV_CPU_FLAG_SSE |
277 AV_CPU_FLAG_ATOM |
278 AV_CPU_FLAG_SSSE3SLOW |
279 AV_CPU_FLAG_SSE3SLOW |
280 AV_CPU_FLAG_SSE2SLOW))
281 return 16;
282
283 301394 return 8;
284 }
285