FFmpeg coverage

Directory: ../../../ffmpeg/
File: src/libavutil/x86/cpu.c
Date: 2026-01-14 15:50:55
            Exec   Total   Coverage
Lines:        72      97      74.2%
Functions:     2       2     100.0%
Branches:     46     120      38.3%

Line Branch Exec Source
1 /*
2 * CPU detection code, extracted from mmx.h
3 * (c)1997-99 by H. Dietz and R. Fisher
4 * Converted to C and improved by Fabrice Bellard.
5 *
6 * This file is part of FFmpeg.
7 *
8 * FFmpeg is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2.1 of the License, or (at your option) any later version.
12 *
13 * FFmpeg is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with FFmpeg; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21 */
22
23 #include <stdlib.h>
24 #include <string.h>
25
26 #include "libavutil/x86/asm.h"
27 #include "libavutil/x86/cpu.h"
28 #include "libavutil/cpu.h"
29 #include "libavutil/cpu_internal.h"
30
31 #if HAVE_X86ASM
32
33 #define cpuid(index, eax, ebx, ecx, edx) \
34 ff_cpu_cpuid(index, &eax, &ebx, &ecx, &edx)
35
36 #define xgetbv(index, eax, edx) \
37 ff_cpu_xgetbv(index, &eax, &edx)
38
39 #elif HAVE_INLINE_ASM
40
41 /* ebx saving is necessary for PIC. gcc seems unable to see it alone */
42 #define cpuid(index, eax, ebx, ecx, edx) \
43 __asm__ volatile ( \
44 "mov %%"FF_REG_b", %%"FF_REG_S" \n\t" \
45 "cpuid \n\t" \
46 "xchg %%"FF_REG_b", %%"FF_REG_S \
47 : "=a" (eax), "=S" (ebx), "=c" (ecx), "=d" (edx) \
48 : "0" (index), "2"(0))
49
50 #define xgetbv(index, eax, edx) \
51 __asm__ (".byte 0x0f, 0x01, 0xd0" : "=a"(eax), "=d"(edx) : "c" (index))
52
53 #define get_eflags(x) \
54 __asm__ volatile ("pushfl \n" \
55 "pop %0 \n" \
56 : "=r"(x))
57
58 #define set_eflags(x) \
59 __asm__ volatile ("push %0 \n" \
60 "popfl \n" \
61 :: "r"(x))
62
63 #endif /* HAVE_INLINE_ASM */
64
65 #if ARCH_X86_64
66
67 #define cpuid_test() 1
68
69 #elif HAVE_X86ASM
70
71 #define cpuid_test ff_cpu_cpuid_test
72
73 #elif HAVE_INLINE_ASM
74
75 static int cpuid_test(void)
76 {
77 x86_reg a, c;
78
79 /* Check if CPUID is supported by attempting to toggle the ID bit in
80 * the EFLAGS register. */
81 get_eflags(a);
82 set_eflags(a ^ 0x200000);
83 get_eflags(c);
84
85 return a != c;
86 }
87 #endif
88
89 /* Function to test if multimedia instructions are supported... */
90 10322 int ff_get_cpu_flags_x86(void)
91 {
92 10322 int rval = 0;
93
94 #ifdef cpuid
95
96 int eax, ebx, ecx, edx;
97 10322 int max_std_level, max_ext_level, std_caps = 0, ext_caps = 0;
98 10322 int family = 0, model = 0;
99 union { int i[3]; char c[12]; } vendor;
100 10322 int xcr0_lo = 0, xcr0_hi = 0;
101
102 if (!cpuid_test())
103 return 0; /* CPUID not supported */
104
105 10322 cpuid(0, max_std_level, vendor.i[0], vendor.i[2], vendor.i[1]);
106
107 1/2 10322 if (max_std_level >= 1) {
108 10322 cpuid(1, eax, ebx, ecx, std_caps);
109 10322 family = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);
110 10322 model = ((eax >> 4) & 0xf) + ((eax >> 12) & 0xf0);
111 1/2 10322 if (std_caps & (1 << 15))
112 10322 rval |= AV_CPU_FLAG_CMOV;
113 1/2 10322 if (std_caps & (1 << 23))
114 10322 rval |= AV_CPU_FLAG_MMX;
115 1/2 10322 if (std_caps & (1 << 25))
116 10322 rval |= AV_CPU_FLAG_MMXEXT;
117 #if HAVE_SSE
118 1/2 10322 if (std_caps & (1 << 25))
119 10322 rval |= AV_CPU_FLAG_SSE;
120 1/2 10322 if (std_caps & (1 << 26))
121 10322 rval |= AV_CPU_FLAG_SSE2;
122 1/2 10322 if (ecx & 1)
123 10322 rval |= AV_CPU_FLAG_SSE3;
124 1/2 10322 if (ecx & 0x2)
125 10322 rval |= AV_CPU_FLAG_CLMUL;
126 1/2 10322 if (ecx & 0x00000200 )
127 10322 rval |= AV_CPU_FLAG_SSSE3;
128 1/2 10322 if (ecx & 0x00080000 )
129 10322 rval |= AV_CPU_FLAG_SSE4;
130 1/2 10322 if (ecx & 0x00100000 )
131 10322 rval |= AV_CPU_FLAG_SSE42;
132 1/2 10322 if (ecx & 0x02000000 )
133 10322 rval |= AV_CPU_FLAG_AESNI;
134 #if HAVE_AVX
135 /* Check OSXSAVE and AVX bits */
136 1/2 10322 if ((ecx & 0x18000000) == 0x18000000) {
137 /* Check for OS support */
138 10322 xgetbv(0, xcr0_lo, xcr0_hi);
139 1/2 10322 if ((xcr0_lo & 0x6) == 0x6) {
140 10322 rval |= AV_CPU_FLAG_AVX;
141 1/2 10322 if (ecx & 0x00001000)
142 10322 rval |= AV_CPU_FLAG_FMA3;
143 }
144 }
145 #endif /* HAVE_AVX */
146 #endif /* HAVE_SSE */
147 }
148 1/2 10322 if (max_std_level >= 7) {
149 10322 cpuid(7, eax, ebx, ecx, edx);
150 #if HAVE_AVX2
151 2/4 10322 if ((rval & AV_CPU_FLAG_AVX) && (ebx & 0x00000020))
152 10322 rval |= AV_CPU_FLAG_AVX2;
153 #if HAVE_AVX512 /* F, CD, BW, DQ, VL */
154 1/2 10322 if ((xcr0_lo & 0xe0) == 0xe0) { /* OPMASK/ZMM state */
155 if ((rval & AV_CPU_FLAG_AVX2) && (ebx & 0xd0030000) == 0xd0030000) {
156 rval |= AV_CPU_FLAG_AVX512;
157 #if HAVE_AVX512ICL
158 if ((ebx & 0xd0200000) == 0xd0200000 && (ecx & 0x5f42) == 0x5f42)
159 rval |= AV_CPU_FLAG_AVX512ICL;
160 #endif /* HAVE_AVX512ICL */
161 }
162 }
163 #endif /* HAVE_AVX512 */
164 #endif /* HAVE_AVX2 */
165 /* BMI1/2 don't need OS support */
166 1/2 10322 if (ebx & 0x00000008) {
167 10322 rval |= AV_CPU_FLAG_BMI1;
168 1/2 10322 if (ebx & 0x00000100)
169 10322 rval |= AV_CPU_FLAG_BMI2;
170 }
171 }
172
173 10322 cpuid(0x80000000, max_ext_level, ebx, ecx, edx);
174
175 1/2 10322 if (max_ext_level >= 0x80000001) {
176 10322 cpuid(0x80000001, eax, ebx, ecx, ext_caps);
177 1/2 10322 if (ext_caps & (1U << 31))
178 rval |= AV_CPU_FLAG_3DNOW;
179 1/2 10322 if (ext_caps & (1 << 30))
180 rval |= AV_CPU_FLAG_3DNOWEXT;
181 1/2 10322 if (ext_caps & (1 << 23))
182 rval |= AV_CPU_FLAG_MMX;
183 1/2 10322 if (ext_caps & (1 << 22))
184 rval |= AV_CPU_FLAG_MMXEXT;
185
186 1/2 10322 if (!strncmp(vendor.c, "AuthenticAMD", 12)) {
187 /* Allow for selectively disabling SSE2 functions on AMD processors
188 with SSE2 support but not SSE4a. This includes Athlon64, some
189 Opteron, and some Sempron processors. MMX, SSE, or 3DNow! are faster
190 than SSE2 often enough to utilize this special-case flag.
191 AV_CPU_FLAG_SSE2 and AV_CPU_FLAG_SSE2SLOW are both set in this case
192 so that SSE2 is used unless explicitly disabled by checking
193 AV_CPU_FLAG_SSE2SLOW. */
194 if (rval & AV_CPU_FLAG_SSE2 && !(ecx & 0x00000040))
195 rval |= AV_CPU_FLAG_SSE2SLOW;
196
197 /* Similar to the above but for AVX functions on AMD processors.
198 This is necessary only for functions using YMM registers on Bulldozer
199 and Jaguar based CPUs as they lack 256-bit execution units. SSE/AVX
200 functions using XMM registers are always faster on them.
201 AV_CPU_FLAG_AVX and AV_CPU_FLAG_AVXSLOW are both set so that AVX is
202 used unless explicitly disabled by checking AV_CPU_FLAG_AVXSLOW. */
203 if ((family == 0x15 || family == 0x16) && (rval & AV_CPU_FLAG_AVX))
204 rval |= AV_CPU_FLAG_AVXSLOW;
205
206 /* Zen 3 and earlier have slow gather */
207 if ((family <= 0x19) && (rval & AV_CPU_FLAG_AVX2))
208 rval |= AV_CPU_FLAG_SLOW_GATHER;
209 }
210
211 /* XOP and FMA4 use the AVX instruction coding scheme, so they can't be
212 * used unless the OS has AVX support. */
213 1/2 10322 if (rval & AV_CPU_FLAG_AVX) {
214 1/2 10322 if (ecx & 0x00000800)
215 rval |= AV_CPU_FLAG_XOP;
216 1/2 10322 if (ecx & 0x00010000)
217 rval |= AV_CPU_FLAG_FMA4;
218 }
219 }
220
221 1/2 10322 if (!strncmp(vendor.c, "GenuineIntel", 12)) {
222 4/8 10322 if (family == 6 && (model == 9 || model == 13 || model == 14)) {
223 /* 6/9 (pentium-m "banias"), 6/13 (pentium-m "dothan"), and
224 * 6/14 (core1 "yonah") theoretically support sse2, but it's
225 * usually slower than mmx, so let's just pretend they don't.
226 * AV_CPU_FLAG_SSE2 is disabled and AV_CPU_FLAG_SSE2SLOW is
227 * enabled so that SSE2 is not used unless explicitly enabled
228 * by checking AV_CPU_FLAG_SSE2SLOW. The same situation
229 * applies for AV_CPU_FLAG_SSE3 and AV_CPU_FLAG_SSE3SLOW. */
230 if (rval & AV_CPU_FLAG_SSE2)
231 rval ^= AV_CPU_FLAG_SSE2SLOW | AV_CPU_FLAG_SSE2;
232 if (rval & AV_CPU_FLAG_SSE3)
233 rval ^= AV_CPU_FLAG_SSE3SLOW | AV_CPU_FLAG_SSE3;
234 }
235 /* The Atom processor has SSSE3 support, which is useful in many cases,
236 * but sometimes the SSSE3 version is slower than the SSE2 equivalent
237 * on the Atom, but is generally faster on other processors supporting
238 * SSSE3. This flag allows for selectively disabling certain SSSE3
239 * functions on the Atom. */
240 2/4 10322 if (family == 6 && model == 28)
241 rval |= AV_CPU_FLAG_ATOM;
242
243 /* Conroe has a slow shuffle unit. Check the model number to ensure not
244 * to include crippled low-end Penryns and Nehalems that lack SSE4. */
245 2/6 10322 if ((rval & AV_CPU_FLAG_SSSE3) && !(rval & AV_CPU_FLAG_SSE4) &&
246 family == 6 && model < 23)
247 rval |= AV_CPU_FLAG_SSSE3SLOW;
248
249 /* Ice Lake and below have slow gather due to Gather Data Sampling
250 * mitigation. */
251 3/6 10322 if ((rval & AV_CPU_FLAG_AVX2) && family == 6 && model < 143)
252 10322 rval |= AV_CPU_FLAG_SLOW_GATHER;
253 }
254
255 #endif /* cpuid */
256
257 10322 return rval;
258 }
259
260 368214 size_t ff_get_cpu_max_align_x86(void)
261 {
262 368214 int flags = av_get_cpu_flags();
263
264 1/2 368214 if (flags & AV_CPU_FLAG_AVX512)
265 return 64;
266 2/2 368214 if (flags & (AV_CPU_FLAG_AVX2 |
267 AV_CPU_FLAG_AVX |
268 AV_CPU_FLAG_XOP |
269 AV_CPU_FLAG_FMA4 |
270 AV_CPU_FLAG_FMA3 |
271 AV_CPU_FLAG_AVXSLOW))
272 29358 return 32;
273 1/2 338856 if (flags & (AV_CPU_FLAG_AESNI |
274 AV_CPU_FLAG_SSE42 |
275 AV_CPU_FLAG_SSE4 |
276 AV_CPU_FLAG_SSSE3 |
277 AV_CPU_FLAG_SSE3 |
278 AV_CPU_FLAG_SSE2 |
279 AV_CPU_FLAG_SSE |
280 AV_CPU_FLAG_ATOM |
281 AV_CPU_FLAG_SSSE3SLOW |
282 AV_CPU_FLAG_SSE3SLOW |
283 AV_CPU_FLAG_SSE2SLOW))
284 return 16;
285
286 338856 return 8;
287 }
288
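
For reference, a minimal sketch of how the two covered functions are
consumed through libavutil's public API. Assumptions: an FFmpeg
development install (build with: cc demo.c $(pkg-config --cflags --libs
libavutil)); the CPUID leaf-1 EAX value in the comment is an illustrative
Zen 3 signature, not data taken from this report.

#include <stdio.h>
#include <libavutil/cpu.h>

int main(void)
{
    /* On x86, av_get_cpu_flags() dispatches to ff_get_cpu_flags_x86()
     * (source line 90 above) and caches the result. */
    int flags = av_get_cpu_flags();

    if (flags & AV_CPU_FLAG_AVX2)
        printf("AVX2 code paths available\n");
    if (flags & AV_CPU_FLAG_SSE2SLOW)
        printf("SSE2 present but flagged slow (pre-SSE4a AMD)\n");

    /* av_cpu_max_align() fronts ff_get_cpu_max_align_x86() (source line
     * 260 above): 64 bytes with AVX-512, 32 with any AVX-class flag,
     * 16 with SSE, 8 otherwise. */
    printf("max SIMD alignment: %zu bytes\n", av_cpu_max_align());

    /* Worked example of the family/model decode on source lines 109-110,
     * for the illustrative EAX signature 0x00a20f10 (AMD Zen 3):
     *   family = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff) = 0xf + 0x0a = 0x19
     *   model  = ((eax >> 4) & 0xf) + ((eax >> 12) & 0xf0) = 0x1 + 0x20 = 0x21
     * hence the "Zen 3 and earlier" slow-gather check at source line 207
     * testing family <= 0x19. */
    return 0;
}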