FFmpeg coverage


Directory: ../../../ffmpeg/
File: src/libavutil/x86/intmath.h
Date: 2024-11-20 23:03:26
             Exec  Total  Coverage
Lines:          6      6    100.0%
Functions:      2      2    100.0%
Branches:       0      0         -

Line Branch Exec Source
1 /*
2 * Copyright (c) 2015 James Almer
3 *
4 * This file is part of FFmpeg.
5 *
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
10 *
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19 */
20
21 #ifndef AVUTIL_X86_INTMATH_H
22 #define AVUTIL_X86_INTMATH_H
23
24 #include <stdint.h>
25 #include <stdlib.h>
26 #if HAVE_FAST_CLZ
27 #if defined(_MSC_VER)
28 #include <intrin.h>
29 #elif defined(__INTEL_COMPILER)
30 #include <immintrin.h>
31 #endif
32 #endif
33 #include "config.h"
34
35 #if HAVE_FAST_CLZ
36 #if (defined(__INTEL_COMPILER) && (__INTEL_COMPILER>=1216)) || defined(_MSC_VER)
37 #   if defined(__INTEL_COMPILER)
38 #       define ff_log2(x) (_bit_scan_reverse((x)|1))
39 #   else
40 #       define ff_log2 ff_log2_x86
41 static av_always_inline av_const int ff_log2_x86(unsigned int v)
42 {
43     unsigned long n;
44     _BitScanReverse(&n, v|1);
45     return n;
46 }
47 #   endif
48 #   define ff_log2_16bit av_log2
49
50 #if defined(__INTEL_COMPILER) || (defined(_MSC_VER) && (_MSC_VER >= 1700) && \
51                                   (defined(__BMI__) || !defined(__clang__)))
52 #   define ff_ctz(v) _tzcnt_u32(v)
53
54 #   if ARCH_X86_64
55 #       define ff_ctzll(v) _tzcnt_u64(v)
56 #   else
57 #       define ff_ctzll ff_ctzll_x86
58 static av_always_inline av_const int ff_ctzll_x86(long long v)
59 {
60     return ((uint32_t)v == 0) ? _tzcnt_u32((uint32_t)(v >> 32)) + 32 : _tzcnt_u32((uint32_t)v);
61 }
62 #   endif
63 #endif /* _MSC_VER */
64
65 #endif /* __INTEL_COMPILER */
66
67 #endif /* HAVE_FAST_CLZ */
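
/*
 * Hedged reference sketch (not part of the listed header): portable C
 * equivalents of what the ff_log2 and ff_ctz fast paths above compute.
 * The helper names ref_log2/ref_ctz are hypothetical and only illustrate
 * the intended semantics: ff_log2(x) is the index of the highest set bit
 * of x|1 (so ff_log2(0) == 0), and ff_ctz(v) counts the trailing zero
 * bits of a non-zero v.
 */
static inline int ref_log2(unsigned int v)
{
    int n = 0;
    v |= 1;                 /* mirrors the (x)|1 in the macro above */
    while (v >>= 1)
        n++;                /* position of the most significant set bit */
    return n;
}

static inline int ref_ctz(unsigned int v)
{
    int n = 0;
    while (!(v & 1)) {      /* v is assumed non-zero here */
        v >>= 1;
        n++;
    }
    return n;
}
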
68
69 #if defined(__GNUC__)
70
71 /* Our generic version of av_popcount is faster than GCC's built-in on
72 * CPUs that don't support the popcnt instruction.
73 */
74 #if defined(__POPCNT__)
75 #define av_popcount __builtin_popcount
76 #if ARCH_X86_64
77 #define av_popcount64 __builtin_popcountll
78 #endif
79
80 #endif /* __POPCNT__ */
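
/*
 * Hedged usage sketch (not part of the listed header): when __POPCNT__ is
 * defined, av_popcount above maps to GCC's __builtin_popcount, which
 * returns the number of set bits in its argument. The generic av_popcount
 * lives in libavutil/common.h; the values below are illustrative and the
 * helper name popcount_example is hypothetical.
 */
static inline void popcount_example(void)
{
    int a = av_popcount(0x0u);        /* no bits set    -> 0 */
    int b = av_popcount(0xFFu);       /* 8 bits set     -> 8 */
    int c = av_popcount(0x80000001u); /* 2 bits set     -> 2 */
    (void)a; (void)b; (void)c;
}
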
81
82 #if defined(__BMI2__)
83
84 #if AV_GCC_VERSION_AT_LEAST(5,1)
85 #if defined(ASSERT_LEVEL) && ASSERT_LEVEL >= 2
86 #define av_zero_extend av_zero_extend_bmi2
87 static av_always_inline av_const unsigned av_zero_extend_bmi2(unsigned a, unsigned p)
88 {
89     if (p > 31) abort();
90     return __builtin_ia32_bzhi_si(a, p);
91 }
92 #else
93 #define av_zero_extend __builtin_ia32_bzhi_si
94 #endif
95 #elif HAVE_INLINE_ASM
96 /* GCC releases before 5.1.0 have a broken bzhi builtin, so for those we
97 * implement it using inline assembly
98 */
99 #define av_zero_extend av_zero_extend_bmi2
100 static av_always_inline av_const unsigned av_zero_extend_bmi2(unsigned a, unsigned p)
101 {
102 #if defined(ASSERT_LEVEL) && ASSERT_LEVEL >= 2
103     if (p > 31) abort();
104 #endif
105     if (av_builtin_constant_p(p))
106         return a & ((1U << p) - 1);
107     else {
108         unsigned x;
109         __asm__ ("bzhi %2, %1, %0 \n\t" : "=r"(x) : "rm"(a), "r"(p));
110         return x;
111     }
112 }
113 #endif /* AV_GCC_VERSION_AT_LEAST */
114
115 #endif /* __BMI2__ */
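
/*
 * Hedged usage sketch (not part of the listed header): for 0 <= p <= 31,
 * av_zero_extend(a, p) as defined above keeps only the low p bits of a,
 * i.e. it is equivalent to a & ((1U << p) - 1), the constant-argument path
 * spelled out in the inline-asm fallback. Values are illustrative and the
 * helper name zero_extend_example is hypothetical.
 */
static inline void zero_extend_example(void)
{
    unsigned low16 = av_zero_extend(0xDEADBEEFu, 16); /* 0x0000BEEF */
    unsigned low4  = av_zero_extend(0xFFu, 4);        /* 0x0000000F */
    (void)low16; (void)low4;
}
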
116
117 #if defined(__SSE2__) && !defined(__INTEL_COMPILER)
118
119 #define av_clipd av_clipd_sse2
120 2428025 static av_always_inline av_const double av_clipd_sse2(double a, double amin, double amax)
121 {
122 #if defined(ASSERT_LEVEL) && ASSERT_LEVEL >= 2
123     if (amin > amax) abort();
124 #endif
125 2428025     __asm__ ("maxsd %1, %0 \n\t"
126              "minsd %2, %0 \n\t"
127              : "+&x"(a) : "xm"(amin), "xm"(amax));
128 2428025     return a;
129 }
130
131 #endif /* __SSE2__ */
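
/*
 * Hedged sketch (not part of the listed header): a plain-C equivalent of
 * the maxsd/minsd sequence in av_clipd_sse2 above, ignoring the NaN
 * ordering subtleties of the x86 scalar min/max instructions. The helper
 * name clipd_ref is hypothetical.
 */
static inline double clipd_ref(double a, double amin, double amax)
{
    a = a > amin ? a : amin;   /* maxsd %1, %0 */
    a = a < amax ? a : amax;   /* minsd %2, %0 */
    return a;
}
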
132
133 #if defined(__SSE__) && !defined(__INTEL_COMPILER)
134
135 #define av_clipf av_clipf_sse
136 29249949 static av_always_inline av_const float av_clipf_sse(float a, float amin, float amax)
137 {
138 #if defined(ASSERT_LEVEL) && ASSERT_LEVEL >= 2
139     if (amin > amax) abort();
140 #endif
141 29249949     __asm__ ("maxss %1, %0 \n\t"
142              "minss %2, %0 \n\t"
143              : "+&x"(a) : "xm"(amin), "xm"(amax));
144 29249949     return a;
145 }
146
147 #endif /* __SSE__ */
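
/*
 * Hedged usage sketch (not part of the listed header) for the clip helpers
 * above: av_clipf(a, amin, amax) returns a clamped to [amin, amax]. Values
 * are illustrative and the helper name clipf_example is hypothetical.
 */
static inline void clipf_example(void)
{
    float gain = av_clipf(1.7f, 0.0f, 1.0f);   /*  1.0f */
    float pan  = av_clipf(-0.3f, -1.0f, 1.0f); /* -0.3f */
    (void)gain; (void)pan;
}
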
148
149 #if defined(__AVX__) && !defined(__INTEL_COMPILER)
150
151 #undef av_clipd
152 #define av_clipd av_clipd_avx
153 static av_always_inline av_const double av_clipd_avx(double a, double amin, double amax)
154 {
155 #if defined(ASSERT_LEVEL) && ASSERT_LEVEL >= 2
156     if (amin > amax) abort();
157 #endif
158     __asm__ ("vmaxsd %1, %0, %0 \n\t"
159              "vminsd %2, %0, %0 \n\t"
160              : "+&x"(a) : "xm"(amin), "xm"(amax));
161     return a;
162 }
163
164 #undef av_clipf
165 #define av_clipf av_clipf_avx
166 static av_always_inline av_const float av_clipf_avx(float a, float amin, float amax)
167 {
168 #if defined(ASSERT_LEVEL) && ASSERT_LEVEL >= 2
169     if (amin > amax) abort();
170 #endif
171     __asm__ ("vmaxss %1, %0, %0 \n\t"
172              "vminss %2, %0, %0 \n\t"
173              : "+&x"(a) : "xm"(amin), "xm"(amax));
174     return a;
175 }
176
177 #endif /* __AVX__ */
178
179 #endif /* __GNUC__ */
180
181 #endif /* AVUTIL_X86_INTMATH_H */
182