FFmpeg coverage


Directory: ../../../ffmpeg/
File: src/libavcodec/mathops.h
Date: 2024-02-16 17:37:06
Exec Total Coverage
Lines: 35 35 100.0%
Functions: 9 9 100.0%
Branches: 14 14 100.0%

Line Branch Exec Source
1 /*
2 * simple math operations
3 * Copyright (c) 2001, 2002 Fabrice Bellard
4 * Copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at> et al
5 *
6 * This file is part of FFmpeg.
7 *
8 * FFmpeg is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2.1 of the License, or (at your option) any later version.
12 *
13 * FFmpeg is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with FFmpeg; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21 */
22 #ifndef AVCODEC_MATHOPS_H
23 #define AVCODEC_MATHOPS_H
24
25 #include <stdint.h>
26
27 #include "libavutil/attributes_internal.h"
28 #include "libavutil/common.h"
29 #include "config.h"
30
31 #define MAX_NEG_CROP 1024
32
33 extern const uint32_t ff_inverse[257];
34 extern const uint8_t ff_log2_run[41];
35 extern const uint8_t ff_sqrt_tab[256];
36 extern const uint8_t attribute_visibility_hidden ff_crop_tab[256 + 2 * MAX_NEG_CROP];
37 extern const uint8_t ff_zigzag_direct[64];
38 extern const uint8_t ff_zigzag_scan[16+1];
39
40 #if ARCH_ARM
41 # include "arm/mathops.h"
42 #elif ARCH_AVR32
43 # include "avr32/mathops.h"
44 #elif ARCH_MIPS
45 # include "mips/mathops.h"
46 #elif ARCH_PPC
47 # include "ppc/mathops.h"
48 #elif ARCH_X86
49 # include "x86/mathops.h"
50 #endif
51
52 /* generic implementation */
53
54 #ifndef MUL64
55 # define MUL64(a,b) ((int64_t)(a) * (int64_t)(b))
56 #endif
57
58 #ifndef MULL
59 # define MULL(a,b,s) (MUL64(a, b) >> (s))
60 #endif
61
62 #ifndef MULH
63 48324349 static av_always_inline int MULH(int a, int b){
64 48324349 return MUL64(a, b) >> 32;
65 }
66 #endif
67
68 #ifndef UMULH
69 499 static av_always_inline unsigned UMULH(unsigned a, unsigned b){
70 499 return ((uint64_t)(a) * (uint64_t)(b))>>32;
71 }
72 #endif
73
74 #ifndef MAC64
75 # define MAC64(d, a, b) ((d) += MUL64(a, b))
76 #endif
77
78 #ifndef MLS64
79 # define MLS64(d, a, b) ((d) -= MUL64(a, b))
80 #endif
81
82 /* signed 16x16 -> 32 multiply add accumulate */
83 #ifndef MAC16
84 # define MAC16(rt, ra, rb) rt += (ra) * (rb)
85 #endif
86
87 /* signed 16x16 -> 32 multiply */
88 #ifndef MUL16
89 # define MUL16(ra, rb) ((ra) * (rb))
90 #endif
91
92 #ifndef MLS16
93 # define MLS16(rt, ra, rb) ((rt) -= (ra) * (rb))
94 #endif
95
96 /* median of 3 */
97 #ifndef mid_pred
98 #define mid_pred mid_pred
99 static inline av_const int mid_pred(int a, int b, int c)
100 {
101 if(a>b){
102 if(c>b){
103 if(c>a) b=a;
104 else b=c;
105 }
106 }else{
107 if(b>c){
108 if(c>a) b=c;
109 else b=a;
110 }
111 }
112 return b;
113 }
114 #endif
115
116 #ifndef median4
117 #define median4 median4
118 74348 static inline av_const int median4(int a, int b, int c, int d)
119 {
120
2/2
✓ Branch 0 taken 10403 times.
✓ Branch 1 taken 63945 times.
74348 if (a < b) {
121
2/2
✓ Branch 0 taken 2971 times.
✓ Branch 1 taken 7432 times.
10403 if (c < d) return (FFMIN(b, d) + FFMAX(a, c)) / 2;
122 7432 else return (FFMIN(b, c) + FFMAX(a, d)) / 2;
123 } else {
124
2/2
✓ Branch 0 taken 5325 times.
✓ Branch 1 taken 58620 times.
63945 if (c < d) return (FFMIN(a, d) + FFMAX(b, c)) / 2;
125 58620 else return (FFMIN(a, c) + FFMAX(b, d)) / 2;
126 }
127 }
128 #endif
129
130 #define FF_SIGNBIT(x) ((x) >> CHAR_BIT * sizeof(x) - 1)
131
132 #ifndef sign_extend
133 356649935 static inline av_const int sign_extend(int val, unsigned bits)
134 {
135 356649935 unsigned shift = 8 * sizeof(int) - bits;
136 356649935 union { unsigned u; int s; } v = { (unsigned) val << shift };
137 356649935 return v.s >> shift;
138 }
139 #endif
140
141 #ifndef sign_extend64
142 4624 static inline av_const int64_t sign_extend64(int64_t val, unsigned bits)
143 {
144 4624 unsigned shift = 8 * sizeof(int64_t) - bits;
145 4624 union { uint64_t u; int64_t s; } v = { (uint64_t) val << shift };
146 4624 return v.s >> shift;
147 }
148 #endif
149
150 #ifndef zero_extend
151 80308445 static inline av_const unsigned zero_extend(unsigned val, unsigned bits)
152 {
153 80308445 return (val << ((8 * sizeof(int)) - bits)) >> ((8 * sizeof(int)) - bits);
154 }
155 #endif
156
157 #ifndef COPY3_IF_LT
158 #define COPY3_IF_LT(x, y, a, b, c, d)\
159 if ((y) < (x)) {\
160 (x) = (y);\
161 (a) = (b);\
162 (c) = (d);\
163 }
164 #endif
165
166 #ifndef MASK_ABS
167 #define MASK_ABS(mask, level) do { \
168 mask = level >> 31; \
169 level = (level ^ mask) - mask; \
170 } while (0)
171 #endif
172
173 #ifndef NEG_SSR32
174 # define NEG_SSR32(a,s) ((( int32_t)(a))>>(32-(s)))
175 #endif
176
177 #ifndef NEG_USR32
178 # define NEG_USR32(a,s) (((uint32_t)(a))>>(32-(s)))
179 #endif
180
181 #if HAVE_BIGENDIAN
182 # ifndef PACK_2U8
183 # define PACK_2U8(a,b) (((a) << 8) | (b))
184 # endif
185 # ifndef PACK_4U8
186 # define PACK_4U8(a,b,c,d) (((a) << 24) | ((b) << 16) | ((c) << 8) | (d))
187 # endif
188 # ifndef PACK_2U16
189 # define PACK_2U16(a,b) (((a) << 16) | (b))
190 # endif
191 #else
192 # ifndef PACK_2U8
193 # define PACK_2U8(a,b) (((b) << 8) | (a))
194 # endif
195 # ifndef PACK_4U8
196 # define PACK_4U8(a,b,c,d) (((d) << 24) | ((c) << 16) | ((b) << 8) | (a))
197 # endif
198 # ifndef PACK_2U16
199 # define PACK_2U16(a,b) (((b) << 16) | (a))
200 # endif
201 #endif
202
203 #ifndef PACK_2S8
204 # define PACK_2S8(a,b) PACK_2U8((a)&255, (b)&255)
205 #endif
206 #ifndef PACK_4S8
207 # define PACK_4S8(a,b,c,d) PACK_4U8((a)&255, (b)&255, (c)&255, (d)&255)
208 #endif
209 #ifndef PACK_2S16
210 # define PACK_2S16(a,b) PACK_2U16((a)&0xffff, (b)&0xffff)
211 #endif
212
213 #ifndef FASTDIV
214 # define FASTDIV(a,b) ((uint32_t)((((uint64_t)a) * ff_inverse[b]) >> 32))
215 #endif /* FASTDIV */
216
217 #ifndef ff_sqrt
218 #define ff_sqrt ff_sqrt
219 6960425 static inline av_const unsigned int ff_sqrt(unsigned int a)
220 {
221 unsigned int b;
222
223
2/2
✓ Branch 0 taken 596607 times.
✓ Branch 1 taken 6363818 times.
6960425 if (a < 255) return (ff_sqrt_tab[a + 1] - 1) >> 4;
224
2/2
✓ Branch 0 taken 853252 times.
✓ Branch 1 taken 5510566 times.
6363818 else if (a < (1 << 12)) b = ff_sqrt_tab[a >> 4] >> 2;
225 #if !CONFIG_SMALL
226
2/2
✓ Branch 0 taken 1055913 times.
✓ Branch 1 taken 4454653 times.
5510566 else if (a < (1 << 14)) b = ff_sqrt_tab[a >> 6] >> 1;
227
2/2
✓ Branch 0 taken 1026941 times.
✓ Branch 1 taken 3427712 times.
4454653 else if (a < (1 << 16)) b = ff_sqrt_tab[a >> 8] ;
228 #endif
229 else {
230 3427712 int s = av_log2_16bit(a >> 16) >> 1;
231 3427712 unsigned int c = a >> (s + 2);
232 3427712 b = ff_sqrt_tab[c >> (s + 8)];
233 3427712 b = FASTDIV(c,b) + (b << s);
234 }
235
236 6363818 return b - (a < b * b);
237 }
238 #endif
239
240 30243 static inline av_const float ff_sqrf(float a)
241 {
242 30243 return a*a;
243 }
244
245 393216 static inline int8_t ff_u8_to_s8(uint8_t a)
246 {
247 union {
248 uint8_t u8;
249 int8_t s8;
250 } b;
251 393216 b.u8 = a;
252 393216 return b.s8;
253 }
254
255 #endif /* AVCODEC_MATHOPS_H */
256