FFmpeg coverage


Directory: ../../../ffmpeg/
File: src/libavcodec/me_cmp.c
Date: 2025-06-23 20:06:14
             Exec   Total   Coverage
Lines:        396     528      75.0%
Functions:     29      43      67.4%
Branches:     101     158      63.9%

Line Branch Exec Source
1 /*
2 * DSP utils
3 * Copyright (c) 2000, 2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5 *
6 * This file is part of FFmpeg.
7 *
8 * FFmpeg is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2.1 of the License, or (at your option) any later version.
12 *
13 * FFmpeg is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with FFmpeg; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21 */
22
23 #include <stddef.h>
24
25 #include "libavutil/attributes.h"
26 #include "libavutil/internal.h"
27 #include "libavutil/mem_internal.h"
28 #include "avcodec.h"
29 #include "copy_block.h"
30 #include "simple_idct.h"
31 #include "me_cmp.h"
32 #include "mpegvideoenc.h"
33 #include "config.h"
34 #include "config_components.h"
35
36 32 static int sse4_c(MPVEncContext *unused, const uint8_t *pix1, const uint8_t *pix2,
37 ptrdiff_t stride, int h)
38 {
39 32 int s = 0, i;
40 32 const uint32_t *sq = ff_square_tab + 256;
41
42 [2/2: B0 376, B1 32] 408 for (i = 0; i < h; i++) {
43 376 s += sq[pix1[0] - pix2[0]];
44 376 s += sq[pix1[1] - pix2[1]];
45 376 s += sq[pix1[2] - pix2[2]];
46 376 s += sq[pix1[3] - pix2[3]];
47 376 pix1 += stride;
48 376 pix2 += stride;
49 }
50 32 return s;
51 }
52
53 5322992 static int sse8_c(MPVEncContext *unused, const uint8_t *pix1, const uint8_t *pix2,
54 ptrdiff_t stride, int h)
55 {
56 5322992 int s = 0, i;
57 5322992 const uint32_t *sq = ff_square_tab + 256;
58
59 [2/2: B0 48725378, B1 5322992] 54048370 for (i = 0; i < h; i++) {
60 48725378 s += sq[pix1[0] - pix2[0]];
61 48725378 s += sq[pix1[1] - pix2[1]];
62 48725378 s += sq[pix1[2] - pix2[2]];
63 48725378 s += sq[pix1[3] - pix2[3]];
64 48725378 s += sq[pix1[4] - pix2[4]];
65 48725378 s += sq[pix1[5] - pix2[5]];
66 48725378 s += sq[pix1[6] - pix2[6]];
67 48725378 s += sq[pix1[7] - pix2[7]];
68 48725378 pix1 += stride;
69 48725378 pix2 += stride;
70 }
71 5322992 return s;
72 }
73
74 7194208 static int sse16_c(MPVEncContext *unused, const uint8_t *pix1, const uint8_t *pix2,
75 ptrdiff_t stride, int h)
76 {
77 7194208 int s = 0, i;
78 7194208 const uint32_t *sq = ff_square_tab + 256;
79
80 [2/2: B0 115107044, B1 7194208] 122301252 for (i = 0; i < h; i++) {
81 115107044 s += sq[pix1[0] - pix2[0]];
82 115107044 s += sq[pix1[1] - pix2[1]];
83 115107044 s += sq[pix1[2] - pix2[2]];
84 115107044 s += sq[pix1[3] - pix2[3]];
85 115107044 s += sq[pix1[4] - pix2[4]];
86 115107044 s += sq[pix1[5] - pix2[5]];
87 115107044 s += sq[pix1[6] - pix2[6]];
88 115107044 s += sq[pix1[7] - pix2[7]];
89 115107044 s += sq[pix1[8] - pix2[8]];
90 115107044 s += sq[pix1[9] - pix2[9]];
91 115107044 s += sq[pix1[10] - pix2[10]];
92 115107044 s += sq[pix1[11] - pix2[11]];
93 115107044 s += sq[pix1[12] - pix2[12]];
94 115107044 s += sq[pix1[13] - pix2[13]];
95 115107044 s += sq[pix1[14] - pix2[14]];
96 115107044 s += sq[pix1[15] - pix2[15]];
97
98 115107044 pix1 += stride;
99 115107044 pix2 += stride;
100 }
101 7194208 return s;
102 }
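The sse*_c functions above avoid a per-pixel multiply by indexing a square table from its midpoint. A minimal sketch of the idea, assuming the usual 512-entry layout of ff_square_tab (centred so that sq[d] == d*d for any byte difference d); the names below are illustrative, not FFmpeg's:

    #include <stdint.h>

    static uint32_t square_tab[512];              /* squares of -256..255 */
    static const uint32_t *sq = square_tab + 256; /* centre the index     */

    static void init_square_tab(void)
    {
        for (int i = -256; i < 256; i++)
            square_tab[i + 256] = (uint32_t)(i * i);
    }

    /* after init, sq[pix1[k] - pix2[k]] equals the squared difference,
     * exactly the lookup used in sse4_c/sse8_c/sse16_c above */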
103
104 static int sum_abs_dctelem_c(const int16_t *block)
105 {
106 int sum = 0, i;
107
108 for (i = 0; i < 64; i++)
109 sum += FFABS(block[i]);
110 return sum;
111 }
112
113 #define avg2(a, b) (((a) + (b) + 1) >> 1)
114 #define avg4(a, b, c, d) (((a) + (b) + (c) + (d) + 2) >> 2)
115
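As a quick worked example of the rounding in these macros: avg2(3, 4) = (3 + 4 + 1) >> 1 = 4 and avg4(1, 2, 3, 4) = (1 + 2 + 3 + 4 + 2) >> 2 = 3, i.e. both round halves up, which is the rounding the half-pel SAD functions below (pix_abs*_x2/_y2/_xy2) rely on.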
116 44232973 static inline int pix_abs16_c(MPVEncContext *unused, const uint8_t *pix1, const uint8_t *pix2,
117 ptrdiff_t stride, int h)
118 {
119 44232973 int s = 0, i;
120
121 [2/2: B0 627358500, B1 44232973] 671591473 for (i = 0; i < h; i++) {
122 627358500 s += abs(pix1[0] - pix2[0]);
123 627358500 s += abs(pix1[1] - pix2[1]);
124 627358500 s += abs(pix1[2] - pix2[2]);
125 627358500 s += abs(pix1[3] - pix2[3]);
126 627358500 s += abs(pix1[4] - pix2[4]);
127 627358500 s += abs(pix1[5] - pix2[5]);
128 627358500 s += abs(pix1[6] - pix2[6]);
129 627358500 s += abs(pix1[7] - pix2[7]);
130 627358500 s += abs(pix1[8] - pix2[8]);
131 627358500 s += abs(pix1[9] - pix2[9]);
132 627358500 s += abs(pix1[10] - pix2[10]);
133 627358500 s += abs(pix1[11] - pix2[11]);
134 627358500 s += abs(pix1[12] - pix2[12]);
135 627358500 s += abs(pix1[13] - pix2[13]);
136 627358500 s += abs(pix1[14] - pix2[14]);
137 627358500 s += abs(pix1[15] - pix2[15]);
138 627358500 pix1 += stride;
139 627358500 pix2 += stride;
140 }
141 44232973 return s;
142 }
143
144 32 static inline int pix_median_abs16_c(MPVEncContext *unused, const uint8_t *pix1, const uint8_t *pix2,
145 ptrdiff_t stride, int h)
146 {
147 32 int s = 0, i, j;
148
149 #define V(x) (pix1[x] - pix2[x])
150
151 32 s += abs(V(0));
152 32 s += abs(V(1) - V(0));
153 32 s += abs(V(2) - V(1));
154 32 s += abs(V(3) - V(2));
155 32 s += abs(V(4) - V(3));
156 32 s += abs(V(5) - V(4));
157 32 s += abs(V(6) - V(5));
158 32 s += abs(V(7) - V(6));
159 32 s += abs(V(8) - V(7));
160 32 s += abs(V(9) - V(8));
161 32 s += abs(V(10) - V(9));
162 32 s += abs(V(11) - V(10));
163 32 s += abs(V(12) - V(11));
164 32 s += abs(V(13) - V(12));
165 32 s += abs(V(14) - V(13));
166 32 s += abs(V(15) - V(14));
167
168 32 pix1 += stride;
169 32 pix2 += stride;
170
171 [2/2: B0 240, B1 32] 272 for (i = 1; i < h; i++) {
172 240 s += abs(V(0) - V(-stride));
173 [2/2: B0 3600, B1 240] 3840 for (j = 1; j < 16; j++)
174 3600 s += abs(V(j) - mid_pred(V(j-stride), V(j-1), V(j-stride) + V(j-1) - V(j-stride-1)));
175 240 pix1 += stride;
176 240 pix2 += stride;
177
178 }
179 #undef V
180 32 return s;
181 }
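pix_median_abs16_c (and pix_median_abs8_c below) score the residual against the MED/LOCO-I gradient predictor: each difference sample is predicted from its left, top and top-left neighbours via mid_pred(), FFmpeg's median-of-three helper. A plain-C equivalent of that helper, shown only as a sketch (the real one lives elsewhere in FFmpeg and has arch-specific variants):

    static inline int median3(int a, int b, int c)
    {
        if (a > b) { int t = a; a = b; b = t; } /* ensure a <= b       */
        if (b > c) b = c;                       /* b = min(b, c)       */
        if (a > b) b = a;                       /* b is now the median */
        return b;
    }

so the per-sample cost above is abs(V(j) - median3(top, left, top + left - topleft)).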
182
183 3304472 static int pix_abs16_x2_c(MPVEncContext *unused, const uint8_t *pix1, const uint8_t *pix2,
184 ptrdiff_t stride, int h)
185 {
186 3304472 int s = 0, i;
187
188 [2/2: B0 43561734, B1 3304472] 46866206 for (i = 0; i < h; i++) {
189 43561734 s += abs(pix1[0] - avg2(pix2[0], pix2[1]));
190 43561734 s += abs(pix1[1] - avg2(pix2[1], pix2[2]));
191 43561734 s += abs(pix1[2] - avg2(pix2[2], pix2[3]));
192 43561734 s += abs(pix1[3] - avg2(pix2[3], pix2[4]));
193 43561734 s += abs(pix1[4] - avg2(pix2[4], pix2[5]));
194 43561734 s += abs(pix1[5] - avg2(pix2[5], pix2[6]));
195 43561734 s += abs(pix1[6] - avg2(pix2[6], pix2[7]));
196 43561734 s += abs(pix1[7] - avg2(pix2[7], pix2[8]));
197 43561734 s += abs(pix1[8] - avg2(pix2[8], pix2[9]));
198 43561734 s += abs(pix1[9] - avg2(pix2[9], pix2[10]));
199 43561734 s += abs(pix1[10] - avg2(pix2[10], pix2[11]));
200 43561734 s += abs(pix1[11] - avg2(pix2[11], pix2[12]));
201 43561734 s += abs(pix1[12] - avg2(pix2[12], pix2[13]));
202 43561734 s += abs(pix1[13] - avg2(pix2[13], pix2[14]));
203 43561734 s += abs(pix1[14] - avg2(pix2[14], pix2[15]));
204 43561734 s += abs(pix1[15] - avg2(pix2[15], pix2[16]));
205 43561734 pix1 += stride;
206 43561734 pix2 += stride;
207 }
208 3304472 return s;
209 }
210
211 3304472 static int pix_abs16_y2_c(MPVEncContext *unused, const uint8_t *pix1, const uint8_t *pix2,
212 ptrdiff_t stride, int h)
213 {
214 3304472 int s = 0, i;
215 3304472 const uint8_t *pix3 = pix2 + stride;
216
217 [2/2: B0 43561686, B1 3304472] 46866158 for (i = 0; i < h; i++) {
218 43561686 s += abs(pix1[0] - avg2(pix2[0], pix3[0]));
219 43561686 s += abs(pix1[1] - avg2(pix2[1], pix3[1]));
220 43561686 s += abs(pix1[2] - avg2(pix2[2], pix3[2]));
221 43561686 s += abs(pix1[3] - avg2(pix2[3], pix3[3]));
222 43561686 s += abs(pix1[4] - avg2(pix2[4], pix3[4]));
223 43561686 s += abs(pix1[5] - avg2(pix2[5], pix3[5]));
224 43561686 s += abs(pix1[6] - avg2(pix2[6], pix3[6]));
225 43561686 s += abs(pix1[7] - avg2(pix2[7], pix3[7]));
226 43561686 s += abs(pix1[8] - avg2(pix2[8], pix3[8]));
227 43561686 s += abs(pix1[9] - avg2(pix2[9], pix3[9]));
228 43561686 s += abs(pix1[10] - avg2(pix2[10], pix3[10]));
229 43561686 s += abs(pix1[11] - avg2(pix2[11], pix3[11]));
230 43561686 s += abs(pix1[12] - avg2(pix2[12], pix3[12]));
231 43561686 s += abs(pix1[13] - avg2(pix2[13], pix3[13]));
232 43561686 s += abs(pix1[14] - avg2(pix2[14], pix3[14]));
233 43561686 s += abs(pix1[15] - avg2(pix2[15], pix3[15]));
234 43561686 pix1 += stride;
235 43561686 pix2 += stride;
236 43561686 pix3 += stride;
237 }
238 3304472 return s;
239 }
240
241 6608896 static int pix_abs16_xy2_c(MPVEncContext *unused, const uint8_t *pix1, const uint8_t *pix2,
242 ptrdiff_t stride, int h)
243 {
244 6608896 int s = 0, i;
245 6608896 const uint8_t *pix3 = pix2 + stride;
246
247 [2/2: B0 87122920, B1 6608896] 93731816 for (i = 0; i < h; i++) {
248 87122920 s += abs(pix1[0] - avg4(pix2[0], pix2[1], pix3[0], pix3[1]));
249 87122920 s += abs(pix1[1] - avg4(pix2[1], pix2[2], pix3[1], pix3[2]));
250 87122920 s += abs(pix1[2] - avg4(pix2[2], pix2[3], pix3[2], pix3[3]));
251 87122920 s += abs(pix1[3] - avg4(pix2[3], pix2[4], pix3[3], pix3[4]));
252 87122920 s += abs(pix1[4] - avg4(pix2[4], pix2[5], pix3[4], pix3[5]));
253 87122920 s += abs(pix1[5] - avg4(pix2[5], pix2[6], pix3[5], pix3[6]));
254 87122920 s += abs(pix1[6] - avg4(pix2[6], pix2[7], pix3[6], pix3[7]));
255 87122920 s += abs(pix1[7] - avg4(pix2[7], pix2[8], pix3[7], pix3[8]));
256 87122920 s += abs(pix1[8] - avg4(pix2[8], pix2[9], pix3[8], pix3[9]));
257 87122920 s += abs(pix1[9] - avg4(pix2[9], pix2[10], pix3[9], pix3[10]));
258 87122920 s += abs(pix1[10] - avg4(pix2[10], pix2[11], pix3[10], pix3[11]));
259 87122920 s += abs(pix1[11] - avg4(pix2[11], pix2[12], pix3[11], pix3[12]));
260 87122920 s += abs(pix1[12] - avg4(pix2[12], pix2[13], pix3[12], pix3[13]));
261 87122920 s += abs(pix1[13] - avg4(pix2[13], pix2[14], pix3[13], pix3[14]));
262 87122920 s += abs(pix1[14] - avg4(pix2[14], pix2[15], pix3[14], pix3[15]));
263 87122920 s += abs(pix1[15] - avg4(pix2[15], pix2[16], pix3[15], pix3[16]));
264 87122920 pix1 += stride;
265 87122920 pix2 += stride;
266 87122920 pix3 += stride;
267 }
268 6608896 return s;
269 }
270
271 33629477 static inline int pix_abs8_c(MPVEncContext *unused, const uint8_t *pix1, const uint8_t *pix2,
272 ptrdiff_t stride, int h)
273 {
274 33629477 int s = 0, i;
275
276 [2/2: B0 269035954, B1 33629477] 302665431 for (i = 0; i < h; i++) {
277 269035954 s += abs(pix1[0] - pix2[0]);
278 269035954 s += abs(pix1[1] - pix2[1]);
279 269035954 s += abs(pix1[2] - pix2[2]);
280 269035954 s += abs(pix1[3] - pix2[3]);
281 269035954 s += abs(pix1[4] - pix2[4]);
282 269035954 s += abs(pix1[5] - pix2[5]);
283 269035954 s += abs(pix1[6] - pix2[6]);
284 269035954 s += abs(pix1[7] - pix2[7]);
285 269035954 pix1 += stride;
286 269035954 pix2 += stride;
287 }
288 33629477 return s;
289 }
290
291 32 static inline int pix_median_abs8_c(MPVEncContext *unused, const uint8_t *pix1, const uint8_t *pix2,
292 ptrdiff_t stride, int h)
293 {
294 32 int s = 0, i, j;
295
296 #define V(x) (pix1[x] - pix2[x])
297
298 32 s += abs(V(0));
299 32 s += abs(V(1) - V(0));
300 32 s += abs(V(2) - V(1));
301 32 s += abs(V(3) - V(2));
302 32 s += abs(V(4) - V(3));
303 32 s += abs(V(5) - V(4));
304 32 s += abs(V(6) - V(5));
305 32 s += abs(V(7) - V(6));
306
307 32 pix1 += stride;
308 32 pix2 += stride;
309
310 [2/2: B0 248, B1 32] 280 for (i = 1; i < h; i++) {
311 248 s += abs(V(0) - V(-stride));
312 [2/2: B0 1736, B1 248] 1984 for (j = 1; j < 8; j++)
313 1736 s += abs(V(j) - mid_pred(V(j-stride), V(j-1), V(j-stride) + V(j-1) - V(j-stride-1)));
314 248 pix1 += stride;
315 248 pix2 += stride;
316
317 }
318 #undef V
319 32 return s;
320 }
321
322 837597 static int pix_abs8_x2_c(MPVEncContext *unused, const uint8_t *pix1, const uint8_t *pix2,
323 ptrdiff_t stride, int h)
324 {
325 837597 int s = 0, i;
326
327 [2/2: B0 6700884, B1 837597] 7538481 for (i = 0; i < h; i++) {
328 6700884 s += abs(pix1[0] - avg2(pix2[0], pix2[1]));
329 6700884 s += abs(pix1[1] - avg2(pix2[1], pix2[2]));
330 6700884 s += abs(pix1[2] - avg2(pix2[2], pix2[3]));
331 6700884 s += abs(pix1[3] - avg2(pix2[3], pix2[4]));
332 6700884 s += abs(pix1[4] - avg2(pix2[4], pix2[5]));
333 6700884 s += abs(pix1[5] - avg2(pix2[5], pix2[6]));
334 6700884 s += abs(pix1[6] - avg2(pix2[6], pix2[7]));
335 6700884 s += abs(pix1[7] - avg2(pix2[7], pix2[8]));
336 6700884 pix1 += stride;
337 6700884 pix2 += stride;
338 }
339 837597 return s;
340 }
341
342 837597 static int pix_abs8_y2_c(MPVEncContext *unused, const uint8_t *pix1, const uint8_t *pix2,
343 ptrdiff_t stride, int h)
344 {
345 837597 int s = 0, i;
346 837597 const uint8_t *pix3 = pix2 + stride;
347
348 [2/2: B0 6700852, B1 837597] 7538449 for (i = 0; i < h; i++) {
349 6700852 s += abs(pix1[0] - avg2(pix2[0], pix3[0]));
350 6700852 s += abs(pix1[1] - avg2(pix2[1], pix3[1]));
351 6700852 s += abs(pix1[2] - avg2(pix2[2], pix3[2]));
352 6700852 s += abs(pix1[3] - avg2(pix2[3], pix3[3]));
353 6700852 s += abs(pix1[4] - avg2(pix2[4], pix3[4]));
354 6700852 s += abs(pix1[5] - avg2(pix2[5], pix3[5]));
355 6700852 s += abs(pix1[6] - avg2(pix2[6], pix3[6]));
356 6700852 s += abs(pix1[7] - avg2(pix2[7], pix3[7]));
357 6700852 pix1 += stride;
358 6700852 pix2 += stride;
359 6700852 pix3 += stride;
360 }
361 837597 return s;
362 }
363
364 1675146 static int pix_abs8_xy2_c(MPVEncContext *unused, const uint8_t *pix1, const uint8_t *pix2,
365 ptrdiff_t stride, int h)
366 {
367 1675146 int s = 0, i;
368 1675146 const uint8_t *pix3 = pix2 + stride;
369
370 [2/2: B0 13401138, B1 1675146] 15076284 for (i = 0; i < h; i++) {
371 13401138 s += abs(pix1[0] - avg4(pix2[0], pix2[1], pix3[0], pix3[1]));
372 13401138 s += abs(pix1[1] - avg4(pix2[1], pix2[2], pix3[1], pix3[2]));
373 13401138 s += abs(pix1[2] - avg4(pix2[2], pix2[3], pix3[2], pix3[3]));
374 13401138 s += abs(pix1[3] - avg4(pix2[3], pix2[4], pix3[3], pix3[4]));
375 13401138 s += abs(pix1[4] - avg4(pix2[4], pix2[5], pix3[4], pix3[5]));
376 13401138 s += abs(pix1[5] - avg4(pix2[5], pix2[6], pix3[5], pix3[6]));
377 13401138 s += abs(pix1[6] - avg4(pix2[6], pix2[7], pix3[6], pix3[7]));
378 13401138 s += abs(pix1[7] - avg4(pix2[7], pix2[8], pix3[7], pix3[8]));
379 13401138 pix1 += stride;
380 13401138 pix2 += stride;
381 13401138 pix3 += stride;
382 }
383 1675146 return s;
384 }
385
386 748189 static int nsse16_c(MPVEncContext *const c, const uint8_t *s1, const uint8_t *s2,
387 ptrdiff_t stride, int h)
388 {
389 748189 int score1 = 0, score2 = 0, x, y;
390
391 [2/2: B0 11970702, B1 748189] 12718891 for (y = 0; y < h; y++) {
392 [2/2: B0 191531232, B1 11970702] 203501934 for (x = 0; x < 16; x++)
393 191531232 score1 += (s1[x] - s2[x]) * (s1[x] - s2[x]);
394 [2/2: B0 11222513, B1 748189] 11970702 if (y + 1 < h) {
395 [2/2: B0 168337695, B1 11222513] 179560208 for (x = 0; x < 15; x++)
396 168337695 score2 += FFABS(s1[x] - s1[x + stride] -
397 168337695 s1[x + 1] + s1[x + stride + 1]) -
398 168337695 FFABS(s2[x] - s2[x + stride] -
399 s2[x + 1] + s2[x + stride + 1]);
400 }
401 11970702 s1 += stride;
402 11970702 s2 += stride;
403 }
404
405 [2/2: B0 748141, B1 48] 748189 if (c)
406 748141 return score1 + FFABS(score2) * c->c.avctx->nsse_weight;
407 else
408 48 return score1 + FFABS(score2) * 8;
409 }
410
411 48 static int nsse8_c(MPVEncContext *const c, const uint8_t *s1, const uint8_t *s2,
412 ptrdiff_t stride, int h)
413 {
414 48 int score1 = 0, score2 = 0, x, y;
415
416 [2/2: B0 478, B1 48] 526 for (y = 0; y < h; y++) {
417 [2/2: B0 3824, B1 478] 4302 for (x = 0; x < 8; x++)
418 3824 score1 += (s1[x] - s2[x]) * (s1[x] - s2[x]);
419 [2/2: B0 430, B1 48] 478 if (y + 1 < h) {
420 [2/2: B0 3010, B1 430] 3440 for (x = 0; x < 7; x++)
421 3010 score2 += FFABS(s1[x] - s1[x + stride] -
422 3010 s1[x + 1] + s1[x + stride + 1]) -
423 3010 FFABS(s2[x] - s2[x + stride] -
424 s2[x + 1] + s2[x + stride + 1]);
425 }
426 478 s1 += stride;
427 478 s2 += stride;
428 }
429
430 [1/2: B0 not taken, B1 48] 48 if (c)
431 return score1 + FFABS(score2) * c->c.avctx->nsse_weight;
432 else
433 48 return score1 + FFABS(score2) * 8;
434 }
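Read as a formula, nsse16_c and nsse8_c return (a summary of the code above, not a comment from the source):

    score1 + |score2| * w

    score1 = sum of (s1[x] - s2[x])^2 over the block            (plain SSE)
    score2 = sum of |cross-difference of s1| - |same for s2|     (local texture term)
    w      = avctx->nsse_weight when a context is passed, 8 otherwise

so a candidate whose local texture differs from the reference pays an extra, weighted penalty on top of the plain SSE.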
435
436 static int zero_cmp(MPVEncContext *s, const uint8_t *a, const uint8_t *b,
437 ptrdiff_t stride, int h)
438 {
439 return 0;
440 }
441
442 1097 av_cold int ff_set_cmp(const MECmpContext *c, me_cmp_func *cmp, int type, int mpvenc)
443 {
444 #define ENTRY(CMP_FLAG, ARRAY, MPVENC_ONLY) \
445 [FF_CMP_ ## CMP_FLAG] = { \
446 .offset = offsetof(MECmpContext, ARRAY), \
447 .mpv_only = MPVENC_ONLY, \
448 .available = 1, \
449 }
450 static const struct {
451 char available;
452 char mpv_only;
453 uint16_t offset;
454 } cmp_func_list[] = {
455 ENTRY(SAD, sad, 0),
456 ENTRY(SSE, sse, 0),
457 ENTRY(SATD, hadamard8_diff, 0),
458 ENTRY(DCT, dct_sad, 1),
459 ENTRY(PSNR, quant_psnr, 1),
460 ENTRY(BIT, bit, 1),
461 ENTRY(RD, rd, 1),
462 ENTRY(VSAD, vsad, 0),
463 ENTRY(VSSE, vsse, 0),
464 ENTRY(NSSE, nsse, 0),
465 #if CONFIG_SNOW_DECODER || CONFIG_SNOW_ENCODER
466 ENTRY(W53, w53, 0),
467 ENTRY(W97, w97, 0),
468 #endif
469 ENTRY(DCTMAX, dct_max, 1),
470 #if CONFIG_GPL
471 ENTRY(DCT264, dct264_sad, 1),
472 #endif
473 ENTRY(MEDIAN_SAD, median_sad, 0),
474 };
475 const me_cmp_func *me_cmp_func_array;
476
477 1097 type &= 0xFF;
478
479 [1/2: B0 not taken, B1 1097] 1097 if (type == FF_CMP_ZERO) {
480 for (int i = 0; i < 6; i++)
481 cmp[i] = zero_cmp;
482 return 0;
483 }
484 [1/2: B0 1097, B1 not taken] 1097 if (type >= FF_ARRAY_ELEMS(cmp_func_list) ||
485 [3/4: B0 1097, B1 not taken, B2 60, B3 1037] 1097 !cmp_func_list[type].available ||
486 [1/2: B0 not taken, B1 60] 60 !mpvenc && cmp_func_list[type].mpv_only) {
487 av_log(NULL, AV_LOG_ERROR,
488 "invalid cmp function selection\n");
489 return AVERROR(EINVAL);
490 }
491 1097 me_cmp_func_array = (const me_cmp_func*)(((const char*)c) + cmp_func_list[type].offset);
492 [2/2: B0 6582, B1 1097] 7679 for (int i = 0; i < 6; i++)
493 6582 cmp[i] = me_cmp_func_array[i];
494
495 1097 return 0;
496 }
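A hedged usage sketch of ff_set_cmp() (caller-side code assumed here, not taken from this file): an encoder first fills an MECmpContext, then resolves the user-selected metric into a small array of me_cmp_func pointers, where index 0 is the 16x16 variant and index 1 the 8x8 variant (per the "[0] 16 [1] 8" note in ff_me_cmp_init below).

    #include "avcodec.h"
    #include "me_cmp.h"

    /* hypothetical helper: resolve avctx->me_cmp into six function pointers */
    static int select_metric(MECmpContext *mecc, me_cmp_func me_cmp[6],
                             AVCodecContext *avctx)
    {
        ff_me_cmp_init(mecc, avctx);
        /* mpvenc = 1 also allows the mpegvideo-only metrics (DCT, RD, BIT, ...);
           returns AVERROR(EINVAL) on an invalid selection */
        return ff_set_cmp(mecc, me_cmp, avctx->me_cmp, 1);
    }

    /* on success, me_cmp[0](s, cur, ref, stride, 16) scores a 16x16 block
       and me_cmp[1](s, cur, ref, stride, 8) an 8x8 block */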
497
498 #define BUTTERFLY2(o1, o2, i1, i2) \
499 o1 = (i1) + (i2); \
500 o2 = (i1) - (i2);
501
502 #define BUTTERFLY1(x, y) \
503 { \
504 int a, b; \
505 a = x; \
506 b = y; \
507 x = a + b; \
508 y = a - b; \
509 }
510
511 #define BUTTERFLYA(x, y) (FFABS((x) + (y)) + FFABS((x) - (y)))
512
513 15264260 static int hadamard8_diff8x8_c(MPVEncContext *unused, const uint8_t *dst,
514 const uint8_t *src, ptrdiff_t stride, int h)
515 {
516 15264260 int i, temp[64], sum = 0;
517
518 [2/2: B0 122114080, B1 15264260] 137378340 for (i = 0; i < 8; i++) {
519 // FIXME: try pointer walks
520 122114080 BUTTERFLY2(temp[8 * i + 0], temp[8 * i + 1],
521 src[stride * i + 0] - dst[stride * i + 0],
522 src[stride * i + 1] - dst[stride * i + 1]);
523 122114080 BUTTERFLY2(temp[8 * i + 2], temp[8 * i + 3],
524 src[stride * i + 2] - dst[stride * i + 2],
525 src[stride * i + 3] - dst[stride * i + 3]);
526 122114080 BUTTERFLY2(temp[8 * i + 4], temp[8 * i + 5],
527 src[stride * i + 4] - dst[stride * i + 4],
528 src[stride * i + 5] - dst[stride * i + 5]);
529 122114080 BUTTERFLY2(temp[8 * i + 6], temp[8 * i + 7],
530 src[stride * i + 6] - dst[stride * i + 6],
531 src[stride * i + 7] - dst[stride * i + 7]);
532
533 122114080 BUTTERFLY1(temp[8 * i + 0], temp[8 * i + 2]);
534 122114080 BUTTERFLY1(temp[8 * i + 1], temp[8 * i + 3]);
535 122114080 BUTTERFLY1(temp[8 * i + 4], temp[8 * i + 6]);
536 122114080 BUTTERFLY1(temp[8 * i + 5], temp[8 * i + 7]);
537
538 122114080 BUTTERFLY1(temp[8 * i + 0], temp[8 * i + 4]);
539 122114080 BUTTERFLY1(temp[8 * i + 1], temp[8 * i + 5]);
540 122114080 BUTTERFLY1(temp[8 * i + 2], temp[8 * i + 6]);
541 122114080 BUTTERFLY1(temp[8 * i + 3], temp[8 * i + 7]);
542 }
543
544 [2/2: B0 122114080, B1 15264260] 137378340 for (i = 0; i < 8; i++) {
545 122114080 BUTTERFLY1(temp[8 * 0 + i], temp[8 * 1 + i]);
546 122114080 BUTTERFLY1(temp[8 * 2 + i], temp[8 * 3 + i]);
547 122114080 BUTTERFLY1(temp[8 * 4 + i], temp[8 * 5 + i]);
548 122114080 BUTTERFLY1(temp[8 * 6 + i], temp[8 * 7 + i]);
549
550 122114080 BUTTERFLY1(temp[8 * 0 + i], temp[8 * 2 + i]);
551 122114080 BUTTERFLY1(temp[8 * 1 + i], temp[8 * 3 + i]);
552 122114080 BUTTERFLY1(temp[8 * 4 + i], temp[8 * 6 + i]);
553 122114080 BUTTERFLY1(temp[8 * 5 + i], temp[8 * 7 + i]);
554
555 122114080 sum += BUTTERFLYA(temp[8 * 0 + i], temp[8 * 4 + i]) +
556 122114080 BUTTERFLYA(temp[8 * 1 + i], temp[8 * 5 + i]) +
557 122114080 BUTTERFLYA(temp[8 * 2 + i], temp[8 * 6 + i]) +
558 122114080 BUTTERFLYA(temp[8 * 3 + i], temp[8 * 7 + i]);
559 }
560 15264260 return sum;
561 }
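In matrix form, hadamard8_diff8x8_c computes the SATD of the residual: with D the 8x8 difference block (src - dst) and H the unnormalised 8x8 Hadamard matrix, the two butterfly passes amount to the 2-D transform and the result is (a summary of the code, not a quote from it):

    SATD(D) = sum over i,j of | (H * D * H^T)[i][j] |

hadamard8_intra8x8_c below applies the same transform to the source block itself and then subtracts the absolute DC term (the "-mean" comment), so a block's average level does not contribute to its intra cost.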
562
563 100 static int hadamard8_intra8x8_c(MPVEncContext *unused, const uint8_t *src,
564 const uint8_t *dummy, ptrdiff_t stride, int h)
565 {
566 100 int i, temp[64], sum = 0;
567
568 [2/2: B0 800, B1 100] 900 for (i = 0; i < 8; i++) {
569 // FIXME: try pointer walks
570 800 BUTTERFLY2(temp[8 * i + 0], temp[8 * i + 1],
571 src[stride * i + 0], src[stride * i + 1]);
572 800 BUTTERFLY2(temp[8 * i + 2], temp[8 * i + 3],
573 src[stride * i + 2], src[stride * i + 3]);
574 800 BUTTERFLY2(temp[8 * i + 4], temp[8 * i + 5],
575 src[stride * i + 4], src[stride * i + 5]);
576 800 BUTTERFLY2(temp[8 * i + 6], temp[8 * i + 7],
577 src[stride * i + 6], src[stride * i + 7]);
578
579 800 BUTTERFLY1(temp[8 * i + 0], temp[8 * i + 2]);
580 800 BUTTERFLY1(temp[8 * i + 1], temp[8 * i + 3]);
581 800 BUTTERFLY1(temp[8 * i + 4], temp[8 * i + 6]);
582 800 BUTTERFLY1(temp[8 * i + 5], temp[8 * i + 7]);
583
584 800 BUTTERFLY1(temp[8 * i + 0], temp[8 * i + 4]);
585 800 BUTTERFLY1(temp[8 * i + 1], temp[8 * i + 5]);
586 800 BUTTERFLY1(temp[8 * i + 2], temp[8 * i + 6]);
587 800 BUTTERFLY1(temp[8 * i + 3], temp[8 * i + 7]);
588 }
589
590 [2/2: B0 800, B1 100] 900 for (i = 0; i < 8; i++) {
591 800 BUTTERFLY1(temp[8 * 0 + i], temp[8 * 1 + i]);
592 800 BUTTERFLY1(temp[8 * 2 + i], temp[8 * 3 + i]);
593 800 BUTTERFLY1(temp[8 * 4 + i], temp[8 * 5 + i]);
594 800 BUTTERFLY1(temp[8 * 6 + i], temp[8 * 7 + i]);
595
596 800 BUTTERFLY1(temp[8 * 0 + i], temp[8 * 2 + i]);
597 800 BUTTERFLY1(temp[8 * 1 + i], temp[8 * 3 + i]);
598 800 BUTTERFLY1(temp[8 * 4 + i], temp[8 * 6 + i]);
599 800 BUTTERFLY1(temp[8 * 5 + i], temp[8 * 7 + i]);
600
601 800 sum +=
602 800 BUTTERFLYA(temp[8 * 0 + i], temp[8 * 4 + i])
603 800 + BUTTERFLYA(temp[8 * 1 + i], temp[8 * 5 + i])
604 800 + BUTTERFLYA(temp[8 * 2 + i], temp[8 * 6 + i])
605 800 + BUTTERFLYA(temp[8 * 3 + i], temp[8 * 7 + i]);
606 }
607
608 100 sum -= FFABS(temp[8 * 0] + temp[8 * 4]); // -mean
609
610 100 return sum;
611 }
612
613 static int dct_sad8x8_c(MPVEncContext *const s, const uint8_t *src1,
614 const uint8_t *src2, ptrdiff_t stride, int h)
615 {
616 LOCAL_ALIGNED_16(int16_t, temp, [64]);
617
618 s->pdsp.diff_pixels_unaligned(temp, src1, src2, stride);
619 s->fdsp.fdct(temp);
620 return s->sum_abs_dctelem(temp);
621 }
622
623 #if CONFIG_GPL
624 #define DCT8_1D \
625 { \
626 const int s07 = SRC(0) + SRC(7); \
627 const int s16 = SRC(1) + SRC(6); \
628 const int s25 = SRC(2) + SRC(5); \
629 const int s34 = SRC(3) + SRC(4); \
630 const int a0 = s07 + s34; \
631 const int a1 = s16 + s25; \
632 const int a2 = s07 - s34; \
633 const int a3 = s16 - s25; \
634 const int d07 = SRC(0) - SRC(7); \
635 const int d16 = SRC(1) - SRC(6); \
636 const int d25 = SRC(2) - SRC(5); \
637 const int d34 = SRC(3) - SRC(4); \
638 const int a4 = d16 + d25 + (d07 + (d07 >> 1)); \
639 const int a5 = d07 - d34 - (d25 + (d25 >> 1)); \
640 const int a6 = d07 + d34 - (d16 + (d16 >> 1)); \
641 const int a7 = d16 - d25 + (d34 + (d34 >> 1)); \
642 DST(0, a0 + a1); \
643 DST(1, a4 + (a7 >> 2)); \
644 DST(2, a2 + (a3 >> 1)); \
645 DST(3, a5 + (a6 >> 2)); \
646 DST(4, a0 - a1); \
647 DST(5, a6 - (a5 >> 2)); \
648 DST(6, (a2 >> 1) - a3); \
649 DST(7, (a4 >> 2) - a7); \
650 }
651
652 static int dct264_sad8x8_c(MPVEncContext *const s, const uint8_t *src1,
653 const uint8_t *src2, ptrdiff_t stride, int h)
654 {
655 int16_t dct[8][8];
656 int i, sum = 0;
657
658 s->pdsp.diff_pixels_unaligned(dct[0], src1, src2, stride);
659
660 #define SRC(x) dct[i][x]
661 #define DST(x, v) dct[i][x] = v
662 for (i = 0; i < 8; i++)
663 DCT8_1D
664 #undef SRC
665 #undef DST
666
667 #define SRC(x) dct[x][i]
668 #define DST(x, v) sum += FFABS(v)
669 for (i = 0; i < 8; i++)
670 DCT8_1D
671 #undef SRC
672 #undef DST
673 return sum;
674 }
675 #endif
676
677 static int dct_max8x8_c(MPVEncContext *const s, const uint8_t *src1,
678 const uint8_t *src2, ptrdiff_t stride, int h)
679 {
680 LOCAL_ALIGNED_16(int16_t, temp, [64]);
681 int sum = 0, i;
682
683 s->pdsp.diff_pixels_unaligned(temp, src1, src2, stride);
684 s->fdsp.fdct(temp);
685
686 for (i = 0; i < 64; i++)
687 sum = FFMAX(sum, FFABS(temp[i]));
688
689 return sum;
690 }
691
692 static int quant_psnr8x8_c(MPVEncContext *const s, const uint8_t *src1,
693 const uint8_t *src2, ptrdiff_t stride, int h)
694 {
695 LOCAL_ALIGNED_16(int16_t, temp, [64 * 2]);
696 int16_t *const bak = temp + 64;
697 int sum = 0, i;
698
699 s->c.mb_intra = 0;
700
701 s->pdsp.diff_pixels_unaligned(temp, src1, src2, stride);
702
703 memcpy(bak, temp, 64 * sizeof(int16_t));
704
705 s->c.block_last_index[0 /* FIXME */] =
706 s->dct_quantize(s, temp, 0 /* FIXME */, s->c.qscale, &i);
707 s->c.dct_unquantize_inter(&s->c, temp, 0, s->c.qscale);
708 ff_simple_idct_int16_8bit(temp); // FIXME
709
710 for (i = 0; i < 64; i++)
711 sum += (temp[i] - bak[i]) * (temp[i] - bak[i]);
712
713 return sum;
714 }
715
716 static int rd8x8_c(MPVEncContext *const s, const uint8_t *src1, const uint8_t *src2,
717 ptrdiff_t stride, int h)
718 {
719 const uint8_t *scantable = s->c.intra_scantable.permutated;
720 LOCAL_ALIGNED_16(int16_t, temp, [64]);
721 LOCAL_ALIGNED_16(uint8_t, lsrc1, [64]);
722 LOCAL_ALIGNED_16(uint8_t, lsrc2, [64]);
723 int i, last, run, bits, level, distortion, start_i;
724 const int esc_length = s->ac_esc_length;
725 const uint8_t *length, *last_length;
726
727 copy_block8(lsrc1, src1, 8, stride, 8);
728 copy_block8(lsrc2, src2, 8, stride, 8);
729
730 s->pdsp.diff_pixels(temp, lsrc1, lsrc2, 8);
731
732 s->c.block_last_index[0 /* FIXME */] =
733 last =
734 s->dct_quantize(s, temp, 0 /* FIXME */, s->c.qscale, &i);
735
736 bits = 0;
737
738 if (s->c.mb_intra) {
739 start_i = 1;
740 length = s->intra_ac_vlc_length;
741 last_length = s->intra_ac_vlc_last_length;
742 bits += s->luma_dc_vlc_length[temp[0] + 256]; // FIXME: chroma
743 } else {
744 start_i = 0;
745 length = s->inter_ac_vlc_length;
746 last_length = s->inter_ac_vlc_last_length;
747 }
748
749 if (last >= start_i) {
750 run = 0;
751 for (i = start_i; i < last; i++) {
752 int j = scantable[i];
753 level = temp[j];
754
755 if (level) {
756 level += 64;
757 if ((level & (~127)) == 0)
758 bits += length[UNI_AC_ENC_INDEX(run, level)];
759 else
760 bits += esc_length;
761 run = 0;
762 } else
763 run++;
764 }
765 i = scantable[last];
766
767 level = temp[i] + 64;
768
769 av_assert2(level - 64);
770
771 if ((level & (~127)) == 0) {
772 bits += last_length[UNI_AC_ENC_INDEX(run, level)];
773 } else
774 bits += esc_length;
775 }
776
777 if (last >= 0) {
778 if (s->c.mb_intra)
779 s->c.dct_unquantize_intra(&s->c, temp, 0, s->c.qscale);
780 else
781 s->c.dct_unquantize_inter(&s->c, temp, 0, s->c.qscale);
782 }
783
784 s->c.idsp.idct_add(lsrc2, 8, temp);
785
786 distortion = s->sse_cmp[1](NULL, lsrc2, lsrc1, 8, 8);
787
788 return distortion + ((bits * s->c.qscale * s->c.qscale * 109 + 64) >> 7);
789 }
790
791 static int bit8x8_c(MPVEncContext *const s, const uint8_t *src1, const uint8_t *src2,
792 ptrdiff_t stride, int h)
793 {
794 const uint8_t *scantable = s->c.intra_scantable.permutated;
795 LOCAL_ALIGNED_16(int16_t, temp, [64]);
796 int i, last, run, bits, level, start_i;
797 const int esc_length = s->ac_esc_length;
798 const uint8_t *length, *last_length;
799
800 s->pdsp.diff_pixels_unaligned(temp, src1, src2, stride);
801
802 s->c.block_last_index[0 /* FIXME */] =
803 last =
804 s->dct_quantize(s, temp, 0 /* FIXME */, s->c.qscale, &i);
805
806 bits = 0;
807
808 if (s->c.mb_intra) {
809 start_i = 1;
810 length = s->intra_ac_vlc_length;
811 last_length = s->intra_ac_vlc_last_length;
812 bits += s->luma_dc_vlc_length[temp[0] + 256]; // FIXME: chroma
813 } else {
814 start_i = 0;
815 length = s->inter_ac_vlc_length;
816 last_length = s->inter_ac_vlc_last_length;
817 }
818
819 if (last >= start_i) {
820 run = 0;
821 for (i = start_i; i < last; i++) {
822 int j = scantable[i];
823 level = temp[j];
824
825 if (level) {
826 level += 64;
827 if ((level & (~127)) == 0)
828 bits += length[UNI_AC_ENC_INDEX(run, level)];
829 else
830 bits += esc_length;
831 run = 0;
832 } else
833 run++;
834 }
835 i = scantable[last];
836
837 level = temp[i] + 64;
838
839 av_assert2(level - 64);
840
841 if ((level & (~127)) == 0)
842 bits += last_length[UNI_AC_ENC_INDEX(run, level)];
843 else
844 bits += esc_length;
845 }
846
847 return bits;
848 }
849
850 #define VSAD_INTRA(size) \
851 static int vsad_intra ## size ## _c(MPVEncContext *unused, \
852 const uint8_t *s, const uint8_t *dummy, \
853 ptrdiff_t stride, int h) \
854 { \
855 int score = 0, x, y; \
856 \
857 for (y = 1; y < h; y++) { \
858 for (x = 0; x < size; x += 4) { \
859 score += FFABS(s[x] - s[x + stride]) + \
860 FFABS(s[x + 1] - s[x + stride + 1]) + \
861 FFABS(s[x + 2] - s[x + 2 + stride]) + \
862 FFABS(s[x + 3] - s[x + 3 + stride]); \
863 } \
864 s += stride; \
865 } \
866 \
867 return score; \
868 }
869 [4/4: B0 788, B1 394, B2 394, B3 48] 1230 VSAD_INTRA(8)
870 [4/4: B0 34475232, B1 8618808, B2 8618808, B3 1231246] 44325286 VSAD_INTRA(16)
871
872 #define VSAD(size) \
873 static int vsad ## size ## _c(MPVEncContext *unused, \
874 const uint8_t *s1, const uint8_t *s2, \
875 ptrdiff_t stride, int h) \
876 { \
877 int score = 0, x, y; \
878 \
879 for (y = 1; y < h; y++) { \
880 for (x = 0; x < size; x++) \
881 score += FFABS(s1[x] - s2[x] - s1[x + stride] + s2[x + stride]); \
882 s1 += stride; \
883 s2 += stride; \
884 } \
885 \
886 return score; \
887 }
888 [4/4: B0 1920, B1 240, B2 240, B3 32] 2192 VSAD(8)
889 [4/4: B0 213086112, B1 13317882, B2 13317882, B3 1902546] 228306540 VSAD(16)
890
891 #define SQ(a) ((a) * (a))
892 #define VSSE_INTRA(size) \
893 static int vsse_intra ## size ## _c(MPVEncContext *unused, \
894 const uint8_t *s, const uint8_t *dummy, \
895 ptrdiff_t stride, int h) \
896 { \
897 int score = 0, x, y; \
898 \
899 for (y = 1; y < h; y++) { \
900 for (x = 0; x < size; x += 4) { \
901 score += SQ(s[x] - s[x + stride]) + \
902 SQ(s[x + 1] - s[x + stride + 1]) + \
903 SQ(s[x + 2] - s[x + stride + 2]) + \
904 SQ(s[x + 3] - s[x + stride + 3]); \
905 } \
906 s += stride; \
907 } \
908 \
909 return score; \
910 }
911 [4/4: B0 560, B1 280, B2 280, B3 32] 872 VSSE_INTRA(8)
912 [4/4: B0 1152, B1 288, B2 288, B3 32] 1472 VSSE_INTRA(16)
913
914 #define VSSE(size) \
915 static int vsse ## size ## _c(MPVEncContext *unused, const uint8_t *s1, \
916 const uint8_t *s2, ptrdiff_t stride, int h) \
917 { \
918 int score = 0, x, y; \
919 \
920 for (y = 1; y < h; y++) { \
921 for (x = 0; x < size; x++) \
922 score += SQ(s1[x] - s2[x] - s1[x + stride] + s2[x + stride]); \
923 s1 += stride; \
924 s2 += stride; \
925 } \
926 \
927 return score; \
928 }
929 [4/4: B0 2368, B1 296, B2 296, B3 32] 2696 VSSE(8)
930 [4/4: B0 4224, B1 264, B2 264, B3 32] 4520 VSSE(16)
931
932 #define WRAPPER8_16_SQ(name8, name16) \
933 static int name16(MPVEncContext *const s, const uint8_t *dst, \
934 const uint8_t *src, ptrdiff_t stride, int h) \
935 { \
936 int score = 0; \
937 \
938 score += name8(s, dst, src, stride, 8); \
939 score += name8(s, dst + 8, src + 8, stride, 8); \
940 if (h == 16) { \
941 dst += 8 * stride; \
942 src += 8 * stride; \
943 score += name8(s, dst, src, stride, 8); \
944 score += name8(s, dst + 8, src + 8, stride, 8); \
945 } \
946 return score; \
947 }
948
949 [2/2: B2 3502681, B3 46] 3502727 WRAPPER8_16_SQ(hadamard8_diff8x8_c, hadamard8_diff16_c)
950 [2/2: B2 2, B3 30] 32 WRAPPER8_16_SQ(hadamard8_intra8x8_c, hadamard8_intra16_c)
951 WRAPPER8_16_SQ(dct_sad8x8_c, dct_sad16_c)
952 #if CONFIG_GPL
953 WRAPPER8_16_SQ(dct264_sad8x8_c, dct264_sad16_c)
954 #endif
955 WRAPPER8_16_SQ(dct_max8x8_c, dct_max16_c)
956 WRAPPER8_16_SQ(quant_psnr8x8_c, quant_psnr16_c)
957 WRAPPER8_16_SQ(rd8x8_c, rd16_c)
958 WRAPPER8_16_SQ(bit8x8_c, bit16_c)
959
960 1063 av_cold void ff_me_cmp_init(MECmpContext *c, AVCodecContext *avctx)
961 {
962 1063 memset(c, 0, sizeof(*c));
963
964 1063 c->sum_abs_dctelem = sum_abs_dctelem_c;
965
966 /* TODO [0] 16 [1] 8 */
967 1063 c->pix_abs[0][0] = pix_abs16_c;
968 1063 c->pix_abs[0][1] = pix_abs16_x2_c;
969 1063 c->pix_abs[0][2] = pix_abs16_y2_c;
970 1063 c->pix_abs[0][3] = pix_abs16_xy2_c;
971 1063 c->pix_abs[1][0] = pix_abs8_c;
972 1063 c->pix_abs[1][1] = pix_abs8_x2_c;
973 1063 c->pix_abs[1][2] = pix_abs8_y2_c;
974 1063 c->pix_abs[1][3] = pix_abs8_xy2_c;
975
976 #define SET_CMP_FUNC(name) \
977 c->name[0] = name ## 16_c; \
978 c->name[1] = name ## 8x8_c;
979
980 1063 SET_CMP_FUNC(hadamard8_diff)
981 1063 c->hadamard8_diff[4] = hadamard8_intra16_c;
982 1063 c->hadamard8_diff[5] = hadamard8_intra8x8_c;
983 1063 SET_CMP_FUNC(dct_sad)
984 1063 SET_CMP_FUNC(dct_max)
985 #if CONFIG_GPL
986 1063 SET_CMP_FUNC(dct264_sad)
987 #endif
988 1063 c->sad[0] = pix_abs16_c;
989 1063 c->sad[1] = pix_abs8_c;
990 1063 c->sse[0] = sse16_c;
991 1063 c->sse[1] = sse8_c;
992 1063 c->sse[2] = sse4_c;
993 1063 SET_CMP_FUNC(quant_psnr)
994 1063 SET_CMP_FUNC(rd)
995 1063 SET_CMP_FUNC(bit)
996 1063 c->vsad[0] = vsad16_c;
997 1063 c->vsad[1] = vsad8_c;
998 1063 c->vsad[4] = vsad_intra16_c;
999 1063 c->vsad[5] = vsad_intra8_c;
1000 1063 c->vsse[0] = vsse16_c;
1001 1063 c->vsse[1] = vsse8_c;
1002 1063 c->vsse[4] = vsse_intra16_c;
1003 1063 c->vsse[5] = vsse_intra8_c;
1004 1063 c->nsse[0] = nsse16_c;
1005 1063 c->nsse[1] = nsse8_c;
1006 #if CONFIG_SNOW_DECODER || CONFIG_SNOW_ENCODER
1007 1063 ff_dsputil_init_dwt(c);
1008 #endif
1009
1010 1063 c->median_sad[0] = pix_median_abs16_c;
1011 1063 c->median_sad[1] = pix_median_abs8_c;
1012
1013 #if ARCH_AARCH64
1014 ff_me_cmp_init_aarch64(c, avctx);
1015 #elif ARCH_ARM
1016 ff_me_cmp_init_arm(c, avctx);
1017 #elif ARCH_PPC
1018 ff_me_cmp_init_ppc(c, avctx);
1019 #elif ARCH_RISCV
1020 ff_me_cmp_init_riscv(c, avctx);
1021 #elif ARCH_X86
1022 1063 ff_me_cmp_init_x86(c, avctx);
1023 #elif ARCH_MIPS
1024 ff_me_cmp_init_mips(c, avctx);
1025 #endif
1026
1027 1063 }
1028