FFmpeg coverage

Directory: ../../../ffmpeg/
File:      src/libavcodec/x86/fdct.c
Date:      2021-09-24 20:55:06

            Exec   Total   Coverage
Lines:        38      38     100.0%
Branches:      4       4     100.0%

Line  Branch  Exec  Source
1 /*
2 * SIMD-optimized forward DCT
3 * The gcc porting is Copyright (c) 2001 Fabrice Bellard.
4 * cleanup/optimizations are Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5 * SSE2 optimization is Copyright (c) 2004 Denes Balatoni.
6 *
7 * from fdctam32.c - AP922 MMX(3D-Now) forward-DCT
8 *
9 * Intel Application Note AP-922 - fast, precise implementation of DCT
10 * http://developer.intel.com/vtune/cbts/appnotes.htm
11 *
12 * Also of inspiration:
13 * a page about fdct at http://www.geocities.com/ssavekar/dct.htm
14 * Skal's fdct at http://skal.planet-d.net/coding/dct.html
15 *
16 * This file is part of FFmpeg.
17 *
18 * FFmpeg is free software; you can redistribute it and/or
19 * modify it under the terms of the GNU Lesser General Public
20 * License as published by the Free Software Foundation; either
21 * version 2.1 of the License, or (at your option) any later version.
22 *
23 * FFmpeg is distributed in the hope that it will be useful,
24 * but WITHOUT ANY WARRANTY; without even the implied warranty of
25 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
26 * Lesser General Public License for more details.
27 *
28 * You should have received a copy of the GNU Lesser General Public
29 * License along with FFmpeg; if not, write to the Free Software
30 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
31 */
32
33 #include "libavutil/attributes.h"
34 #include "libavutil/common.h"
35 #include "libavutil/mem_internal.h"
36 #include "libavutil/x86/asm.h"
37 #include "fdct.h"
38
39 #if HAVE_MMX_INLINE
40
41 //////////////////////////////////////////////////////////////////////
42 //
43 // constants for the forward DCT
44 // -----------------------------
45 //
46 // Be sure to check that your compiler is aligning all constants to QWORD
47 // (8-byte) memory boundaries! Otherwise the unaligned memory access will
48 // severely stall MMX execution.
49 //
50 //////////////////////////////////////////////////////////////////////
51
52 #define BITS_FRW_ACC 3 //; 2 or 3 for accuracy
53 #define SHIFT_FRW_COL BITS_FRW_ACC
54 #define SHIFT_FRW_ROW (BITS_FRW_ACC + 17 - 3)
55 #define RND_FRW_ROW (1 << (SHIFT_FRW_ROW-1))
56 //#define RND_FRW_COL (1 << (SHIFT_FRW_COL-1))
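
With BITS_FRW_ACC = 3 these work out to SHIFT_FRW_COL = 3, SHIFT_FRW_ROW = 3 + 17 - 3 = 17 and RND_FRW_ROW = 1 << 16; the row pass adds the rounding constant before the arithmetic right shift. A scalar sketch of that rounding step (editorial, not part of the measured file):

    #include <stdint.h>

    /* Editorial sketch: the scalar meaning of the row rounding,
     * with SHIFT_FRW_ROW == 17 and RND_FRW_ROW == 1 << 16. */
    static inline int32_t frw_row_round_sketch(int32_t acc)
    {
        /* add "0.5" in the fixed-point scale, then arithmetic shift right;
         * the asm follows this with packssdw, which also saturates to int16 */
        return (acc + (1 << (17 - 1))) >> 17;
    }
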
57
58 #define X8(x) x,x,x,x,x,x,x,x
59
60 //concatenated table, for forward DCT transformation
61 DECLARE_ALIGNED(16, static const int16_t, fdct_tg_all_16)[24] = {
62 X8(13036), // tg * (2<<16) + 0.5
63 X8(27146), // tg * (2<<16) + 0.5
64 X8(-21746) // tg * (2<<16) + 0.5
65 };
66
67 DECLARE_ALIGNED(16, static const int16_t, ocos_4_16)[8] = {
68 X8(23170) //cos * (2<<15) + 0.5
69 };
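
The "tg * (2<<16) + 0.5" comments are terse; the stored values correspond to tan(k*pi/16) in 16-bit fixed point (the third wrapped into int16 range) and cos(pi/4) in 15-bit fixed point. A small editorial check, not part of the measured file (compile separately with -lm):

    #include <math.h>
    #include <stdio.h>

    int main(void)
    {
        /* Editorial reading of the constants above. */
        printf("%ld\n", lrint(tan(1 * M_PI / 16) * 65536));          /* 13036  */
        printf("%ld\n", lrint(tan(2 * M_PI / 16) * 65536));          /* 27146  */
        printf("%ld\n", lrint(tan(3 * M_PI / 16) * 65536) - 65536);  /* -21746 (wrapped to int16) */
        printf("%ld\n", lrint(cos(M_PI / 4) * 32768));               /* 23170  */
        return 0;
    }
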
70
71 DECLARE_ALIGNED(16, static const int16_t, fdct_one_corr)[8] = { X8(1) };
72
73 DECLARE_ALIGNED(8, static const int32_t, fdct_r_row)[2] = {RND_FRW_ROW, RND_FRW_ROW };
74
75 static const struct
76 {
77 DECLARE_ALIGNED(16, const int32_t, fdct_r_row_sse2)[4];
78 } fdct_r_row_sse2 =
79 {{
80 RND_FRW_ROW, RND_FRW_ROW, RND_FRW_ROW, RND_FRW_ROW
81 }};
82 //DECLARE_ALIGNED(16, static const long, fdct_r_row_sse2)[4] = {RND_FRW_ROW, RND_FRW_ROW, RND_FRW_ROW, RND_FRW_ROW};
83
84 DECLARE_ALIGNED(8, static const int16_t, tab_frw_01234567)[] = { // forward_dct coeff table
85 16384, 16384, 22725, 19266,
86 16384, 16384, 12873, 4520,
87 21407, 8867, 19266, -4520,
88 -8867, -21407, -22725, -12873,
89 16384, -16384, 12873, -22725,
90 -16384, 16384, 4520, 19266,
91 8867, -21407, 4520, -12873,
92 21407, -8867, 19266, -22725,
93
94 22725, 22725, 31521, 26722,
95 22725, 22725, 17855, 6270,
96 29692, 12299, 26722, -6270,
97 -12299, -29692, -31521, -17855,
98 22725, -22725, 17855, -31521,
99 -22725, 22725, 6270, 26722,
100 12299, -29692, 6270, -17855,
101 29692, -12299, 26722, -31521,
102
103 21407, 21407, 29692, 25172,
104 21407, 21407, 16819, 5906,
105 27969, 11585, 25172, -5906,
106 -11585, -27969, -29692, -16819,
107 21407, -21407, 16819, -29692,
108 -21407, 21407, 5906, 25172,
109 11585, -27969, 5906, -16819,
110 27969, -11585, 25172, -29692,
111
112 19266, 19266, 26722, 22654,
113 19266, 19266, 15137, 5315,
114 25172, 10426, 22654, -5315,
115 -10426, -25172, -26722, -15137,
116 19266, -19266, 15137, -26722,
117 -19266, 19266, 5315, 22654,
118 10426, -25172, 5315, -15137,
119 25172, -10426, 22654, -26722,
120
121 16384, 16384, 22725, 19266,
122 16384, 16384, 12873, 4520,
123 21407, 8867, 19266, -4520,
124 -8867, -21407, -22725, -12873,
125 16384, -16384, 12873, -22725,
126 -16384, 16384, 4520, 19266,
127 8867, -21407, 4520, -12873,
128 21407, -8867, 19266, -22725,
129
130 19266, 19266, 26722, 22654,
131 19266, 19266, 15137, 5315,
132 25172, 10426, 22654, -5315,
133 -10426, -25172, -26722, -15137,
134 19266, -19266, 15137, -26722,
135 -19266, 19266, 5315, 22654,
136 10426, -25172, 5315, -15137,
137 25172, -10426, 22654, -26722,
138
139 21407, 21407, 29692, 25172,
140 21407, 21407, 16819, 5906,
141 27969, 11585, 25172, -5906,
142 -11585, -27969, -29692, -16819,
143 21407, -21407, 16819, -29692,
144 -21407, 21407, 5906, 25172,
145 11585, -27969, 5906, -16819,
146 27969, -11585, 25172, -29692,
147
148 22725, 22725, 31521, 26722,
149 22725, 22725, 17855, 6270,
150 29692, 12299, 26722, -6270,
151 -12299, -29692, -31521, -17855,
152 22725, -22725, 17855, -31521,
153 -22725, 22725, 6270, 26722,
154 12299, -29692, 6270, -17855,
155 29692, -12299, 26722, -31521,
156 };
157
158 static const struct
159 {
160 DECLARE_ALIGNED(16, const int16_t, tab_frw_01234567_sse2)[256];
161 } tab_frw_01234567_sse2 =
162 {{
163 //DECLARE_ALIGNED(16, static const int16_t, tab_frw_01234567_sse2)[] = { // forward_dct coeff table
164 #define TABLE_SSE2 C4, C4, C1, C3, -C6, -C2, -C1, -C5, \
165 C4, C4, C5, C7, C2, C6, C3, -C7, \
166 -C4, C4, C7, C3, C6, -C2, C7, -C5, \
167 C4, -C4, C5, -C1, C2, -C6, C3, -C1,
168 // c1..c7 * cos(pi/4) * 2^15
169 #define C1 22725
170 #define C2 21407
171 #define C3 19266
172 #define C4 16384
173 #define C5 12873
174 #define C6 8867
175 #define C7 4520
176 TABLE_SSE2
177
178 #undef C1
179 #undef C2
180 #undef C3
181 #undef C4
182 #undef C5
183 #undef C6
184 #undef C7
185 #define C1 31521
186 #define C2 29692
187 #define C3 26722
188 #define C4 22725
189 #define C5 17855
190 #define C6 12299
191 #define C7 6270
192 TABLE_SSE2
193
194 #undef C1
195 #undef C2
196 #undef C3
197 #undef C4
198 #undef C5
199 #undef C6
200 #undef C7
201 #define C1 29692
202 #define C2 27969
203 #define C3 25172
204 #define C4 21407
205 #define C5 16819
206 #define C6 11585
207 #define C7 5906
208 TABLE_SSE2
209
210 #undef C1
211 #undef C2
212 #undef C3
213 #undef C4
214 #undef C5
215 #undef C6
216 #undef C7
217 #define C1 26722
218 #define C2 25172
219 #define C3 22654
220 #define C4 19266
221 #define C5 15137
222 #define C6 10426
223 #define C7 5315
224 TABLE_SSE2
225
226 #undef C1
227 #undef C2
228 #undef C3
229 #undef C4
230 #undef C5
231 #undef C6
232 #undef C7
233 #define C1 22725
234 #define C2 21407
235 #define C3 19266
236 #define C4 16384
237 #define C5 12873
238 #define C6 8867
239 #define C7 4520
240 TABLE_SSE2
241
242 #undef C1
243 #undef C2
244 #undef C3
245 #undef C4
246 #undef C5
247 #undef C6
248 #undef C7
249 #define C1 26722
250 #define C2 25172
251 #define C3 22654
252 #define C4 19266
253 #define C5 15137
254 #define C6 10426
255 #define C7 5315
256 TABLE_SSE2
257
258 #undef C1
259 #undef C2
260 #undef C3
261 #undef C4
262 #undef C5
263 #undef C6
264 #undef C7
265 #define C1 29692
266 #define C2 27969
267 #define C3 25172
268 #define C4 21407
269 #define C5 16819
270 #define C6 11585
271 #define C7 5906
272 TABLE_SSE2
273
274 #undef C1
275 #undef C2
276 #undef C3
277 #undef C4
278 #undef C5
279 #undef C6
280 #undef C7
281 #define C1 31521
282 #define C2 29692
283 #define C3 26722
284 #define C4 22725
285 #define C5 17855
286 #define C6 12299
287 #define C7 6270
288 TABLE_SSE2
289 }};
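
The first TABLE_SSE2 block can be reproduced from the "c1..c7 * cos(pi/4) * 2^15" comment; an editorial check (compile separately with -lm):

    #include <math.h>
    #include <stdio.h>

    int main(void)
    {
        /* First block: Ck ~= cos(k*pi/16) * cos(pi/4) * 2^15. */
        static const int expected[] = { 22725, 21407, 19266, 16384, 12873, 8867, 4520 };
        for (int k = 1; k <= 7; k++) {
            long v = lrint(cos(k * M_PI / 16) * cos(M_PI / 4) * 32768);
            printf("C%d = %ld (table: %d)\n", k, v, expected[k - 1]);
        }
        return 0;
    }
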
290
291 #define S(s) AV_TOSTRING(s) //AV_STRINGIFY is too long
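
S() is what splices the shift macros above into the asm templates below; in the MMX instantiation, for example:

    /* Editorial illustration of the S() splice (SHIFT_FRW_COL expands to 3):
     *
     *     "psllw $"S(SHIFT_FRW_COL)", %%"#mm"0 \n\t"
     *
     * becomes, after preprocessing and string concatenation,
     *
     *     "psllw $3, %%mm0 \n\t"
     */
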
292
293 #define FDCT_COL(cpu, mm, mov)\
294 static av_always_inline void fdct_col_##cpu(const int16_t *in, int16_t *out, int offset)\
295 {\
296 __asm__ volatile (\
297 #mov" 16(%0), %%"#mm"0 \n\t" \
298 #mov" 96(%0), %%"#mm"1 \n\t" \
299 #mov" %%"#mm"0, %%"#mm"2 \n\t" \
300 #mov" 32(%0), %%"#mm"3 \n\t" \
301 "paddsw %%"#mm"1, %%"#mm"0 \n\t" \
302 #mov" 80(%0), %%"#mm"4 \n\t" \
303 "psllw $"S(SHIFT_FRW_COL)", %%"#mm"0 \n\t" \
304 #mov" (%0), %%"#mm"5 \n\t" \
305 "paddsw %%"#mm"3, %%"#mm"4 \n\t" \
306 "paddsw 112(%0), %%"#mm"5 \n\t" \
307 "psllw $"S(SHIFT_FRW_COL)", %%"#mm"4 \n\t" \
308 #mov" %%"#mm"0, %%"#mm"6 \n\t" \
309 "psubsw %%"#mm"1, %%"#mm"2 \n\t" \
310 #mov" 16(%1), %%"#mm"1 \n\t" \
311 "psubsw %%"#mm"4, %%"#mm"0 \n\t" \
312 #mov" 48(%0), %%"#mm"7 \n\t" \
313 "pmulhw %%"#mm"0, %%"#mm"1 \n\t" \
314 "paddsw 64(%0), %%"#mm"7 \n\t" \
315 "psllw $"S(SHIFT_FRW_COL)", %%"#mm"5 \n\t" \
316 "paddsw %%"#mm"4, %%"#mm"6 \n\t" \
317 "psllw $"S(SHIFT_FRW_COL)", %%"#mm"7 \n\t" \
318 #mov" %%"#mm"5, %%"#mm"4 \n\t" \
319 "psubsw %%"#mm"7, %%"#mm"5 \n\t" \
320 "paddsw %%"#mm"5, %%"#mm"1 \n\t" \
321 "paddsw %%"#mm"7, %%"#mm"4 \n\t" \
322 "por (%2), %%"#mm"1 \n\t" \
323 "psllw $"S(SHIFT_FRW_COL)"+1, %%"#mm"2 \n\t" \
324 "pmulhw 16(%1), %%"#mm"5 \n\t" \
325 #mov" %%"#mm"4, %%"#mm"7 \n\t" \
326 "psubsw 80(%0), %%"#mm"3 \n\t" \
327 "psubsw %%"#mm"6, %%"#mm"4 \n\t" \
328 #mov" %%"#mm"1, 32(%3) \n\t" \
329 "paddsw %%"#mm"6, %%"#mm"7 \n\t" \
330 #mov" 48(%0), %%"#mm"1 \n\t" \
331 "psllw $"S(SHIFT_FRW_COL)"+1, %%"#mm"3 \n\t" \
332 "psubsw 64(%0), %%"#mm"1 \n\t" \
333 #mov" %%"#mm"2, %%"#mm"6 \n\t" \
334 #mov" %%"#mm"4, 64(%3) \n\t" \
335 "paddsw %%"#mm"3, %%"#mm"2 \n\t" \
336 "pmulhw (%4), %%"#mm"2 \n\t" \
337 "psubsw %%"#mm"3, %%"#mm"6 \n\t" \
338 "pmulhw (%4), %%"#mm"6 \n\t" \
339 "psubsw %%"#mm"0, %%"#mm"5 \n\t" \
340 "por (%2), %%"#mm"5 \n\t" \
341 "psllw $"S(SHIFT_FRW_COL)", %%"#mm"1 \n\t" \
342 "por (%2), %%"#mm"2 \n\t" \
343 #mov" %%"#mm"1, %%"#mm"4 \n\t" \
344 #mov" (%0), %%"#mm"3 \n\t" \
345 "paddsw %%"#mm"6, %%"#mm"1 \n\t" \
346 "psubsw 112(%0), %%"#mm"3 \n\t" \
347 "psubsw %%"#mm"6, %%"#mm"4 \n\t" \
348 #mov" (%1), %%"#mm"0 \n\t" \
349 "psllw $"S(SHIFT_FRW_COL)", %%"#mm"3 \n\t" \
350 #mov" 32(%1), %%"#mm"6 \n\t" \
351 "pmulhw %%"#mm"1, %%"#mm"0 \n\t" \
352 #mov" %%"#mm"7, (%3) \n\t" \
353 "pmulhw %%"#mm"4, %%"#mm"6 \n\t" \
354 #mov" %%"#mm"5, 96(%3) \n\t" \
355 #mov" %%"#mm"3, %%"#mm"7 \n\t" \
356 #mov" 32(%1), %%"#mm"5 \n\t" \
357 "psubsw %%"#mm"2, %%"#mm"7 \n\t" \
358 "paddsw %%"#mm"2, %%"#mm"3 \n\t" \
359 "pmulhw %%"#mm"7, %%"#mm"5 \n\t" \
360 "paddsw %%"#mm"3, %%"#mm"0 \n\t" \
361 "paddsw %%"#mm"4, %%"#mm"6 \n\t" \
362 "pmulhw (%1), %%"#mm"3 \n\t" \
363 "por (%2), %%"#mm"0 \n\t" \
364 "paddsw %%"#mm"7, %%"#mm"5 \n\t" \
365 "psubsw %%"#mm"6, %%"#mm"7 \n\t" \
366 #mov" %%"#mm"0, 16(%3) \n\t" \
367 "paddsw %%"#mm"4, %%"#mm"5 \n\t" \
368 #mov" %%"#mm"7, 48(%3) \n\t" \
369 "psubsw %%"#mm"1, %%"#mm"3 \n\t" \
370 #mov" %%"#mm"5, 80(%3) \n\t" \
371 #mov" %%"#mm"3, 112(%3) \n\t" \
372 : \
373 : "r" (in + offset), "r" (fdct_tg_all_16), "r" (fdct_one_corr), \
374 "r" (out + offset), "r" (ocos_4_16)); \
375 }
376
377 80000 FDCT_COL(mmx, mm, movq)
378 20000 FDCT_COL(sse2, xmm, movdqa)
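
A note on the two expansions (editorial):

    /* FDCT_COL(mmx, mm, movq) emits fdct_col_mmx() over mm0..mm7
     * (movq, 4 int16 per register, so one call covers four columns);
     * FDCT_COL(sse2, xmm, movdqa) emits fdct_col_sse2() over xmm0..xmm7
     * (movdqa, 8 int16 per register, so one call covers all eight columns). */
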
379
380 20000 static av_always_inline void fdct_row_sse2(const int16_t *in, int16_t *out)
381 {
382 20000 __asm__ volatile(
383 #define FDCT_ROW_SSE2_H1(i,t) \
384 "movq " #i "(%0), %%xmm2 \n\t" \
385 "movq " #i "+8(%0), %%xmm0 \n\t" \
386 "movdqa " #t "+32(%1), %%xmm3 \n\t" \
387 "movdqa " #t "+48(%1), %%xmm7 \n\t" \
388 "movdqa " #t "(%1), %%xmm4 \n\t" \
389 "movdqa " #t "+16(%1), %%xmm5 \n\t"
390
391 #define FDCT_ROW_SSE2_H2(i,t) \
392 "movq " #i "(%0), %%xmm2 \n\t" \
393 "movq " #i "+8(%0), %%xmm0 \n\t" \
394 "movdqa " #t "+32(%1), %%xmm3 \n\t" \
395 "movdqa " #t "+48(%1), %%xmm7 \n\t"
396
397 #define FDCT_ROW_SSE2(i) \
398 "movq %%xmm2, %%xmm1 \n\t" \
399 "pshuflw $27, %%xmm0, %%xmm0 \n\t" \
400 "paddsw %%xmm0, %%xmm1 \n\t" \
401 "psubsw %%xmm0, %%xmm2 \n\t" \
402 "punpckldq %%xmm2, %%xmm1 \n\t" \
403 "pshufd $78, %%xmm1, %%xmm2 \n\t" \
404 "pmaddwd %%xmm2, %%xmm3 \n\t" \
405 "pmaddwd %%xmm1, %%xmm7 \n\t" \
406 "pmaddwd %%xmm5, %%xmm2 \n\t" \
407 "pmaddwd %%xmm4, %%xmm1 \n\t" \
408 "paddd %%xmm7, %%xmm3 \n\t" \
409 "paddd %%xmm2, %%xmm1 \n\t" \
410 "paddd %%xmm6, %%xmm3 \n\t" \
411 "paddd %%xmm6, %%xmm1 \n\t" \
412 "psrad %3, %%xmm3 \n\t" \
413 "psrad %3, %%xmm1 \n\t" \
414 "packssdw %%xmm3, %%xmm1 \n\t" \
415 "movdqa %%xmm1, " #i "(%4) \n\t"
416
417 "movdqa (%2), %%xmm6 \n\t"
418 FDCT_ROW_SSE2_H1(0,0)
419 FDCT_ROW_SSE2(0)
420 FDCT_ROW_SSE2_H2(64,0)
421 FDCT_ROW_SSE2(64)
422
423 FDCT_ROW_SSE2_H1(16,64)
424 FDCT_ROW_SSE2(16)
425 FDCT_ROW_SSE2_H2(112,64)
426 FDCT_ROW_SSE2(112)
427
428 FDCT_ROW_SSE2_H1(32,128)
429 FDCT_ROW_SSE2(32)
430 FDCT_ROW_SSE2_H2(96,128)
431 FDCT_ROW_SSE2(96)
432
433 FDCT_ROW_SSE2_H1(48,192)
434 FDCT_ROW_SSE2(48)
435 FDCT_ROW_SSE2_H2(80,192)
436 FDCT_ROW_SSE2(80)
437 :
438 : "r" (in), "r" (tab_frw_01234567_sse2.tab_frw_01234567_sse2),
439 "r" (fdct_r_row_sse2.fdct_r_row_sse2), "i" (SHIFT_FRW_ROW), "r" (out)
440 XMM_CLOBBERS_ONLY("%xmm0", "%xmm1", "%xmm2", "%xmm3",
441 "%xmm4", "%xmm5", "%xmm6", "%xmm7")
442 );
443 20000 }
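
A reading aid for the block above (editorial):

    /* Each FDCT_ROW_SSE2_H1/H2 pair transforms two rows with the same table
     * offset; the duplicated 32-coefficient blocks in tab_frw_01234567 above
     * show why rows can pair up like this.  Row byte offsets (16 bytes per
     * row of eight int16):
     *     0 & 64  -> rows 0 and 4      16 & 112 -> rows 1 and 7
     *     32 & 96 -> rows 2 and 6      48 & 80  -> rows 3 and 5
     * H2 reloads only xmm3/xmm7 because FDCT_ROW_SSE2 overwrites them as
     * pmaddwd destinations, while xmm4/xmm5 are read-only and survive from
     * the preceding H1. */
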
444
445 160000 static av_always_inline void fdct_row_mmxext(const int16_t *in, int16_t *out,
446 const int16_t *table)
447 {
448 160000 __asm__ volatile (
449 "pshufw $0x1B, 8(%0), %%mm5 \n\t"
450 "movq (%0), %%mm0 \n\t"
451 "movq %%mm0, %%mm1 \n\t"
452 "paddsw %%mm5, %%mm0 \n\t"
453 "psubsw %%mm5, %%mm1 \n\t"
454 "movq %%mm0, %%mm2 \n\t"
455 "punpckldq %%mm1, %%mm0 \n\t"
456 "punpckhdq %%mm1, %%mm2 \n\t"
457 "movq (%1), %%mm1 \n\t"
458 "movq 8(%1), %%mm3 \n\t"
459 "movq 16(%1), %%mm4 \n\t"
460 "movq 24(%1), %%mm5 \n\t"
461 "movq 32(%1), %%mm6 \n\t"
462 "movq 40(%1), %%mm7 \n\t"
463 "pmaddwd %%mm0, %%mm1 \n\t"
464 "pmaddwd %%mm2, %%mm3 \n\t"
465 "pmaddwd %%mm0, %%mm4 \n\t"
466 "pmaddwd %%mm2, %%mm5 \n\t"
467 "pmaddwd %%mm0, %%mm6 \n\t"
468 "pmaddwd %%mm2, %%mm7 \n\t"
469 "pmaddwd 48(%1), %%mm0 \n\t"
470 "pmaddwd 56(%1), %%mm2 \n\t"
471 "paddd %%mm1, %%mm3 \n\t"
472 "paddd %%mm4, %%mm5 \n\t"
473 "paddd %%mm6, %%mm7 \n\t"
474 "paddd %%mm0, %%mm2 \n\t"
475 "movq (%2), %%mm0 \n\t"
476 "paddd %%mm0, %%mm3 \n\t"
477 "paddd %%mm0, %%mm5 \n\t"
478 "paddd %%mm0, %%mm7 \n\t"
479 "paddd %%mm0, %%mm2 \n\t"
480 "psrad $"S(SHIFT_FRW_ROW)", %%mm3 \n\t"
481 "psrad $"S(SHIFT_FRW_ROW)", %%mm5 \n\t"
482 "psrad $"S(SHIFT_FRW_ROW)", %%mm7 \n\t"
483 "psrad $"S(SHIFT_FRW_ROW)", %%mm2 \n\t"
484 "packssdw %%mm5, %%mm3 \n\t"
485 "packssdw %%mm2, %%mm7 \n\t"
486 "movq %%mm3, (%3) \n\t"
487 "movq %%mm7, 8(%3) \n\t"
488 :
489 : "r" (in), "r" (table), "r" (fdct_r_row), "r" (out));
490 160000 }
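
A scalar equivalent of the function's opening butterfly (editorial sketch; the name is illustrative):

    #include <stdint.h>

    /* pshufw $0x1B reverses words 4..7 of the row, so the paddsw/psubsw pair
     * forms in[i]+in[7-i] and in[i]-in[7-i]; saturation of the packed ops
     * is omitted here. */
    static inline void row_butterfly_sketch(const int16_t in[8],
                                            int16_t sum[4], int16_t diff[4])
    {
        for (int i = 0; i < 4; i++) {
            sum[i]  = in[i] + in[7 - i];   /* paddsw %%mm5, %%mm0 */
            diff[i] = in[i] - in[7 - i];   /* psubsw %%mm5, %%mm1 */
        }
    }
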
491
492 160000 static av_always_inline void fdct_row_mmx(const int16_t *in, int16_t *out, const int16_t *table)
493 {
494 //FIXME reorder (I do not have an old MMX-only CPU here to benchmark ...)
495 160000 __asm__ volatile(
496 "movd 12(%0), %%mm1 \n\t"
497 "punpcklwd 8(%0), %%mm1 \n\t"
498 "movq %%mm1, %%mm2 \n\t"
499 "psrlq $0x20, %%mm1 \n\t"
500 "movq 0(%0), %%mm0 \n\t"
501 "punpcklwd %%mm2, %%mm1 \n\t"
502 "movq %%mm0, %%mm5 \n\t"
503 "paddsw %%mm1, %%mm0 \n\t"
504 "psubsw %%mm1, %%mm5 \n\t"
505 "movq %%mm0, %%mm2 \n\t"
506 "punpckldq %%mm5, %%mm0 \n\t"
507 "punpckhdq %%mm5, %%mm2 \n\t"
508 "movq 0(%1), %%mm1 \n\t"
509 "movq 8(%1), %%mm3 \n\t"
510 "movq 16(%1), %%mm4 \n\t"
511 "movq 24(%1), %%mm5 \n\t"
512 "movq 32(%1), %%mm6 \n\t"
513 "movq 40(%1), %%mm7 \n\t"
514 "pmaddwd %%mm0, %%mm1 \n\t"
515 "pmaddwd %%mm2, %%mm3 \n\t"
516 "pmaddwd %%mm0, %%mm4 \n\t"
517 "pmaddwd %%mm2, %%mm5 \n\t"
518 "pmaddwd %%mm0, %%mm6 \n\t"
519 "pmaddwd %%mm2, %%mm7 \n\t"
520 "pmaddwd 48(%1), %%mm0 \n\t"
521 "pmaddwd 56(%1), %%mm2 \n\t"
522 "paddd %%mm1, %%mm3 \n\t"
523 "paddd %%mm4, %%mm5 \n\t"
524 "paddd %%mm6, %%mm7 \n\t"
525 "paddd %%mm0, %%mm2 \n\t"
526 "movq (%2), %%mm0 \n\t"
527 "paddd %%mm0, %%mm3 \n\t"
528 "paddd %%mm0, %%mm5 \n\t"
529 "paddd %%mm0, %%mm7 \n\t"
530 "paddd %%mm0, %%mm2 \n\t"
531 "psrad $"S(SHIFT_FRW_ROW)", %%mm3 \n\t"
532 "psrad $"S(SHIFT_FRW_ROW)", %%mm5 \n\t"
533 "psrad $"S(SHIFT_FRW_ROW)", %%mm7 \n\t"
534 "psrad $"S(SHIFT_FRW_ROW)", %%mm2 \n\t"
535 "packssdw %%mm5, %%mm3 \n\t"
536 "packssdw %%mm2, %%mm7 \n\t"
537 "movq %%mm3, 0(%3) \n\t"
538 "movq %%mm7, 8(%3) \n\t"
539 :
540 : "r" (in), "r" (table), "r" (fdct_r_row), "r" (out));
541 160000 }
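
A reading aid (editorial):

    /* fdct_row_mmx computes the same row transform as fdct_row_mmxext above;
     * plain MMX has no pshufw, so the movd/punpcklwd/psrlq/punpcklwd prologue
     * assembles the reversed upper half [w7 w6 w5 w4] in mm1 by hand before
     * the same paddsw/psubsw butterfly and pmaddwd accumulation. */
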
542
543 20000 void ff_fdct_mmx(int16_t *block)
544 {
545 DECLARE_ALIGNED(8, int64_t, align_tmp)[16];
546 20000 int16_t * block1= (int16_t*)align_tmp;
547 20000 const int16_t *table= tab_frw_01234567;
548 int i;
549
550 20000 fdct_col_mmx(block, block1, 0);
551 20000 fdct_col_mmx(block, block1, 4);
552
553
2/2   ✓ Branch 0 taken 160000 times.   ✓ Branch 1 taken 20000 times.
180000 for(i=8;i>0;i--) {
554 160000 fdct_row_mmx(block1, block, table);
555 160000 block1 += 8;
556 160000 table += 32;
557 160000 block += 8;
558 }
559 20000 }
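
Callers pass one 8x8 block of int16 samples in row-major order and the transform is done in place. A minimal usage sketch (the function name and test pattern are hypothetical, not taken from the file):

    #include <stdint.h>
    #include "libavutil/mem_internal.h"   /* DECLARE_ALIGNED */
    #include "fdct.h"                     /* ff_fdct_mmx() */

    /* Hypothetical caller: forward-DCT one 8x8 block in place. */
    static void fdct_one_block_example(void)
    {
        /* 16-byte alignment also satisfies the movdqa accesses of the SSE2 path. */
        DECLARE_ALIGNED(16, int16_t, block)[64];

        for (int i = 0; i < 64; i++)
            block[i] = (int16_t)(i & 7);   /* arbitrary test data */

        ff_fdct_mmx(block);                /* block now holds the DCT coefficients */
    }
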
560
561 #endif /* HAVE_MMX_INLINE */
562
563 #if HAVE_MMXEXT_INLINE
564
565 20000 void ff_fdct_mmxext(int16_t *block)
566 {
567 DECLARE_ALIGNED(8, int64_t, align_tmp)[16];
568 20000 int16_t *block1= (int16_t*)align_tmp;
569 20000 const int16_t *table= tab_frw_01234567;
570 int i;
571
572 20000 fdct_col_mmx(block, block1, 0);
573 20000 fdct_col_mmx(block, block1, 4);
574
575
2/2   ✓ Branch 0 taken 160000 times.   ✓ Branch 1 taken 20000 times.
180000 for(i=8;i>0;i--) {
576 160000 fdct_row_mmxext(block1, block, table);
577 160000 block1 += 8;
578 160000 table += 32;
579 160000 block += 8;
580 }
581 20000 }
582
583 #endif /* HAVE_MMXEXT_INLINE */
584
585 #if HAVE_SSE2_INLINE
586
587 20000 void ff_fdct_sse2(int16_t *block)
588 {
589 DECLARE_ALIGNED(16, int64_t, align_tmp)[16];
590 20000 int16_t * const block1= (int16_t*)align_tmp;
591
592 20000 fdct_col_sse2(block, block1, 0);
593 20000 fdct_row_sse2(block1, block);
594 20000 }
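
A closing reading aid (editorial): register width dictates the shape of the three drivers.

    /* MMX/MMXEXT: 4 int16 per mm register  -> two column calls (offsets 0 and 4)
     *             plus eight row calls, one per row.
     * SSE2:       8 int16 per xmm register -> one column call and one row call
     *             cover the whole 8x8 block. */
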
595
596 #endif /* HAVE_SSE2_INLINE */
597