GCC Code Coverage Report

Directory: ../../../ffmpeg/
File:      src/libavcodec/x86/cavsdsp.c
Date:      2020-08-14 10:39:37

            Exec   Total   Coverage
Lines:         8      68     11.8 %
Branches:      5      26     19.2 %

Only the eight executable lines of ff_cavsdsp_init_x86() at the end of the
file were hit (4 executions each); none of the DSP kernels were exercised.
/*
 * Chinese AVS video (AVS1-P2, JiZhun profile) decoder.
 * Copyright (c) 2006  Stefan Gehrer <stefan.gehrer@gmx.de>
 *
 * MMX-optimized DSP functions, based on H.264 optimizations by
 * Michael Niedermayer and Loren Merritt
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/attributes.h"
#include "libavutil/common.h"
#include "libavutil/cpu.h"
#include "libavutil/x86/asm.h"
#include "libavutil/x86/cpu.h"
#include "libavcodec/cavsdsp.h"
#include "libavcodec/idctdsp.h"
#include "constants.h"
#include "fpel.h"
#include "idctdsp.h"
#include "config.h"


#if HAVE_MMX_EXTERNAL

void ff_cavs_idct8_mmx(int16_t *out, const int16_t *in);

static void cavs_idct8_add_mmx(uint8_t *dst, int16_t *block, ptrdiff_t stride)
{
    LOCAL_ALIGNED(16, int16_t, b2, [64]);
    ff_cavs_idct8_mmx(b2, block);
    ff_add_pixels_clamped_mmx(b2, dst, stride);
}

void ff_cavs_idct8_sse2(int16_t *out, const int16_t *in);

static void cavs_idct8_add_sse2(uint8_t *dst, int16_t *block, ptrdiff_t stride)
{
    LOCAL_ALIGNED(16, int16_t, b2, [64]);
    ff_cavs_idct8_sse2(b2, block);
    ff_add_pixels_clamped_sse2(b2, dst, stride);
}
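
/*
 * Both wrappers share one pattern: run the external asm IDCT into an
 * aligned scratch block, then add that residual to the prediction already
 * in dst with unsigned 8-bit saturation. A scalar model of the final add
 * (a sketch for illustration, not part of the build):
 *
 *     for (i = 0; i < 8; i++)
 *         for (j = 0; j < 8; j++)
 *             dst[i * stride + j] =
 *                 av_clip_uint8(dst[i * stride + j] + b2[8 * i + j]);
 */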

#endif /* HAVE_MMX_EXTERNAL */

#if (HAVE_MMXEXT_INLINE || HAVE_AMD3DNOW_INLINE)

/*****************************************************************************
 *
 * motion compensation
 *
 ****************************************************************************/

/* vertical filter [-1 -2 96 42 -7  0]  */
#define QPEL_CAVSV1(A,B,C,D,E,F,OP,ADD, MUL1, MUL2) \
        "movd (%0), "#F"            \n\t"\
        "movq "#C", %%mm6           \n\t"\
        "pmullw "MANGLE(MUL1)", %%mm6\n\t"\
        "movq "#D", %%mm7           \n\t"\
        "pmullw "MANGLE(MUL2)", %%mm7\n\t"\
        "psllw $3, "#E"             \n\t"\
        "psubw "#E", %%mm6          \n\t"\
        "psraw $3, "#E"             \n\t"\
        "paddw %%mm7, %%mm6         \n\t"\
        "paddw "#E", %%mm6          \n\t"\
        "paddw "#B", "#B"           \n\t"\
        "pxor %%mm7, %%mm7          \n\t"\
        "add %2, %0                 \n\t"\
        "punpcklbw %%mm7, "#F"      \n\t"\
        "psubw "#B", %%mm6          \n\t"\
        "psraw $1, "#B"             \n\t"\
        "psubw "#A", %%mm6          \n\t"\
        "paddw "MANGLE(ADD)", %%mm6 \n\t"\
        "psraw $7, %%mm6            \n\t"\
        "packuswb %%mm6, %%mm6      \n\t"\
        OP(%%mm6, (%1), A, d)            \
        "add %3, %1                 \n\t"

/* vertical filter [ 0 -1  5  5 -1  0]  */
#define QPEL_CAVSV2(A,B,C,D,E,F,OP,ADD, MUL1, MUL2) \
        "movd (%0), "#F"            \n\t"\
        "movq "#C", %%mm6           \n\t"\
        "paddw "#D", %%mm6          \n\t"\
        "pmullw "MANGLE(MUL1)", %%mm6\n\t"\
        "add %2, %0                 \n\t"\
        "punpcklbw %%mm7, "#F"      \n\t"\
        "psubw "#B", %%mm6          \n\t"\
        "psubw "#E", %%mm6          \n\t"\
        "paddw "MANGLE(ADD)", %%mm6 \n\t"\
        "psraw $3, %%mm6            \n\t"\
        "packuswb %%mm6, %%mm6      \n\t"\
        OP(%%mm6, (%1), A, d)            \
        "add %3, %1                 \n\t"

/* vertical filter [ 0 -7 42 96 -2 -1]  */
#define QPEL_CAVSV3(A,B,C,D,E,F,OP,ADD, MUL1, MUL2) \
        "movd (%0), "#F"            \n\t"\
        "movq "#C", %%mm6           \n\t"\
        "pmullw "MANGLE(MUL2)", %%mm6\n\t"\
        "movq "#D", %%mm7           \n\t"\
        "pmullw "MANGLE(MUL1)", %%mm7\n\t"\
        "psllw $3, "#B"             \n\t"\
        "psubw "#B", %%mm6          \n\t"\
        "psraw $3, "#B"             \n\t"\
        "paddw %%mm7, %%mm6         \n\t"\
        "paddw "#B", %%mm6          \n\t"\
        "paddw "#E", "#E"           \n\t"\
        "pxor %%mm7, %%mm7          \n\t"\
        "add %2, %0                 \n\t"\
        "punpcklbw %%mm7, "#F"      \n\t"\
        "psubw "#E", %%mm6          \n\t"\
        "psraw $1, "#E"             \n\t"\
        "psubw "#F", %%mm6          \n\t"\
        "paddw "MANGLE(ADD)", %%mm6 \n\t"\
        "psraw $7, %%mm6            \n\t"\
        "packuswb %%mm6, %%mm6      \n\t"\
        OP(%%mm6, (%1), A, d)            \
        "add %3, %1                 \n\t"


#define QPEL_CAVSVNUM(VOP,OP,ADD,MUL1,MUL2)\
    int w= 2;\
    src -= 2*srcStride;\
    \
    while(w--){\
      __asm__ volatile(\
        "pxor %%mm7, %%mm7          \n\t"\
        "movd (%0), %%mm0           \n\t"\
        "add %2, %0                 \n\t"\
        "movd (%0), %%mm1           \n\t"\
        "add %2, %0                 \n\t"\
        "movd (%0), %%mm2           \n\t"\
        "add %2, %0                 \n\t"\
        "movd (%0), %%mm3           \n\t"\
        "add %2, %0                 \n\t"\
        "movd (%0), %%mm4           \n\t"\
        "add %2, %0                 \n\t"\
        "punpcklbw %%mm7, %%mm0     \n\t"\
        "punpcklbw %%mm7, %%mm1     \n\t"\
        "punpcklbw %%mm7, %%mm2     \n\t"\
        "punpcklbw %%mm7, %%mm3     \n\t"\
        "punpcklbw %%mm7, %%mm4     \n\t"\
        VOP(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP, ADD, MUL1, MUL2)\
        VOP(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP, ADD, MUL1, MUL2)\
        VOP(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP, ADD, MUL1, MUL2)\
        VOP(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP, ADD, MUL1, MUL2)\
        VOP(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, OP, ADD, MUL1, MUL2)\
        VOP(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, OP, ADD, MUL1, MUL2)\
        VOP(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP, ADD, MUL1, MUL2)\
        VOP(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP, ADD, MUL1, MUL2)\
        \
        : "+a"(src), "+c"(dst)\
        : "S"((x86_reg)srcStride), "r"((x86_reg)dstStride)\
          NAMED_CONSTRAINTS_ADD(ADD,MUL1,MUL2)\
        : "memory"\
     );\
     if(h==16){\
        __asm__ volatile(\
            VOP(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP, ADD, MUL1, MUL2)\
            VOP(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP, ADD, MUL1, MUL2)\
            VOP(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, OP, ADD, MUL1, MUL2)\
            VOP(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, OP, ADD, MUL1, MUL2)\
            VOP(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP, ADD, MUL1, MUL2)\
            VOP(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP, ADD, MUL1, MUL2)\
            VOP(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP, ADD, MUL1, MUL2)\
            VOP(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP, ADD, MUL1, MUL2)\
            \
           : "+a"(src), "+c"(dst)\
           : "S"((x86_reg)srcStride), "r"((x86_reg)dstStride)\
             NAMED_CONSTRAINTS_ADD(ADD,MUL1,MUL2)\
           : "memory"\
        );\
     }\
     src += 4-(h+5)*srcStride;\
     dst += 4-h*dstStride;\
   }
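
/*
 * QPEL_CAVSVNUM runs one of the VOP macros down two 4-pixel-wide columns
 * (w = 2). Five source rows are preloaded into mm0-mm4 and unpacked to
 * words; each VOP emits one output row and loads the following source row
 * into its F argument, so the register names rotate through mm0-mm5
 * instead of any data being moved. Eight VOP steps cover h = 8, and the
 * second asm block continues the same rotation for the h = 16 case.
 */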

#define QPEL_CAVS(OPNAME, OP, MMX)\
static void OPNAME ## cavs_qpel8_h_ ## MMX(uint8_t *dst, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride)\
{\
    int h=8;\
    __asm__ volatile(\
        "pxor %%mm7, %%mm7          \n\t"\
        "movq "MANGLE(ff_pw_5)", %%mm6\n\t"\
        "1:                         \n\t"\
        "movq    (%0), %%mm0        \n\t"\
        "movq   1(%0), %%mm2        \n\t"\
        "movq %%mm0, %%mm1          \n\t"\
        "movq %%mm2, %%mm3          \n\t"\
        "punpcklbw %%mm7, %%mm0     \n\t"\
        "punpckhbw %%mm7, %%mm1     \n\t"\
        "punpcklbw %%mm7, %%mm2     \n\t"\
        "punpckhbw %%mm7, %%mm3     \n\t"\
        "paddw %%mm2, %%mm0         \n\t"\
        "paddw %%mm3, %%mm1         \n\t"\
        "pmullw %%mm6, %%mm0        \n\t"\
        "pmullw %%mm6, %%mm1        \n\t"\
        "movq   -1(%0), %%mm2       \n\t"\
        "movq    2(%0), %%mm4       \n\t"\
        "movq %%mm2, %%mm3          \n\t"\
        "movq %%mm4, %%mm5          \n\t"\
        "punpcklbw %%mm7, %%mm2     \n\t"\
        "punpckhbw %%mm7, %%mm3     \n\t"\
        "punpcklbw %%mm7, %%mm4     \n\t"\
        "punpckhbw %%mm7, %%mm5     \n\t"\
        "paddw %%mm4, %%mm2         \n\t"\
        "paddw %%mm3, %%mm5         \n\t"\
        "psubw %%mm2, %%mm0         \n\t"\
        "psubw %%mm5, %%mm1         \n\t"\
        "movq "MANGLE(ff_pw_4)", %%mm5\n\t"\
        "paddw %%mm5, %%mm0         \n\t"\
        "paddw %%mm5, %%mm1         \n\t"\
        "psraw $3, %%mm0            \n\t"\
        "psraw $3, %%mm1            \n\t"\
        "packuswb %%mm1, %%mm0      \n\t"\
        OP(%%mm0, (%1),%%mm5, q)         \
        "add %3, %0                 \n\t"\
        "add %4, %1                 \n\t"\
        "decl %2                    \n\t"\
        " jnz 1b                    \n\t"\
        : "+a"(src), "+c"(dst), "+m"(h)\
        : "d"((x86_reg)srcStride), "S"((x86_reg)dstStride)\
          NAMED_CONSTRAINTS_ADD(ff_pw_4,ff_pw_5)\
        : "memory"\
    );\
}\
\
static inline void OPNAME ## cavs_qpel8or16_v1_ ## MMX(uint8_t *dst, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride, int h)\
{                                                                       \
  QPEL_CAVSVNUM(QPEL_CAVSV1,OP,ff_pw_64,ff_pw_96,ff_pw_42)      \
}\
\
static inline void OPNAME ## cavs_qpel8or16_v2_ ## MMX(uint8_t *dst, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride, int h)\
{                                                                       \
  QPEL_CAVSVNUM(QPEL_CAVSV2,OP,ff_pw_4,ff_pw_5,ff_pw_42)        \
}\
\
static inline void OPNAME ## cavs_qpel8or16_v3_ ## MMX(uint8_t *dst, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride, int h)\
{                                                                       \
  QPEL_CAVSVNUM(QPEL_CAVSV3,OP,ff_pw_64,ff_pw_96,ff_pw_42)      \
}\
\
static void OPNAME ## cavs_qpel8_v1_ ## MMX(uint8_t *dst, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride)\
{                                                                       \
    OPNAME ## cavs_qpel8or16_v1_ ## MMX(dst  , src  , dstStride, srcStride, 8);\
}\
static void OPNAME ## cavs_qpel16_v1_ ## MMX(uint8_t *dst, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride)\
{                                                                       \
    OPNAME ## cavs_qpel8or16_v1_ ## MMX(dst  , src  , dstStride, srcStride, 16);\
    OPNAME ## cavs_qpel8or16_v1_ ## MMX(dst+8, src+8, dstStride, srcStride, 16);\
}\
\
static void OPNAME ## cavs_qpel8_v2_ ## MMX(uint8_t *dst, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride)\
{                                                                       \
    OPNAME ## cavs_qpel8or16_v2_ ## MMX(dst  , src  , dstStride, srcStride, 8);\
}\
static void OPNAME ## cavs_qpel16_v2_ ## MMX(uint8_t *dst, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride)\
{                                                                       \
    OPNAME ## cavs_qpel8or16_v2_ ## MMX(dst  , src  , dstStride, srcStride, 16);\
    OPNAME ## cavs_qpel8or16_v2_ ## MMX(dst+8, src+8, dstStride, srcStride, 16);\
}\
\
static void OPNAME ## cavs_qpel8_v3_ ## MMX(uint8_t *dst, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride)\
{                                                                       \
    OPNAME ## cavs_qpel8or16_v3_ ## MMX(dst  , src  , dstStride, srcStride, 8);\
}\
static void OPNAME ## cavs_qpel16_v3_ ## MMX(uint8_t *dst, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride)\
{                                                                       \
    OPNAME ## cavs_qpel8or16_v3_ ## MMX(dst  , src  , dstStride, srcStride, 16);\
    OPNAME ## cavs_qpel8or16_v3_ ## MMX(dst+8, src+8, dstStride, srcStride, 16);\
}\
\
static void OPNAME ## cavs_qpel16_h_ ## MMX(uint8_t *dst, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride)\
{                                                                       \
    OPNAME ## cavs_qpel8_h_ ## MMX(dst  , src  , dstStride, srcStride);\
    OPNAME ## cavs_qpel8_h_ ## MMX(dst+8, src+8, dstStride, srcStride);\
    src += 8*srcStride;\
    dst += 8*dstStride;\
    OPNAME ## cavs_qpel8_h_ ## MMX(dst  , src  , dstStride, srcStride);\
    OPNAME ## cavs_qpel8_h_ ## MMX(dst+8, src+8, dstStride, srcStride);\
}\
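
/*
 * The horizontal kernel above applies the 4-tap filter [-1 5 5 -1] with
 * rounding: clip_uint8((-A + 5*B + 5*C - D + 4) >> 3) per pixel. The
 * following scalar reference of the put case is a hypothetical helper
 * (note the #if 0), included only to document the arithmetic:
 */
#if 0
static void cavs_qpel8_h_ref(uint8_t *dst, const uint8_t *src,
                             ptrdiff_t dstStride, ptrdiff_t srcStride)
{
    for (int y = 0; y < 8; y++) {
        for (int x = 0; x < 8; x++)
            dst[x] = av_clip_uint8((-src[x - 1] + 5 * src[x] +
                                    5 * src[x + 1] - src[x + 2] + 4) >> 3);
        src += srcStride;
        dst += dstStride;
    }
}
#endif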

#define CAVS_MC(OPNAME, SIZE, MMX) \
static void OPNAME ## cavs_qpel ## SIZE ## _mc20_ ## MMX(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    OPNAME ## cavs_qpel ## SIZE ## _h_ ## MMX(dst, src, stride, stride);\
}\
\
static void OPNAME ## cavs_qpel ## SIZE ## _mc01_ ## MMX(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    OPNAME ## cavs_qpel ## SIZE ## _v1_ ## MMX(dst, src, stride, stride);\
}\
\
static void OPNAME ## cavs_qpel ## SIZE ## _mc02_ ## MMX(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    OPNAME ## cavs_qpel ## SIZE ## _v2_ ## MMX(dst, src, stride, stride);\
}\
\
static void OPNAME ## cavs_qpel ## SIZE ## _mc03_ ## MMX(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    OPNAME ## cavs_qpel ## SIZE ## _v3_ ## MMX(dst, src, stride, stride);\
}\
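
/*
 * The _mcXY suffix follows the usual qpel convention: X is the horizontal
 * and Y the vertical quarter-sample offset. Only the positions CAVS needs
 * from this file are generated: mc20 (horizontal half-pel) and
 * mc01/mc02/mc03 (vertical quarter-, half- and three-quarter-pel).
 */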

#define PUT_OP(a,b,temp, size) "mov" #size " " #a ", " #b "    \n\t"
#define AVG_3DNOW_OP(a,b,temp, size) \
"mov" #size " " #b ", " #temp "   \n\t"\
"pavgusb " #temp ", " #a "        \n\t"\
"mov" #size " " #a ", " #b "      \n\t"
#define AVG_MMXEXT_OP(a, b, temp, size) \
"mov" #size " " #b ", " #temp "   \n\t"\
"pavgb " #temp ", " #a "          \n\t"\
"mov" #size " " #a ", " #b "      \n\t"

#endif /* (HAVE_MMXEXT_INLINE || HAVE_AMD3DNOW_INLINE) */

#if HAVE_MMX_EXTERNAL
static void put_cavs_qpel8_mc00_mmx(uint8_t *dst, const uint8_t *src,
                                    ptrdiff_t stride)
{
    ff_put_pixels8_mmx(dst, src, stride, 8);
}

static void avg_cavs_qpel8_mc00_mmx(uint8_t *dst, const uint8_t *src,
                                    ptrdiff_t stride)
{
    ff_avg_pixels8_mmx(dst, src, stride, 8);
}

static void avg_cavs_qpel8_mc00_mmxext(uint8_t *dst, const uint8_t *src,
                                       ptrdiff_t stride)
{
    ff_avg_pixels8_mmxext(dst, src, stride, 8);
}

static void put_cavs_qpel16_mc00_mmx(uint8_t *dst, const uint8_t *src,
                                     ptrdiff_t stride)
{
    ff_put_pixels16_mmx(dst, src, stride, 16);
}

static void avg_cavs_qpel16_mc00_mmx(uint8_t *dst, const uint8_t *src,
                                     ptrdiff_t stride)
{
    ff_avg_pixels16_mmx(dst, src, stride, 16);
}

static void avg_cavs_qpel16_mc00_mmxext(uint8_t *dst, const uint8_t *src,
                                        ptrdiff_t stride)
{
    ff_avg_pixels16_mmxext(dst, src, stride, 16);
}

static void put_cavs_qpel16_mc00_sse2(uint8_t *dst, const uint8_t *src,
                                      ptrdiff_t stride)
{
    ff_put_pixels16_sse2(dst, src, stride, 16);
}

static void avg_cavs_qpel16_mc00_sse2(uint8_t *dst, const uint8_t *src,
                                      ptrdiff_t stride)
{
    ff_avg_pixels16_sse2(dst, src, stride, 16);
}
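
/*
 * mc00 is the integer-pel position: no interpolation at all, just an
 * 8x8 or 16x16 block copy (put) or average (avg) forwarded to the shared
 * fpel routines.
 */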
#endif

static av_cold void cavsdsp_init_mmx(CAVSDSPContext *c,
                                     AVCodecContext *avctx)
{
#if HAVE_MMX_EXTERNAL
    c->put_cavs_qpel_pixels_tab[0][0] = put_cavs_qpel16_mc00_mmx;
    c->put_cavs_qpel_pixels_tab[1][0] = put_cavs_qpel8_mc00_mmx;
    c->avg_cavs_qpel_pixels_tab[0][0] = avg_cavs_qpel16_mc00_mmx;
    c->avg_cavs_qpel_pixels_tab[1][0] = avg_cavs_qpel8_mc00_mmx;

    c->cavs_idct8_add = cavs_idct8_add_mmx;
    c->idct_perm      = FF_IDCT_PERM_TRANSPOSE;
#endif /* HAVE_MMX_EXTERNAL */
}

#define DSPFUNC(PFX, IDX, NUM, EXT)                                                       \
    c->PFX ## _cavs_qpel_pixels_tab[IDX][ 2] = PFX ## _cavs_qpel ## NUM ## _mc20_ ## EXT; \
    c->PFX ## _cavs_qpel_pixels_tab[IDX][ 4] = PFX ## _cavs_qpel ## NUM ## _mc01_ ## EXT; \
    c->PFX ## _cavs_qpel_pixels_tab[IDX][ 8] = PFX ## _cavs_qpel ## NUM ## _mc02_ ## EXT; \
    c->PFX ## _cavs_qpel_pixels_tab[IDX][12] = PFX ## _cavs_qpel ## NUM ## _mc03_ ## EXT; \
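
/*
 * The pixels_tab index is x + 4*y for quarter-pel position (x, y), so
 * DSPFUNC fills slots 2, 4, 8 and 12, i.e. mc20, mc01, mc02 and mc03.
 * IDX selects the block size: 0 for 16x16, 1 for 8x8.
 */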

#if HAVE_MMXEXT_INLINE
QPEL_CAVS(put_,        PUT_OP, mmxext)
QPEL_CAVS(avg_, AVG_MMXEXT_OP, mmxext)

CAVS_MC(put_,  8, mmxext)
CAVS_MC(put_, 16, mmxext)
CAVS_MC(avg_,  8, mmxext)
CAVS_MC(avg_, 16, mmxext)
#endif /* HAVE_MMXEXT_INLINE */

#if HAVE_AMD3DNOW_INLINE
QPEL_CAVS(put_,       PUT_OP, 3dnow)
QPEL_CAVS(avg_, AVG_3DNOW_OP, 3dnow)

CAVS_MC(put_,  8, 3dnow)
CAVS_MC(put_, 16, 3dnow)
CAVS_MC(avg_,  8, 3dnow)
CAVS_MC(avg_, 16, 3dnow)

static av_cold void cavsdsp_init_3dnow(CAVSDSPContext *c,
                                       AVCodecContext *avctx)
{
    DSPFUNC(put, 0, 16, 3dnow);
    DSPFUNC(put, 1,  8, 3dnow);
    DSPFUNC(avg, 0, 16, 3dnow);
    DSPFUNC(avg, 1,  8, 3dnow);
}
#endif /* HAVE_AMD3DNOW_INLINE */
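
/*
 * Runtime dispatch: each block below requires both compile-time support
 * and the matching CPU flag. Later assignments overwrite earlier ones, so
 * on an SSE2-capable CPU the SSE2 pointers installed last take precedence
 * over the MMX/MMXEXT versions set before them.
 */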

av_cold void ff_cavsdsp_init_x86(CAVSDSPContext *c, AVCodecContext *avctx)
{
    av_unused int cpu_flags = av_get_cpu_flags();

    if (X86_MMX(cpu_flags))
        cavsdsp_init_mmx(c, avctx);

#if HAVE_AMD3DNOW_INLINE
    if (INLINE_AMD3DNOW(cpu_flags))
        cavsdsp_init_3dnow(c, avctx);
#endif /* HAVE_AMD3DNOW_INLINE */
#if HAVE_MMXEXT_INLINE
    if (INLINE_MMXEXT(cpu_flags)) {
        DSPFUNC(put, 0, 16, mmxext);
        DSPFUNC(put, 1,  8, mmxext);
        DSPFUNC(avg, 0, 16, mmxext);
        DSPFUNC(avg, 1,  8, mmxext);
    }
#endif
#if HAVE_MMX_EXTERNAL
    if (EXTERNAL_MMXEXT(cpu_flags)) {
        c->avg_cavs_qpel_pixels_tab[0][0] = avg_cavs_qpel16_mc00_mmxext;
        c->avg_cavs_qpel_pixels_tab[1][0] = avg_cavs_qpel8_mc00_mmxext;
    }
#endif
#if HAVE_SSE2_EXTERNAL
    if (EXTERNAL_SSE2(cpu_flags)) {
        c->put_cavs_qpel_pixels_tab[0][0] = put_cavs_qpel16_mc00_sse2;
        c->avg_cavs_qpel_pixels_tab[0][0] = avg_cavs_qpel16_mc00_sse2;

        c->cavs_idct8_add = cavs_idct8_add_sse2;
        c->idct_perm      = FF_IDCT_PERM_TRANSPOSE;
    }
#endif
}