GCC Code Coverage Report

Directory: ../../../ffmpeg/
File:      src/libavcodec/x86/mpegvideoenc.c
Date:      2021-01-22 05:18:52

            Exec  Total  Coverage
Lines:         4     29    13.8 %
Branches:      2     12    16.7 %

 Line  Exec  Source
    1        /*
    2         * The simplest mpeg encoder (well, it was the simplest!)
    3         * Copyright (c) 2000,2001 Fabrice Bellard
    4         *
    5         * This file is part of FFmpeg.
    6         *
    7         * FFmpeg is free software; you can redistribute it and/or
    8         * modify it under the terms of the GNU Lesser General Public
    9         * License as published by the Free Software Foundation; either
   10         * version 2.1 of the License, or (at your option) any later version.
   11         *
   12         * FFmpeg is distributed in the hope that it will be useful,
   13         * but WITHOUT ANY WARRANTY; without even the implied warranty of
   14         * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   15         * Lesser General Public License for more details.
   16         *
   17         * You should have received a copy of the GNU Lesser General Public
   18         * License along with FFmpeg; if not, write to the Free Software
   19         * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
   20         */
   21
   22        #include "libavutil/attributes.h"
   23        #include "libavutil/cpu.h"
   24        #include "libavutil/mem_internal.h"
   25        #include "libavutil/x86/asm.h"
   26        #include "libavutil/x86/cpu.h"
   27        #include "libavcodec/avcodec.h"
   28        #include "libavcodec/dct.h"
   29        #include "libavcodec/mpegvideo.h"
   30
   31        /* not permutated inverse zigzag_direct + 1 for MMX quantizer */
   32        DECLARE_ALIGNED(16, static const uint16_t, inv_zigzag_direct16)[64] = {
   33            1,  2,  6,  7,  15, 16, 28, 29,
   34            3,  5,  8,  14, 17, 27, 30, 43,
   35            4,  9,  13, 18, 26, 31, 42, 44,
   36            10, 12, 19, 25, 32, 41, 45, 54,
   37            11, 20, 24, 33, 40, 46, 53, 55,
   38            21, 23, 34, 39, 47, 52, 56, 61,
   39            22, 35, 38, 48, 51, 57, 60, 62,
   40            36, 37, 49, 50, 58, 59, 63, 64,
   41        };
   42
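As a quick cross-check of the comment on line 31, the standalone snippet below (not part of the file measured above; the reference zigzag scan order, the same values as FFmpeg's ff_zigzag_direct, is copied in so the check builds on its own) verifies that inv_zigzag_direct16 maps each raster position to its 1-based index in the standard, non-permutated zigzag scan, i.e. that it is the inverse zigzag permutation plus one:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* The well-known JPEG/MPEG zigzag scan order (same values as FFmpeg's
     * ff_zigzag_direct): zigzag_direct[i] is the raster position of the
     * i-th coefficient in scan order. */
    static const uint8_t zigzag_direct[64] = {
         0,  1,  8, 16,  9,  2,  3, 10,
        17, 24, 32, 25, 18, 11,  4,  5,
        12, 19, 26, 33, 40, 48, 41, 34,
        27, 20, 13,  6,  7, 14, 21, 28,
        35, 42, 49, 56, 57, 50, 43, 36,
        29, 22, 15, 23, 30, 37, 44, 51,
        58, 59, 52, 45, 38, 31, 39, 46,
        53, 60, 61, 54, 47, 55, 62, 63,
    };

    /* Local copy of the table from lines 32-41 above. */
    static const uint16_t inv_zigzag_direct16[64] = {
         1,  2,  6,  7, 15, 16, 28, 29,
         3,  5,  8, 14, 17, 27, 30, 43,
         4,  9, 13, 18, 26, 31, 42, 44,
        10, 12, 19, 25, 32, 41, 45, 54,
        11, 20, 24, 33, 40, 46, 53, 55,
        21, 23, 34, 39, 47, 52, 56, 61,
        22, 35, 38, 48, 51, 57, 60, 62,
        36, 37, 49, 50, 58, 59, 63, 64,
    };

    int main(void)
    {
        /* For every scan index i, the raster position zigzag_direct[i]
         * must map back to i + 1. */
        for (int i = 0; i < 64; i++)
            assert(inv_zigzag_direct16[zigzag_direct[i]] == i + 1);
        puts("inv_zigzag_direct16 == inverse(zigzag_direct) + 1 for all 64 entries");
        return 0;
    }

The +1 bias presumably lets the MMX/SSE quantizer in mpegvideoenc_template.c use 0 as a natural "all coefficients zero" result when it takes a maximum over this table to locate the last nonzero coefficient.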
   43        #if HAVE_6REGS
   44
   45        #if HAVE_MMX_INLINE
   46        #define COMPILE_TEMPLATE_MMXEXT 0
   47        #define COMPILE_TEMPLATE_SSE2   0
   48        #define COMPILE_TEMPLATE_SSSE3  0
   49        #define RENAME(a)      a ## _mmx
   50        #define RENAME_FDCT(a) a ## _mmx
   51        #include "mpegvideoenc_template.c"
   52        #endif /* HAVE_MMX_INLINE */
   53
   54        #if HAVE_MMXEXT_INLINE
   55        #undef COMPILE_TEMPLATE_SSSE3
   56        #undef COMPILE_TEMPLATE_SSE2
   57        #undef COMPILE_TEMPLATE_MMXEXT
   58        #define COMPILE_TEMPLATE_MMXEXT 1
   59        #define COMPILE_TEMPLATE_SSE2   0
   60        #define COMPILE_TEMPLATE_SSSE3  0
   61        #undef RENAME
   62        #undef RENAME_FDCT
   63        #define RENAME(a)      a ## _mmxext
   64        #define RENAME_FDCT(a) a ## _mmxext
   65        #include "mpegvideoenc_template.c"
   66        #endif /* HAVE_MMXEXT_INLINE */
   67
   68        #if HAVE_SSE2_INLINE
   69        #undef COMPILE_TEMPLATE_MMXEXT
   70        #undef COMPILE_TEMPLATE_SSE2
   71        #undef COMPILE_TEMPLATE_SSSE3
   72        #define COMPILE_TEMPLATE_MMXEXT 0
   73        #define COMPILE_TEMPLATE_SSE2   1
   74        #define COMPILE_TEMPLATE_SSSE3  0
   75        #undef RENAME
   76        #undef RENAME_FDCT
   77        #define RENAME(a)      a ## _sse2
   78        #define RENAME_FDCT(a) a ## _sse2
   79        #include "mpegvideoenc_template.c"
   80        #endif /* HAVE_SSE2_INLINE */
   81
   82        #if HAVE_SSSE3_INLINE
   83        #undef COMPILE_TEMPLATE_MMXEXT
   84        #undef COMPILE_TEMPLATE_SSE2
   85        #undef COMPILE_TEMPLATE_SSSE3
   86        #define COMPILE_TEMPLATE_MMXEXT 0
   87        #define COMPILE_TEMPLATE_SSE2   1
   88        #define COMPILE_TEMPLATE_SSSE3  1
   89        #undef RENAME
   90        #undef RENAME_FDCT
   91        #define RENAME(a)      a ## _ssse3
   92        #define RENAME_FDCT(a) a ## _sse2
   93        #include "mpegvideoenc_template.c"
   94        #endif /* HAVE_SSSE3_INLINE */
   95
   96        #endif /* HAVE_6REGS */
   97
   98        #if HAVE_INLINE_ASM
   99        #if HAVE_MMX_INLINE
  100        static void  denoise_dct_mmx(MpegEncContext *s, int16_t *block){
  101            const int intra= s->mb_intra;
  102            int *sum= s->dct_error_sum[intra];
  103            uint16_t *offset= s->dct_offset[intra];
  104
  105            s->dct_count[intra]++;
  106
  107            __asm__ volatile(
  108                "pxor %%mm7, %%mm7                      \n\t"
  109                "1:                                     \n\t"
  110                "pxor %%mm0, %%mm0                      \n\t"
  111                "pxor %%mm1, %%mm1                      \n\t"
  112                "movq (%0), %%mm2                       \n\t"
  113                "movq 8(%0), %%mm3                      \n\t"
  114                "pcmpgtw %%mm2, %%mm0                   \n\t"
  115                "pcmpgtw %%mm3, %%mm1                   \n\t"
  116                "pxor %%mm0, %%mm2                      \n\t"
  117                "pxor %%mm1, %%mm3                      \n\t"
  118                "psubw %%mm0, %%mm2                     \n\t"
  119                "psubw %%mm1, %%mm3                     \n\t"
  120                "movq %%mm2, %%mm4                      \n\t"
  121                "movq %%mm3, %%mm5                      \n\t"
  122                "psubusw (%2), %%mm2                    \n\t"
  123                "psubusw 8(%2), %%mm3                   \n\t"
  124                "pxor %%mm0, %%mm2                      \n\t"
  125                "pxor %%mm1, %%mm3                      \n\t"
  126                "psubw %%mm0, %%mm2                     \n\t"
  127                "psubw %%mm1, %%mm3                     \n\t"
  128                "movq %%mm2, (%0)                       \n\t"
  129                "movq %%mm3, 8(%0)                      \n\t"
  130                "movq %%mm4, %%mm2                      \n\t"
  131                "movq %%mm5, %%mm3                      \n\t"
  132                "punpcklwd %%mm7, %%mm4                 \n\t"
  133                "punpckhwd %%mm7, %%mm2                 \n\t"
  134                "punpcklwd %%mm7, %%mm5                 \n\t"
  135                "punpckhwd %%mm7, %%mm3                 \n\t"
  136                "paddd (%1), %%mm4                      \n\t"
  137                "paddd 8(%1), %%mm2                     \n\t"
  138                "paddd 16(%1), %%mm5                    \n\t"
  139                "paddd 24(%1), %%mm3                    \n\t"
  140                "movq %%mm4, (%1)                       \n\t"
  141                "movq %%mm2, 8(%1)                      \n\t"
  142                "movq %%mm5, 16(%1)                     \n\t"
  143                "movq %%mm3, 24(%1)                     \n\t"
  144                "add $16, %0                            \n\t"
  145                "add $32, %1                            \n\t"
  146                "add $16, %2                            \n\t"
  147                "cmp %3, %0                             \n\t"
  148                    " jb 1b                             \n\t"
  149                : "+r" (block), "+r" (sum), "+r" (offset)
  150                : "r"(block+64)
  151            );
  152        }
  153        #endif /* HAVE_MMX_INLINE */
  154
  155        #if HAVE_SSE2_INLINE
  156        static void  denoise_dct_sse2(MpegEncContext *s, int16_t *block){
  157            const int intra= s->mb_intra;
  158            int *sum= s->dct_error_sum[intra];
  159            uint16_t *offset= s->dct_offset[intra];
  160
  161            s->dct_count[intra]++;
  162
  163            __asm__ volatile(
  164                "pxor %%xmm7, %%xmm7                    \n\t"
  165                "1:                                     \n\t"
  166                "pxor %%xmm0, %%xmm0                    \n\t"
  167                "pxor %%xmm1, %%xmm1                    \n\t"
  168                "movdqa (%0), %%xmm2                    \n\t"
  169                "movdqa 16(%0), %%xmm3                  \n\t"
  170                "pcmpgtw %%xmm2, %%xmm0                 \n\t"
  171                "pcmpgtw %%xmm3, %%xmm1                 \n\t"
  172                "pxor %%xmm0, %%xmm2                    \n\t"
  173                "pxor %%xmm1, %%xmm3                    \n\t"
  174                "psubw %%xmm0, %%xmm2                   \n\t"
  175                "psubw %%xmm1, %%xmm3                   \n\t"
  176                "movdqa %%xmm2, %%xmm4                  \n\t"
  177                "movdqa %%xmm3, %%xmm5                  \n\t"
  178                "psubusw (%2), %%xmm2                   \n\t"
  179                "psubusw 16(%2), %%xmm3                 \n\t"
  180                "pxor %%xmm0, %%xmm2                    \n\t"
  181                "pxor %%xmm1, %%xmm3                    \n\t"
  182                "psubw %%xmm0, %%xmm2                   \n\t"
  183                "psubw %%xmm1, %%xmm3                   \n\t"
  184                "movdqa %%xmm2, (%0)                    \n\t"
  185                "movdqa %%xmm3, 16(%0)                  \n\t"
  186                "movdqa %%xmm4, %%xmm6                  \n\t"
  187                "movdqa %%xmm5, %%xmm0                  \n\t"
  188                "punpcklwd %%xmm7, %%xmm4               \n\t"
  189                "punpckhwd %%xmm7, %%xmm6               \n\t"
  190                "punpcklwd %%xmm7, %%xmm5               \n\t"
  191                "punpckhwd %%xmm7, %%xmm0               \n\t"
  192                "paddd (%1), %%xmm4                     \n\t"
  193                "paddd 16(%1), %%xmm6                   \n\t"
  194                "paddd 32(%1), %%xmm5                   \n\t"
  195                "paddd 48(%1), %%xmm0                   \n\t"
  196                "movdqa %%xmm4, (%1)                    \n\t"
  197                "movdqa %%xmm6, 16(%1)                  \n\t"
  198                "movdqa %%xmm5, 32(%1)                  \n\t"
  199                "movdqa %%xmm0, 48(%1)                  \n\t"
  200                "add $32, %0                            \n\t"
  201                "add $64, %1                            \n\t"
  202                "add $32, %2                            \n\t"
  203                "cmp %3, %0                             \n\t"
  204                    " jb 1b                             \n\t"
  205                : "+r" (block), "+r" (sum), "+r" (offset)
  206                : "r"(block+64)
  207                  XMM_CLOBBERS_ONLY("%xmm0", "%xmm1", "%xmm2", "%xmm3",
  208                                    "%xmm4", "%xmm5", "%xmm6", "%xmm7")
  209            );
  210        }
  211        #endif /* HAVE_SSE2_INLINE */
  212        #endif /* HAVE_INLINE_ASM */
  213
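As a reading aid for the two inline-asm kernels above, here is a scalar sketch of the same per-block computation, patterned after the generic denoise_dct_c() fallback in libavcodec/mpegvideo_enc.c (the MpegEncContext bookkeeping, i.e. the intra/inter array selection and the dct_count increment, is omitted, and the function name below is made up for this sketch): each coefficient's magnitude is accumulated into a running error sum, then the coefficient is shrunk toward zero by a per-position offset, saturating at zero, which is what the psubusw on the absolute values implements.

    #include <stdint.h>

    /* Scalar model of denoise_dct_mmx()/denoise_dct_sse2():
     *   sum[i]   += |block[i]|                                 (error accumulation)
     *   block[i]  = sign(block[i]) * max(|block[i]| - offset[i], 0)
     * for all 64 coefficients of one DCT block. */
    void denoise_dct_scalar(int16_t *block, int *sum, const uint16_t *offset)
    {
        for (int i = 0; i < 64; i++) {
            int level = block[i];
            if (level > 0) {
                sum[i] += level;
                level  -= offset[i];
                if (level < 0)
                    level = 0;
            } else {
                sum[i] -= level;          /* adds |level| */
                level  += offset[i];
                if (level > 0)
                    level = 0;
            }
            block[i] = level;
        }
    }

The MMX loop handles 8 coefficients per iteration and the SSE2 loop 16, which is why the pointer increments differ: add $16 vs. $32 for the int16_t block and offset pointers, and $32 vs. $64 for the 32-bit error-sum array.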
  214   255  av_cold void ff_dct_encode_init_x86(MpegEncContext *s)
  215        {
  216   255      const int dct_algo = s->avctx->dct_algo;
  217
  218   255      if (dct_algo == FF_DCT_AUTO || dct_algo == FF_DCT_MMX) {
  219        #if HAVE_MMX_INLINE
  220                int cpu_flags = av_get_cpu_flags();
  221                if (INLINE_MMX(cpu_flags)) {
  222        #if HAVE_6REGS
  223                    s->dct_quantize = dct_quantize_mmx;
  224        #endif
  225                    s->denoise_dct  = denoise_dct_mmx;
  226                }
  227        #endif
  228        #if HAVE_6REGS && HAVE_MMXEXT_INLINE
  229                if (INLINE_MMXEXT(cpu_flags))
  230                    s->dct_quantize = dct_quantize_mmxext;
  231        #endif
  232        #if HAVE_SSE2_INLINE
  233                if (INLINE_SSE2(cpu_flags)) {
  234        #if HAVE_6REGS
  235                    s->dct_quantize = dct_quantize_sse2;
  236        #endif
  237                    s->denoise_dct  = denoise_dct_sse2;
  238                }
  239        #endif
  240        #if HAVE_6REGS && HAVE_SSSE3_INLINE
  241                if (INLINE_SSSE3(cpu_flags))
  242                    s->dct_quantize = dct_quantize_ssse3;
  243        #endif
  244            }
  245   255  }
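The only executed lines in this report (214, 216, 218 and 245, 255 hits each) all sit in ff_dct_encode_init_x86(), which is reached from ff_dct_encode_init() during encoder initialization on x86 builds. A minimal, hypothetical libavcodec client that exercises exactly this path could look like the following; the codec choice, dimensions, bitrate and frame rate are illustrative assumptions, not values taken from the report:

    #include <libavcodec/avcodec.h>

    int main(void)
    {
        /* Any MpegEncContext-based encoder would do; MPEG-2 is used here
         * purely as an example. */
        const AVCodec *codec = avcodec_find_encoder(AV_CODEC_ID_MPEG2VIDEO);
        if (!codec)
            return 1;

        AVCodecContext *ctx = avcodec_alloc_context3(codec);
        if (!ctx)
            return 1;

        ctx->width     = 352;                      /* illustrative settings */
        ctx->height    = 288;
        ctx->time_base = (AVRational){ 1, 25 };
        ctx->pix_fmt   = AV_PIX_FMT_YUV420P;
        ctx->bit_rate  = 400000;
        ctx->dct_algo  = FF_DCT_AUTO;              /* the value tested on line 218 */

        /* avcodec_open2() runs the encoder's init, which on x86 ends up in
         * ff_dct_encode_init_x86() and selects dct_quantize/denoise_dct
         * implementations according to the available CPU flags. */
        int ret = avcodec_open2(ctx, codec, NULL);

        avcodec_free_context(&ctx);
        return ret < 0;
    }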