/*
 * Copyright (c) 2004-2005 Michael Niedermayer, Loren Merritt
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/attributes.h"
#include "libavutil/cpu.h"
#include "libavutil/x86/asm.h"
#include "libavutil/x86/cpu.h"
#include "libavcodec/h264dsp.h"

/***********************************/
/* IDCT */
#define IDCT_ADD_FUNC(NUM, DEPTH, OPT) \
void ff_h264_idct ## NUM ## _add_ ## DEPTH ## _ ## OPT(uint8_t *dst, \
                                                        int16_t *block, \
                                                        int stride);

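/* Each instantiation below declares one asm entry point; the tokens are
 * pasted into the name, e.g. IDCT_ADD_FUNC(8_dc, 10, sse2) declares
 * ff_h264_idct8_dc_add_10_sse2(). */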
IDCT_ADD_FUNC(, 8, mmx)
IDCT_ADD_FUNC(, 8, sse2)
IDCT_ADD_FUNC(, 8, avx)
IDCT_ADD_FUNC(, 10, sse2)
IDCT_ADD_FUNC(_dc, 8, mmxext)
IDCT_ADD_FUNC(_dc, 8, sse2)
IDCT_ADD_FUNC(_dc, 8, avx)
IDCT_ADD_FUNC(_dc, 10, mmxext)
IDCT_ADD_FUNC(8_dc, 8, mmxext)
IDCT_ADD_FUNC(8_dc, 10, sse2)
IDCT_ADD_FUNC(8, 8, mmx)
IDCT_ADD_FUNC(8, 8, sse2)
IDCT_ADD_FUNC(8, 10, sse2)
IDCT_ADD_FUNC(, 10, avx)
IDCT_ADD_FUNC(8_dc, 10, avx)
IDCT_ADD_FUNC(8, 10, avx)


#define IDCT_ADD_REP_FUNC(NUM, REP, DEPTH, OPT) \
void ff_h264_idct ## NUM ## _add ## REP ## _ ## DEPTH ## _ ## OPT \
    (uint8_t *dst, const int *block_offset, \
     int16_t *block, int stride, const uint8_t nnzc[6 * 8]);

IDCT_ADD_REP_FUNC(8, 4, 8, mmx)
IDCT_ADD_REP_FUNC(8, 4, 8, mmxext)
IDCT_ADD_REP_FUNC(8, 4, 8, sse2)
IDCT_ADD_REP_FUNC(8, 4, 10, sse2)
IDCT_ADD_REP_FUNC(8, 4, 10, avx)
IDCT_ADD_REP_FUNC(, 16, 8, mmx)
IDCT_ADD_REP_FUNC(, 16, 8, mmxext)
IDCT_ADD_REP_FUNC(, 16, 8, sse2)
IDCT_ADD_REP_FUNC(, 16, 10, sse2)
IDCT_ADD_REP_FUNC(, 16intra, 8, mmx)
IDCT_ADD_REP_FUNC(, 16intra, 8, mmxext)
IDCT_ADD_REP_FUNC(, 16intra, 8, sse2)
IDCT_ADD_REP_FUNC(, 16intra, 10, sse2)
IDCT_ADD_REP_FUNC(, 16, 10, avx)
IDCT_ADD_REP_FUNC(, 16intra, 10, avx)


#define IDCT_ADD_REP_FUNC2(NUM, REP, DEPTH, OPT) \
void ff_h264_idct ## NUM ## _add ## REP ## _ ## DEPTH ## _ ## OPT \
    (uint8_t **dst, const int *block_offset, \
     int16_t *block, int stride, const uint8_t nnzc[6 * 8]);

IDCT_ADD_REP_FUNC2(, 8, 8, mmx)
IDCT_ADD_REP_FUNC2(, 8, 8, mmxext)
IDCT_ADD_REP_FUNC2(, 8, 8, sse2)
IDCT_ADD_REP_FUNC2(, 8, 10, sse2)
IDCT_ADD_REP_FUNC2(, 8, 10, avx)

IDCT_ADD_REP_FUNC2(, 8_422, 8, mmx)

IDCT_ADD_REP_FUNC2(, 8_422, 10, sse2)
IDCT_ADD_REP_FUNC2(, 8_422, 10, avx)

void ff_h264_luma_dc_dequant_idct_mmx(int16_t *output, int16_t *input, int qmul);
void ff_h264_luma_dc_dequant_idct_sse2(int16_t *output, int16_t *input, int qmul);

/***********************************/
/* deblocking */

void ff_h264_loop_filter_strength_mmxext(int16_t bS[2][4][4], uint8_t nnz[40],
                                         int8_t ref[2][40],
                                         int16_t mv[2][40][2],
                                         int bidir, int edges, int step,
                                         int mask_mv0, int mask_mv1, int field);

#define LF_FUNC(DIR, TYPE, DEPTH, OPT) \
void ff_deblock_ ## DIR ## _ ## TYPE ## _ ## DEPTH ## _ ## OPT(uint8_t *pix, \
                                                               ptrdiff_t stride, \
                                                               int alpha, \
                                                               int beta, \
                                                               int8_t *tc0);
#define LF_IFUNC(DIR, TYPE, DEPTH, OPT) \
void ff_deblock_ ## DIR ## _ ## TYPE ## _ ## DEPTH ## _ ## OPT(uint8_t *pix, \
                                                               ptrdiff_t stride, \
                                                               int alpha, \
                                                               int beta);

#define LF_FUNCS(type, depth) \
LF_FUNC(h, chroma, depth, mmxext) \
LF_IFUNC(h, chroma_intra, depth, mmxext) \
LF_FUNC(h, chroma422, depth, mmxext) \
LF_IFUNC(h, chroma422_intra, depth, mmxext) \
LF_FUNC(v, chroma, depth, mmxext) \
LF_IFUNC(v, chroma_intra, depth, mmxext) \
LF_FUNC(h, luma, depth, mmxext) \
LF_IFUNC(h, luma_intra, depth, mmxext) \
LF_FUNC(h, luma, depth, sse2) \
LF_IFUNC(h, luma_intra, depth, sse2) \
LF_FUNC(v, luma, depth, sse2) \
LF_IFUNC(v, luma_intra, depth, sse2) \
LF_FUNC(h, chroma, depth, sse2) \
LF_IFUNC(h, chroma_intra, depth, sse2) \
LF_FUNC(h, chroma422, depth, sse2) \
LF_IFUNC(h, chroma422_intra, depth, sse2) \
LF_FUNC(v, chroma, depth, sse2) \
LF_IFUNC(v, chroma_intra, depth, sse2) \
LF_FUNC(h, luma, depth, avx) \
LF_IFUNC(h, luma_intra, depth, avx) \
LF_FUNC(v, luma, depth, avx) \
LF_IFUNC(v, luma_intra, depth, avx) \
LF_FUNC(h, chroma, depth, avx) \
LF_IFUNC(h, chroma_intra, depth, avx) \
LF_FUNC(h, chroma422, depth, avx) \
LF_IFUNC(h, chroma422_intra, depth, avx) \
LF_FUNC(v, chroma, depth, avx) \
LF_IFUNC(v, chroma_intra, depth, avx)

LF_FUNC(h, luma_mbaff, 8, sse2)
LF_FUNC(h, luma_mbaff, 8, avx)

LF_FUNCS(uint8_t, 8)
LF_FUNCS(uint16_t, 10)

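/* The MMXEXT vertical luma deblock kernel only handles 8 pixels per call, so
 * on 32-bit builds (where SSE2 cannot be assumed) the full 16-pixel filter is
 * composed from two calls; each half is skipped when both of its tc0 entries
 * are negative, i.e. neither 4-pixel edge segment needs filtering. */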
#if ARCH_X86_32 && HAVE_MMXEXT_EXTERNAL
LF_FUNC(v8, luma, 8, mmxext)
static void deblock_v_luma_8_mmxext(uint8_t *pix, ptrdiff_t stride, int alpha,
                                    int beta, int8_t *tc0)
{
    if ((tc0[0] & tc0[1]) >= 0)
        ff_deblock_v8_luma_8_mmxext(pix + 0, stride, alpha, beta, tc0);
    if ((tc0[2] & tc0[3]) >= 0)
        ff_deblock_v8_luma_8_mmxext(pix + 8, stride, alpha, beta, tc0 + 2);
}
LF_IFUNC(v8, luma_intra, 8, mmxext)
static void deblock_v_luma_intra_8_mmxext(uint8_t *pix, ptrdiff_t stride,
                                          int alpha, int beta)
{
    ff_deblock_v8_luma_intra_8_mmxext(pix + 0, stride, alpha, beta);
    ff_deblock_v8_luma_intra_8_mmxext(pix + 8, stride, alpha, beta);
}
#endif /* ARCH_X86_32 && HAVE_MMXEXT_EXTERNAL */

LF_FUNC(v, luma, 10, mmxext)
LF_IFUNC(v, luma_intra, 10, mmxext)

/***********************************/
/* weighted prediction */
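/* Explicit (and bi-predictive) weighted prediction: each sample is roughly
 * scaled as ((src * weight + rounding) >> log2_denom) + offset and clipped
 * to the valid pixel range; the 16/8/4 suffix is the block width. */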

#define H264_WEIGHT(W, OPT) \
void ff_h264_weight_ ## W ## _ ## OPT(uint8_t *dst, ptrdiff_t stride, \
                                      int height, int log2_denom, \
                                      int weight, int offset);

#define H264_BIWEIGHT(W, OPT) \
void ff_h264_biweight_ ## W ## _ ## OPT(uint8_t *dst, uint8_t *src, \
                                        ptrdiff_t stride, int height, \
                                        int log2_denom, int weightd, \
                                        int weights, int offset);

#define H264_BIWEIGHT_MMX(W) \
H264_WEIGHT(W, mmxext) \
H264_BIWEIGHT(W, mmxext)

#define H264_BIWEIGHT_MMX_SSE(W) \
H264_BIWEIGHT_MMX(W) \
H264_WEIGHT(W, sse2) \
H264_BIWEIGHT(W, sse2) \
H264_BIWEIGHT(W, ssse3)

H264_BIWEIGHT_MMX_SSE(16)
H264_BIWEIGHT_MMX_SSE(8)
H264_BIWEIGHT_MMX(4)

#define H264_WEIGHT_10(W, DEPTH, OPT) \
void ff_h264_weight_ ## W ## _ ## DEPTH ## _ ## OPT(uint8_t *dst, \
                                                    ptrdiff_t stride, \
                                                    int height, \
                                                    int log2_denom, \
                                                    int weight, \
                                                    int offset);

#define H264_BIWEIGHT_10(W, DEPTH, OPT) \
void ff_h264_biweight_ ## W ## _ ## DEPTH ## _ ## OPT(uint8_t *dst, \
                                                      uint8_t *src, \
                                                      ptrdiff_t stride, \
                                                      int height, \
                                                      int log2_denom, \
                                                      int weightd, \
                                                      int weights, \
                                                      int offset);

#define H264_BIWEIGHT_10_SSE(W, DEPTH) \
H264_WEIGHT_10(W, DEPTH, sse2) \
H264_WEIGHT_10(W, DEPTH, sse4) \
H264_BIWEIGHT_10(W, DEPTH, sse2) \
H264_BIWEIGHT_10(W, DEPTH, sse4)

H264_BIWEIGHT_10_SSE(16, 10)
H264_BIWEIGHT_10_SSE(8, 10)
H264_BIWEIGHT_10_SSE(4, 10)

av_cold void ff_h264dsp_init_x86(H264DSPContext *c, const int bit_depth,
                                 const int chroma_format_idc)
{
#if HAVE_X86ASM
    int cpu_flags = av_get_cpu_flags();

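    /* CPU checks run from weakest to strongest extension, so a later match
     * overwrites the pointers installed by an earlier one and the fastest
     * available implementation is the one left in the context. */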
    if (EXTERNAL_MMXEXT(cpu_flags) && chroma_format_idc <= 1)
        c->h264_loop_filter_strength = ff_h264_loop_filter_strength_mmxext;

    if (bit_depth == 8) {
        if (EXTERNAL_MMX(cpu_flags)) {
            c->h264_idct_dc_add  =
            c->h264_idct_add     = ff_h264_idct_add_8_mmx;
            c->h264_idct8_dc_add =
            c->h264_idct8_add    = ff_h264_idct8_add_8_mmx;

            c->h264_idct_add16 = ff_h264_idct_add16_8_mmx;
            c->h264_idct8_add4 = ff_h264_idct8_add4_8_mmx;
            if (chroma_format_idc <= 1) {
                c->h264_idct_add8 = ff_h264_idct_add8_8_mmx;
            } else {
                c->h264_idct_add8 = ff_h264_idct_add8_422_8_mmx;
            }
            c->h264_idct_add16intra = ff_h264_idct_add16intra_8_mmx;
            if (cpu_flags & AV_CPU_FLAG_CMOV)
                c->h264_luma_dc_dequant_idct = ff_h264_luma_dc_dequant_idct_mmx;
        }
        if (EXTERNAL_MMXEXT(cpu_flags)) {
            c->h264_idct_dc_add  = ff_h264_idct_dc_add_8_mmxext;
            c->h264_idct8_dc_add = ff_h264_idct8_dc_add_8_mmxext;
            c->h264_idct_add16   = ff_h264_idct_add16_8_mmxext;
            c->h264_idct8_add4   = ff_h264_idct8_add4_8_mmxext;
            if (chroma_format_idc <= 1)
                c->h264_idct_add8 = ff_h264_idct_add8_8_mmxext;
            c->h264_idct_add16intra = ff_h264_idct_add16intra_8_mmxext;

            c->h264_v_loop_filter_chroma       = ff_deblock_v_chroma_8_mmxext;
            c->h264_v_loop_filter_chroma_intra = ff_deblock_v_chroma_intra_8_mmxext;
            if (chroma_format_idc <= 1) {
                c->h264_h_loop_filter_chroma       = ff_deblock_h_chroma_8_mmxext;
                c->h264_h_loop_filter_chroma_intra = ff_deblock_h_chroma_intra_8_mmxext;
            } else {
                c->h264_h_loop_filter_chroma       = ff_deblock_h_chroma422_8_mmxext;
                c->h264_h_loop_filter_chroma_intra = ff_deblock_h_chroma422_intra_8_mmxext;
            }
#if ARCH_X86_32 && HAVE_MMXEXT_EXTERNAL
            c->h264_v_loop_filter_luma       = deblock_v_luma_8_mmxext;
            c->h264_h_loop_filter_luma       = ff_deblock_h_luma_8_mmxext;
            c->h264_v_loop_filter_luma_intra = deblock_v_luma_intra_8_mmxext;
            c->h264_h_loop_filter_luma_intra = ff_deblock_h_luma_intra_8_mmxext;
#endif /* ARCH_X86_32 && HAVE_MMXEXT_EXTERNAL */
            c->weight_h264_pixels_tab[0] = ff_h264_weight_16_mmxext;
            c->weight_h264_pixels_tab[1] = ff_h264_weight_8_mmxext;
            c->weight_h264_pixels_tab[2] = ff_h264_weight_4_mmxext;

            c->biweight_h264_pixels_tab[0] = ff_h264_biweight_16_mmxext;
            c->biweight_h264_pixels_tab[1] = ff_h264_biweight_8_mmxext;
            c->biweight_h264_pixels_tab[2] = ff_h264_biweight_4_mmxext;
        }
        if (EXTERNAL_SSE2(cpu_flags)) {
            c->h264_idct8_add = ff_h264_idct8_add_8_sse2;

            c->h264_idct_add16 = ff_h264_idct_add16_8_sse2;
            c->h264_idct8_add4 = ff_h264_idct8_add4_8_sse2;
            if (chroma_format_idc <= 1)
                c->h264_idct_add8 = ff_h264_idct_add8_8_sse2;
            c->h264_idct_add16intra      = ff_h264_idct_add16intra_8_sse2;
            c->h264_luma_dc_dequant_idct = ff_h264_luma_dc_dequant_idct_sse2;

            c->weight_h264_pixels_tab[0] = ff_h264_weight_16_sse2;
            c->weight_h264_pixels_tab[1] = ff_h264_weight_8_sse2;

            c->biweight_h264_pixels_tab[0] = ff_h264_biweight_16_sse2;
            c->biweight_h264_pixels_tab[1] = ff_h264_biweight_8_sse2;

            c->h264_v_loop_filter_luma       = ff_deblock_v_luma_8_sse2;
            c->h264_h_loop_filter_luma       = ff_deblock_h_luma_8_sse2;
            c->h264_v_loop_filter_luma_intra = ff_deblock_v_luma_intra_8_sse2;
            c->h264_h_loop_filter_luma_intra = ff_deblock_h_luma_intra_8_sse2;

#if ARCH_X86_64
            c->h264_h_loop_filter_luma_mbaff = ff_deblock_h_luma_mbaff_8_sse2;
#endif

            c->h264_v_loop_filter_chroma       = ff_deblock_v_chroma_8_sse2;
            c->h264_v_loop_filter_chroma_intra = ff_deblock_v_chroma_intra_8_sse2;
            if (chroma_format_idc <= 1) {
                c->h264_h_loop_filter_chroma       = ff_deblock_h_chroma_8_sse2;
                c->h264_h_loop_filter_chroma_intra = ff_deblock_h_chroma_intra_8_sse2;
            } else {
                c->h264_h_loop_filter_chroma       = ff_deblock_h_chroma422_8_sse2;
                c->h264_h_loop_filter_chroma_intra = ff_deblock_h_chroma422_intra_8_sse2;
            }

            c->h264_idct_add    = ff_h264_idct_add_8_sse2;
            c->h264_idct_dc_add = ff_h264_idct_dc_add_8_sse2;
        }
        if (EXTERNAL_SSSE3(cpu_flags)) {
            c->biweight_h264_pixels_tab[0] = ff_h264_biweight_16_ssse3;
            c->biweight_h264_pixels_tab[1] = ff_h264_biweight_8_ssse3;
        }
        if (EXTERNAL_AVX(cpu_flags)) {
            c->h264_v_loop_filter_luma       = ff_deblock_v_luma_8_avx;
            c->h264_h_loop_filter_luma       = ff_deblock_h_luma_8_avx;
            c->h264_v_loop_filter_luma_intra = ff_deblock_v_luma_intra_8_avx;
            c->h264_h_loop_filter_luma_intra = ff_deblock_h_luma_intra_8_avx;
#if ARCH_X86_64
            c->h264_h_loop_filter_luma_mbaff = ff_deblock_h_luma_mbaff_8_avx;
#endif

            c->h264_v_loop_filter_chroma       = ff_deblock_v_chroma_8_avx;
            c->h264_v_loop_filter_chroma_intra = ff_deblock_v_chroma_intra_8_avx;
            if (chroma_format_idc <= 1) {
                c->h264_h_loop_filter_chroma       = ff_deblock_h_chroma_8_avx;
                c->h264_h_loop_filter_chroma_intra = ff_deblock_h_chroma_intra_8_avx;
            } else {
                c->h264_h_loop_filter_chroma       = ff_deblock_h_chroma422_8_avx;
                c->h264_h_loop_filter_chroma_intra = ff_deblock_h_chroma422_intra_8_avx;
            }

            c->h264_idct_add    = ff_h264_idct_add_8_avx;
            c->h264_idct_dc_add = ff_h264_idct_dc_add_8_avx;
        }
    } else if (bit_depth == 10) {
        if (EXTERNAL_MMXEXT(cpu_flags)) {
#if ARCH_X86_32
            c->h264_v_loop_filter_chroma       = ff_deblock_v_chroma_10_mmxext;
            c->h264_v_loop_filter_chroma_intra = ff_deblock_v_chroma_intra_10_mmxext;
            if (chroma_format_idc <= 1) {
                c->h264_h_loop_filter_chroma = ff_deblock_h_chroma_10_mmxext;
            } else {
                c->h264_h_loop_filter_chroma = ff_deblock_h_chroma422_10_mmxext;
            }
            c->h264_v_loop_filter_luma       = ff_deblock_v_luma_10_mmxext;
            c->h264_h_loop_filter_luma       = ff_deblock_h_luma_10_mmxext;
            c->h264_v_loop_filter_luma_intra = ff_deblock_v_luma_intra_10_mmxext;
            c->h264_h_loop_filter_luma_intra = ff_deblock_h_luma_intra_10_mmxext;
#endif /* ARCH_X86_32 */
            c->h264_idct_dc_add = ff_h264_idct_dc_add_10_mmxext;
        }
        if (EXTERNAL_SSE2(cpu_flags)) {
            c->h264_idct_add     = ff_h264_idct_add_10_sse2;
            c->h264_idct8_dc_add = ff_h264_idct8_dc_add_10_sse2;

            c->h264_idct_add16 = ff_h264_idct_add16_10_sse2;
            if (chroma_format_idc <= 1) {
                c->h264_idct_add8 = ff_h264_idct_add8_10_sse2;
            } else {
                c->h264_idct_add8 = ff_h264_idct_add8_422_10_sse2;
            }
            c->h264_idct_add16intra = ff_h264_idct_add16intra_10_sse2;
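            /* Presumably these kernels need a 16-byte-aligned stack for
             * their spills, hence the HAVE_ALIGNED_STACK guard. */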
#if HAVE_ALIGNED_STACK
            c->h264_idct8_add  = ff_h264_idct8_add_10_sse2;
            c->h264_idct8_add4 = ff_h264_idct8_add4_10_sse2;
#endif /* HAVE_ALIGNED_STACK */

            c->weight_h264_pixels_tab[0] = ff_h264_weight_16_10_sse2;
            c->weight_h264_pixels_tab[1] = ff_h264_weight_8_10_sse2;
            c->weight_h264_pixels_tab[2] = ff_h264_weight_4_10_sse2;

            c->biweight_h264_pixels_tab[0] = ff_h264_biweight_16_10_sse2;
            c->biweight_h264_pixels_tab[1] = ff_h264_biweight_8_10_sse2;
            c->biweight_h264_pixels_tab[2] = ff_h264_biweight_4_10_sse2;

            c->h264_v_loop_filter_chroma       = ff_deblock_v_chroma_10_sse2;
            c->h264_v_loop_filter_chroma_intra = ff_deblock_v_chroma_intra_10_sse2;
            if (chroma_format_idc <= 1) {
                c->h264_h_loop_filter_chroma = ff_deblock_h_chroma_10_sse2;
            } else {
                c->h264_h_loop_filter_chroma = ff_deblock_h_chroma422_10_sse2;
            }
#if HAVE_ALIGNED_STACK
            c->h264_v_loop_filter_luma       = ff_deblock_v_luma_10_sse2;
            c->h264_h_loop_filter_luma       = ff_deblock_h_luma_10_sse2;
            c->h264_v_loop_filter_luma_intra = ff_deblock_v_luma_intra_10_sse2;
            c->h264_h_loop_filter_luma_intra = ff_deblock_h_luma_intra_10_sse2;
#endif /* HAVE_ALIGNED_STACK */
        }
        if (EXTERNAL_SSE4(cpu_flags)) {
            c->weight_h264_pixels_tab[0] = ff_h264_weight_16_10_sse4;
            c->weight_h264_pixels_tab[1] = ff_h264_weight_8_10_sse4;
            c->weight_h264_pixels_tab[2] = ff_h264_weight_4_10_sse4;

            c->biweight_h264_pixels_tab[0] = ff_h264_biweight_16_10_sse4;
            c->biweight_h264_pixels_tab[1] = ff_h264_biweight_8_10_sse4;
            c->biweight_h264_pixels_tab[2] = ff_h264_biweight_4_10_sse4;
        }
        if (EXTERNAL_AVX(cpu_flags)) {
            c->h264_idct_dc_add  =
            c->h264_idct_add     = ff_h264_idct_add_10_avx;
            c->h264_idct8_dc_add = ff_h264_idct8_dc_add_10_avx;

            c->h264_idct_add16 = ff_h264_idct_add16_10_avx;
            if (chroma_format_idc <= 1) {
                c->h264_idct_add8 = ff_h264_idct_add8_10_avx;
            } else {
                c->h264_idct_add8 = ff_h264_idct_add8_422_10_avx;
            }
            c->h264_idct_add16intra = ff_h264_idct_add16intra_10_avx;
#if HAVE_ALIGNED_STACK
            c->h264_idct8_add  = ff_h264_idct8_add_10_avx;
            c->h264_idct8_add4 = ff_h264_idct8_add4_10_avx;
#endif /* HAVE_ALIGNED_STACK */

            c->h264_v_loop_filter_chroma       = ff_deblock_v_chroma_10_avx;
            c->h264_v_loop_filter_chroma_intra = ff_deblock_v_chroma_intra_10_avx;
            if (chroma_format_idc <= 1) {
                c->h264_h_loop_filter_chroma = ff_deblock_h_chroma_10_avx;
            } else {
                c->h264_h_loop_filter_chroma = ff_deblock_h_chroma422_10_avx;
            }
#if HAVE_ALIGNED_STACK
            c->h264_v_loop_filter_luma       = ff_deblock_v_luma_10_avx;
            c->h264_h_loop_filter_luma       = ff_deblock_h_luma_10_avx;
            c->h264_v_loop_filter_luma_intra = ff_deblock_v_luma_intra_10_avx;
            c->h264_h_loop_filter_luma_intra = ff_deblock_h_luma_intra_10_avx;
#endif /* HAVE_ALIGNED_STACK */
        }
    }
#endif
}