1 |
|
|
/* |
2 |
|
|
* VC-1 and WMV3 - DSP functions MMX-optimized |
3 |
|
|
* Copyright (c) 2007 Christophe GISQUET <christophe.gisquet@free.fr> |
4 |
|
|
* |
5 |
|
|
* Permission is hereby granted, free of charge, to any person |
6 |
|
|
* obtaining a copy of this software and associated documentation |
7 |
|
|
* files (the "Software"), to deal in the Software without |
8 |
|
|
* restriction, including without limitation the rights to use, |
9 |
|
|
* copy, modify, merge, publish, distribute, sublicense, and/or sell |
10 |
|
|
* copies of the Software, and to permit persons to whom the |
11 |
|
|
* Software is furnished to do so, subject to the following |
12 |
|
|
* conditions: |
13 |
|
|
* |
14 |
|
|
* The above copyright notice and this permission notice shall be |
15 |
|
|
* included in all copies or substantial portions of the Software. |
16 |
|
|
* |
17 |
|
|
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, |
18 |
|
|
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES |
19 |
|
|
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND |
20 |
|
|
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT |
21 |
|
|
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, |
22 |
|
|
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING |
23 |
|
|
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
24 |
|
|
* OTHER DEALINGS IN THE SOFTWARE. |
25 |
|
|
*/ |
26 |
|
|
|
27 |
|
|
#include "libavutil/attributes.h" |
28 |
|
|
#include "libavutil/cpu.h" |
29 |
|
|
#include "libavutil/x86/cpu.h" |
30 |
|
|
#include "libavutil/x86/asm.h" |
31 |
|
|
#include "libavcodec/vc1dsp.h" |
32 |
|
|
#include "fpel.h" |
33 |
|
|
#include "vc1dsp.h" |
34 |
|
|
#include "config.h" |
35 |
|
|
|
36 |
|
|
/*
 * Declare the external (assembly) VC-1 in-loop deblocking filters for one
 * instruction-set suffix EXT, and define static C wrappers that cover a
 * 16-pixel edge by running the 8-pixel asm filter twice:
 * - vertical edge:   second call 8 pixels to the right (src + 8)
 * - horizontal edge: second call 8 rows down (src + 8*stride)
 * pq is the picture quantizer controlling the filter strength.
 */
#define LOOP_FILTER(EXT) \
void ff_vc1_v_loop_filter4_ ## EXT(uint8_t *src, int stride, int pq); \
void ff_vc1_h_loop_filter4_ ## EXT(uint8_t *src, int stride, int pq); \
void ff_vc1_v_loop_filter8_ ## EXT(uint8_t *src, int stride, int pq); \
void ff_vc1_h_loop_filter8_ ## EXT(uint8_t *src, int stride, int pq); \
\
static void vc1_v_loop_filter16_ ## EXT(uint8_t *src, int stride, int pq) \
{ \
    ff_vc1_v_loop_filter8_ ## EXT(src, stride, pq); \
    ff_vc1_v_loop_filter8_ ## EXT(src+8, stride, pq); \
} \
\
static void vc1_h_loop_filter16_ ## EXT(uint8_t *src, int stride, int pq) \
{ \
    ff_vc1_h_loop_filter8_ ## EXT(src, stride, pq); \
    ff_vc1_h_loop_filter8_ ## EXT(src+8*stride, stride, pq); \
}
53 |
|
|
|
54 |
|
|
#if HAVE_X86ASM
/* Prototype the asm loop filters and build the 16-pixel wrappers for each
 * SIMD flavor implemented in external assembly. */
LOOP_FILTER(mmxext)
LOOP_FILTER(sse2)
LOOP_FILTER(ssse3)

/* SSE4 assembly provides only the horizontal 8-pixel filter, so it is
 * declared separately instead of via LOOP_FILTER(). */
void ff_vc1_h_loop_filter8_sse4(uint8_t *src, int stride, int pq);
61 |
|
|
/* Deblock a 16-pixel horizontal edge with SSE4 by filtering the upper and
 * lower 8-pixel halves separately (mirrors the LOOP_FILTER wrappers). */
static void vc1_h_loop_filter16_sse4(uint8_t *src, int stride, int pq)
{
    uint8_t *lower_half = src + 8 * stride;

    ff_vc1_h_loop_filter8_sse4(src,        stride, pq);
    ff_vc1_h_loop_filter8_sse4(lower_half, stride, pq);
}
66 |
|
|
|
67 |
|
|
/*
 * Define the "no subpel shift" (mc00) motion-compensation function
 * OP##vc1_mspel_mc00_##DEPTH##INSN: a plain copy (put) or average (avg)
 * of a DEPTH-wide, DEPTH-tall block, forwarding to the matching
 * ff_##OP##pixels##DEPTH##INSN helper (presumably from fpel.h — the
 * header is included above).  The rnd argument is part of the mspel
 * function-pointer signature but unused here.
 */
#define DECLARE_FUNCTION(OP, DEPTH, INSN) \
static void OP##vc1_mspel_mc00_##DEPTH##INSN(uint8_t *dst, \
        const uint8_t *src, ptrdiff_t stride, int rnd) \
{ \
    ff_ ## OP ## pixels ## DEPTH ## INSN(dst, src, stride, DEPTH); \
}
73 |
|
|
|
74 |
|
|
/* Instantiate the mc00 copy/average wrappers for each block size and
 * instruction set that has a corresponding ff_*pixels* implementation. */
DECLARE_FUNCTION(put_, 8, _mmx)
DECLARE_FUNCTION(put_, 16, _mmx)
DECLARE_FUNCTION(avg_, 8, _mmx)
DECLARE_FUNCTION(avg_, 16, _mmx)
DECLARE_FUNCTION(avg_, 8, _mmxext)
DECLARE_FUNCTION(avg_, 16, _mmxext)
DECLARE_FUNCTION(put_, 16, _sse2)
DECLARE_FUNCTION(avg_, 16, _sse2)
82 |
|
|
|
83 |
|
|
#endif /* HAVE_X86ASM */ |
84 |
|
|
|
85 |
|
|
/* 8x8 chroma motion compensation without rounding (external assembly).
 * x/y are the subpel fractions; h is the block height. */
void ff_put_vc1_chroma_mc8_nornd_mmx (uint8_t *dst, uint8_t *src,
                                      ptrdiff_t stride, int h, int x, int y);
void ff_avg_vc1_chroma_mc8_nornd_mmxext(uint8_t *dst, uint8_t *src,
                                        ptrdiff_t stride, int h, int x, int y);
void ff_avg_vc1_chroma_mc8_nornd_3dnow(uint8_t *dst, uint8_t *src,
                                       ptrdiff_t stride, int h, int x, int y);
void ff_put_vc1_chroma_mc8_nornd_ssse3(uint8_t *dst, uint8_t *src,
                                       ptrdiff_t stride, int h, int x, int y);
void ff_avg_vc1_chroma_mc8_nornd_ssse3(uint8_t *dst, uint8_t *src,
                                       ptrdiff_t stride, int h, int x, int y);
/* DC-only inverse transforms, adding the result to dest (external assembly). */
void ff_vc1_inv_trans_4x4_dc_mmxext(uint8_t *dest, ptrdiff_t linesize,
                                    int16_t *block);
void ff_vc1_inv_trans_4x8_dc_mmxext(uint8_t *dest, ptrdiff_t linesize,
                                    int16_t *block);
void ff_vc1_inv_trans_8x4_dc_mmxext(uint8_t *dest, ptrdiff_t linesize,
                                    int16_t *block);
void ff_vc1_inv_trans_8x8_dc_mmxext(uint8_t *dest, ptrdiff_t linesize,
                                    int16_t *block);
103 |
|
|
|
104 |
|
|
|
105 |
|
50 |
/**
 * Install x86-optimized VC-1 DSP functions into @p dsp, overriding the
 * C defaults set by the caller.  Checks are ordered from oldest to newest
 * instruction set, so later (faster) variants overwrite earlier ones when
 * the CPU supports them.
 */
av_cold void ff_vc1dsp_init_x86(VC1DSPContext *dsp)
{
    int cpu_flags = av_get_cpu_flags();

    /* The inline-asm MMX/MMXEXT paths additionally require six usable
     * general-purpose registers (HAVE_6REGS). */
    if (HAVE_6REGS && INLINE_MMX(cpu_flags))
        if (EXTERNAL_MMX(cpu_flags))
            ff_vc1dsp_init_mmx(dsp);

    if (HAVE_6REGS && INLINE_MMXEXT(cpu_flags))
        if (EXTERNAL_MMXEXT(cpu_flags))
            ff_vc1dsp_init_mmxext(dsp);

/* Install the complete set of loop filters for one instruction-set suffix:
 * the 4- and 8-pixel asm filters plus the 16-pixel C wrappers generated by
 * LOOP_FILTER() above. */
#define ASSIGN_LF(EXT) \
        dsp->vc1_v_loop_filter4  = ff_vc1_v_loop_filter4_ ## EXT; \
        dsp->vc1_h_loop_filter4  = ff_vc1_h_loop_filter4_ ## EXT; \
        dsp->vc1_v_loop_filter8  = ff_vc1_v_loop_filter8_ ## EXT; \
        dsp->vc1_h_loop_filter8  = ff_vc1_h_loop_filter8_ ## EXT; \
        dsp->vc1_v_loop_filter16 = vc1_v_loop_filter16_ ## EXT; \
        dsp->vc1_h_loop_filter16 = vc1_h_loop_filter16_ ## EXT

#if HAVE_X86ASM
    if (EXTERNAL_MMX(cpu_flags)) {
        dsp->put_no_rnd_vc1_chroma_pixels_tab[0] = ff_put_vc1_chroma_mc8_nornd_mmx;

        /* Table index [1] is the 8x8 size, [0] the 16x16 size; [0][0] is
         * the mc00 (no subpel shift) entry. */
        dsp->put_vc1_mspel_pixels_tab[1][0] = put_vc1_mspel_mc00_8_mmx;
        dsp->put_vc1_mspel_pixels_tab[0][0] = put_vc1_mspel_mc00_16_mmx;
        dsp->avg_vc1_mspel_pixels_tab[1][0] = avg_vc1_mspel_mc00_8_mmx;
        dsp->avg_vc1_mspel_pixels_tab[0][0] = avg_vc1_mspel_mc00_16_mmx;
    }
    if (EXTERNAL_AMD3DNOW(cpu_flags)) {
        dsp->avg_no_rnd_vc1_chroma_pixels_tab[0] = ff_avg_vc1_chroma_mc8_nornd_3dnow;
    }
    if (EXTERNAL_MMXEXT(cpu_flags)) {
        ASSIGN_LF(mmxext);
        /* Overrides the 3DNow! version when both flags are set. */
        dsp->avg_no_rnd_vc1_chroma_pixels_tab[0] = ff_avg_vc1_chroma_mc8_nornd_mmxext;

        dsp->avg_vc1_mspel_pixels_tab[1][0] = avg_vc1_mspel_mc00_8_mmxext;
        dsp->avg_vc1_mspel_pixels_tab[0][0] = avg_vc1_mspel_mc00_16_mmxext;

        dsp->vc1_inv_trans_8x8_dc = ff_vc1_inv_trans_8x8_dc_mmxext;
        dsp->vc1_inv_trans_4x8_dc = ff_vc1_inv_trans_4x8_dc_mmxext;
        dsp->vc1_inv_trans_8x4_dc = ff_vc1_inv_trans_8x4_dc_mmxext;
        dsp->vc1_inv_trans_4x4_dc = ff_vc1_inv_trans_4x4_dc_mmxext;
    }
    if (EXTERNAL_SSE2(cpu_flags)) {
        /* SSE2 asm provides no 4-pixel filters, so only the 8/16-pixel
         * entries are replaced here (not ASSIGN_LF). */
        dsp->vc1_v_loop_filter8  = ff_vc1_v_loop_filter8_sse2;
        dsp->vc1_h_loop_filter8  = ff_vc1_h_loop_filter8_sse2;
        dsp->vc1_v_loop_filter16 = vc1_v_loop_filter16_sse2;
        dsp->vc1_h_loop_filter16 = vc1_h_loop_filter16_sse2;

        dsp->put_vc1_mspel_pixels_tab[0][0] = put_vc1_mspel_mc00_16_sse2;
        dsp->avg_vc1_mspel_pixels_tab[0][0] = avg_vc1_mspel_mc00_16_sse2;
    }
    if (EXTERNAL_SSSE3(cpu_flags)) {
        ASSIGN_LF(ssse3);
        dsp->put_no_rnd_vc1_chroma_pixels_tab[0] = ff_put_vc1_chroma_mc8_nornd_ssse3;
        dsp->avg_no_rnd_vc1_chroma_pixels_tab[0] = ff_avg_vc1_chroma_mc8_nornd_ssse3;
    }
    if (EXTERNAL_SSE4(cpu_flags)) {
        /* SSE4 only accelerates the horizontal 8-pixel filter (and hence
         * the 16-pixel wrapper built on it). */
        dsp->vc1_h_loop_filter8  = ff_vc1_h_loop_filter8_sse4;
        dsp->vc1_h_loop_filter16 = vc1_h_loop_filter16_sse4;
    }
#endif /* HAVE_X86ASM */
}