Line | Branch | Exec | Source |
---|---|---|---|
1 | /* | ||
2 | * Copyright (c) 2013 Vittorio Giovara | ||
3 | * | ||
4 | * This file is part of FFmpeg. | ||
5 | * | ||
6 | * FFmpeg is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU Lesser General Public | ||
8 | * License as published by the Free Software Foundation; either | ||
9 | * version 2.1 of the License, or (at your option) any later version. | ||
10 | * | ||
11 | * FFmpeg is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | * Lesser General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU Lesser General Public | ||
17 | * License along with FFmpeg; if not, write to the Free Software | ||
18 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA | ||
19 | */ | ||
20 | |||
21 | /** | ||
22 | * @file | ||
23 | * Generate a frame packed video, by combining two views in a single surface. | ||
24 | */ | ||
25 | |||
26 | #include <string.h> | ||
27 | |||
28 | #include "libavutil/common.h" | ||
29 | #include "libavutil/imgutils.h" | ||
30 | #include "libavutil/opt.h" | ||
31 | #include "libavutil/pixdesc.h" | ||
32 | #include "libavutil/rational.h" | ||
33 | #include "libavutil/stereo3d.h" | ||
34 | |||
35 | #include "avfilter.h" | ||
36 | #include "filters.h" | ||
37 | #include "video.h" | ||
38 | |||
39 | #define LEFT 0 | ||
40 | #define RIGHT 1 | ||
41 | |||
42 | typedef struct FramepackContext { | ||
43 | const AVClass *class; | ||
44 | |||
45 | int depth; | ||
46 | const AVPixFmtDescriptor *pix_desc; ///< agreed pixel format | ||
47 | |||
48 | enum AVStereo3DType format; ///< frame pack type output | ||
49 | |||
50 | AVFrame *input_views[2]; ///< input frames | ||
51 | } FramepackContext; | ||
52 | |||
53 | static const enum AVPixelFormat formats_supported[] = { | ||
54 | AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAY9, | ||
55 | AV_PIX_FMT_GRAY10, AV_PIX_FMT_GRAY12, AV_PIX_FMT_GRAY14, | ||
56 | AV_PIX_FMT_GRAY16, | ||
57 | AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV411P, | ||
58 | AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, | ||
59 | AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV444P, | ||
60 | AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, | ||
61 | AV_PIX_FMT_YUVJ440P, AV_PIX_FMT_YUVJ444P, | ||
62 | AV_PIX_FMT_YUVJ411P, | ||
63 | AV_PIX_FMT_YUV420P9, AV_PIX_FMT_YUV422P9, AV_PIX_FMT_YUV444P9, | ||
64 | AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10, | ||
65 | AV_PIX_FMT_YUV440P10, | ||
66 | AV_PIX_FMT_YUV444P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV420P12, | ||
67 | AV_PIX_FMT_YUV440P12, | ||
68 | AV_PIX_FMT_YUV444P14, AV_PIX_FMT_YUV422P14, AV_PIX_FMT_YUV420P14, | ||
69 | AV_PIX_FMT_YUV420P16, AV_PIX_FMT_YUV422P16, AV_PIX_FMT_YUV444P16, | ||
70 | AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRP9, AV_PIX_FMT_GBRP10, | ||
71 | AV_PIX_FMT_GBRP12, AV_PIX_FMT_GBRP14, AV_PIX_FMT_GBRP16, | ||
72 | AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA444P, | ||
73 | AV_PIX_FMT_YUVA444P9, AV_PIX_FMT_YUVA444P10, AV_PIX_FMT_YUVA444P12, AV_PIX_FMT_YUVA444P16, | ||
74 | AV_PIX_FMT_YUVA422P9, AV_PIX_FMT_YUVA422P10, AV_PIX_FMT_YUVA422P12, AV_PIX_FMT_YUVA422P16, | ||
75 | AV_PIX_FMT_YUVA420P9, AV_PIX_FMT_YUVA420P10, AV_PIX_FMT_YUVA420P16, | ||
76 | AV_PIX_FMT_GBRAP, AV_PIX_FMT_GBRAP10, AV_PIX_FMT_GBRAP12, AV_PIX_FMT_GBRAP16, | ||
77 | AV_PIX_FMT_NONE | ||
78 | }; | ||
79 | |||
80 | 10 | static av_cold void framepack_uninit(AVFilterContext *ctx) | |
81 | { | ||
82 | 10 | FramepackContext *s = ctx->priv; | |
83 | |||
84 | // clean any leftover frame | ||
85 | 10 | av_frame_free(&s->input_views[LEFT]); | |
86 | 10 | av_frame_free(&s->input_views[RIGHT]); | |
87 | 10 | } | |
88 | |||
89 | 5 | static int config_output(AVFilterLink *outlink) | |
90 | { | ||
91 | 5 | AVFilterContext *ctx = outlink->src; | |
92 | 5 | FramepackContext *s = outlink->src->priv; | |
93 | 5 | FilterLink *leftl = ff_filter_link(ctx->inputs[LEFT]); | |
94 | 5 | FilterLink *rightl = ff_filter_link(ctx->inputs[RIGHT]); | |
95 | 5 | FilterLink *ol = ff_filter_link(outlink); | |
96 | |||
97 | 5 | int width = ctx->inputs[LEFT]->w; | |
98 | 5 | int height = ctx->inputs[LEFT]->h; | |
99 | 5 | AVRational time_base = ctx->inputs[LEFT]->time_base; | |
100 | 5 | AVRational frame_rate = leftl->frame_rate; | |
101 | |||
102 | // check size and fps match on the other input | ||
103 | 1/2 ✓ Branch 0 taken 5 times. ✗ Branch 1 not taken. | 5 | if (width != ctx->inputs[RIGHT]->w || |
104 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 5 times. | 5 | height != ctx->inputs[RIGHT]->h) { |
105 | ✗ | av_log(ctx, AV_LOG_ERROR, | |
106 | "Left and right sizes differ (%dx%d vs %dx%d).\n", | ||
107 | width, height, | ||
108 | ✗ | ctx->inputs[RIGHT]->w, ctx->inputs[RIGHT]->h); | |
109 | ✗ | return AVERROR_INVALIDDATA; | |
110 | 1/2 ✗ Branch 1 not taken. ✓ Branch 2 taken 5 times. | 5 | } else if (av_cmp_q(time_base, ctx->inputs[RIGHT]->time_base) != 0) { |
111 | ✗ | av_log(ctx, AV_LOG_ERROR, | |
112 | "Left and right time bases differ (%d/%d vs %d/%d).\n", | ||
113 | time_base.num, time_base.den, | ||
114 | ✗ | ctx->inputs[RIGHT]->time_base.num, | |
115 | ✗ | ctx->inputs[RIGHT]->time_base.den); | |
116 | ✗ | return AVERROR_INVALIDDATA; | |
117 | 1/2 ✗ Branch 1 not taken. ✓ Branch 2 taken 5 times. | 5 | } else if (av_cmp_q(frame_rate, rightl->frame_rate) != 0) { |
118 | ✗ | av_log(ctx, AV_LOG_ERROR, | |
119 | "Left and right framerates differ (%d/%d vs %d/%d).\n", | ||
120 | frame_rate.num, frame_rate.den, | ||
121 | rightl->frame_rate.num, | ||
122 | rightl->frame_rate.den); | ||
123 | ✗ | return AVERROR_INVALIDDATA; | |
124 | } | ||
125 | |||
126 | 5 | s->pix_desc = av_pix_fmt_desc_get(outlink->format); | |
127 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 5 times. | 5 | if (!s->pix_desc) |
128 | ✗ | return AVERROR_BUG; | |
129 | 5 | s->depth = s->pix_desc->comp[0].depth; | |
130 | |||
131 | // modify output properties as needed | ||
132 | 3/4 ✓ Branch 0 taken 1 times. ✓ Branch 1 taken 2 times. ✓ Branch 2 taken 2 times. ✗ Branch 3 not taken. | 5 | switch (s->format) { |
133 | 1 | case AV_STEREO3D_FRAMESEQUENCE: | |
134 | 1 | time_base.den *= 2; | |
135 | 1 | frame_rate.num *= 2; | |
136 | 1 | break; | |
137 | 2 | case AV_STEREO3D_COLUMNS: | |
138 | case AV_STEREO3D_SIDEBYSIDE: | ||
139 | 2 | width *= 2; | |
140 | 2 | break; | |
141 | 2 | case AV_STEREO3D_LINES: | |
142 | case AV_STEREO3D_TOPBOTTOM: | ||
143 | 2 | height *= 2; | |
144 | 2 | break; | |
145 | ✗ | default: | |
146 | ✗ | av_log(ctx, AV_LOG_ERROR, "Unknown packing mode.\n"); | |
147 | ✗ | return AVERROR_INVALIDDATA; | |
148 | } | ||
149 | |||
150 | 5 | outlink->w = width; | |
151 | 5 | outlink->h = height; | |
152 | 5 | outlink->time_base = time_base; | |
153 | 5 | ol->frame_rate = frame_rate; | |
154 | |||
155 | 5 | return 0; | |
156 | } | ||
157 | |||
158 | 30 | static void horizontal_frame_pack(AVFilterLink *outlink, | |
159 | AVFrame *out, | ||
160 | int interleaved) | ||
161 | { | ||
162 | 30 | AVFilterContext *ctx = outlink->src; | |
163 | 30 | FramepackContext *s = ctx->priv; | |
164 | int i, plane; | ||
165 | |||
166 | 3/4 ✓ Branch 0 taken 15 times. ✓ Branch 1 taken 15 times. ✓ Branch 2 taken 15 times. ✗ Branch 3 not taken. | 45 | if (interleaved && s->depth <= 8) { |
167 | 15 | const uint8_t *leftp = s->input_views[LEFT]->data[0]; | |
168 | 15 | const uint8_t *rightp = s->input_views[RIGHT]->data[0]; | |
169 | 15 | uint8_t *dstp = out->data[0]; | |
170 | 15 | int length = out->width / 2; | |
171 | 15 | int lines = out->height; | |
172 | |||
173 | 2/2 ✓ Branch 0 taken 45 times. ✓ Branch 1 taken 15 times. | 60 | for (plane = 0; plane < s->pix_desc->nb_components; plane++) { |
174 | 4/4 ✓ Branch 0 taken 30 times. ✓ Branch 1 taken 15 times. ✓ Branch 2 taken 15 times. ✓ Branch 3 taken 15 times. | 45 | if (plane == 1 || plane == 2) { |
175 | 30 | length = AV_CEIL_RSHIFT(out->width / 2, s->pix_desc->log2_chroma_w); | |
176 | 30 | lines = AV_CEIL_RSHIFT(out->height, s->pix_desc->log2_chroma_h); | |
177 | } | ||
178 | 2/2 ✓ Branch 0 taken 8640 times. ✓ Branch 1 taken 45 times. | 8685 | for (i = 0; i < lines; i++) { |
179 | int j; | ||
180 | 8640 | leftp = s->input_views[LEFT]->data[plane] + | |
181 | 8640 | s->input_views[LEFT]->linesize[plane] * i; | |
182 | 8640 | rightp = s->input_views[RIGHT]->data[plane] + | |
183 | 8640 | s->input_views[RIGHT]->linesize[plane] * i; | |
184 | 8640 | dstp = out->data[plane] + out->linesize[plane] * i; | |
185 | 2/2 ✓ Branch 0 taken 2280960 times. ✓ Branch 1 taken 8640 times. | 2289600 | for (j = 0; j < length; j++) { |
186 | // interpolate chroma as necessary | ||
187 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 2280960 times. | 2280960 | if ((s->pix_desc->log2_chroma_w || |
188 | 2/4 ✗ Branch 0 not taken. ✗ Branch 1 not taken. ✓ Branch 2 taken 1900800 times. ✓ Branch 3 taken 380160 times. | 2280960 | s->pix_desc->log2_chroma_h) && |
189 | 2/2 ✓ Branch 0 taken 380160 times. ✓ Branch 1 taken 1520640 times. | 1900800 | (plane == 1 || plane == 2)) { |
190 | 760320 | *dstp++ = (*leftp + *rightp) / 2; | |
191 | 760320 | *dstp++ = (*leftp + *rightp) / 2; | |
192 | } else { | ||
193 | 1520640 | *dstp++ = *leftp; | |
194 | 1520640 | *dstp++ = *rightp; | |
195 | } | ||
196 | 2280960 | leftp += 1; | |
197 | 2280960 | rightp += 1; | |
198 | } | ||
199 | } | ||
200 | } | ||
201 | 1/4 ✗ Branch 0 not taken. ✓ Branch 1 taken 15 times. ✗ Branch 2 not taken. ✗ Branch 3 not taken. | 15 | } else if (interleaved && s->depth > 8) { |
202 | ✗ | const uint16_t *leftp = (const uint16_t *)s->input_views[LEFT]->data[0]; | |
203 | ✗ | const uint16_t *rightp = (const uint16_t *)s->input_views[RIGHT]->data[0]; | |
204 | ✗ | uint16_t *dstp = (uint16_t *)out->data[0]; | |
205 | ✗ | int length = out->width / 2; | |
206 | ✗ | int lines = out->height; | |
207 | |||
208 | ✗ | for (plane = 0; plane < s->pix_desc->nb_components; plane++) { | |
209 | ✗ | if (plane == 1 || plane == 2) { | |
210 | ✗ | length = AV_CEIL_RSHIFT(out->width / 2, s->pix_desc->log2_chroma_w); | |
211 | ✗ | lines = AV_CEIL_RSHIFT(out->height, s->pix_desc->log2_chroma_h); | |
212 | } | ||
213 | ✗ | for (i = 0; i < lines; i++) { | |
214 | int j; | ||
215 | ✗ | leftp = (const uint16_t *)s->input_views[LEFT]->data[plane] + | |
216 | ✗ | s->input_views[LEFT]->linesize[plane] * i / 2; | |
217 | ✗ | rightp = (const uint16_t *)s->input_views[RIGHT]->data[plane] + | |
218 | ✗ | s->input_views[RIGHT]->linesize[plane] * i / 2; | |
219 | ✗ | dstp = (uint16_t *)out->data[plane] + out->linesize[plane] * i / 2; | |
220 | ✗ | for (j = 0; j < length; j++) { | |
221 | // interpolate chroma as necessary | ||
222 | ✗ | if ((s->pix_desc->log2_chroma_w || | |
223 | ✗ | s->pix_desc->log2_chroma_h) && | |
224 | ✗ | (plane == 1 || plane == 2)) { | |
225 | ✗ | *dstp++ = (*leftp + *rightp) / 2; | |
226 | ✗ | *dstp++ = (*leftp + *rightp) / 2; | |
227 | } else { | ||
228 | ✗ | *dstp++ = *leftp; | |
229 | ✗ | *dstp++ = *rightp; | |
230 | } | ||
231 | ✗ | leftp += 1; | |
232 | ✗ | rightp += 1; | |
233 | } | ||
234 | } | ||
235 | } | ||
236 | } else { | ||
237 | 2/2 ✓ Branch 0 taken 30 times. ✓ Branch 1 taken 15 times. | 45 | for (i = 0; i < 2; i++) { |
238 | 30 | const AVFrame *const input_view = s->input_views[i]; | |
239 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 30 times. | 30 | const int psize = 1 + (s->depth > 8); |
240 | uint8_t *dst[4]; | ||
241 | 30 | int sub_w = psize * input_view->width >> s->pix_desc->log2_chroma_w; | |
242 | |||
243 | 30 | dst[0] = out->data[0] + i * input_view->width * psize; | |
244 | 30 | dst[1] = out->data[1] + i * sub_w; | |
245 | 30 | dst[2] = out->data[2] + i * sub_w; | |
246 | |||
247 | 30 | av_image_copy2(dst, out->linesize, | |
248 | 30 | input_view->data, input_view->linesize, | |
249 | 30 | input_view->format, | |
250 | 30 | input_view->width, | |
251 | 30 | input_view->height); | |
252 | } | ||
253 | } | ||
254 | 30 | } | |
255 | |||
256 | 30 | static void vertical_frame_pack(AVFilterLink *outlink, | |
257 | AVFrame *out, | ||
258 | int interleaved) | ||
259 | { | ||
260 | 30 | AVFilterContext *ctx = outlink->src; | |
261 | 30 | FramepackContext *s = ctx->priv; | |
262 | int i; | ||
263 | |||
264 | 2/2 ✓ Branch 0 taken 60 times. ✓ Branch 1 taken 30 times. | 90 | for (i = 0; i < 2; i++) { |
265 | 60 | const AVFrame *const input_view = s->input_views[i]; | |
266 | uint8_t *dst[4]; | ||
267 | int linesizes[4]; | ||
268 | 60 | int sub_h = input_view->height >> s->pix_desc->log2_chroma_h; | |
269 | |||
270 | 60 | dst[0] = out->data[0] + i * out->linesize[0] * | |
271 | 60 | (interleaved + input_view->height * (1 - interleaved)); | |
272 | 60 | dst[1] = out->data[1] + i * out->linesize[1] * | |
273 | 60 | (interleaved + sub_h * (1 - interleaved)); | |
274 | 60 | dst[2] = out->data[2] + i * out->linesize[2] * | |
275 | 60 | (interleaved + sub_h * (1 - interleaved)); | |
276 | |||
277 | 60 | linesizes[0] = out->linesize[0] + | |
278 | 60 | interleaved * out->linesize[0]; | |
279 | 60 | linesizes[1] = out->linesize[1] + | |
280 | 60 | interleaved * out->linesize[1]; | |
281 | 60 | linesizes[2] = out->linesize[2] + | |
282 | 60 | interleaved * out->linesize[2]; | |
283 | |||
284 | 60 | av_image_copy2(dst, linesizes, | |
285 | 60 | input_view->data, input_view->linesize, | |
286 | 60 | input_view->format, | |
287 | 60 | input_view->width, | |
288 | 60 | input_view->height); | |
289 | } | ||
290 | 30 | } | |
291 | |||
292 | 60 | static av_always_inline void spatial_frame_pack(AVFilterLink *outlink, | |
293 | AVFrame *dst) | ||
294 | { | ||
295 | 60 | AVFilterContext *ctx = outlink->src; | |
296 | 60 | FramepackContext *s = ctx->priv; | |
297 | 4/5 ✓ Branch 0 taken 15 times. ✓ Branch 1 taken 15 times. ✓ Branch 2 taken 15 times. ✓ Branch 3 taken 15 times. ✗ Branch 4 not taken. | 60 | switch (s->format) { |
298 | 15 | case AV_STEREO3D_SIDEBYSIDE: | |
299 | 15 | horizontal_frame_pack(outlink, dst, 0); | |
300 | 15 | break; | |
301 | 15 | case AV_STEREO3D_COLUMNS: | |
302 | 15 | horizontal_frame_pack(outlink, dst, 1); | |
303 | 15 | break; | |
304 | 15 | case AV_STEREO3D_TOPBOTTOM: | |
305 | 15 | vertical_frame_pack(outlink, dst, 0); | |
306 | 15 | break; | |
307 | 15 | case AV_STEREO3D_LINES: | |
308 | 15 | vertical_frame_pack(outlink, dst, 1); | |
309 | 15 | break; | |
310 | } | ||
311 | 60 | } | |
312 | |||
313 | 68 | static int try_push_frame(AVFilterContext *ctx) | |
314 | { | ||
315 | 68 | FramepackContext *s = ctx->priv; | |
316 | 68 | AVFilterLink *outlink = ctx->outputs[0]; | |
317 | 68 | FilterLink *l = ff_filter_link(outlink); | |
318 | AVStereo3D *stereo; | ||
319 | int ret, i; | ||
320 | |||
321 | 2/4 ✓ Branch 0 taken 68 times. ✗ Branch 1 not taken. ✗ Branch 2 not taken. ✓ Branch 3 taken 68 times. | 68 | if (!(s->input_views[0] && s->input_views[1])) |
322 | ✗ | return 0; | |
323 | 2/2 ✓ Branch 0 taken 8 times. ✓ Branch 1 taken 60 times. | 68 | if (s->format == AV_STEREO3D_FRAMESEQUENCE) { |
324 | 8 | int64_t pts = s->input_views[0]->pts; | |
325 | |||
326 | 2/2 ✓ Branch 0 taken 16 times. ✓ Branch 1 taken 8 times. | 24 | for (i = 0; i < 2; i++) { |
327 | // set correct timestamps | ||
328 | 1/2 ✓ Branch 0 taken 16 times. ✗ Branch 1 not taken. | 16 | if (pts != AV_NOPTS_VALUE) { |
329 | 2/2 ✓ Branch 0 taken 8 times. ✓ Branch 1 taken 8 times. | 16 | s->input_views[i]->pts = i == 0 ? pts * 2 : pts * 2 + av_rescale_q(1, av_inv_q(l->frame_rate), outlink->time_base); |
330 | 16 | s->input_views[i]->duration = av_rescale_q(1, av_inv_q(l->frame_rate), outlink->time_base); | |
331 | } | ||
332 | |||
333 | // set stereo3d side data | ||
334 | 16 | stereo = av_stereo3d_create_side_data(s->input_views[i]); | |
335 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 16 times. | 16 | if (!stereo) |
336 | ✗ | return AVERROR(ENOMEM); | |
337 | 16 | stereo->type = s->format; | |
338 | 16 | stereo->view = i == LEFT ? AV_STEREO3D_VIEW_LEFT | |
339 | 2/2 ✓ Branch 0 taken 8 times. ✓ Branch 1 taken 8 times. | 16 | : AV_STEREO3D_VIEW_RIGHT; |
340 | |||
341 | // filter the frame and immediately relinquish its pointer | ||
342 | 16 | ret = ff_filter_frame(outlink, s->input_views[i]); | |
343 | 16 | s->input_views[i] = NULL; | |
344 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 16 times. | 16 | if (ret < 0) |
345 | ✗ | return ret; | |
346 | } | ||
347 | 8 | return ret; | |
348 | } else { | ||
349 | 60 | AVFrame *dst = ff_get_video_buffer(outlink, outlink->w, outlink->h); | |
350 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 60 times. | 60 | if (!dst) |
351 | ✗ | return AVERROR(ENOMEM); | |
352 | |||
353 | 60 | spatial_frame_pack(outlink, dst); | |
354 | |||
355 | // get any property from the original frame | ||
356 | 60 | ret = av_frame_copy_props(dst, s->input_views[LEFT]); | |
357 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 60 times. | 60 | if (ret < 0) { |
358 | ✗ | av_frame_free(&dst); | |
359 | ✗ | return ret; | |
360 | } | ||
361 | |||
362 | 2/2 ✓ Branch 0 taken 120 times. ✓ Branch 1 taken 60 times. | 180 | for (i = 0; i < 2; i++) |
363 | 120 | av_frame_free(&s->input_views[i]); | |
364 | |||
365 | // set stereo3d side data | ||
366 | 60 | stereo = av_stereo3d_create_side_data(dst); | |
367 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 60 times. | 60 | if (!stereo) { |
368 | ✗ | av_frame_free(&dst); | |
369 | ✗ | return AVERROR(ENOMEM); | |
370 | } | ||
371 | 60 | stereo->type = s->format; | |
372 | |||
373 | 60 | return ff_filter_frame(outlink, dst); | |
374 | } | ||
375 | } | ||
376 | |||
377 | 188 | static int activate(AVFilterContext *ctx) | |
378 | { | ||
379 | 188 | AVFilterLink *outlink = ctx->outputs[0]; | |
380 | 188 | FramepackContext *s = ctx->priv; | |
381 | int ret; | ||
382 | |||
383 | 1/4 ✗ Branch 1 not taken. ✓ Branch 2 taken 188 times. ✗ Branch 4 not taken. ✗ Branch 5 not taken. | 188 | FF_FILTER_FORWARD_STATUS_BACK_ALL(outlink, ctx); |
384 | |||
385 | 2/2 ✓ Branch 0 taken 123 times. ✓ Branch 1 taken 65 times. | 188 | if (!s->input_views[0]) { |
386 | 123 | ret = ff_inlink_consume_frame(ctx->inputs[0], &s->input_views[0]); | |
387 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 123 times. | 123 | if (ret < 0) |
388 | ✗ | return ret; | |
389 | } | ||
390 | |||
391 | 2/2 ✓ Branch 0 taken 138 times. ✓ Branch 1 taken 50 times. | 188 | if (!s->input_views[1]) { |
392 | 138 | ret = ff_inlink_consume_frame(ctx->inputs[1], &s->input_views[1]); | |
393 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 138 times. | 138 | if (ret < 0) |
394 | ✗ | return ret; | |
395 | } | ||
396 | |||
397 | 4/4 ✓ Branch 0 taken 133 times. ✓ Branch 1 taken 55 times. ✓ Branch 2 taken 68 times. ✓ Branch 3 taken 65 times. | 188 | if (s->input_views[0] && s->input_views[1]) |
398 | 68 | return try_push_frame(ctx); | |
399 | |||
400 | 1/2 ✗ Branch 1 not taken. ✓ Branch 2 taken 120 times. | 120 | FF_FILTER_FORWARD_STATUS(ctx->inputs[0], outlink); |
401 | 1/2 ✗ Branch 1 not taken. ✓ Branch 2 taken 120 times. | 120 | FF_FILTER_FORWARD_STATUS(ctx->inputs[1], outlink); |
402 | |||
403 | 1/2 ✓ Branch 1 taken 120 times. ✗ Branch 2 not taken. | 120 | if (ff_outlink_frame_wanted(ctx->outputs[0]) && |
404 | 2/2 ✓ Branch 0 taken 55 times. ✓ Branch 1 taken 65 times. | 120 | !s->input_views[0]) { |
405 | 55 | ff_inlink_request_frame(ctx->inputs[0]); | |
406 | 55 | return 0; | |
407 | } | ||
408 | |||
409 | 1/2 ✓ Branch 1 taken 65 times. ✗ Branch 2 not taken. | 65 | if (ff_outlink_frame_wanted(ctx->outputs[0]) && |
410 | 1/2 ✓ Branch 0 taken 65 times. ✗ Branch 1 not taken. | 65 | !s->input_views[1]) { |
411 | 65 | ff_inlink_request_frame(ctx->inputs[1]); | |
412 | 65 | return 0; | |
413 | } | ||
414 | |||
415 | ✗ | return FFERROR_NOT_READY; | |
416 | } | ||
417 | |||
418 | #define OFFSET(x) offsetof(FramepackContext, x) | ||
419 | #define VF AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_FILTERING_PARAM | ||
420 | static const AVOption framepack_options[] = { | ||
421 | { "format", "Frame pack output format", OFFSET(format), AV_OPT_TYPE_INT, | ||
422 | { .i64 = AV_STEREO3D_SIDEBYSIDE }, 0, INT_MAX, .flags = VF, .unit = "format" }, | ||
423 | { "sbs", "Views are packed next to each other", 0, AV_OPT_TYPE_CONST, | ||
424 | { .i64 = AV_STEREO3D_SIDEBYSIDE }, INT_MIN, INT_MAX, .flags = VF, .unit = "format" }, | ||
425 | { "tab", "Views are packed on top of each other", 0, AV_OPT_TYPE_CONST, | ||
426 | { .i64 = AV_STEREO3D_TOPBOTTOM }, INT_MIN, INT_MAX, .flags = VF, .unit = "format" }, | ||
427 | { "frameseq", "Views are one after the other", 0, AV_OPT_TYPE_CONST, | ||
428 | { .i64 = AV_STEREO3D_FRAMESEQUENCE }, INT_MIN, INT_MAX, .flags = VF, .unit = "format" }, | ||
429 | { "lines", "Views are interleaved by lines", 0, AV_OPT_TYPE_CONST, | ||
430 | { .i64 = AV_STEREO3D_LINES }, INT_MIN, INT_MAX, .flags = VF, .unit = "format" }, | ||
431 | { "columns", "Views are interleaved by columns", 0, AV_OPT_TYPE_CONST, | ||
432 | { .i64 = AV_STEREO3D_COLUMNS }, INT_MIN, INT_MAX, .flags = VF, .unit = "format" }, | ||
433 | { NULL }, | ||
434 | }; | ||
435 | |||
436 | AVFILTER_DEFINE_CLASS(framepack); | ||
437 | |||
438 | static const AVFilterPad framepack_inputs[] = { | ||
439 | { | ||
440 | .name = "left", | ||
441 | .type = AVMEDIA_TYPE_VIDEO, | ||
442 | }, | ||
443 | { | ||
444 | .name = "right", | ||
445 | .type = AVMEDIA_TYPE_VIDEO, | ||
446 | }, | ||
447 | }; | ||
448 | |||
449 | static const AVFilterPad framepack_outputs[] = { | ||
450 | { | ||
451 | .name = "packed", | ||
452 | .type = AVMEDIA_TYPE_VIDEO, | ||
453 | .config_props = config_output, | ||
454 | }, | ||
455 | }; | ||
456 | |||
457 | const AVFilter ff_vf_framepack = { | ||
458 | .name = "framepack", | ||
459 | .description = NULL_IF_CONFIG_SMALL("Generate a frame packed stereoscopic video."), | ||
460 | .priv_size = sizeof(FramepackContext), | ||
461 | .priv_class = &framepack_class, | ||
462 | FILTER_INPUTS(framepack_inputs), | ||
463 | FILTER_OUTPUTS(framepack_outputs), | ||
464 | FILTER_PIXFMTS_ARRAY(formats_supported), | ||
465 | .activate = activate, | ||
466 | .uninit = framepack_uninit, | ||
467 | }; | ||
468 |
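
The heavily executed column-interleaved path above (`horizontal_frame_pack()` with `interleaved` set) writes even output columns from the left view and odd columns from the right view, and averages the two views on subsampled chroma planes. The sketch below is not part of the filter: it only mirrors the 8-bit luma behaviour on small hypothetical buffers, so the interleaving arithmetic can be checked in isolation.

```c
/* Minimal standalone sketch of the 8-bit column-interleave step.
 * Buffer names, sizes, and the helper name are illustrative assumptions;
 * the real filter also averages left/right on subsampled chroma planes. */
#include <stdint.h>
#include <stdio.h>

static void pack_columns_gray8(const uint8_t *left, const uint8_t *right,
                               uint8_t *dst, int width, int height,
                               int src_linesize, int dst_linesize)
{
    for (int y = 0; y < height; y++) {
        const uint8_t *l = left  + y * src_linesize;
        const uint8_t *r = right + y * src_linesize;
        uint8_t       *d = dst   + y * dst_linesize;
        for (int x = 0; x < width; x++) {
            *d++ = *l++;   /* even output column: left view  */
            *d++ = *r++;   /* odd output column:  right view */
        }
    }
}

int main(void)
{
    uint8_t left[2][4]  = { { 10, 11, 12, 13 }, { 14, 15, 16, 17 } };
    uint8_t right[2][4] = { { 90, 91, 92, 93 }, { 94, 95, 96, 97 } };
    uint8_t out[2][8];

    pack_columns_gray8(&left[0][0], &right[0][0], &out[0][0], 4, 2, 4, 8);

    for (int y = 0; y < 2; y++) {
        for (int x = 0; x < 8; x++)
            printf("%3d ", out[y][x]);
        printf("\n");
    }
    return 0;
}
```

Compiled on its own, it prints two double-width rows whose columns alternate between the left and right sample values, which is the same layout the filter produces for `format=columns`.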
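
For `AV_STEREO3D_FRAMESEQUENCE`, `config_output()` doubles the output time base denominator and frame rate numerator, and `try_push_frame()` then assigns `pts * 2` to the left view and `pts * 2` plus one output frame duration to the right view. The standalone sketch below reproduces that timestamp arithmetic with plain integers; the `Rational` stand-in and the 25 fps example values are assumptions for illustration, and the plain division ignores `av_rescale_q()`'s rounding behaviour.

```c
/* Illustrative sketch of the frame-sequence timestamp arithmetic. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

typedef struct { int num, den; } Rational;   /* stand-in for AVRational */

/* One output frame (1 / frame_rate) expressed in time_base units: the same
 * quantity the filter derives via av_rescale_q(1, av_inv_q(frame_rate), tb),
 * here computed with truncating integer division. */
static int64_t one_frame_in_tb(Rational frame_rate, Rational time_base)
{
    return ((int64_t)frame_rate.den * time_base.den) /
           ((int64_t)frame_rate.num * time_base.num);
}

int main(void)
{
    /* Assumed example: 25 fps inputs with a 1/25 time base. After
     * config_output() the frame-sequence output uses 1/50 and 50/1. */
    Rational out_tb = { 1, 50 };
    Rational out_fr = { 50, 1 };
    int64_t  in_pts = 7;          /* pts shared by one left/right pair */

    int64_t step      = one_frame_in_tb(out_fr, out_tb);
    int64_t left_pts  = in_pts * 2;
    int64_t right_pts = in_pts * 2 + step;

    printf("left pts=%" PRId64 " right pts=%" PRId64 " duration=%" PRId64 "\n",
           left_pts, right_pts, step);
    return 0;
}
```

With these values the left and right views land on consecutive output ticks (14 and 15) with a duration of one tick each, which matches the doubled frame rate the branch counts above exercise eight times.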