FFmpeg coverage

Directory: ../../../ffmpeg/
File:      src/libavfilter/vf_framepack.c
Date:      2022-12-09 07:38:14

              Exec   Total   Coverage
Lines:         175     226     77.4%
Functions:       7       7    100.0%
Branches:       77     127     60.6%

Line Branch Exec Source
1 /*
2 * Copyright (c) 2013 Vittorio Giovara
3 *
4 * This file is part of FFmpeg.
5 *
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
10 *
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19 */
20
21 /**
22 * @file
23 * Generate a frame packed video, by combining two views in a single surface.
24 */
25
26 #include <string.h>
27
28 #include "libavutil/common.h"
29 #include "libavutil/imgutils.h"
30 #include "libavutil/opt.h"
31 #include "libavutil/pixdesc.h"
32 #include "libavutil/rational.h"
33 #include "libavutil/stereo3d.h"
34
35 #include "avfilter.h"
36 #include "filters.h"
37 #include "formats.h"
38 #include "internal.h"
39 #include "video.h"
40
41 #define LEFT 0
42 #define RIGHT 1
43
44 typedef struct FramepackContext {
45 const AVClass *class;
46
47 int depth;
48 const AVPixFmtDescriptor *pix_desc; ///< agreed pixel format
49
50 enum AVStereo3DType format; ///< frame pack type output
51
52 AVFrame *input_views[2]; ///< input frames
53 } FramepackContext;
54
55 static const enum AVPixelFormat formats_supported[] = {
56 AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAY9,
57 AV_PIX_FMT_GRAY10, AV_PIX_FMT_GRAY12, AV_PIX_FMT_GRAY14,
58 AV_PIX_FMT_GRAY16,
59 AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV411P,
60 AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P,
61 AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV444P,
62 AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P,
63 AV_PIX_FMT_YUVJ440P, AV_PIX_FMT_YUVJ444P,
64 AV_PIX_FMT_YUVJ411P,
65 AV_PIX_FMT_YUV420P9, AV_PIX_FMT_YUV422P9, AV_PIX_FMT_YUV444P9,
66 AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10,
67 AV_PIX_FMT_YUV440P10,
68 AV_PIX_FMT_YUV444P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV420P12,
69 AV_PIX_FMT_YUV440P12,
70 AV_PIX_FMT_YUV444P14, AV_PIX_FMT_YUV422P14, AV_PIX_FMT_YUV420P14,
71 AV_PIX_FMT_YUV420P16, AV_PIX_FMT_YUV422P16, AV_PIX_FMT_YUV444P16,
72 AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRP9, AV_PIX_FMT_GBRP10,
73 AV_PIX_FMT_GBRP12, AV_PIX_FMT_GBRP14, AV_PIX_FMT_GBRP16,
74 AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA444P,
75 AV_PIX_FMT_YUVA444P9, AV_PIX_FMT_YUVA444P10, AV_PIX_FMT_YUVA444P12, AV_PIX_FMT_YUVA444P16,
76 AV_PIX_FMT_YUVA422P9, AV_PIX_FMT_YUVA422P10, AV_PIX_FMT_YUVA422P12, AV_PIX_FMT_YUVA422P16,
77 AV_PIX_FMT_YUVA420P9, AV_PIX_FMT_YUVA420P10, AV_PIX_FMT_YUVA420P16,
78 AV_PIX_FMT_GBRAP, AV_PIX_FMT_GBRAP10, AV_PIX_FMT_GBRAP12, AV_PIX_FMT_GBRAP16,
79 AV_PIX_FMT_NONE
80 };
81
82 10 static av_cold void framepack_uninit(AVFilterContext *ctx)
83 {
84 10 FramepackContext *s = ctx->priv;
85
86 // clean any leftover frame
87 10 av_frame_free(&s->input_views[LEFT]);
88 10 av_frame_free(&s->input_views[RIGHT]);
89 10 }
90
91 5 static int config_output(AVFilterLink *outlink)
92 {
93 5 AVFilterContext *ctx = outlink->src;
94 5 FramepackContext *s = outlink->src->priv;
95
96 5 int width = ctx->inputs[LEFT]->w;
97 5 int height = ctx->inputs[LEFT]->h;
98 5 AVRational time_base = ctx->inputs[LEFT]->time_base;
99 5 AVRational frame_rate = ctx->inputs[LEFT]->frame_rate;
100
101 // check size and fps match on the other input
102
1/2
✓ Branch 0 taken 5 times.
✗ Branch 1 not taken.
5 if (width != ctx->inputs[RIGHT]->w ||
103
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 5 times.
5 height != ctx->inputs[RIGHT]->h) {
104 av_log(ctx, AV_LOG_ERROR,
105 "Left and right sizes differ (%dx%d vs %dx%d).\n",
106 width, height,
107 ctx->inputs[RIGHT]->w, ctx->inputs[RIGHT]->h);
108 return AVERROR_INVALIDDATA;
109
1/2
✗ Branch 1 not taken.
✓ Branch 2 taken 5 times.
5 } else if (av_cmp_q(time_base, ctx->inputs[RIGHT]->time_base) != 0) {
110 av_log(ctx, AV_LOG_ERROR,
111 "Left and right time bases differ (%d/%d vs %d/%d).\n",
112 time_base.num, time_base.den,
113 ctx->inputs[RIGHT]->time_base.num,
114 ctx->inputs[RIGHT]->time_base.den);
115 return AVERROR_INVALIDDATA;
116
1/2
✗ Branch 1 not taken.
✓ Branch 2 taken 5 times.
5 } else if (av_cmp_q(frame_rate, ctx->inputs[RIGHT]->frame_rate) != 0) {
117 av_log(ctx, AV_LOG_ERROR,
118 "Left and right framerates differ (%d/%d vs %d/%d).\n",
119 frame_rate.num, frame_rate.den,
120 ctx->inputs[RIGHT]->frame_rate.num,
121 ctx->inputs[RIGHT]->frame_rate.den);
122 return AVERROR_INVALIDDATA;
123 }
124
125 5 s->pix_desc = av_pix_fmt_desc_get(outlink->format);
126
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 5 times.
5 if (!s->pix_desc)
127 return AVERROR_BUG;
128 5 s->depth = s->pix_desc->comp[0].depth;
129
130 // modify output properties as needed
131
3/4
✓ Branch 0 taken 1 times.
✓ Branch 1 taken 2 times.
✓ Branch 2 taken 2 times.
✗ Branch 3 not taken.
5 switch (s->format) {
132 1 case AV_STEREO3D_FRAMESEQUENCE:
133 1 time_base.den *= 2;
134 1 frame_rate.num *= 2;
135 1 break;
136 2 case AV_STEREO3D_COLUMNS:
137 case AV_STEREO3D_SIDEBYSIDE:
138 2 width *= 2;
139 2 break;
140 2 case AV_STEREO3D_LINES:
141 case AV_STEREO3D_TOPBOTTOM:
142 2 height *= 2;
143 2 break;
144 default:
145 av_log(ctx, AV_LOG_ERROR, "Unknown packing mode.");
146 return AVERROR_INVALIDDATA;
147 }
148
149 5 outlink->w = width;
150 5 outlink->h = height;
151 5 outlink->time_base = time_base;
152 5 outlink->frame_rate = frame_rate;
153
154 5 return 0;
155 }
156
157 30 static void horizontal_frame_pack(AVFilterLink *outlink,
158 AVFrame *out,
159 int interleaved)
160 {
161 30 AVFilterContext *ctx = outlink->src;
162 30 FramepackContext *s = ctx->priv;
163 int i, plane;
164
165
3/4
✓ Branch 0 taken 15 times.
✓ Branch 1 taken 15 times.
✓ Branch 2 taken 15 times.
✗ Branch 3 not taken.
45 if (interleaved && s->depth <= 8) {
166 15 const uint8_t *leftp = s->input_views[LEFT]->data[0];
167 15 const uint8_t *rightp = s->input_views[RIGHT]->data[0];
168 15 uint8_t *dstp = out->data[0];
169 15 int length = out->width / 2;
170 15 int lines = out->height;
171
172
2/2
✓ Branch 0 taken 45 times.
✓ Branch 1 taken 15 times.
60 for (plane = 0; plane < s->pix_desc->nb_components; plane++) {
173
4/4
✓ Branch 0 taken 30 times.
✓ Branch 1 taken 15 times.
✓ Branch 2 taken 15 times.
✓ Branch 3 taken 15 times.
45 if (plane == 1 || plane == 2) {
174 30 length = AV_CEIL_RSHIFT(out->width / 2, s->pix_desc->log2_chroma_w);
175 30 lines = AV_CEIL_RSHIFT(out->height, s->pix_desc->log2_chroma_h);
176 }
177
2/2
✓ Branch 0 taken 8640 times.
✓ Branch 1 taken 45 times.
8685 for (i = 0; i < lines; i++) {
178 int j;
179 8640 leftp = s->input_views[LEFT]->data[plane] +
180 8640 s->input_views[LEFT]->linesize[plane] * i;
181 8640 rightp = s->input_views[RIGHT]->data[plane] +
182 8640 s->input_views[RIGHT]->linesize[plane] * i;
183 8640 dstp = out->data[plane] + out->linesize[plane] * i;
184
2/2
✓ Branch 0 taken 2280960 times.
✓ Branch 1 taken 8640 times.
2289600 for (j = 0; j < length; j++) {
185 // interpolate chroma as necessary
186
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 2280960 times.
2280960 if ((s->pix_desc->log2_chroma_w ||
187
2/4
✗ Branch 0 not taken.
✗ Branch 1 not taken.
✓ Branch 2 taken 1900800 times.
✓ Branch 3 taken 380160 times.
2280960 s->pix_desc->log2_chroma_h) &&
188
2/2
✓ Branch 0 taken 380160 times.
✓ Branch 1 taken 1520640 times.
1900800 (plane == 1 || plane == 2)) {
189 760320 *dstp++ = (*leftp + *rightp) / 2;
190 760320 *dstp++ = (*leftp + *rightp) / 2;
191 } else {
192 1520640 *dstp++ = *leftp;
193 1520640 *dstp++ = *rightp;
194 }
195 2280960 leftp += 1;
196 2280960 rightp += 1;
197 }
198 }
199 }
200
1/4
✗ Branch 0 not taken.
✓ Branch 1 taken 15 times.
✗ Branch 2 not taken.
✗ Branch 3 not taken.
15 } else if (interleaved && s->depth > 8) {
201 const uint16_t *leftp = (const uint16_t *)s->input_views[LEFT]->data[0];
202 const uint16_t *rightp = (const uint16_t *)s->input_views[RIGHT]->data[0];
203 uint16_t *dstp = (uint16_t *)out->data[0];
204 int length = out->width / 2;
205 int lines = out->height;
206
207 for (plane = 0; plane < s->pix_desc->nb_components; plane++) {
208 if (plane == 1 || plane == 2) {
209 length = AV_CEIL_RSHIFT(out->width / 2, s->pix_desc->log2_chroma_w);
210 lines = AV_CEIL_RSHIFT(out->height, s->pix_desc->log2_chroma_h);
211 }
212 for (i = 0; i < lines; i++) {
213 int j;
214 leftp = (const uint16_t *)s->input_views[LEFT]->data[plane] +
215 s->input_views[LEFT]->linesize[plane] * i / 2;
216 rightp = (const uint16_t *)s->input_views[RIGHT]->data[plane] +
217 s->input_views[RIGHT]->linesize[plane] * i / 2;
218 dstp = (uint16_t *)out->data[plane] + out->linesize[plane] * i / 2;
219 for (j = 0; j < length; j++) {
220 // interpolate chroma as necessary
221 if ((s->pix_desc->log2_chroma_w ||
222 s->pix_desc->log2_chroma_h) &&
223 (plane == 1 || plane == 2)) {
224 *dstp++ = (*leftp + *rightp) / 2;
225 *dstp++ = (*leftp + *rightp) / 2;
226 } else {
227 *dstp++ = *leftp;
228 *dstp++ = *rightp;
229 }
230 leftp += 1;
231 rightp += 1;
232 }
233 }
234 }
235 } else {
236
2/2
✓ Branch 0 taken 30 times.
✓ Branch 1 taken 15 times.
45 for (i = 0; i < 2; i++) {
237
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 30 times.
30 const int psize = 1 + (s->depth > 8);
238 const uint8_t *src[4];
239 uint8_t *dst[4];
240 30 int sub_w = psize * s->input_views[i]->width >> s->pix_desc->log2_chroma_w;
241
242 30 src[0] = s->input_views[i]->data[0];
243 30 src[1] = s->input_views[i]->data[1];
244 30 src[2] = s->input_views[i]->data[2];
245
246 30 dst[0] = out->data[0] + i * s->input_views[i]->width * psize;
247 30 dst[1] = out->data[1] + i * sub_w;
248 30 dst[2] = out->data[2] + i * sub_w;
249
250 30 av_image_copy(dst, out->linesize, src, s->input_views[i]->linesize,
251 30 s->input_views[i]->format,
252 30 s->input_views[i]->width,
253 30 s->input_views[i]->height);
254 }
255 }
256 30 }
257
258 30 static void vertical_frame_pack(AVFilterLink *outlink,
259 AVFrame *out,
260 int interleaved)
261 {
262 30 AVFilterContext *ctx = outlink->src;
263 30 FramepackContext *s = ctx->priv;
264 int i;
265
266
2/2
✓ Branch 0 taken 60 times.
✓ Branch 1 taken 30 times.
90 for (i = 0; i < 2; i++) {
267 const uint8_t *src[4];
268 uint8_t *dst[4];
269 int linesizes[4];
270 60 int sub_h = s->input_views[i]->height >> s->pix_desc->log2_chroma_h;
271
272 60 src[0] = s->input_views[i]->data[0];
273 60 src[1] = s->input_views[i]->data[1];
274 60 src[2] = s->input_views[i]->data[2];
275
276 60 dst[0] = out->data[0] + i * out->linesize[0] *
277 60 (interleaved + s->input_views[i]->height * (1 - interleaved));
278 60 dst[1] = out->data[1] + i * out->linesize[1] *
279 60 (interleaved + sub_h * (1 - interleaved));
280 60 dst[2] = out->data[2] + i * out->linesize[2] *
281 60 (interleaved + sub_h * (1 - interleaved));
282
283 60 linesizes[0] = out->linesize[0] +
284 60 interleaved * out->linesize[0];
285 60 linesizes[1] = out->linesize[1] +
286 60 interleaved * out->linesize[1];
287 60 linesizes[2] = out->linesize[2] +
288 60 interleaved * out->linesize[2];
289
290 60 av_image_copy(dst, linesizes, src, s->input_views[i]->linesize,
291 60 s->input_views[i]->format,
292 60 s->input_views[i]->width,
293 60 s->input_views[i]->height);
294 }
295 30 }
296
297 60 static av_always_inline void spatial_frame_pack(AVFilterLink *outlink,
298 AVFrame *dst)
299 {
300 60 AVFilterContext *ctx = outlink->src;
301 60 FramepackContext *s = ctx->priv;
302
4/5
✓ Branch 0 taken 15 times.
✓ Branch 1 taken 15 times.
✓ Branch 2 taken 15 times.
✓ Branch 3 taken 15 times.
✗ Branch 4 not taken.
60 switch (s->format) {
303 15 case AV_STEREO3D_SIDEBYSIDE:
304 15 horizontal_frame_pack(outlink, dst, 0);
305 15 break;
306 15 case AV_STEREO3D_COLUMNS:
307 15 horizontal_frame_pack(outlink, dst, 1);
308 15 break;
309 15 case AV_STEREO3D_TOPBOTTOM:
310 15 vertical_frame_pack(outlink, dst, 0);
311 15 break;
312 15 case AV_STEREO3D_LINES:
313 15 vertical_frame_pack(outlink, dst, 1);
314 15 break;
315 }
316 60 }
317
318 68 static int try_push_frame(AVFilterContext *ctx)
319 {
320 68 FramepackContext *s = ctx->priv;
321 68 AVFilterLink *outlink = ctx->outputs[0];
322 AVStereo3D *stereo;
323 int ret, i;
324
325
2/4
✓ Branch 0 taken 68 times.
✗ Branch 1 not taken.
✗ Branch 2 not taken.
✓ Branch 3 taken 68 times.
68 if (!(s->input_views[0] && s->input_views[1]))
326 return 0;
327
2/2
✓ Branch 0 taken 8 times.
✓ Branch 1 taken 60 times.
68 if (s->format == AV_STEREO3D_FRAMESEQUENCE) {
328 8 int64_t pts = s->input_views[0]->pts;
329
330
2/2
✓ Branch 0 taken 16 times.
✓ Branch 1 taken 8 times.
24 for (i = 0; i < 2; i++) {
331 // set correct timestamps
332
1/2
✓ Branch 0 taken 16 times.
✗ Branch 1 not taken.
16 if (pts != AV_NOPTS_VALUE) {
333
2/2
✓ Branch 0 taken 8 times.
✓ Branch 1 taken 8 times.
16 s->input_views[i]->pts = i == 0 ? pts * 2 : pts * 2 + av_rescale_q(1, av_inv_q(outlink->frame_rate), outlink->time_base);
334 16 s->input_views[i]->duration = av_rescale_q(1, av_inv_q(outlink->frame_rate), outlink->time_base);
335 }
336
337 // set stereo3d side data
338 16 stereo = av_stereo3d_create_side_data(s->input_views[i]);
339
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 16 times.
16 if (!stereo)
340 return AVERROR(ENOMEM);
341 16 stereo->type = s->format;
342 16 stereo->view = i == LEFT ? AV_STEREO3D_VIEW_LEFT
343
2/2
✓ Branch 0 taken 8 times.
✓ Branch 1 taken 8 times.
16 : AV_STEREO3D_VIEW_RIGHT;
344
345 // filter the frame and immediately relinquish its pointer
346 16 ret = ff_filter_frame(outlink, s->input_views[i]);
347 16 s->input_views[i] = NULL;
348
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 16 times.
16 if (ret < 0)
349 return ret;
350 }
351 8 return ret;
352 } else {
353 60 AVFrame *dst = ff_get_video_buffer(outlink, outlink->w, outlink->h);
354
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 60 times.
60 if (!dst)
355 return AVERROR(ENOMEM);
356
357 60 spatial_frame_pack(outlink, dst);
358
359 // get any property from the original frame
360 60 ret = av_frame_copy_props(dst, s->input_views[LEFT]);
361
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 60 times.
60 if (ret < 0) {
362 av_frame_free(&dst);
363 return ret;
364 }
365
366
2/2
✓ Branch 0 taken 120 times.
✓ Branch 1 taken 60 times.
180 for (i = 0; i < 2; i++)
367 120 av_frame_free(&s->input_views[i]);
368
369 // set stereo3d side data
370 60 stereo = av_stereo3d_create_side_data(dst);
371
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 60 times.
60 if (!stereo) {
372 av_frame_free(&dst);
373 return AVERROR(ENOMEM);
374 }
375 60 stereo->type = s->format;
376
377 60 return ff_filter_frame(outlink, dst);
378 }
379 }
380
381 204 static int activate(AVFilterContext *ctx)
382 {
383 204 AVFilterLink *outlink = ctx->outputs[0];
384 204 FramepackContext *s = ctx->priv;
385 int ret;
386
387
4/4
✓ Branch 1 taken 5 times.
✓ Branch 2 taken 199 times.
✓ Branch 4 taken 10 times.
✓ Branch 5 taken 5 times.
214 FF_FILTER_FORWARD_STATUS_BACK_ALL(outlink, ctx);
388
389
2/2
✓ Branch 0 taken 136 times.
✓ Branch 1 taken 63 times.
199 if (!s->input_views[0]) {
390 136 ret = ff_inlink_consume_frame(ctx->inputs[0], &s->input_views[0]);
391
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 136 times.
136 if (ret < 0)
392 return ret;
393 }
394
395
1/2
✓ Branch 0 taken 199 times.
✗ Branch 1 not taken.
199 if (!s->input_views[1]) {
396 199 ret = ff_inlink_consume_frame(ctx->inputs[1], &s->input_views[1]);
397
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 199 times.
199 if (ret < 0)
398 return ret;
399 }
400
401
4/4
✓ Branch 0 taken 131 times.
✓ Branch 1 taken 68 times.
✓ Branch 2 taken 68 times.
✓ Branch 3 taken 63 times.
199 if (s->input_views[0] && s->input_views[1])
402 68 return try_push_frame(ctx);
403
404
2/2
✓ Branch 1 taken 5 times.
✓ Branch 2 taken 126 times.
131 FF_FILTER_FORWARD_STATUS(ctx->inputs[0], outlink);
405
1/2
✗ Branch 1 not taken.
✓ Branch 2 taken 126 times.
126 FF_FILTER_FORWARD_STATUS(ctx->inputs[1], outlink);
406
407
2/4
✓ Branch 1 taken 126 times.
✗ Branch 2 not taken.
✓ Branch 3 taken 126 times.
✗ Branch 4 not taken.
252 if (ff_outlink_frame_wanted(ctx->outputs[0]) &&
408 126 !ff_outlink_get_status(ctx->inputs[0]) &&
409
2/2
✓ Branch 0 taken 63 times.
✓ Branch 1 taken 63 times.
126 !s->input_views[0]) {
410 63 ff_inlink_request_frame(ctx->inputs[0]);
411 63 return 0;
412 }
413
414
2/4
✓ Branch 1 taken 63 times.
✗ Branch 2 not taken.
✓ Branch 3 taken 63 times.
✗ Branch 4 not taken.
126 if (ff_outlink_frame_wanted(ctx->outputs[0]) &&
415 63 !ff_outlink_get_status(ctx->inputs[1]) &&
416
1/2
✓ Branch 0 taken 63 times.
✗ Branch 1 not taken.
63 !s->input_views[1]) {
417 63 ff_inlink_request_frame(ctx->inputs[1]);
418 63 return 0;
419 }
420
421 return FFERROR_NOT_READY;
422 }
423
424 #define OFFSET(x) offsetof(FramepackContext, x)
425 #define VF AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_FILTERING_PARAM
426 static const AVOption framepack_options[] = {
427 { "format", "Frame pack output format", OFFSET(format), AV_OPT_TYPE_INT,
428 { .i64 = AV_STEREO3D_SIDEBYSIDE }, 0, INT_MAX, .flags = VF, .unit = "format" },
429 { "sbs", "Views are packed next to each other", 0, AV_OPT_TYPE_CONST,
430 { .i64 = AV_STEREO3D_SIDEBYSIDE }, INT_MIN, INT_MAX, .flags = VF, .unit = "format" },
431 { "tab", "Views are packed on top of each other", 0, AV_OPT_TYPE_CONST,
432 { .i64 = AV_STEREO3D_TOPBOTTOM }, INT_MIN, INT_MAX, .flags = VF, .unit = "format" },
433 { "frameseq", "Views are one after the other", 0, AV_OPT_TYPE_CONST,
434 { .i64 = AV_STEREO3D_FRAMESEQUENCE }, INT_MIN, INT_MAX, .flags = VF, .unit = "format" },
435 { "lines", "Views are interleaved by lines", 0, AV_OPT_TYPE_CONST,
436 { .i64 = AV_STEREO3D_LINES }, INT_MIN, INT_MAX, .flags = VF, .unit = "format" },
437 { "columns", "Views are interleaved by columns", 0, AV_OPT_TYPE_CONST,
438 { .i64 = AV_STEREO3D_COLUMNS }, INT_MIN, INT_MAX, .flags = VF, .unit = "format" },
439 { NULL },
440 };
441
442 AVFILTER_DEFINE_CLASS(framepack);
443
444 static const AVFilterPad framepack_inputs[] = {
445 {
446 .name = "left",
447 .type = AVMEDIA_TYPE_VIDEO,
448 },
449 {
450 .name = "right",
451 .type = AVMEDIA_TYPE_VIDEO,
452 },
453 };
454
455 static const AVFilterPad framepack_outputs[] = {
456 {
457 .name = "packed",
458 .type = AVMEDIA_TYPE_VIDEO,
459 .config_props = config_output,
460 },
461 };
462
463 const AVFilter ff_vf_framepack = {
464 .name = "framepack",
465 .description = NULL_IF_CONFIG_SMALL("Generate a frame packed stereoscopic video."),
466 .priv_size = sizeof(FramepackContext),
467 .priv_class = &framepack_class,
468 FILTER_INPUTS(framepack_inputs),
469 FILTER_OUTPUTS(framepack_outputs),
470 FILTER_PIXFMTS_ARRAY(formats_supported),
471 .activate = activate,
472 .uninit = framepack_uninit,
473 };
474
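
Per the summary above, roughly a quarter of the lines and 50 of 127 branches are never executed: the left/right mismatch checks in config_output (source lines 104-122), the default "Unknown packing mode" case (145-146), most allocation/error paths, and notably the entire depth > 8 column-interleave loop in horizontal_frame_pack (201-234), since the interleaved modes are only ever fed 8-bit input here. Below is a minimal harness sketch of how that last gap could be exercised. It assumes only FFmpeg's public buffersrc/buffersink API; the filter name, the format=columns option, and the left/right pad order are taken from the listing itself, while the frame size and the omitted error handling are invented for the example.

/*
 * Coverage-harness sketch (NOT part of vf_framepack.c): builds
 *   buffer(left) + buffer(right) -> framepack=format=columns -> buffersink
 * with 10-bit input so the currently untaken depth > 8 interleave loop
 * (source lines 201-234) runs. Error checks are omitted for brevity;
 * sizes and names are arbitrary.
 */
#include <stdio.h>
#include <libavfilter/avfilter.h>
#include <libavfilter/buffersrc.h>
#include <libavfilter/buffersink.h>
#include <libavutil/frame.h>
#include <libavutil/pixfmt.h>

static AVFilterContext *make_src(AVFilterGraph *g, const char *name)
{
    AVFilterContext *src = NULL;
    /* yuv420p10le makes config_output() pick depth = 10, i.e. > 8 */
    avfilter_graph_create_filter(&src, avfilter_get_by_name("buffer"), name,
                                 "video_size=64x48:pix_fmt=yuv420p10le:"
                                 "time_base=1/25:pixel_aspect=1/1", NULL, g);
    return src;
}

int main(void)
{
    AVFilterGraph   *graph = avfilter_graph_alloc();
    AVFilterContext *left  = make_src(graph, "left");
    AVFilterContext *right = make_src(graph, "right");
    AVFilterContext *pack  = NULL, *sink = NULL;

    /* "columns" -> AV_STEREO3D_COLUMNS -> horizontal_frame_pack(..., 1) */
    avfilter_graph_create_filter(&pack, avfilter_get_by_name("framepack"),
                                 "pack", "format=columns", NULL, graph);
    avfilter_graph_create_filter(&sink, avfilter_get_by_name("buffersink"),
                                 "out", NULL, NULL, graph);

    avfilter_link(left,  0, pack, 0);              /* "left" input pad  */
    avfilter_link(right, 0, pack, 1);              /* "right" input pad */
    avfilter_link(pack,  0, sink, 0);
    avfilter_graph_config(graph, NULL);

    for (int i = 0; i < 2; i++) {                  /* one frame per view */
        AVFrame *f = av_frame_alloc();
        f->format = AV_PIX_FMT_YUV420P10LE;
        f->width  = 64;
        f->height = 48;
        f->pts    = 0;
        av_frame_get_buffer(f, 0);                 /* pixel data left
                                                      uninitialized; enough to
                                                      drive the code path */
        av_buffersrc_add_frame(i == 0 ? left : right, f);
        av_frame_free(&f);
    }

    AVFrame *out = av_frame_alloc();
    if (av_buffersink_get_frame(sink, out) >= 0)
        printf("packed frame: %dx%d\n", out->width, out->height); /* 128x48 */

    av_frame_free(&out);
    avfilter_graph_free(&graph);
    return 0;
}

The same path should also be reachable from the ffmpeg command line by converting both inputs to a 10-bit format ahead of the filter (for instance a format=yuv420p10le step before framepack=format=columns in a -filter_complex chain), though the exact invocation depends on the sources involved.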