Line | Branch | Exec | Source |
---|---|---|---|
1 | /* | ||
2 | * Copyright (c) 2013 Paul B Mahol | ||
3 | * | ||
4 | * This file is part of FFmpeg. | ||
5 | * | ||
6 | * FFmpeg is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU Lesser General Public | ||
8 | * License as published by the Free Software Foundation; either | ||
9 | * version 2.1 of the License, or (at your option) any later version. | ||
10 | * | ||
11 | * FFmpeg is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | * Lesser General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU Lesser General Public | ||
17 | * License along with FFmpeg; if not, write to the Free Software | ||
18 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA | ||
19 | */ | ||
20 | |||
21 | #include "libavutil/imgutils.h" | ||
22 | #include "libavutil/opt.h" | ||
23 | #include "libavutil/pixdesc.h" | ||
24 | #include "avfilter.h" | ||
25 | #include "formats.h" | ||
26 | #include "internal.h" | ||
27 | #include "video.h" | ||
28 | |||
29 | typedef struct WeaveContext { | ||
30 | const AVClass *class; | ||
31 | int first_field; | ||
32 | int double_weave; | ||
33 | int nb_planes; | ||
34 | int planeheight[4]; | ||
35 | int linesize[4]; | ||
36 | |||
37 | AVFrame *prev; | ||
38 | } WeaveContext; | ||
39 | |||
40 | #define OFFSET(x) offsetof(WeaveContext, x) | ||
41 | #define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM | ||
42 | |||
43 | static const AVOption weave_options[] = { | ||
44 | { "first_field", "set first field", OFFSET(first_field), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS, "field"}, | ||
45 | { "top", "set top field first", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "field"}, | ||
46 | { "t", "set top field first", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "field"}, | ||
47 | { "bottom", "set bottom field first", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "field"}, | ||
48 | { "b", "set bottom field first", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "field"}, | ||
49 | { NULL } | ||
50 | }; | ||
51 | |||
52 | AVFILTER_DEFINE_CLASS_EXT(weave, "(double)weave", weave_options); | ||
53 | |||
54 | 1 | static int query_formats(AVFilterContext *ctx) | |
55 | { | ||
56 | 1 | int reject_flags = AV_PIX_FMT_FLAG_PAL | AV_PIX_FMT_FLAG_HWACCEL; | |
57 | |||
58 | 1 | return ff_set_common_formats(ctx, ff_formats_pixdesc_filter(0, reject_flags)); | |
59 | } | ||
60 | |||
61 | 1 | static int config_props_output(AVFilterLink *outlink) | |
62 | { | ||
63 | 1 | AVFilterContext *ctx = outlink->src; | |
64 | 1 | WeaveContext *s = ctx->priv; | |
65 | 1 | AVFilterLink *inlink = ctx->inputs[0]; | |
66 | 1 | const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format); | |
67 | int ret; | ||
68 | |||
69 | 1/2 ✓ Branch 0 taken 1 times. ✗ Branch 1 not taken. | 1 | if (!s->double_weave) { |
70 | 1 | outlink->time_base.num = inlink->time_base.num * 2; | |
71 | 1 | outlink->time_base.den = inlink->time_base.den; | |
72 | 1 | outlink->frame_rate.num = inlink->frame_rate.num; | |
73 | 1 | outlink->frame_rate.den = inlink->frame_rate.den * 2; | |
74 | } | ||
75 | 1 | outlink->w = inlink->w; | |
76 | 1 | outlink->h = inlink->h * 2; | |
77 | |||
78 | 1/2 ✗ Branch 1 not taken. ✓ Branch 2 taken 1 times. | 1 | if ((ret = av_image_fill_linesizes(s->linesize, inlink->format, inlink->w)) < 0) |
79 | ✗ | return ret; | |
80 | |||
81 | 1 | s->planeheight[1] = s->planeheight[2] = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h); | |
82 | 1 | s->planeheight[0] = s->planeheight[3] = inlink->h; | |
83 | |||
84 | 1 | s->nb_planes = av_pix_fmt_count_planes(inlink->format); | |
85 | |||
86 | 1 | return 0; | |
87 | } | ||
88 | |||
89 | typedef struct ThreadData { | ||
90 | AVFrame *in, *out; | ||
91 | } ThreadData; | ||
92 | |||
93 | 225 | static int weave_slice(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs) | |
94 | { | ||
95 | 225 | AVFilterLink *inlink = ctx->inputs[0]; | |
96 | 225 | WeaveContext *s = ctx->priv; | |
97 | 225 | ThreadData *td = arg; | |
98 | 225 | AVFrame *in = td->in; | |
99 | 225 | AVFrame *out = td->out; | |
100 | |||
101 | 1/4 ✗ Branch 0 not taken. ✓ Branch 1 taken 225 times. ✗ Branch 2 not taken. ✗ Branch 3 not taken. | 225 | const int weave = (s->double_weave && !(inlink->frame_count_out & 1)); |
102 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 225 times. | 225 | const int field1 = weave ? s->first_field : (!s->first_field); |
103 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 225 times. | 225 | const int field2 = weave ? (!s->first_field) : s->first_field; |
104 | |||
105 | 2/2 ✓ Branch 0 taken 675 times. ✓ Branch 1 taken 225 times. | 900 | for (int i = 0; i < s->nb_planes; i++) { |
106 | 675 | const int height = s->planeheight[i]; | |
107 | 675 | const int start = (height * jobnr) / nb_jobs; | |
108 | 675 | const int end = (height * (jobnr+1)) / nb_jobs; | |
109 | |||
110 | 675 | av_image_copy_plane(out->data[i] + out->linesize[i] * field1 + | |
111 | 675 | out->linesize[i] * start * 2, | |
112 | 675 | out->linesize[i] * 2, | |
113 | 675 | in->data[i] + start * in->linesize[i], | |
114 | in->linesize[i], | ||
115 | s->linesize[i], end - start); | ||
116 | 675 | av_image_copy_plane(out->data[i] + out->linesize[i] * field2 + | |
117 | 675 | out->linesize[i] * start * 2, | |
118 | 675 | out->linesize[i] * 2, | |
119 | 675 | s->prev->data[i] + start * s->prev->linesize[i], | |
120 | 675 | s->prev->linesize[i], | |
121 | s->linesize[i], end - start); | ||
122 | } | ||
123 | |||
124 | 225 | return 0; | |
125 | } | ||
126 | |||
127 | 50 | static int filter_frame(AVFilterLink *inlink, AVFrame *in) | |
128 | { | ||
129 | 50 | AVFilterContext *ctx = inlink->dst; | |
130 | 50 | WeaveContext *s = ctx->priv; | |
131 | 50 | AVFilterLink *outlink = ctx->outputs[0]; | |
132 | ThreadData td; | ||
133 | AVFrame *out; | ||
134 | |||
135 | 2/2 ✓ Branch 0 taken 25 times. ✓ Branch 1 taken 25 times. | 50 | if (!s->prev) { |
136 | 25 | s->prev = in; | |
137 | 25 | return 0; | |
138 | } | ||
139 | |||
140 | 25 | out = ff_get_video_buffer(outlink, outlink->w, outlink->h); | |
141 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 25 times. | 25 | if (!out) { |
142 | ✗ | av_frame_free(&in); | |
143 | ✗ | av_frame_free(&s->prev); | |
144 | ✗ | return AVERROR(ENOMEM); | |
145 | } | ||
146 | 25 | av_frame_copy_props(out, in); | |
147 | |||
148 | 25 | td.out = out, td.in = in; | |
149 | 25 | ff_filter_execute(ctx, weave_slice, &td, NULL, | |
150 | 1/2 ✓ Branch 0 taken 25 times. ✗ Branch 1 not taken. | 25 | FFMIN(s->planeheight[1], ff_filter_get_nb_threads(ctx))); |
151 | |||
152 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 25 times. | 25 | out->pts = s->double_weave ? s->prev->pts : in->pts / 2; |
153 | #if FF_API_INTERLACED_FRAME | ||
154 | FF_DISABLE_DEPRECATION_WARNINGS | ||
155 | 25 | out->interlaced_frame = 1; | |
156 | 25 | out->top_field_first = !s->first_field; | |
157 | FF_ENABLE_DEPRECATION_WARNINGS | ||
158 | #endif | ||
159 | 25 | out->flags |= AV_FRAME_FLAG_INTERLACED; | |
160 | 1/2 ✓ Branch 0 taken 25 times. ✗ Branch 1 not taken. | 25 | if (s->first_field) |
161 | 25 | out->flags &= ~AV_FRAME_FLAG_TOP_FIELD_FIRST; | |
162 | else | ||
163 | ✗ | out->flags |= AV_FRAME_FLAG_TOP_FIELD_FIRST; | |
164 | |||
165 | 1/2 ✓ Branch 0 taken 25 times. ✗ Branch 1 not taken. | 25 | if (!s->double_weave) |
166 | 25 | av_frame_free(&in); | |
167 | 25 | av_frame_free(&s->prev); | |
168 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 25 times. | 25 | if (s->double_weave) |
169 | ✗ | s->prev = in; | |
170 | 25 | return ff_filter_frame(outlink, out); | |
171 | } | ||
172 | |||
173 | 2 | static av_cold void uninit(AVFilterContext *ctx) | |
174 | { | ||
175 | 2 | WeaveContext *s = ctx->priv; | |
176 | |||
177 | 2 | av_frame_free(&s->prev); | |
178 | 2 | } | |
179 | |||
180 | static const AVFilterPad weave_inputs[] = { | ||
181 | { | ||
182 | .name = "default", | ||
183 | .type = AVMEDIA_TYPE_VIDEO, | ||
184 | .filter_frame = filter_frame, | ||
185 | }, | ||
186 | }; | ||
187 | |||
188 | static const AVFilterPad weave_outputs[] = { | ||
189 | { | ||
190 | .name = "default", | ||
191 | .type = AVMEDIA_TYPE_VIDEO, | ||
192 | .config_props = config_props_output, | ||
193 | }, | ||
194 | }; | ||
195 | |||
196 | const AVFilter ff_vf_weave = { | ||
197 | .name = "weave", | ||
198 | .description = NULL_IF_CONFIG_SMALL("Weave input video fields into frames."), | ||
199 | .priv_size = sizeof(WeaveContext), | ||
200 | .priv_class = &weave_class, | ||
201 | .uninit = uninit, | ||
202 | FILTER_INPUTS(weave_inputs), | ||
203 | FILTER_OUTPUTS(weave_outputs), | ||
204 | FILTER_QUERY_FUNC(query_formats), | ||
205 | .flags = AVFILTER_FLAG_SLICE_THREADS, | ||
206 | }; | ||
207 | |||
208 | ✗ | static av_cold int init(AVFilterContext *ctx) | |
209 | { | ||
210 | ✗ | WeaveContext *s = ctx->priv; | |
211 | |||
212 | ✗ | if (!strcmp(ctx->filter->name, "doubleweave")) | |
213 | ✗ | s->double_weave = 1; | |
214 | |||
215 | ✗ | return 0; | |
216 | } | ||
217 | |||
218 | const AVFilter ff_vf_doubleweave = { | ||
219 | .name = "doubleweave", | ||
220 | .description = NULL_IF_CONFIG_SMALL("Weave input video fields into double number of frames."), | ||
221 | .priv_class = &weave_class, | ||
222 | .priv_size = sizeof(WeaveContext), | ||
223 | .init = init, | ||
224 | .uninit = uninit, | ||
225 | FILTER_INPUTS(weave_inputs), | ||
226 | FILTER_OUTPUTS(weave_outputs), | ||
227 | FILTER_QUERY_FUNC(query_formats), | ||
228 | .flags = AVFILTER_FLAG_SLICE_THREADS, | ||
229 | }; | ||
230 |
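
The listing above covers the whole of `libavfilter/vf_weave.c`. As a rough illustration of the copy pattern that `weave_slice()` applies to each plane, the standalone sketch below (not FFmpeg code; `weave_plane` and its parameters are hypothetical names introduced here) interleaves two single-field buffers into one double-height frame: the newer field lands on rows `2*y + field1` and the previous field on rows `2*y + field2`, with `field1 = !first_field` and `field2 = first_field` in the plain `weave` case.

```c
/*
 * Minimal sketch of per-plane field weaving, mirroring weave_slice()
 * for the non-double_weave case. Not part of FFmpeg.
 */
#include <stddef.h>
#include <stdint.h>
#include <string.h>

static void weave_plane(uint8_t *dst, ptrdiff_t dst_stride,
                        const uint8_t *cur, ptrdiff_t cur_stride,
                        const uint8_t *prev, ptrdiff_t prev_stride,
                        size_t bytewidth, int field_height, int first_field)
{
    /* first_field == 0 ("top"): the earlier field (prev) fills the even
     * (top) output rows and the current field the odd rows; with
     * first_field == 1 ("bottom") the roles are swapped. */
    const int cur_off  = !first_field;
    const int prev_off = first_field;

    for (int y = 0; y < field_height; y++) {
        memcpy(dst + (2 * y + cur_off)  * dst_stride, cur  + y * cur_stride,  bytewidth);
        memcpy(dst + (2 * y + prev_off) * dst_stride, prev + y * prev_stride, bytewidth);
    }
}
```

In a filtergraph, the same field recombination is typically reached with something like `separatefields,weave=first_field=top`, since `weave` is the inverse of `separatefields`.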