| Line | Branch | Exec | Source |
|---|---|---|---|
| 1 | /* | ||
| 2 | * This file is part of FFmpeg. | ||
| 3 | * | ||
| 4 | * FFmpeg is free software; you can redistribute it and/or | ||
| 5 | * modify it under the terms of the GNU Lesser General Public | ||
| 6 | * License as published by the Free Software Foundation; either | ||
| 7 | * version 2.1 of the License, or (at your option) any later version. | ||
| 8 | * | ||
| 9 | * FFmpeg is distributed in the hope that it will be useful, | ||
| 10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
| 12 | * Lesser General Public License for more details. | ||
| 13 | * | ||
| 14 | * You should have received a copy of the GNU Lesser General Public | ||
| 15 | * License along with FFmpeg; if not, write to the Free Software | ||
| 16 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA | ||
| 17 | */ | ||
| 18 | |||
| 19 | #include <string.h> | ||
| 20 | |||
| 21 | #include "libavutil/common.h" | ||
| 22 | #include "libavutil/opt.h" | ||
| 23 | #include "libavutil/pixdesc.h" | ||
| 24 | |||
| 25 | #include "avfilter.h" | ||
| 26 | #include "filters.h" | ||
| 27 | #include "video.h" | ||
| 28 | #include "vaapi_vpp.h" | ||
| 29 | |||
| 30 | #define MAX_REFERENCES 8 | ||
| 31 | |||
| 32 | typedef struct DeintVAAPIContext { | ||
| 33 | VAAPIVPPContext vpp_ctx; // must be the first field | ||
| 34 | |||
| 35 | int mode; | ||
| 36 | int field_rate; | ||
| 37 | int auto_enable; | ||
| 38 | |||
| 39 | VAProcFilterCapDeinterlacing | ||
| 40 | deint_caps[VAProcDeinterlacingCount]; | ||
| 41 | int nb_deint_caps; | ||
| 42 | VAProcPipelineCaps pipeline_caps; | ||
| 43 | |||
| 44 | int queue_depth; | ||
| 45 | int queue_count; | ||
| 46 | AVFrame *frame_queue[MAX_REFERENCES]; | ||
| 47 | int extra_delay_for_timestamps; | ||
| 48 | |||
| 49 | int eof; | ||
| 50 | int prev_pts; | ||
| 51 | } DeintVAAPIContext; | ||
| 52 | |||
| 53 | ✗ | static const char *deint_vaapi_mode_name(int mode) | |
| 54 | { | ||
| 55 | ✗ | switch (mode) { | |
| 56 | #define D(name) case VAProcDeinterlacing ## name: return #name | ||
| 57 | ✗ | D(Bob); | |
| 58 | ✗ | D(Weave); | |
| 59 | ✗ | D(MotionAdaptive); | |
| 60 | ✗ | D(MotionCompensated); | |
| 61 | #undef D | ||
| 62 | ✗ | default: | |
| 63 | ✗ | return "Invalid"; | |
| 64 | } | ||
| 65 | } | ||
| 66 | |||
| 67 | ✗ | static void deint_vaapi_pipeline_uninit(AVFilterContext *avctx) | |
| 68 | { | ||
| 69 | ✗ | DeintVAAPIContext *ctx = avctx->priv; | |
| 70 | int i; | ||
| 71 | |||
| 72 | ✗ | for (i = 0; i < ctx->queue_count; i++) | |
| 73 | ✗ | av_frame_free(&ctx->frame_queue[i]); | |
| 74 | ✗ | ctx->queue_count = 0; | |
| 75 | |||
| 76 | ✗ | ff_vaapi_vpp_pipeline_uninit(avctx); | |
| 77 | ✗ | } | |
| 78 | |||
| 79 | ✗ | static int deint_vaapi_build_filter_params(AVFilterContext *avctx) | |
| 80 | { | ||
| 81 | ✗ | VAAPIVPPContext *vpp_ctx = avctx->priv; | |
| 82 | ✗ | DeintVAAPIContext *ctx = avctx->priv; | |
| 83 | VAStatus vas; | ||
| 84 | VAProcFilterParameterBufferDeinterlacing params; | ||
| 85 | int i; | ||
| 86 | |||
| 87 | ✗ | ctx->nb_deint_caps = VAProcDeinterlacingCount; | |
| 88 | ✗ | vas = vaQueryVideoProcFilterCaps(vpp_ctx->hwctx->display, | |
| 89 | vpp_ctx->va_context, | ||
| 90 | VAProcFilterDeinterlacing, | ||
| 91 | ✗ | &ctx->deint_caps, | |
| 92 | ✗ | &ctx->nb_deint_caps); | |
| 93 | ✗ | if (vas != VA_STATUS_SUCCESS) { | |
| 94 | ✗ | av_log(avctx, AV_LOG_ERROR, "Failed to query deinterlacing " | |
| 95 | "caps: %d (%s).\n", vas, vaErrorStr(vas)); | ||
| 96 | ✗ | return AVERROR(EIO); | |
| 97 | } | ||
| 98 | |||
| 99 | ✗ | if (ctx->mode == VAProcDeinterlacingNone) { | |
| 100 | ✗ | for (i = 0; i < ctx->nb_deint_caps; i++) { | |
| 101 | ✗ | if (ctx->deint_caps[i].type > ctx->mode) | |
| 102 | ✗ | ctx->mode = ctx->deint_caps[i].type; | |
| 103 | } | ||
| 104 | ✗ | av_log(avctx, AV_LOG_VERBOSE, "Picking %d (%s) as default " | |
| 105 | "deinterlacing mode.\n", ctx->mode, | ||
| 106 | deint_vaapi_mode_name(ctx->mode)); | ||
| 107 | } else { | ||
| 108 | ✗ | for (i = 0; i < ctx->nb_deint_caps; i++) { | |
| 109 | ✗ | if (ctx->deint_caps[i].type == ctx->mode) | |
| 110 | ✗ | break; | |
| 111 | } | ||
| 112 | ✗ | if (i >= ctx->nb_deint_caps) { | |
| 113 | ✗ | av_log(avctx, AV_LOG_ERROR, "Deinterlacing mode %d (%s) is " | |
| 114 | "not supported.\n", ctx->mode, | ||
| 115 | deint_vaapi_mode_name(ctx->mode)); | ||
| 116 | ✗ | return AVERROR(EINVAL); | |
| 117 | } | ||
| 118 | } | ||
| 119 | |||
| 120 | ✗ | params.type = VAProcFilterDeinterlacing; | |
| 121 | ✗ | params.algorithm = ctx->mode; | |
| 122 | ✗ | params.flags = 0; | |
| 123 | |||
| 124 | ✗ | vas = ff_vaapi_vpp_make_param_buffers(avctx, | |
| 125 | VAProcFilterParameterBufferType, | ||
| 126 | &params, | ||
| 127 | sizeof(params), | ||
| 128 | 1); | ||
| 129 | ✗ | if (vas) | |
| 130 | ✗ | return vas; | |
| 131 | |||
| 132 | ✗ | vas = vaQueryVideoProcPipelineCaps(vpp_ctx->hwctx->display, | |
| 133 | vpp_ctx->va_context, | ||
| 134 | &vpp_ctx->filter_buffers[0], 1, | ||
| 135 | &ctx->pipeline_caps); | ||
| 136 | ✗ | if (vas != VA_STATUS_SUCCESS) { | |
| 137 | ✗ | av_log(avctx, AV_LOG_ERROR, "Failed to query pipeline " | |
| 138 | "caps: %d (%s).\n", vas, vaErrorStr(vas)); | ||
| 139 | ✗ | return AVERROR(EIO); | |
| 140 | } | ||
| 141 | |||
| 142 | ✗ | ctx->extra_delay_for_timestamps = ctx->field_rate == 2 && | |
| 143 | ✗ | ctx->pipeline_caps.num_backward_references == 0; | |
| 144 | |||
| 145 | ✗ | ctx->queue_depth = ctx->pipeline_caps.num_backward_references + | |
| 146 | ✗ | ctx->pipeline_caps.num_forward_references + | |
| 147 | ✗ | ctx->extra_delay_for_timestamps + 1; | |
| 148 | ✗ | if (ctx->queue_depth > MAX_REFERENCES) { | |
| 149 | ✗ | av_log(avctx, AV_LOG_ERROR, "Pipeline requires too many " | |
| 150 | "references (%u forward, %u back).\n", | ||
| 151 | ctx->pipeline_caps.num_forward_references, | ||
| 152 | ctx->pipeline_caps.num_backward_references); | ||
| 153 | ✗ | return AVERROR(ENOSYS); | |
| 154 | } | ||
| 155 | |||
| 156 | ✗ | return 0; | |
| 157 | } | ||
| 158 | |||
| 159 | ✗ | static int deint_vaapi_config_output(AVFilterLink *outlink) | |
| 160 | { | ||
| 161 | ✗ | FilterLink *outl = ff_filter_link(outlink); | |
| 162 | ✗ | AVFilterLink *inlink = outlink->src->inputs[0]; | |
| 163 | ✗ | FilterLink *inl = ff_filter_link(inlink); | |
| 164 | ✗ | AVFilterContext *avctx = outlink->src; | |
| 165 | ✗ | DeintVAAPIContext *ctx = avctx->priv; | |
| 166 | int err; | ||
| 167 | |||
| 168 | ✗ | err = ff_vaapi_vpp_config_output(outlink); | |
| 169 | ✗ | if (err < 0) | |
| 170 | ✗ | return err; | |
| 171 | ✗ | outlink->time_base = av_mul_q(inlink->time_base, | |
| 172 | ✗ | (AVRational) { 1, ctx->field_rate }); | |
| 173 | ✗ | outl->frame_rate = av_mul_q(inl->frame_rate, | |
| 174 | ✗ | (AVRational) { ctx->field_rate, 1 }); | |
| 175 | |||
| 176 | ✗ | return 0; | |
| 177 | } | ||
| 178 | |||
| 179 | ✗ | static int deint_vaapi_filter_frame(AVFilterLink *inlink, AVFrame *input_frame) | |
| 180 | { | ||
| 181 | ✗ | AVFilterContext *avctx = inlink->dst; | |
| 182 | ✗ | AVFilterLink *outlink = avctx->outputs[0]; | |
| 183 | ✗ | VAAPIVPPContext *vpp_ctx = avctx->priv; | |
| 184 | ✗ | DeintVAAPIContext *ctx = avctx->priv; | |
| 185 | ✗ | AVFrame *output_frame = NULL; | |
| 186 | VASurfaceID input_surface; | ||
| 187 | VASurfaceID backward_references[MAX_REFERENCES]; | ||
| 188 | VASurfaceID forward_references[MAX_REFERENCES]; | ||
| 189 | VAProcPipelineParameterBuffer params; | ||
| 190 | VAProcFilterParameterBufferDeinterlacing *filter_params; | ||
| 191 | VAStatus vas; | ||
| 192 | ✗ | void *filter_params_addr = NULL; | |
| 193 | int err, i, field, current_frame_index; | ||
| 194 | |||
| 195 | // NULL frame is used to flush the queue in field mode | ||
| 196 | ✗ | if (input_frame) | |
| 197 | ✗ | av_log(avctx, AV_LOG_DEBUG, "Filter input: %s, %ux%u (%"PRId64").\n", | |
| 198 | ✗ | av_get_pix_fmt_name(input_frame->format), | |
| 199 | input_frame->width, input_frame->height, input_frame->pts); | ||
| 200 | |||
| 201 | ✗ | if (ctx->queue_count < ctx->queue_depth) { | |
| 202 | ✗ | ctx->frame_queue[ctx->queue_count++] = input_frame; | |
| 203 | ✗ | if (ctx->queue_count < ctx->queue_depth) { | |
| 204 | // Need more reference surfaces before we can continue. | ||
| 205 | ✗ | return 0; | |
| 206 | } | ||
| 207 | } else { | ||
| 208 | ✗ | av_frame_free(&ctx->frame_queue[0]); | |
| 209 | ✗ | for (i = 0; i + 1 < ctx->queue_count; i++) | |
| 210 | ✗ | ctx->frame_queue[i] = ctx->frame_queue[i + 1]; | |
| 211 | ✗ | ctx->frame_queue[i] = input_frame; | |
| 212 | } | ||
| 213 | |||
| 214 | ✗ | current_frame_index = ctx->pipeline_caps.num_forward_references; | |
| 215 | |||
| 216 | ✗ | input_frame = ctx->frame_queue[current_frame_index]; | |
| 217 | ✗ | if (!input_frame) | |
| 218 | ✗ | return 0; | |
| 219 | |||
| 220 | ✗ | input_surface = (VASurfaceID)(uintptr_t)input_frame->data[3]; | |
| 221 | ✗ | for (i = 0; i < ctx->pipeline_caps.num_forward_references; i++) | |
| 222 | ✗ | forward_references[i] = (VASurfaceID)(uintptr_t) | |
| 223 | ✗ | ctx->frame_queue[current_frame_index - i - 1]->data[3]; | |
| 224 | ✗ | for (i = 0; i < ctx->pipeline_caps.num_backward_references; i++) | |
| 225 | ✗ | backward_references[i] = (VASurfaceID)(uintptr_t) | |
| 226 | ✗ | ctx->frame_queue[current_frame_index + i + 1]->data[3]; | |
| 227 | |||
| 228 | ✗ | av_log(avctx, AV_LOG_DEBUG, "Using surface %#x for " | |
| 229 | "deinterlace input.\n", input_surface); | ||
| 230 | ✗ | av_log(avctx, AV_LOG_DEBUG, "Backward references:"); | |
| 231 | ✗ | for (i = 0; i < ctx->pipeline_caps.num_backward_references; i++) | |
| 232 | ✗ | av_log(avctx, AV_LOG_DEBUG, " %#x", backward_references[i]); | |
| 233 | ✗ | av_log(avctx, AV_LOG_DEBUG, "\n"); | |
| 234 | ✗ | av_log(avctx, AV_LOG_DEBUG, "Forward references:"); | |
| 235 | ✗ | for (i = 0; i < ctx->pipeline_caps.num_forward_references; i++) | |
| 236 | ✗ | av_log(avctx, AV_LOG_DEBUG, " %#x", forward_references[i]); | |
| 237 | ✗ | av_log(avctx, AV_LOG_DEBUG, "\n"); | |
| 238 | |||
| 239 | ✗ | for (field = 0; field < ctx->field_rate; field++) { | |
| 240 | ✗ | output_frame = ff_get_video_buffer(outlink, vpp_ctx->output_width, | |
| 241 | vpp_ctx->output_height); | ||
| 242 | ✗ | if (!output_frame) { | |
| 243 | ✗ | err = AVERROR(ENOMEM); | |
| 244 | ✗ | goto fail; | |
| 245 | } | ||
| 246 | |||
| 247 | ✗ | err = av_frame_copy_props(output_frame, input_frame); | |
| 248 | ✗ | if (err < 0) | |
| 249 | ✗ | goto fail; | |
| 250 | |||
| 251 | ✗ | err = ff_vaapi_vpp_init_params(avctx, &params, | |
| 252 | input_frame, output_frame); | ||
| 253 | ✗ | if (err < 0) | |
| 254 | ✗ | goto fail; | |
| 255 | |||
| 256 | ✗ | if (!ctx->auto_enable || (input_frame->flags & AV_FRAME_FLAG_INTERLACED)) { | |
| 257 | ✗ | vas = vaMapBuffer(vpp_ctx->hwctx->display, vpp_ctx->filter_buffers[0], | |
| 258 | &filter_params_addr); | ||
| 259 | ✗ | if (vas != VA_STATUS_SUCCESS) { | |
| 260 | ✗ | av_log(avctx, AV_LOG_ERROR, "Failed to map filter parameter " | |
| 261 | "buffer: %d (%s).\n", vas, vaErrorStr(vas)); | ||
| 262 | ✗ | err = AVERROR(EIO); | |
| 263 | ✗ | goto fail; | |
| 264 | } | ||
| 265 | ✗ | filter_params = filter_params_addr; | |
| 266 | ✗ | filter_params->flags = 0; | |
| 267 | ✗ | if (input_frame->flags & AV_FRAME_FLAG_TOP_FIELD_FIRST) { | |
| 268 | ✗ | filter_params->flags |= field ? VA_DEINTERLACING_BOTTOM_FIELD : 0; | |
| 269 | } else { | ||
| 270 | ✗ | filter_params->flags |= VA_DEINTERLACING_BOTTOM_FIELD_FIRST; | |
| 271 | ✗ | filter_params->flags |= field ? 0 : VA_DEINTERLACING_BOTTOM_FIELD; | |
| 272 | } | ||
| 273 | ✗ | filter_params_addr = NULL; | |
| 274 | ✗ | vas = vaUnmapBuffer(vpp_ctx->hwctx->display, vpp_ctx->filter_buffers[0]); | |
| 275 | ✗ | if (vas != VA_STATUS_SUCCESS) | |
| 276 | ✗ | av_log(avctx, AV_LOG_ERROR, "Failed to unmap filter parameter " | |
| 277 | "buffer: %d (%s).\n", vas, vaErrorStr(vas)); | ||
| 278 | |||
| 279 | ✗ | params.filters = &vpp_ctx->filter_buffers[0]; | |
| 280 | ✗ | params.num_filters = 1; | |
| 281 | |||
| 282 | ✗ | params.forward_references = forward_references; | |
| 283 | ✗ | params.num_forward_references = | |
| 284 | ✗ | ctx->pipeline_caps.num_forward_references; | |
| 285 | ✗ | params.backward_references = backward_references; | |
| 286 | ✗ | params.num_backward_references = | |
| 287 | ✗ | ctx->pipeline_caps.num_backward_references; | |
| 288 | |||
| 289 | } else { | ||
| 290 | ✗ | params.filters = NULL; | |
| 291 | ✗ | params.num_filters = 0; | |
| 292 | } | ||
| 293 | |||
| 294 | ✗ | err = ff_vaapi_vpp_render_picture(avctx, &params, output_frame); | |
| 295 | ✗ | if (err < 0) | |
| 296 | ✗ | goto fail; | |
| 297 | |||
| 298 | ✗ | if (ctx->field_rate == 2) { | |
| 299 | ✗ | if (field == 0) | |
| 300 | ✗ | output_frame->pts = 2 * input_frame->pts; | |
| 301 | ✗ | else if (ctx->eof) | |
| 302 | ✗ | output_frame->pts = 3 * input_frame->pts - ctx->prev_pts; | |
| 303 | else | ||
| 304 | ✗ | output_frame->pts = input_frame->pts + | |
| 305 | ✗ | ctx->frame_queue[current_frame_index + 1]->pts; | |
| 306 | } | ||
| 307 | ✗ | output_frame->flags &= ~AV_FRAME_FLAG_INTERLACED; | |
| 308 | |||
| 309 | ✗ | av_log(avctx, AV_LOG_DEBUG, "Filter output: %s, %ux%u (%"PRId64").\n", | |
| 310 | ✗ | av_get_pix_fmt_name(output_frame->format), | |
| 311 | ✗ | output_frame->width, output_frame->height, output_frame->pts); | |
| 312 | |||
| 313 | ✗ | err = ff_filter_frame(outlink, output_frame); | |
| 314 | ✗ | if (err < 0) | |
| 315 | ✗ | break; | |
| 316 | } | ||
| 317 | |||
| 318 | ✗ | ctx->prev_pts = input_frame->pts; | |
| 319 | |||
| 320 | ✗ | return err; | |
| 321 | |||
| 322 | ✗ | fail: | |
| 323 | ✗ | if (filter_params_addr) | |
| 324 | ✗ | vaUnmapBuffer(vpp_ctx->hwctx->display, vpp_ctx->filter_buffers[0]); | |
| 325 | ✗ | av_frame_free(&output_frame); | |
| 326 | ✗ | return err; | |
| 327 | } | ||
| 328 | |||
| 329 | ✗ | static int deint_vaapi_request_frame(AVFilterLink *link) | |
| 330 | { | ||
| 331 | ✗ | AVFilterContext *avctx = link->src; | |
| 332 | ✗ | DeintVAAPIContext *ctx = avctx->priv; | |
| 333 | int ret; | ||
| 334 | |||
| 335 | ✗ | if (ctx->eof) | |
| 336 | ✗ | return AVERROR_EOF; | |
| 337 | |||
| 338 | ✗ | ret = ff_request_frame(link->src->inputs[0]); | |
| 339 | ✗ | if (ret == AVERROR_EOF && ctx->extra_delay_for_timestamps) { | |
| 340 | ✗ | ctx->eof = 1; | |
| 341 | ✗ | deint_vaapi_filter_frame(link->src->inputs[0], NULL); | |
| 342 | ✗ | } else if (ret < 0) | |
| 343 | ✗ | return ret; | |
| 344 | |||
| 345 | ✗ | return 0; | |
| 346 | } | ||
| 347 | |||
| 348 | ✗ | static av_cold int deint_vaapi_init(AVFilterContext *avctx) | |
| 349 | { | ||
| 350 | ✗ | VAAPIVPPContext *vpp_ctx = avctx->priv; | |
| 351 | |||
| 352 | ✗ | ff_vaapi_vpp_ctx_init(avctx); | |
| 353 | ✗ | vpp_ctx->pipeline_uninit = deint_vaapi_pipeline_uninit; | |
| 354 | ✗ | vpp_ctx->build_filter_params = deint_vaapi_build_filter_params; | |
| 355 | ✗ | vpp_ctx->output_format = AV_PIX_FMT_NONE; | |
| 356 | |||
| 357 | ✗ | return 0; | |
| 358 | } | ||
| 359 | |||
| 360 | #define OFFSET(x) offsetof(DeintVAAPIContext, x) | ||
| 361 | #define FLAGS (AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM) | ||
| 362 | static const AVOption deint_vaapi_options[] = { | ||
| 363 | { "mode", "Deinterlacing mode", | ||
| 364 | OFFSET(mode), AV_OPT_TYPE_INT, { .i64 = VAProcDeinterlacingNone }, | ||
| 365 | VAProcDeinterlacingNone, VAProcDeinterlacingCount - 1, FLAGS, .unit = "mode" }, | ||
| 366 | { "default", "Use the highest-numbered (and therefore possibly most advanced) deinterlacing algorithm", | ||
| 367 | 0, AV_OPT_TYPE_CONST, { .i64 = VAProcDeinterlacingNone }, 0, 0, FLAGS, .unit = "mode" }, | ||
| 368 | { "bob", "Use the bob deinterlacing algorithm", | ||
| 369 | 0, AV_OPT_TYPE_CONST, { .i64 = VAProcDeinterlacingBob }, 0, 0, FLAGS, .unit = "mode" }, | ||
| 370 | { "weave", "Use the weave deinterlacing algorithm", | ||
| 371 | 0, AV_OPT_TYPE_CONST, { .i64 = VAProcDeinterlacingWeave }, 0, 0, FLAGS, .unit = "mode" }, | ||
| 372 | { "motion_adaptive", "Use the motion adaptive deinterlacing algorithm", | ||
| 373 | 0, AV_OPT_TYPE_CONST, { .i64 = VAProcDeinterlacingMotionAdaptive }, 0, 0, FLAGS, .unit = "mode" }, | ||
| 374 | { "motion_compensated", "Use the motion compensated deinterlacing algorithm", | ||
| 375 | 0, AV_OPT_TYPE_CONST, { .i64 = VAProcDeinterlacingMotionCompensated }, 0, 0, FLAGS, .unit = "mode" }, | ||
| 376 | |||
| 377 | { "rate", "Generate output at frame rate or field rate", | ||
| 378 | OFFSET(field_rate), AV_OPT_TYPE_INT, { .i64 = 1 }, 1, 2, FLAGS, .unit = "rate" }, | ||
| 379 | { "frame", "Output at frame rate (one frame of output for each field-pair)", | ||
| 380 | 0, AV_OPT_TYPE_CONST, { .i64 = 1 }, 0, 0, FLAGS, .unit = "rate" }, | ||
| 381 | { "field", "Output at field rate (one frame of output for each field)", | ||
| 382 | 0, AV_OPT_TYPE_CONST, { .i64 = 2 }, 0, 0, FLAGS, .unit = "rate" }, | ||
| 383 | |||
| 384 | { "auto", "Only deinterlace fields, passing frames through unchanged", | ||
| 385 | OFFSET(auto_enable), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, FLAGS }, | ||
| 386 | |||
| 387 | { NULL }, | ||
| 388 | }; | ||
| 389 | |||
| 390 | static const AVClass deint_vaapi_class = { | ||
| 391 | .class_name = "deinterlace_vaapi", | ||
| 392 | .item_name = av_default_item_name, | ||
| 393 | .option = deint_vaapi_options, | ||
| 394 | .version = LIBAVUTIL_VERSION_INT, | ||
| 395 | }; | ||
| 396 | |||
| 397 | static const AVFilterPad deint_vaapi_inputs[] = { | ||
| 398 | { | ||
| 399 | .name = "default", | ||
| 400 | .type = AVMEDIA_TYPE_VIDEO, | ||
| 401 | .filter_frame = &deint_vaapi_filter_frame, | ||
| 402 | .config_props = &ff_vaapi_vpp_config_input, | ||
| 403 | }, | ||
| 404 | }; | ||
| 405 | |||
| 406 | static const AVFilterPad deint_vaapi_outputs[] = { | ||
| 407 | { | ||
| 408 | .name = "default", | ||
| 409 | .type = AVMEDIA_TYPE_VIDEO, | ||
| 410 | .request_frame = &deint_vaapi_request_frame, | ||
| 411 | .config_props = &deint_vaapi_config_output, | ||
| 412 | }, | ||
| 413 | }; | ||
| 414 | |||
| 415 | const FFFilter ff_vf_deinterlace_vaapi = { | ||
| 416 | .p.name = "deinterlace_vaapi", | ||
| 417 | .p.description = NULL_IF_CONFIG_SMALL("Deinterlacing of VAAPI surfaces"), | ||
| 418 | .p.priv_class = &deint_vaapi_class, | ||
| 419 | .priv_size = sizeof(DeintVAAPIContext), | ||
| 420 | .init = &deint_vaapi_init, | ||
| 421 | .uninit = &ff_vaapi_vpp_ctx_uninit, | ||
| 422 | FILTER_INPUTS(deint_vaapi_inputs), | ||
| 423 | FILTER_OUTPUTS(deint_vaapi_outputs), | ||
| 424 | FILTER_QUERY_FUNC2(&ff_vaapi_vpp_query_formats), | ||
| 425 | .flags_internal = FF_FILTER_FLAG_HWFRAME_AWARE, | ||
| 426 | }; | ||
| 427 |||
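
The least obvious part of the listing above is the field-rate timestamp handling around source lines 298–305: with `rate=field`, the output time base is half the input time base, so the first field of a frame keeps `2 * input_frame->pts`, the second field falls midway to the next queued frame (`cur + next` in the doubled time base), and at EOF, where no next frame exists, it is extrapolated from the previous interval as `3 * cur - prev`. Below is a minimal standalone sketch of just that arithmetic; the helper name and plain `int64_t` signature are illustrative and not part of the filter's API.

```c
#include <inttypes.h>
#include <stdio.h>

/* Illustrative only: PTS values for the two fields produced from one input
 * frame when deinterlacing at field rate (output time base = input / 2).
 * cur, next and prev are input-time-base PTS values; have_next is 0 once
 * the stream has hit EOF and no following frame exists. */
static void field_rate_pts(int64_t cur, int64_t next, int64_t prev,
                           int have_next, int64_t out[2])
{
    out[0] = 2 * cur;                      /* first field: same instant as the frame */
    out[1] = have_next ? cur + next        /* second field: midway to the next frame */
                       : 3 * cur - prev;   /* EOF: extrapolate from the last interval */
}

int main(void)
{
    int64_t out[2];
    /* 25 fps input in a 1/1000 time base: frames at 0, 40, 80, ... */
    field_rate_pts(40, 80, 0, 1, out);
    printf("%" PRId64 " %" PRId64 "\n", out[0], out[1]);   /* 80 120 (1/2000 time base) */
    return 0;
}
```

In the filter itself these two values are written straight into `output_frame->pts` for field 0 and field 1 respectively, and `ctx->prev_pts` records the last input PTS so the EOF branch has its `prev`.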