| Line | Branch | Exec | Source |
|---|---|---|---|
| 1 | /* | ||
| 2 | * This file is part of FFmpeg. | ||
| 3 | * | ||
| 4 | * FFmpeg is free software; you can redistribute it and/or | ||
| 5 | * modify it under the terms of the GNU Lesser General Public | ||
| 6 | * License as published by the Free Software Foundation; either | ||
| 7 | * version 2.1 of the License, or (at your option) any later version. | ||
| 8 | * | ||
| 9 | * FFmpeg is distributed in the hope that it will be useful, | ||
| 10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
| 12 | * Lesser General Public License for more details. | ||
| 13 | * | ||
| 14 | * You should have received a copy of the GNU Lesser General Public | ||
| 15 | * License along with FFmpeg; if not, write to the Free Software | ||
| 16 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA | ||
| 17 | */ | ||
| 18 | #include <string.h> | ||
| 19 | |||
| 20 | #include "libavutil/opt.h" | ||
| 21 | #include "libavutil/pixdesc.h" | ||
| 22 | #include "libavutil/mastering_display_metadata.h" | ||
| 23 | |||
| 24 | #include "avfilter.h" | ||
| 25 | #include "filters.h" | ||
| 26 | #include "vaapi_vpp.h" | ||
| 27 | #include "video.h" | ||
| 28 | |||
/**
 * Private context for the tonemap_vaapi filter.
 * Wraps the generic VAAPI VPP context and holds the parsed filter
 * options plus the input/output HDR10 metadata passed to the driver.
 */
typedef struct HDRVAAPIContext {
    VAAPIVPPContext vpp_ctx; // must be the first field

    char *output_format_string;   // "format" option: output pixel format name

    char *color_primaries_string; // "primaries"/"p" option (string form)
    char *color_transfer_string;  // "transfer"/"t" option (string form)
    char *color_matrix_string;    // "matrix"/"m" option (string form)

    // Parsed equivalents of the strings above; UNSPECIFIED when unset.
    enum AVColorPrimaries color_primaries;
    enum AVColorTransferCharacteristic color_transfer;
    enum AVColorSpace color_matrix;

    char *mastering_display;      // "display" option; non-NULL selects HDR->HDR mode
    char *content_light;          // "light" option (only used in HDR->HDR mode)

    VAHdrMetaDataHDR10 in_metadata;  // HDR10 metadata taken from the input frame
    VAHdrMetaDataHDR10 out_metadata; // target HDR10 metadata (HDR->HDR mode)

    AVFrameSideData *src_display; // input frame's mastering-display side data
    AVFrameSideData *src_light;   // input frame's content-light side data (may be NULL)
} HDRVAAPIContext;
| 51 | |||
/**
 * Read the HDR10 side data attached to the input frame and convert it
 * into the VAHdrMetaDataHDR10 layout (ctx->in_metadata) consumed by the
 * VAAPI tone-mapping filter.
 *
 * Mastering display metadata is mandatory; content light level metadata
 * is optional.
 *
 * @return 0 on success, AVERROR(EINVAL) if mastering display data is
 *         missing or empty.
 */
static int tonemap_vaapi_save_metadata(AVFilterContext *avctx, AVFrame *input_frame)
{
    HDRVAAPIContext *ctx = avctx->priv;
    AVMasteringDisplayMetadata *hdr_meta;
    AVContentLightMetadata *light_meta;

    // Tone mapping here targets HDR10 (PQ) input; other transfers are
    // still processed, but warn that results may not be meaningful.
    if (input_frame->color_trc != AVCOL_TRC_SMPTE2084) {
        av_log(avctx, AV_LOG_WARNING, "Only support HDR10 as input for vaapi tone-mapping\n");
    }

    ctx->src_display = av_frame_get_side_data(input_frame,
                                              AV_FRAME_DATA_MASTERING_DISPLAY_METADATA);
    if (ctx->src_display) {
        hdr_meta = (AVMasteringDisplayMetadata *)ctx->src_display->data;
        if (!hdr_meta) {
            av_log(avctx, AV_LOG_ERROR, "No mastering display data\n");
            return AVERROR(EINVAL);
        }

        if (hdr_meta->has_luminance) {
            // VAHdrMetaDataHDR10 expresses luminance in 0.0001 cd/m^2 units.
            const int luma_den = 10000;
            ctx->in_metadata.max_display_mastering_luminance =
                lrint(luma_den * av_q2d(hdr_meta->max_luminance));
            // Clamp min to max so the pair stays internally consistent.
            ctx->in_metadata.min_display_mastering_luminance =
                FFMIN(lrint(luma_den * av_q2d(hdr_meta->min_luminance)),
                      ctx->in_metadata.max_display_mastering_luminance);

            av_log(avctx, AV_LOG_DEBUG,
                   "Mastering Display Metadata(in luminance):\n");
            av_log(avctx, AV_LOG_DEBUG,
                   "min_luminance=%u, max_luminance=%u\n",
                   ctx->in_metadata.min_display_mastering_luminance,
                   ctx->in_metadata.max_display_mastering_luminance);
        }

        if (hdr_meta->has_primaries) {
            int i;
            // AVMasteringDisplayMetadata stores primaries as R,G,B while
            // the VA struct expects G,B,R order, hence the index mapping.
            const int mapping[3] = {1, 2, 0}; //green, blue, red
            // Chromaticity coordinates are in 0.00002 units, max 50000.
            const int chroma_den = 50000;

            for (i = 0; i < 3; i++) {
                const int j = mapping[i];
                ctx->in_metadata.display_primaries_x[i] =
                    FFMIN(lrint(chroma_den *
                                av_q2d(hdr_meta->display_primaries[j][0])),
                          chroma_den);
                ctx->in_metadata.display_primaries_y[i] =
                    FFMIN(lrint(chroma_den *
                                av_q2d(hdr_meta->display_primaries[j][1])),
                          chroma_den);
            }

            ctx->in_metadata.white_point_x =
                FFMIN(lrint(chroma_den * av_q2d(hdr_meta->white_point[0])),
                      chroma_den);
            ctx->in_metadata.white_point_y =
                FFMIN(lrint(chroma_den * av_q2d(hdr_meta->white_point[1])),
                      chroma_den);

            av_log(avctx, AV_LOG_DEBUG,
                   "Mastering Display Metadata(in primaries):\n");
            av_log(avctx, AV_LOG_DEBUG,
                   "G(%u,%u) B(%u,%u) R(%u,%u) WP(%u,%u)\n",
                   ctx->in_metadata.display_primaries_x[0],
                   ctx->in_metadata.display_primaries_y[0],
                   ctx->in_metadata.display_primaries_x[1],
                   ctx->in_metadata.display_primaries_y[1],
                   ctx->in_metadata.display_primaries_x[2],
                   ctx->in_metadata.display_primaries_y[2],
                   ctx->in_metadata.white_point_x,
                   ctx->in_metadata.white_point_y);
        }
    } else {
        av_log(avctx, AV_LOG_ERROR, "No mastering display data from input\n");
        return AVERROR(EINVAL);
    }

    ctx->src_light = av_frame_get_side_data(input_frame,
                                            AV_FRAME_DATA_CONTENT_LIGHT_LEVEL);
    if (ctx->src_light) {
        light_meta = (AVContentLightMetadata *)ctx->src_light->data;
        if (!light_meta) {
            av_log(avctx, AV_LOG_ERROR, "No light metadata\n");
            return AVERROR(EINVAL);
        }

        ctx->in_metadata.max_content_light_level = light_meta->MaxCLL;
        ctx->in_metadata.max_pic_average_light_level = light_meta->MaxFALL;

        av_log(avctx, AV_LOG_DEBUG,
               "Mastering Content Light Level (in):\n");
        av_log(avctx, AV_LOG_DEBUG,
               "MaxCLL(%u) MaxFALL(%u)\n",
               ctx->in_metadata.max_content_light_level,
               ctx->in_metadata.max_pic_average_light_level);
    } else {
        // Content light level is optional: proceed without it.
        av_log(avctx, AV_LOG_DEBUG, "No content light level from input\n");
    }
    return 0;
}
| 152 | |||
/**
 * Attach mastering-display and content-light side data to the output
 * frame, built from ctx->out_metadata (the user-supplied HDR->HDR
 * target metadata). Inverse of tonemap_vaapi_save_metadata().
 *
 * @return 0 on success, AVERROR(ENOMEM) if side data allocation fails.
 */
static int tonemap_vaapi_update_sidedata(AVFilterContext *avctx, AVFrame *output_frame)
{
    HDRVAAPIContext *ctx = avctx->priv;
    AVFrameSideData *metadata;
    AVMasteringDisplayMetadata *hdr_meta;
    AVFrameSideData *metadata_lt;
    AVContentLightMetadata *hdr_meta_lt;
    int i;
    // VA stores primaries as G,B,R; AVMasteringDisplayMetadata as R,G,B.
    const int mapping[3] = {1, 2, 0}; //green, blue, red
    const int chroma_den = 50000; // 0.00002 chromaticity units
    const int luma_den = 10000;   // 0.0001 cd/m^2 luminance units

    metadata = av_frame_new_side_data(output_frame,
                                      AV_FRAME_DATA_MASTERING_DISPLAY_METADATA,
                                      sizeof(AVMasteringDisplayMetadata));
    if (!metadata)
        return AVERROR(ENOMEM);

    hdr_meta = (AVMasteringDisplayMetadata *)metadata->data;

    for (i = 0; i < 3; i++) {
        const int j = mapping[i];
        hdr_meta->display_primaries[j][0].num = ctx->out_metadata.display_primaries_x[i];
        hdr_meta->display_primaries[j][0].den = chroma_den;

        hdr_meta->display_primaries[j][1].num = ctx->out_metadata.display_primaries_y[i];
        hdr_meta->display_primaries[j][1].den = chroma_den;
    }

    hdr_meta->white_point[0].num = ctx->out_metadata.white_point_x;
    hdr_meta->white_point[0].den = chroma_den;

    hdr_meta->white_point[1].num = ctx->out_metadata.white_point_y;
    hdr_meta->white_point[1].den = chroma_den;
    hdr_meta->has_primaries = 1;

    hdr_meta->max_luminance.num = ctx->out_metadata.max_display_mastering_luminance;
    hdr_meta->max_luminance.den = luma_den;

    hdr_meta->min_luminance.num = ctx->out_metadata.min_display_mastering_luminance;
    hdr_meta->min_luminance.den = luma_den;
    hdr_meta->has_luminance = 1;

    av_log(avctx, AV_LOG_DEBUG,
           "Mastering display colour volume(out):\n");
    av_log(avctx, AV_LOG_DEBUG,
           "G(%u,%u) B(%u,%u) R(%u,%u) WP(%u,%u)\n",
           ctx->out_metadata.display_primaries_x[0],
           ctx->out_metadata.display_primaries_y[0],
           ctx->out_metadata.display_primaries_x[1],
           ctx->out_metadata.display_primaries_y[1],
           ctx->out_metadata.display_primaries_x[2],
           ctx->out_metadata.display_primaries_y[2],
           ctx->out_metadata.white_point_x,
           ctx->out_metadata.white_point_y);
    av_log(avctx, AV_LOG_DEBUG,
           "max_display_mastering_luminance=%u, min_display_mastering_luminance=%u\n",
           ctx->out_metadata.max_display_mastering_luminance,
           ctx->out_metadata.min_display_mastering_luminance);

    metadata_lt = av_frame_new_side_data(output_frame,
                                         AV_FRAME_DATA_CONTENT_LIGHT_LEVEL,
                                         sizeof(AVContentLightMetadata));
    if (!metadata_lt)
        return AVERROR(ENOMEM);

    hdr_meta_lt = (AVContentLightMetadata *)metadata_lt->data;

    // Clamp to the 16-bit range defined for CLL values.
    hdr_meta_lt->MaxCLL = FFMIN(ctx->out_metadata.max_content_light_level, 65535);
    hdr_meta_lt->MaxFALL = FFMIN(ctx->out_metadata.max_pic_average_light_level, 65535);

    av_log(avctx, AV_LOG_DEBUG,
           "Content light level information(out):\n");
    av_log(avctx, AV_LOG_DEBUG,
           "MaxCLL(%u) MaxFALL(%u)\n",
           ctx->out_metadata.max_content_light_level,
           ctx->out_metadata.max_pic_average_light_level);

    return 0;
}
| 233 | |||
| 234 | ✗ | static int tonemap_vaapi_set_filter_params(AVFilterContext *avctx, AVFrame *input_frame) | |
| 235 | { | ||
| 236 | ✗ | VAAPIVPPContext *vpp_ctx = avctx->priv; | |
| 237 | ✗ | HDRVAAPIContext *ctx = avctx->priv; | |
| 238 | VAStatus vas; | ||
| 239 | VAProcFilterParameterBufferHDRToneMapping *hdrtm_param; | ||
| 240 | |||
| 241 | ✗ | vas = vaMapBuffer(vpp_ctx->hwctx->display, vpp_ctx->filter_buffers[0], | |
| 242 | (void**)&hdrtm_param); | ||
| 243 | ✗ | if (vas != VA_STATUS_SUCCESS) { | |
| 244 | ✗ | av_log(avctx, AV_LOG_ERROR, "Failed to map " | |
| 245 | "buffer (%d): %d (%s).\n", | ||
| 246 | vpp_ctx->filter_buffers[0], vas, vaErrorStr(vas)); | ||
| 247 | ✗ | return AVERROR(EIO); | |
| 248 | } | ||
| 249 | |||
| 250 | ✗ | memcpy(hdrtm_param->data.metadata, &ctx->in_metadata, sizeof(VAHdrMetaDataHDR10)); | |
| 251 | |||
| 252 | ✗ | vas = vaUnmapBuffer(vpp_ctx->hwctx->display, vpp_ctx->filter_buffers[0]); | |
| 253 | ✗ | if (vas != VA_STATUS_SUCCESS) { | |
| 254 | ✗ | av_log(avctx, AV_LOG_ERROR, "Failed to unmap output buffers: " | |
| 255 | "%d (%s).\n", vas, vaErrorStr(vas)); | ||
| 256 | ✗ | return AVERROR(EIO); | |
| 257 | } | ||
| 258 | |||
| 259 | ✗ | return 0; | |
| 260 | } | ||
| 261 | |||
| 262 | ✗ | static int tonemap_vaapi_build_filter_params(AVFilterContext *avctx) | |
| 263 | { | ||
| 264 | ✗ | VAAPIVPPContext *vpp_ctx = avctx->priv; | |
| 265 | ✗ | HDRVAAPIContext *ctx = avctx->priv; | |
| 266 | VAStatus vas; | ||
| 267 | VAProcFilterParameterBufferHDRToneMapping hdrtm_param; | ||
| 268 | VAProcFilterCapHighDynamicRange hdr_cap[VAProcHighDynamicRangeMetadataTypeCount]; | ||
| 269 | int num_query_caps; | ||
| 270 | int i; | ||
| 271 | |||
| 272 | ✗ | memset(&hdrtm_param, 0, sizeof(hdrtm_param)); | |
| 273 | ✗ | memset(&ctx->in_metadata, 0, sizeof(ctx->in_metadata)); | |
| 274 | |||
| 275 | ✗ | num_query_caps = VAProcHighDynamicRangeMetadataTypeCount; | |
| 276 | ✗ | vas = vaQueryVideoProcFilterCaps(vpp_ctx->hwctx->display, | |
| 277 | vpp_ctx->va_context, | ||
| 278 | VAProcFilterHighDynamicRangeToneMapping, | ||
| 279 | &hdr_cap, &num_query_caps); | ||
| 280 | ✗ | if (vas != VA_STATUS_SUCCESS) { | |
| 281 | ✗ | av_log(avctx, AV_LOG_ERROR, "Failed to query HDR caps " | |
| 282 | "context: %d (%s).\n", vas, vaErrorStr(vas)); | ||
| 283 | ✗ | return AVERROR(EIO); | |
| 284 | } | ||
| 285 | |||
| 286 | ✗ | for (i = 0; i < num_query_caps; i++) { | |
| 287 | ✗ | if (hdr_cap[i].metadata_type != VAProcHighDynamicRangeMetadataNone) | |
| 288 | ✗ | break; | |
| 289 | } | ||
| 290 | |||
| 291 | ✗ | if (i >= num_query_caps) { | |
| 292 | ✗ | av_log(avctx, AV_LOG_ERROR, "VAAPI driver doesn't support HDR\n"); | |
| 293 | ✗ | return AVERROR(EINVAL); | |
| 294 | } | ||
| 295 | |||
| 296 | ✗ | if (ctx->mastering_display) { | |
| 297 | ✗ | for (i = 0; i < num_query_caps; i++) { | |
| 298 | ✗ | if (VA_TONE_MAPPING_HDR_TO_HDR & hdr_cap[i].caps_flag) | |
| 299 | ✗ | break; | |
| 300 | } | ||
| 301 | ✗ | if (i >= num_query_caps) { | |
| 302 | ✗ | av_log(avctx, AV_LOG_ERROR, | |
| 303 | "VAAPI driver doesn't support HDR to HDR\n"); | ||
| 304 | ✗ | return AVERROR(EINVAL); | |
| 305 | } | ||
| 306 | } else { | ||
| 307 | ✗ | for (i = 0; i < num_query_caps; i++) { | |
| 308 | ✗ | if (VA_TONE_MAPPING_HDR_TO_SDR & hdr_cap[i].caps_flag) | |
| 309 | ✗ | break; | |
| 310 | } | ||
| 311 | ✗ | if (i >= num_query_caps) { | |
| 312 | ✗ | av_log(avctx, AV_LOG_ERROR, | |
| 313 | "VAAPI driver doesn't support HDR to SDR\n"); | ||
| 314 | ✗ | return AVERROR(EINVAL); | |
| 315 | } | ||
| 316 | } | ||
| 317 | |||
| 318 | ✗ | hdrtm_param.type = VAProcFilterHighDynamicRangeToneMapping; | |
| 319 | ✗ | hdrtm_param.data.metadata_type = VAProcHighDynamicRangeMetadataHDR10; | |
| 320 | ✗ | hdrtm_param.data.metadata = &ctx->in_metadata; | |
| 321 | ✗ | hdrtm_param.data.metadata_size = sizeof(VAHdrMetaDataHDR10); | |
| 322 | |||
| 323 | ✗ | return ff_vaapi_vpp_make_param_buffers(avctx, | |
| 324 | VAProcFilterParameterBufferType, | ||
| 325 | &hdrtm_param, sizeof(hdrtm_param), 1); | ||
| 326 | } | ||
| 327 | |||
/**
 * Per-frame entry point: capture the input frame's HDR metadata, update
 * the filter parameter buffer, run the VAAPI pipeline into a new output
 * frame, fix up the output colour properties/side data, and forward the
 * result downstream.
 *
 * Takes ownership of input_frame (freed on both success and failure).
 */
static int tonemap_vaapi_filter_frame(AVFilterLink *inlink, AVFrame *input_frame)
{
    AVFilterContext *avctx = inlink->dst;
    AVFilterLink *outlink = avctx->outputs[0];
    VAAPIVPPContext *vpp_ctx = avctx->priv;
    HDRVAAPIContext *ctx = avctx->priv;
    AVFrame *output_frame = NULL;
    VASurfaceID input_surface, output_surface;

    VAProcPipelineParameterBuffer params;
    int err;

    VAHdrMetaData out_hdr_metadata;

    av_log(avctx, AV_LOG_DEBUG, "Filter input: %s, %ux%u (%"PRId64").\n",
           av_get_pix_fmt_name(input_frame->format),
           input_frame->width, input_frame->height, input_frame->pts);

    // Pipeline not (yet) initialized: cannot process this frame.
    if (vpp_ctx->va_context == VA_INVALID_ID){
        av_frame_free(&input_frame);
        return AVERROR(EINVAL);
    }

    // Extract HDR10 metadata from the frame into ctx->in_metadata ...
    err = tonemap_vaapi_save_metadata(avctx, input_frame);
    if (err < 0)
        goto fail;

    // ... and push it into the filter parameter buffer for this frame.
    err = tonemap_vaapi_set_filter_params(avctx, input_frame);
    if (err < 0)
        goto fail;

    // For VAAPI hw frames, data[3] carries the VASurfaceID.
    input_surface = (VASurfaceID)(uintptr_t)input_frame->data[3];
    av_log(avctx, AV_LOG_DEBUG, "Using surface %#x for tonemap vpp input.\n",
           input_surface);

    output_frame = ff_get_video_buffer(outlink, vpp_ctx->output_width,
                                       vpp_ctx->output_height);
    if (!output_frame) {
        err = AVERROR(ENOMEM);
        goto fail;
    }

    output_surface = (VASurfaceID)(uintptr_t)output_frame->data[3];
    av_log(avctx, AV_LOG_DEBUG, "Using surface %#x for tonemap vpp output.\n",
           output_surface);
    memset(&params, 0, sizeof(params));

    err = av_frame_copy_props(output_frame, input_frame);
    if (err < 0)
        goto fail;

    // The copied input HDR side data no longer describes the output;
    // drop it (re-added below in HDR->HDR mode).
    av_frame_remove_side_data(output_frame, AV_FRAME_DATA_CONTENT_LIGHT_LEVEL);
    av_frame_remove_side_data(output_frame, AV_FRAME_DATA_MASTERING_DISPLAY_METADATA);

    if (!ctx->mastering_display) {
        /* Use BT709 by default for HDR to SDR output frame */
        output_frame->color_primaries = AVCOL_PRI_BT709;
        output_frame->color_trc = AVCOL_TRC_BT709;
        output_frame->colorspace = AVCOL_SPC_BT709;
    }

    // Explicit user options override the defaults above.
    if (ctx->color_primaries != AVCOL_PRI_UNSPECIFIED)
        output_frame->color_primaries = ctx->color_primaries;

    if (ctx->color_transfer != AVCOL_TRC_UNSPECIFIED)
        output_frame->color_trc = ctx->color_transfer;

    if (ctx->color_matrix != AVCOL_SPC_UNSPECIFIED)
        output_frame->colorspace = ctx->color_matrix;

    // HDR->HDR: attach the target metadata as output side data.
    if (ctx->mastering_display) {
        err = tonemap_vaapi_update_sidedata(avctx, output_frame);
        if (err < 0)
            goto fail;
    }

    err = ff_vaapi_vpp_init_params(avctx, &params,
                                   input_frame, output_frame);
    if (err < 0)
        goto fail;

    // HDR->HDR: also hand the target metadata to the driver.
    if (ctx->mastering_display) {
        out_hdr_metadata.metadata_type = VAProcHighDynamicRangeMetadataHDR10;
        out_hdr_metadata.metadata = &ctx->out_metadata;
        out_hdr_metadata.metadata_size = sizeof(VAHdrMetaDataHDR10);
        params.output_hdr_metadata = &out_hdr_metadata;
    }

    if (vpp_ctx->nb_filter_buffers) {
        params.filters = &vpp_ctx->filter_buffers[0];
        params.num_filters = vpp_ctx->nb_filter_buffers;
    }

    err = ff_vaapi_vpp_render_picture(avctx, &params, output_frame);
    if (err < 0)
        goto fail;

    av_frame_free(&input_frame);

    av_log(avctx, AV_LOG_DEBUG, "Filter output: %s, %ux%u (%"PRId64").\n",
           av_get_pix_fmt_name(output_frame->format),
           output_frame->width, output_frame->height, output_frame->pts);

    return ff_filter_frame(outlink, output_frame);

fail:
    // Both frames are owned here; release them on any error path.
    av_frame_free(&input_frame);
    av_frame_free(&output_frame);
    return err;
}
| 438 | |||
| 439 | ✗ | static av_cold int tonemap_vaapi_init(AVFilterContext *avctx) | |
| 440 | { | ||
| 441 | ✗ | VAAPIVPPContext *vpp_ctx = avctx->priv; | |
| 442 | ✗ | HDRVAAPIContext *ctx = avctx->priv; | |
| 443 | |||
| 444 | ✗ | ff_vaapi_vpp_ctx_init(avctx); | |
| 445 | ✗ | vpp_ctx->build_filter_params = tonemap_vaapi_build_filter_params; | |
| 446 | ✗ | vpp_ctx->pipeline_uninit = ff_vaapi_vpp_pipeline_uninit; | |
| 447 | |||
| 448 | ✗ | if (ctx->output_format_string) { | |
| 449 | ✗ | vpp_ctx->output_format = av_get_pix_fmt(ctx->output_format_string); | |
| 450 | } else { | ||
| 451 | ✗ | if (ctx->mastering_display) { | |
| 452 | ✗ | vpp_ctx->output_format = AV_PIX_FMT_P010; | |
| 453 | ✗ | av_log(avctx, AV_LOG_VERBOSE, "Output format not set, use default format P010 for HDR to HDR tone mapping.\n"); | |
| 454 | } else { | ||
| 455 | ✗ | vpp_ctx->output_format = AV_PIX_FMT_NV12; | |
| 456 | ✗ | av_log(avctx, AV_LOG_VERBOSE, "Output format not set, use default format NV12 for HDR to SDR tone mapping.\n"); | |
| 457 | } | ||
| 458 | } | ||
| 459 | |||
| 460 | #define STRING_OPTION(var_name, func_name, default_value) do { \ | ||
| 461 | if (ctx->var_name ## _string) { \ | ||
| 462 | int var = av_ ## func_name ## _from_name(ctx->var_name ## _string); \ | ||
| 463 | if (var < 0) { \ | ||
| 464 | av_log(avctx, AV_LOG_ERROR, "Invalid %s.\n", #var_name); \ | ||
| 465 | return AVERROR(EINVAL); \ | ||
| 466 | } \ | ||
| 467 | ctx->var_name = var; \ | ||
| 468 | } else { \ | ||
| 469 | ctx->var_name = default_value; \ | ||
| 470 | } \ | ||
| 471 | } while (0) | ||
| 472 | |||
| 473 | ✗ | STRING_OPTION(color_primaries, color_primaries, AVCOL_PRI_UNSPECIFIED); | |
| 474 | ✗ | STRING_OPTION(color_transfer, color_transfer, AVCOL_TRC_UNSPECIFIED); | |
| 475 | ✗ | STRING_OPTION(color_matrix, color_space, AVCOL_SPC_UNSPECIFIED); | |
| 476 | |||
| 477 | ✗ | if (ctx->mastering_display) { | |
| 478 | ✗ | if (10 != sscanf(ctx->mastering_display, | |
| 479 | "%hu %hu|%hu %hu|%hu %hu|%hu %hu|%u %u", | ||
| 480 | &ctx->out_metadata.display_primaries_x[0], | ||
| 481 | &ctx->out_metadata.display_primaries_y[0], | ||
| 482 | &ctx->out_metadata.display_primaries_x[1], | ||
| 483 | &ctx->out_metadata.display_primaries_y[1], | ||
| 484 | &ctx->out_metadata.display_primaries_x[2], | ||
| 485 | &ctx->out_metadata.display_primaries_y[2], | ||
| 486 | &ctx->out_metadata.white_point_x, | ||
| 487 | &ctx->out_metadata.white_point_y, | ||
| 488 | &ctx->out_metadata.min_display_mastering_luminance, | ||
| 489 | &ctx->out_metadata.max_display_mastering_luminance)) { | ||
| 490 | ✗ | av_log(avctx, AV_LOG_ERROR, | |
| 491 | "Option mastering-display input invalid\n"); | ||
| 492 | ✗ | return AVERROR(EINVAL); | |
| 493 | } | ||
| 494 | |||
| 495 | ✗ | if (!ctx->content_light) { | |
| 496 | ✗ | ctx->out_metadata.max_content_light_level = 0; | |
| 497 | ✗ | ctx->out_metadata.max_pic_average_light_level = 0; | |
| 498 | ✗ | } else if (2 != sscanf(ctx->content_light, | |
| 499 | "%hu %hu", | ||
| 500 | &ctx->out_metadata.max_content_light_level, | ||
| 501 | &ctx->out_metadata.max_pic_average_light_level)) { | ||
| 502 | ✗ | av_log(avctx, AV_LOG_ERROR, | |
| 503 | "Option content-light input invalid\n"); | ||
| 504 | ✗ | return AVERROR(EINVAL); | |
| 505 | } | ||
| 506 | } | ||
| 507 | |||
| 508 | ✗ | return 0; | |
| 509 | } | ||
| 510 | |||
#define OFFSET(x) offsetof(HDRVAAPIContext, x)
#define FLAGS (AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_FILTERING_PARAM)
/* User-visible options; string values are parsed in tonemap_vaapi_init(). */
static const AVOption tonemap_vaapi_options[] = {
    { "format", "Output pixel format set", OFFSET(output_format_string), AV_OPT_TYPE_STRING, .flags = FLAGS, .unit = "format" },
    { "matrix", "Output color matrix coefficient set",
      OFFSET(color_matrix_string), AV_OPT_TYPE_STRING,
      { .str = NULL }, .flags = FLAGS, .unit = "matrix" },
    { "m", "Output color matrix coefficient set",
      OFFSET(color_matrix_string), AV_OPT_TYPE_STRING,
      { .str = NULL }, .flags = FLAGS, .unit = "matrix" },
    { "primaries", "Output color primaries set",
      OFFSET(color_primaries_string), AV_OPT_TYPE_STRING,
      { .str = NULL }, .flags = FLAGS, .unit = "primaries" },
    { "p", "Output color primaries set",
      OFFSET(color_primaries_string), AV_OPT_TYPE_STRING,
      { .str = NULL }, .flags = FLAGS, .unit = "primaries" },
    { "transfer", "Output color transfer characteristics set",
      OFFSET(color_transfer_string), AV_OPT_TYPE_STRING,
      { .str = NULL }, .flags = FLAGS, .unit = "transfer" },
    { "t", "Output color transfer characteristics set",
      OFFSET(color_transfer_string), AV_OPT_TYPE_STRING,
      { .str = NULL }, .flags = FLAGS, .unit = "transfer" },
    /* Setting "display" switches the filter into HDR->HDR mode. */
    { "display", "set mastering display colour volume",
      OFFSET(mastering_display), AV_OPT_TYPE_STRING,
      { .str = NULL }, .flags = FLAGS },
    { "light", "set content light level information",
      OFFSET(content_light), AV_OPT_TYPE_STRING,
      { .str = NULL }, .flags = FLAGS },
    { NULL }
};


AVFILTER_DEFINE_CLASS(tonemap_vaapi);
| 544 | |||
/* Single video input pad, configured through the common VAAPI VPP helper. */
static const AVFilterPad tonemap_vaapi_inputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
        .filter_frame = &tonemap_vaapi_filter_frame,
        .config_props = &ff_vaapi_vpp_config_input,
    },
};
| 553 | |||
/* Single video output pad, configured through the common VAAPI VPP helper. */
static const AVFilterPad tonemap_vaapi_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
        .config_props = &ff_vaapi_vpp_config_output,
    },
};
| 561 | |||
/* Filter definition registered with libavfilter. */
const FFFilter ff_vf_tonemap_vaapi = {
    .p.name        = "tonemap_vaapi",
    .p.description = NULL_IF_CONFIG_SMALL("VAAPI VPP for tone-mapping"),
    .p.priv_class  = &tonemap_vaapi_class,
    .priv_size     = sizeof(HDRVAAPIContext),
    .init          = &tonemap_vaapi_init,
    .uninit        = &ff_vaapi_vpp_ctx_uninit, // generic VPP teardown is sufficient
    FILTER_INPUTS(tonemap_vaapi_inputs),
    FILTER_OUTPUTS(tonemap_vaapi_outputs),
    FILTER_QUERY_FUNC2(&ff_vaapi_vpp_query_formats),
    .flags_internal = FF_FILTER_FLAG_HWFRAME_AWARE,
};
| 574 |