LCOV - code coverage report
Current view: libavcodec/utvideoenc.c
Test:         coverage.info
Date:         2018-05-20 11:54:08
Coverage:     Lines: 251 of 304 (82.6 %)   Functions: 10 of 10 (100.0 %)

          Line data    Source code
       1             : /*
       2             :  * Ut Video encoder
       3             :  * Copyright (c) 2012 Jan Ekström
       4             :  *
       5             :  * This file is part of FFmpeg.
       6             :  *
       7             :  * FFmpeg is free software; you can redistribute it and/or
       8             :  * modify it under the terms of the GNU Lesser General Public
       9             :  * License as published by the Free Software Foundation; either
      10             :  * version 2.1 of the License, or (at your option) any later version.
      11             :  *
      12             :  * FFmpeg is distributed in the hope that it will be useful,
      13             :  * but WITHOUT ANY WARRANTY; without even the implied warranty of
      14             :  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
      15             :  * Lesser General Public License for more details.
      16             :  *
      17             :  * You should have received a copy of the GNU Lesser General Public
      18             :  * License along with FFmpeg; if not, write to the Free Software
      19             :  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
      20             :  */
      21             : 
      22             : /**
      23             :  * @file
      24             :  * Ut Video encoder
      25             :  */
      26             : 
      27             : #include "libavutil/imgutils.h"
      28             : #include "libavutil/intreadwrite.h"
      29             : #include "libavutil/opt.h"
      30             : 
      31             : #include "avcodec.h"
      32             : #include "internal.h"
      33             : #include "bswapdsp.h"
      34             : #include "bytestream.h"
      35             : #include "put_bits.h"
      36             : #include "mathops.h"
      37             : #include "utvideo.h"
      38             : #include "huffman.h"
      39             : 
      40             : /* Compare huffentry symbols */
      41     3154892 : static int huff_cmp_sym(const void *a, const void *b)
      42             : {
      43     3154892 :     const HuffEntry *aa = a, *bb = b;
      44     3154892 :     return aa->sym - bb->sym;
      45             : }
      46             : 
      47         135 : static av_cold int utvideo_encode_close(AVCodecContext *avctx)
      48             : {
      49         135 :     UtvideoContext *c = avctx->priv_data;
      50             :     int i;
      51             : 
      52         135 :     av_freep(&c->slice_bits);
      53         675 :     for (i = 0; i < 4; i++)
      54         540 :         av_freep(&c->slice_buffer[i]);
      55             : 
      56         135 :     return 0;
      57             : }
      58             : 
      59         135 : static av_cold int utvideo_encode_init(AVCodecContext *avctx)
      60             : {
      61         135 :     UtvideoContext *c = avctx->priv_data;
      62             :     int i, subsampled_height;
      63             :     uint32_t original_format;
      64             : 
      65         135 :     c->avctx           = avctx;
      66         135 :     c->frame_info_size = 4;
      67         135 :     c->slice_stride    = FFALIGN(avctx->width, 32);
      68             : 
      69         135 :     switch (avctx->pix_fmt) {
      70          27 :     case AV_PIX_FMT_GBRP:
      71          27 :         c->planes        = 3;
      72          27 :         avctx->codec_tag = MKTAG('U', 'L', 'R', 'G');
      73          27 :         original_format  = UTVIDEO_RGB;
      74          27 :         break;
      75          27 :     case AV_PIX_FMT_GBRAP:
      76          27 :         c->planes        = 4;
      77          27 :         avctx->codec_tag = MKTAG('U', 'L', 'R', 'A');
      78          27 :         original_format  = UTVIDEO_RGBA;
      79          27 :         avctx->bits_per_coded_sample = 32;
      80          27 :         break;
      81          27 :     case AV_PIX_FMT_YUV420P:
      82          27 :         if (avctx->width & 1 || avctx->height & 1) {
      83           0 :             av_log(avctx, AV_LOG_ERROR,
      84             :                    "4:2:0 video requires even width and height.\n");
      85           0 :             return AVERROR_INVALIDDATA;
      86             :         }
      87          27 :         c->planes        = 3;
      88          27 :         if (avctx->colorspace == AVCOL_SPC_BT709)
      89           0 :             avctx->codec_tag = MKTAG('U', 'L', 'H', '0');
      90             :         else
      91          27 :             avctx->codec_tag = MKTAG('U', 'L', 'Y', '0');
      92          27 :         original_format  = UTVIDEO_420;
      93          27 :         break;
      94          27 :     case AV_PIX_FMT_YUV422P:
      95          27 :         if (avctx->width & 1) {
      96           0 :             av_log(avctx, AV_LOG_ERROR,
      97             :                    "4:2:2 video requires even width.\n");
      98           0 :             return AVERROR_INVALIDDATA;
      99             :         }
     100          27 :         c->planes        = 3;
     101          27 :         if (avctx->colorspace == AVCOL_SPC_BT709)
     102           0 :             avctx->codec_tag = MKTAG('U', 'L', 'H', '2');
     103             :         else
     104          27 :             avctx->codec_tag = MKTAG('U', 'L', 'Y', '2');
     105          27 :         original_format  = UTVIDEO_422;
     106          27 :         break;
     107          27 :     case AV_PIX_FMT_YUV444P:
     108          27 :         c->planes        = 3;
     109          27 :         if (avctx->colorspace == AVCOL_SPC_BT709)
     110           0 :             avctx->codec_tag = MKTAG('U', 'L', 'H', '4');
     111             :         else
     112          27 :             avctx->codec_tag = MKTAG('U', 'L', 'Y', '4');
     113          27 :         original_format  = UTVIDEO_444;
     114          27 :         break;
     115           0 :     default:
     116           0 :         av_log(avctx, AV_LOG_ERROR, "Unknown pixel format: %d\n",
     117           0 :                avctx->pix_fmt);
     118           0 :         return AVERROR_INVALIDDATA;
     119             :     }
     120             : 
     121         135 :     ff_bswapdsp_init(&c->bdsp);
     122         135 :     ff_llvidencdsp_init(&c->llvidencdsp);
     123             : 
     124             : #if FF_API_PRIVATE_OPT
     125             : FF_DISABLE_DEPRECATION_WARNINGS
     126             :     /* Check the prediction method, and error out if unsupported */
     127         135 :     if (avctx->prediction_method < 0 || avctx->prediction_method > 4) {
     128           0 :         av_log(avctx, AV_LOG_WARNING,
     129             :                "Prediction method %d is not supported in Ut Video.\n",
     130             :                avctx->prediction_method);
     131           0 :         return AVERROR_OPTION_NOT_FOUND;
     132             :     }
     133             : 
     134         135 :     if (avctx->prediction_method == FF_PRED_PLANE) {
     135           0 :         av_log(avctx, AV_LOG_ERROR,
     136             :                "Plane prediction is not supported in Ut Video.\n");
     137           0 :         return AVERROR_OPTION_NOT_FOUND;
     138             :     }
     139             : 
     140             :     /* Convert from libavcodec prediction type to Ut Video's */
     141         135 :     if (avctx->prediction_method)
     142           0 :         c->frame_pred = ff_ut_pred_order[avctx->prediction_method];
     143             : FF_ENABLE_DEPRECATION_WARNINGS
     144             : #endif
     145             : 
     146         135 :     if (c->frame_pred == PRED_GRADIENT) {
     147           0 :         av_log(avctx, AV_LOG_ERROR, "Gradient prediction is not supported.\n");
     148           0 :         return AVERROR_OPTION_NOT_FOUND;
     149             :     }
     150             : 
     151             :     /*
     152             :      * Check the requested slice count for obviously invalid
     153             :      * values (> 256 or negative).
     154             :      */
     155         135 :     if (avctx->slices > 256 || avctx->slices < 0) {
     156           0 :         av_log(avctx, AV_LOG_ERROR,
     157             :                "Slice count %d is not supported in Ut Video (theoretical range is 0-256).\n",
     158             :                avctx->slices);
     159           0 :         return AVERROR(EINVAL);
     160             :     }
     161             : 
     162             :     /* Check that the slice count is not larger than the subsampled height */
     163         135 :     subsampled_height = avctx->height >> av_pix_fmt_desc_get(avctx->pix_fmt)->log2_chroma_h;
     164         135 :     if (avctx->slices > subsampled_height) {
     165           0 :         av_log(avctx, AV_LOG_ERROR,
     166             :                "Slice count %d is larger than the subsampled height %d.\n",
     167             :                avctx->slices, subsampled_height);
     168           0 :         return AVERROR(EINVAL);
     169             :     }
     170             : 
     171             :     /* extradata size is 4 * 32 bits */
     172         135 :     avctx->extradata_size = 16;
     173             : 
     174         135 :     avctx->extradata = av_mallocz(avctx->extradata_size +
     175             :                                   AV_INPUT_BUFFER_PADDING_SIZE);
     176             : 
     177         135 :     if (!avctx->extradata) {
     178           0 :         av_log(avctx, AV_LOG_ERROR, "Could not allocate extradata.\n");
     179           0 :         utvideo_encode_close(avctx);
     180           0 :         return AVERROR(ENOMEM);
     181             :     }
     182             : 
     183         567 :     for (i = 0; i < c->planes; i++) {
     184         432 :         c->slice_buffer[i] = av_malloc(c->slice_stride * (avctx->height + 2) +
     185             :                                        AV_INPUT_BUFFER_PADDING_SIZE);
     186         432 :         if (!c->slice_buffer[i]) {
     187           0 :             av_log(avctx, AV_LOG_ERROR, "Cannot allocate temporary buffer 1.\n");
     188           0 :             utvideo_encode_close(avctx);
     189           0 :             return AVERROR(ENOMEM);
     190             :         }
     191             :     }
     192             : 
     193             :     /*
     194             :      * Set the version of the encoder.
     195             :      * The last byte is the "implementation ID", which is
     196             :      * obtained from the creator of the format.
     197             :      * Libavcodec has been assigned the ID 0xF0.
     198             :      */
     199         135 :     AV_WB32(avctx->extradata, MKTAG(1, 0, 0, 0xF0));
     200             : 
     201             :     /*
     202             :      * Set the "original format".
     203             :      * It is not used for anything during decoding.
     204             :      */
     205         135 :     AV_WL32(avctx->extradata + 4, original_format);
     206             : 
     207             :     /* Write 4 as the 'frame info size' */
     208         135 :     AV_WL32(avctx->extradata + 8, c->frame_info_size);
     209             : 
     210             :     /*
     211             :      * Set how many slices are going to be used.
     212             :      * By default, multiple slices are used depending on the subsampled height.
     213             :      * This enables multithreading in the official decoder.
     214             :      */
     215         135 :     if (!avctx->slices) {
     216           0 :         c->slices = subsampled_height / 120;
     217             : 
     218           0 :         if (!c->slices)
     219           0 :             c->slices = 1;
     220           0 :         else if (c->slices > 256)
     221           0 :             c->slices = 256;
     222             :     } else {
     223         135 :         c->slices = avctx->slices;
     224             :     }
     225             : 
     226             :     /* Set compression mode */
     227         135 :     c->compression = COMP_HUFF;
     228             : 
     229             :     /*
     230             :      * Set the encoding flags:
     231             :      * - Slice count minus 1
     232             :      * - Interlaced encoding mode flag, set to zero for now.
     233             :      * - Compression mode (none/huff)
     234             :      * And write the flags.
     235             :      */
     236         135 :     c->flags  = (c->slices - 1) << 24;
     237         135 :     c->flags |= 0 << 11; // bit field to signal interlaced encoding mode
     238         135 :     c->flags |= c->compression;
     239             : 
     240         135 :     AV_WL32(avctx->extradata + 12, c->flags);
     241             : 
     242         135 :     return 0;
     243             : }
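
The 16 bytes of extradata written above can be summarized in a standalone
sketch. The wb32()/wl32()/TAG() helpers below are local stand-ins for FFmpeg's
AV_WB32/AV_WL32/MKTAG, and sketch_extradata() is a hypothetical name; under
those assumptions the values mirror the calls in utvideo_encode_init().

    #include <stdint.h>

    /* Local stand-ins, not the FFmpeg macros. */
    static void wb32(uint8_t *p, uint32_t v)   /* big-endian store, like AV_WB32 */
    {
        p[0] = v >> 24; p[1] = v >> 16; p[2] = v >> 8; p[3] = v;
    }

    static void wl32(uint8_t *p, uint32_t v)   /* little-endian store, like AV_WL32 */
    {
        p[0] = v; p[1] = v >> 8; p[2] = v >> 16; p[3] = v >> 24;
    }

    #define TAG(a, b, c, d) \
        ((uint32_t)(a) | ((uint32_t)(b) << 8) | ((uint32_t)(c) << 16) | ((uint32_t)(d) << 24))

    /* Hypothetical sketch of the 16-byte extradata block. */
    static void sketch_extradata(uint8_t out[16], uint32_t original_format,
                                 unsigned slices, unsigned compression)
    {
        wb32(out +  0, TAG(1, 0, 0, 0xF0));      /* version, implementation ID 0xF0 */
        wl32(out +  4, original_format);         /* "original format", informational */
        wl32(out +  8, 4);                       /* frame info size                  */
        wl32(out + 12, ((slices - 1u) << 24) |   /* slice count minus one            */
                       (0u << 11)            |   /* interlaced flag, off             */
                       compression);             /* compression mode (COMP_HUFF)     */
    }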
     244             : 
     245         300 : static void mangle_rgb_planes(uint8_t *dst[4], ptrdiff_t dst_stride,
     246             :                               uint8_t *const src[4], int planes, const int stride[4],
     247             :                               int width, int height)
     248             : {
     249             :     int i, j;
     250         300 :     int k = 2 * dst_stride;
     251         300 :     const uint8_t *sg = src[0];
     252         300 :     const uint8_t *sb = src[1];
     253         300 :     const uint8_t *sr = src[2];
     254         300 :     const uint8_t *sa = src[3];
     255             :     unsigned int g;
     256             : 
     257       86700 :     for (j = 0; j < height; j++) {
     258       86400 :         if (planes == 3) {
     259    15249600 :             for (i = 0; i < width; i++) {
     260    15206400 :                 g         = sg[i];
     261    15206400 :                 dst[0][k] = g;
     262    15206400 :                 g        += 0x80;
     263    15206400 :                 dst[1][k] = sb[i] - g;
     264    15206400 :                 dst[2][k] = sr[i] - g;
     265    15206400 :                 k++;
     266             :             }
     267             :         } else {
     268    15249600 :             for (i = 0; i < width; i++) {
     269    15206400 :                 g         = sg[i];
     270    15206400 :                 dst[0][k] = g;
     271    15206400 :                 g        += 0x80;
     272    15206400 :                 dst[1][k] = sb[i] - g;
     273    15206400 :                 dst[2][k] = sr[i] - g;
     274    15206400 :                 dst[3][k] = sa[i];
     275    15206400 :                 k++;
     276             :             }
     277       43200 :             sa += stride[3];
     278             :         }
     279       86400 :         k += dst_stride - width;
     280       86400 :         sg += stride[0];
     281       86400 :         sb += stride[1];
     282       86400 :         sr += stride[2];
     283             :     }
     284         300 : }
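
Per pixel, the loop above keeps G as-is and replaces B and R with differences
against G biased by 0x80 (alpha, when present, is copied unchanged). A scalar
sketch with a hypothetical helper name; the uint8_t wraparound matches the
8-bit stores in the real loop.

    #include <stdint.h>

    static void sketch_mangle_pixel(uint8_t g, uint8_t b, uint8_t r, uint8_t a,
                                    uint8_t out[4])
    {
        out[0] = g;                         /* G plane: kept as-is          */
        out[1] = (uint8_t)(b - g - 0x80);   /* B plane: B - G, 0x80 bias    */
        out[2] = (uint8_t)(r - g - 0x80);   /* R plane: R - G, 0x80 bias    */
        out[3] = a;                         /* A plane: copied (GBRAP only) */
    }

Note that the destination index starts at 2 * dst_stride, leaving two rows of
headroom at the top of each slice buffer; utvideo_encode_frame() later passes
slice_buffer[i] + 2 * c->slice_stride as the prediction source for RGB input.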
     285             : 
     286             : #undef A
     287             : #undef B
     288             : 
     289             : /* Write a plane's median-predicted residuals to the destination buffer */
     290         800 : static void median_predict(UtvideoContext *c, uint8_t *src, uint8_t *dst,
     291             :                            ptrdiff_t stride, int width, int height)
     292             : {
     293             :     int i, j;
     294             :     int A, B;
     295             :     uint8_t prev;
     296             : 
     297             :     /* First line uses left neighbour prediction */
     298         800 :     prev = 0x80; /* Set the initial value */
     299      247200 :     for (i = 0; i < width; i++) {
     300      246400 :         *dst++ = src[i] - prev;
     301      246400 :         prev   = src[i];
     302             :     }
     303             : 
     304         800 :     if (height == 1)
     305           0 :         return;
     306             : 
     307         800 :     src += stride;
     308             : 
     309             :     /*
     310             :      * Second line uses top prediction for the first sample,
     311             :      * and median for the rest.
     312             :      */
     313         800 :     A = B = 0;
     314             : 
     315             :     /* Rest of the coded part uses median prediction */
     316      216000 :     for (j = 1; j < height; j++) {
     317      215200 :         c->llvidencdsp.sub_median_pred(dst, src - stride, src, width, &A, &B);
     318      215200 :         dst += width;
     319      215200 :         src += stride;
     320             :     }
     321             : }
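
For rows after the first, sub_median_pred() subtracts the classic median
predictor med(left, top, left + top - topleft) from each sample. A scalar
sketch for one interior sample, assuming a local med3() helper (playing the
role of FFmpeg's mid_pred) and hypothetical function names:

    #include <stdint.h>

    /* Median of three values. */
    static int med3(int a, int b, int c)
    {
        int mx = a > b ? a : b;
        int mn = a > b ? b : a;
        return c > mx ? mx : (c < mn ? mn : c);
    }

    /* Residual for one sample given its left, top and top-left neighbours;
     * the third candidate is wrapped to 8 bits as in the C reference of
     * sub_median_pred(). */
    static uint8_t sketch_median_residual(uint8_t cur, uint8_t left,
                                          uint8_t top, uint8_t topleft)
    {
        int pred = med3(left, top, (left + top - topleft) & 0xFF);
        return (uint8_t)(cur - pred);
    }

With A = B = 0 going into the first call, the first sample of the second row
reduces to plain top prediction, which is what the comment above describes.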
     322             : 
     323             : /* Count the usage of values in a plane */
     324        2400 : static void count_usage(uint8_t *src, int width,
     325             :                         int height, uint64_t *counts)
     326             : {
     327             :     int i, j;
     328             : 
     329      650400 :     for (j = 0; j < height; j++) {
     330   205934400 :         for (i = 0; i < width; i++) {
     331   205286400 :             counts[src[i]]++;
     332             :         }
     333      648000 :         src += width;
     334             :     }
     335        2400 : }
     336             : 
     337             : /* Calculate the actual huffman codes from the code lengths */
     338        2350 : static void calculate_codes(HuffEntry *he)
     339             : {
     340             :     int last, i;
     341             :     uint32_t code;
     342             : 
     343        2350 :     qsort(he, 256, sizeof(*he), ff_ut_huff_cmp_len);
     344             : 
     345        2350 :     last = 255;
     346       88391 :     while (he[last].len == 255 && last)
     347       83691 :         last--;
     348             : 
     349        2350 :     code = 1;
     350      520259 :     for (i = last; i >= 0; i--) {
     351      517909 :         he[i].code  = code >> (32 - he[i].len);
     352      517909 :         code       += 0x80000000u >> (he[i].len - 1);
     353             :     }
     354             : 
     355        2350 :     qsort(he, 256, sizeof(*he), huff_cmp_sym);
     356        2350 : }
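
The assignment loop above hands out left-aligned canonical codes from the
longest code length to the shortest. A toy, self-contained re-statement with
four symbols (hypothetical names, same arithmetic as calculate_codes()):

    #include <stdint.h>
    #include <stdio.h>

    struct toy_entry { char sym; uint8_t len; uint32_t code; };

    /* Entries must already be ordered from longest to shortest length;
     * calculate_codes() gets there by sorting ascending and walking the
     * array backwards. */
    static void toy_assign_codes(struct toy_entry *e, int n)
    {
        uint32_t code = 1;
        for (int i = 0; i < n; i++) {
            e[i].code = code >> (32 - e[i].len);
            code     += 0x80000000u >> (e[i].len - 1);
        }
    }

    int main(void)
    {
        /* Lengths 3, 3, 2, 1 yield the prefix-free codes 000, 001, 01, 1. */
        struct toy_entry e[] = { { 'D', 3, 0 }, { 'C', 3, 0 },
                                 { 'B', 2, 0 }, { 'A', 1, 0 } };
        toy_assign_codes(e, 4);
        for (int i = 0; i < 4; i++)
            printf("%c: len %d, code 0x%x\n", e[i].sym, e[i].len, (unsigned)e[i].code);
        return 0;
    }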
     357             : 
     358             : /* Write huffman bit codes to a memory block */
     359        2350 : static int write_huff_codes(uint8_t *src, uint8_t *dst, int dst_size,
     360             :                             int width, int height, HuffEntry *he)
     361             : {
     362             :     PutBitContext pb;
     363             :     int i, j;
     364             :     int count;
     365             : 
     366        2350 :     init_put_bits(&pb, dst, dst_size);
     367             : 
     368             :     /* Write the codes */
     369      635950 :     for (j = 0; j < height; j++) {
     370   200851200 :         for (i = 0; i < width; i++)
     371   200217600 :             put_bits(&pb, he[src[i]].len, he[src[i]].code);
     372             : 
     373      633600 :         src += width;
     374             :     }
     375             : 
     376             :     /* Pad output to a 32-bit boundary */
     377        2350 :     count = put_bits_count(&pb) & 0x1F;
     378             : 
     379        2350 :     if (count)
     380        2170 :         put_bits(&pb, 32 - count, 0);
     381             : 
     382             :     /* Get the number of bits written */
     383        2350 :     count = put_bits_count(&pb);
     384             : 
     385             :     /* Flush the rest with zeroes */
     386        2350 :     flush_put_bits(&pb);
     387             : 
     388        2350 :     return count;
     389             : }
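
Each slice's bitstream is padded so it ends on a 32-bit boundary before the
bit count is returned; encode_plane() then shifts that count right by 3 to get
the byte offset. A small sketch of that bookkeeping (hypothetical helper):

    /* Padded size in bytes for a slice that used bits_written bits. */
    static int sketch_padded_bytes(int bits_written)
    {
        int rem    = bits_written & 0x1F;            /* bits past a 32-bit boundary */
        int padded = bits_written + (rem ? 32 - rem : 0);
        return padded >> 3;                          /* always a multiple of 4      */
    }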
     390             : 
     391        2400 : static int encode_plane(AVCodecContext *avctx, uint8_t *src,
     392             :                         uint8_t *dst, ptrdiff_t stride, int plane_no,
     393             :                         int width, int height, PutByteContext *pb)
     394             : {
     395        2400 :     UtvideoContext *c        = avctx->priv_data;
     396             :     uint8_t  lengths[256];
     397        2400 :     uint64_t counts[256]     = { 0 };
     398             : 
     399             :     HuffEntry he[256];
     400             : 
     401        2400 :     uint32_t offset = 0, slice_len = 0;
     402        2400 :     const int cmask = ~(!plane_no && avctx->pix_fmt == AV_PIX_FMT_YUV420P);
     403        2400 :     int      i, sstart, send = 0;
     404             :     int      symbol;
     405             :     int      ret;
     406             : 
     407             :     /* Do prediction / make planes */
     408        2400 :     switch (c->frame_pred) {
     409         800 :     case PRED_NONE:
     410        1600 :         for (i = 0; i < c->slices; i++) {
     411         800 :             sstart = send;
     412         800 :             send   = height * (i + 1) / c->slices & cmask;
     413        1600 :             av_image_copy_plane(dst + sstart * width, width,
     414         800 :                                 src + sstart * stride, stride,
     415             :                                 width, send - sstart);
     416             :         }
     417         800 :         break;
     418         800 :     case PRED_LEFT:
     419        1600 :         for (i = 0; i < c->slices; i++) {
     420         800 :             sstart = send;
     421         800 :             send   = height * (i + 1) / c->slices & cmask;
     422         800 :             c->llvidencdsp.sub_left_predict(dst + sstart * width, src + sstart * stride, stride, width, send - sstart);
     423             :         }
     424         800 :         break;
     425         800 :     case PRED_MEDIAN:
     426        1600 :         for (i = 0; i < c->slices; i++) {
     427         800 :             sstart = send;
     428         800 :             send   = height * (i + 1) / c->slices & cmask;
     429         800 :             median_predict(c, src + sstart * stride, dst + sstart * width,
     430             :                            stride, width, send - sstart);
     431             :         }
     432         800 :         break;
     433           0 :     default:
     434           0 :         av_log(avctx, AV_LOG_ERROR, "Unknown prediction mode: %d\n",
     435             :                c->frame_pred);
     436           0 :         return AVERROR_OPTION_NOT_FOUND;
     437             :     }
     438             : 
     439             :     /* Count the usage of values */
     440        2400 :     count_usage(dst, width, height, counts);
     441             : 
     442             :     /* Check for a special case where only one symbol was used */
     443       16840 :     for (symbol = 0; symbol < 256; symbol++) {
     444             :         /* If non-zero count is found, see if it matches width * height */
     445       16840 :         if (counts[symbol]) {
     446             :             /* Special case if only one symbol was used */
     447        2400 :             if (counts[symbol] == width * (int64_t)height) {
     448             :                 /*
     449             :                  * Write a zero for the single symbol
     450             :                  * used in the plane, else 0xFF.
     451             :                  */
     452       12850 :                 for (i = 0; i < 256; i++) {
     453       12800 :                     if (i == symbol)
     454          50 :                         bytestream2_put_byte(pb, 0);
     455             :                     else
     456       12750 :                         bytestream2_put_byte(pb, 0xFF);
     457             :                 }
     458             : 
     459             :                 /* Write zeroes for lengths */
     460         100 :                 for (i = 0; i < c->slices; i++)
     461          50 :                     bytestream2_put_le32(pb, 0);
     462             : 
     463             :                 /* And that's all for that plane folks */
     464          50 :                 return 0;
     465             :             }
     466        2350 :             break;
     467             :         }
     468             :     }
     469             : 
     470             :     /* Calculate huffman lengths */
     471        2350 :     if ((ret = ff_huff_gen_len_table(lengths, counts, 256, 1)) < 0)
     472           0 :         return ret;
     473             : 
     474             :     /*
     475             :      * Write the plane's header into the output packet:
     476             :      * - huffman code lengths (256 bytes)
     477             :      * - slice end offsets (derived from the slice lengths)
     478             :      */
     479      603950 :     for (i = 0; i < 256; i++) {
     480      601600 :         bytestream2_put_byte(pb, lengths[i]);
     481             : 
     482      601600 :         he[i].len = lengths[i];
     483      601600 :         he[i].sym = i;
     484             :     }
     485             : 
     486             :     /* Calculate the huffman codes themselves */
     487        2350 :     calculate_codes(he);
     488             : 
     489        2350 :     send = 0;
     490        4700 :     for (i = 0; i < c->slices; i++) {
     491        2350 :         sstart  = send;
     492        2350 :         send    = height * (i + 1) / c->slices & cmask;
     493             : 
     494             :         /*
     495             :          * Write the huffman codes to a buffer,
     496             :          * get the offset in bits and convert to bytes.
     497             :          */
     498        7050 :         offset += write_huff_codes(dst + sstart * width, c->slice_bits,
     499        2350 :                                    width * height + 4, width,
     500        2350 :                                    send - sstart, he) >> 3;
     501             : 
     502        2350 :         slice_len = offset - slice_len;
     503             : 
     504             :         /* Byteswap the written huffman codes */
     505        7050 :         c->bdsp.bswap_buf((uint32_t *) c->slice_bits,
     506        2350 :                           (uint32_t *) c->slice_bits,
     507        2350 :                           slice_len >> 2);
     508             : 
     509             :         /* Write the offset to the stream */
     510        2350 :         bytestream2_put_le32(pb, offset);
     511             : 
     512             :         /* Seek to the data part of the packet */
     513        4700 :         bytestream2_seek_p(pb, 4 * (c->slices - i - 1) +
     514        2350 :                            offset - slice_len, SEEK_CUR);
     515             : 
     516             :         /* Write the slices' data into the output packet */
     517        2350 :         bytestream2_put_buffer(pb, c->slice_bits, slice_len);
     518             : 
     519             :         /* Seek back to the slice offsets */
     520        2350 :         bytestream2_seek_p(pb, -4 * (c->slices - i - 1) - offset,
     521             :                            SEEK_CUR);
     522             : 
     523        2350 :         slice_len = offset;
     524             :     }
     525             : 
     526             :     /* And at the end seek to the end of written slice(s) */
     527        2350 :     bytestream2_seek_p(pb, offset, SEEK_CUR);
     528             : 
     529        2350 :     return 0;
     530             : }
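
Put together, each call above emits one self-contained block per plane into
the output packet. A summary of that layout as assembled by the put/seek
sequence (N = slice count), kept close to the code rather than the Ut Video
specification:

    /*
     * Per-plane block written by encode_plane():
     *
     *   256 bytes    Huffman code lengths, one per symbol value
     *                (single-symbol planes: 0 at that symbol, 0xFF elsewhere,
     *                followed by N zero offsets and no slice data)
     *   4 * N bytes  cumulative slice end offsets, little endian, measured
     *                from the start of this plane's slice data
     *   data         Huffman-coded slices, each padded to a 32-bit boundary
     *                and byteswapped in 32-bit words
     */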
     531             : 
     532         750 : static int utvideo_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
     533             :                                 const AVFrame *pic, int *got_packet)
     534             : {
     535         750 :     UtvideoContext *c = avctx->priv_data;
     536             :     PutByteContext pb;
     537             : 
     538             :     uint32_t frame_info;
     539             : 
     540             :     uint8_t *dst;
     541             : 
     542         750 :     int width = avctx->width, height = avctx->height;
     543         750 :     int i, ret = 0;
     544             : 
     545             :     /* Allocate a new packet if needed, and point dst at its data */
     546        2250 :     ret = ff_alloc_packet2(avctx, pkt, (256 + 4 * c->slices + width * height) *
     547        1500 :                            c->planes + 4, 0);
     548             : 
     549         750 :     if (ret < 0)
     550           0 :         return ret;
     551             : 
     552         750 :     dst = pkt->data;
     553             : 
     554         750 :     bytestream2_init_writer(&pb, dst, pkt->size);
     555             : 
     556         750 :     av_fast_padded_malloc(&c->slice_bits, &c->slice_bits_size, width * height + 4);
     557             : 
     558         750 :     if (!c->slice_bits) {
     559           0 :         av_log(avctx, AV_LOG_ERROR, "Cannot allocate temporary buffer 2.\n");
     560           0 :         return AVERROR(ENOMEM);
     561             :     }
     562             : 
     563             :     /* In case of RGB, mangle the planes to Ut Video's format */
     564         750 :     if (avctx->pix_fmt == AV_PIX_FMT_GBRAP || avctx->pix_fmt == AV_PIX_FMT_GBRP)
     565         300 :         mangle_rgb_planes(c->slice_buffer, c->slice_stride, pic->data,
     566         300 :                           c->planes, pic->linesize, width, height);
     567             : 
     568             :     /* Deal with the planes */
     569         750 :     switch (avctx->pix_fmt) {
     570         300 :     case AV_PIX_FMT_GBRP:
     571             :     case AV_PIX_FMT_GBRAP:
     572        1350 :         for (i = 0; i < c->planes; i++) {
     573        1050 :             ret = encode_plane(avctx, c->slice_buffer[i] + 2 * c->slice_stride,
     574             :                                c->slice_buffer[i], c->slice_stride, i,
     575             :                                width, height, &pb);
     576             : 
     577        1050 :             if (ret) {
     578           0 :                 av_log(avctx, AV_LOG_ERROR, "Error encoding plane %d.\n", i);
     579           0 :                 return ret;
     580             :             }
     581             :         }
     582         300 :         break;
     583         150 :     case AV_PIX_FMT_YUV444P:
     584         600 :         for (i = 0; i < c->planes; i++) {
     585         450 :             ret = encode_plane(avctx, pic->data[i], c->slice_buffer[0],
     586         450 :                                pic->linesize[i], i, width, height, &pb);
     587             : 
     588         450 :             if (ret) {
     589           0 :                 av_log(avctx, AV_LOG_ERROR, "Error encoding plane %d.\n", i);
     590           0 :                 return ret;
     591             :             }
     592             :         }
     593         150 :         break;
     594         150 :     case AV_PIX_FMT_YUV422P:
     595         600 :         for (i = 0; i < c->planes; i++) {
     596         900 :             ret = encode_plane(avctx, pic->data[i], c->slice_buffer[0],
     597         900 :                                pic->linesize[i], i, width >> !!i, height, &pb);
     598             : 
     599         450 :             if (ret) {
     600           0 :                 av_log(avctx, AV_LOG_ERROR, "Error encoding plane %d.\n", i);
     601           0 :                 return ret;
     602             :             }
     603             :         }
     604         150 :         break;
     605         150 :     case AV_PIX_FMT_YUV420P:
     606         600 :         for (i = 0; i < c->planes; i++) {
     607        1350 :             ret = encode_plane(avctx, pic->data[i], c->slice_buffer[0],
     608        1350 :                                pic->linesize[i], i, width >> !!i, height >> !!i,
     609             :                                &pb);
     610             : 
     611         450 :             if (ret) {
     612           0 :                 av_log(avctx, AV_LOG_ERROR, "Error encoding plane %d.\n", i);
     613           0 :                 return ret;
     614             :             }
     615             :         }
     616         150 :         break;
     617           0 :     default:
     618           0 :         av_log(avctx, AV_LOG_ERROR, "Unknown pixel format: %d\n",
     619           0 :                avctx->pix_fmt);
     620           0 :         return AVERROR_INVALIDDATA;
     621             :     }
     622             : 
     623             :     /*
     624             :      * Write frame information (LE 32-bit unsigned)
     625             :      * into the output packet.
     626             :      * Contains the prediction method.
     627             :      */
     628         750 :     frame_info = c->frame_pred << 8;
     629         750 :     bytestream2_put_le32(&pb, frame_info);
     630             : 
     631             :     /*
     632             :      * At least currently Ut Video is IDR only.
     633             :      * Set flags accordingly.
     634             :      */
     635             : #if FF_API_CODED_FRAME
     636             : FF_DISABLE_DEPRECATION_WARNINGS
     637         750 :     avctx->coded_frame->key_frame = 1;
     638         750 :     avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
     639             : FF_ENABLE_DEPRECATION_WARNINGS
     640             : #endif
     641             : 
     642         750 :     pkt->size   = bytestream2_tell_p(&pb);
     643         750 :     pkt->flags |= AV_PKT_FLAG_KEY;
     644             : 
     645             :     /* Packet should be done */
     646         750 :     *got_packet = 1;
     647             : 
     648         750 :     return 0;
     649             : }
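
The finished packet therefore concatenates one such block per plane and closes
with the frame info word; a compact summary (P = planes, N = slices):

    /*
     * Packet produced by utvideo_encode_frame():
     *
     *   plane 0:   256 length bytes, N slice end offsets, slice data
     *   ...
     *   plane P-1: same structure
     *   4 bytes:   frame info, little endian, holding the prediction
     *              method shifted left by 8 (frame_pred << 8)
     */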
     650             : 
     651             : #define OFFSET(x) offsetof(UtvideoContext, x)
     652             : #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
     653             : static const AVOption options[] = {
     654             : { "pred", "Prediction method", OFFSET(frame_pred), AV_OPT_TYPE_INT, { .i64 = PRED_LEFT }, PRED_NONE, PRED_MEDIAN, VE, "pred" },
     655             :     { "none",     NULL, 0, AV_OPT_TYPE_CONST, { .i64 = PRED_NONE }, INT_MIN, INT_MAX, VE, "pred" },
     656             :     { "left",     NULL, 0, AV_OPT_TYPE_CONST, { .i64 = PRED_LEFT }, INT_MIN, INT_MAX, VE, "pred" },
     657             :     { "gradient", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = PRED_GRADIENT }, INT_MIN, INT_MAX, VE, "pred" },
     658             :     { "median",   NULL, 0, AV_OPT_TYPE_CONST, { .i64 = PRED_MEDIAN }, INT_MIN, INT_MAX, VE, "pred" },
     659             : 
     660             :     { NULL},
     661             : };
     662             : 
     663             : static const AVClass utvideo_class = {
     664             :     .class_name = "utvideo",
     665             :     .item_name  = av_default_item_name,
     666             :     .option     = options,
     667             :     .version    = LIBAVUTIL_VERSION_INT,
     668             : };
     669             : 
     670             : AVCodec ff_utvideo_encoder = {
     671             :     .name           = "utvideo",
     672             :     .long_name      = NULL_IF_CONFIG_SMALL("Ut Video"),
     673             :     .type           = AVMEDIA_TYPE_VIDEO,
     674             :     .id             = AV_CODEC_ID_UTVIDEO,
     675             :     .priv_data_size = sizeof(UtvideoContext),
     676             :     .priv_class     = &utvideo_class,
     677             :     .init           = utvideo_encode_init,
     678             :     .encode2        = utvideo_encode_frame,
     679             :     .close          = utvideo_encode_close,
     680             :     .capabilities   = AV_CODEC_CAP_FRAME_THREADS | AV_CODEC_CAP_INTRA_ONLY,
     681             :     .pix_fmts       = (const enum AVPixelFormat[]) {
     682             :                           AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRAP, AV_PIX_FMT_YUV422P,
     683             :                           AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_NONE
     684             :                       },
     685             : };

Generated by: LCOV version 1.13