GCC Code Coverage Report

Directory: ../../../ffmpeg/
File:      src/libavcodec/vp9prob.c
Date:      2020-08-14 10:39:37

            Exec  Total  Coverage
Lines:       169    169   100.0 %
Branches:     68     68   100.0 %

 Line       Exec  Source
    1             /*
    2              * VP9 compatible video decoder
    3              *
    4              * Copyright (C) 2013 Ronald S. Bultje <rsbultje gmail com>
    5              * Copyright (C) 2013 Clément Bœsch <u pkh me>
    6              *
    7              * This file is part of FFmpeg.
    8              *
    9              * FFmpeg is free software; you can redistribute it and/or
   10              * modify it under the terms of the GNU Lesser General Public
   11              * License as published by the Free Software Foundation; either
   12              * version 2.1 of the License, or (at your option) any later version.
   13              *
   14              * FFmpeg is distributed in the hope that it will be useful,
   15              * but WITHOUT ANY WARRANTY; without even the implied warranty of
   16              * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   17              * Lesser General Public License for more details.
   18              *
   19              * You should have received a copy of the GNU Lesser General Public
   20              * License along with FFmpeg; if not, write to the Free Software
   21              * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
   22              */
   23
   24             #include "vp56.h"
   25             #include "vp9.h"
   26             #include "vp9data.h"
   27             #include "vp9dec.h"
   28
   29    4061053  static av_always_inline void adapt_prob(uint8_t *p, unsigned ct0, unsigned ct1,
   30                                                     int max_count, int update_factor)
   31             {
   32    4061053      unsigned ct = ct0 + ct1, p2, p1;
   33
   34    4061053      if (!ct)
   35    3345210          return;
   36
   37     715843      update_factor = FASTDIV(update_factor * FFMIN(ct, max_count), max_count);
   38     715843      p1 = *p;
   39     715843      p2 = ((((int64_t) ct0) << 8) + (ct >> 1)) / ct;
   40     715843      p2 = av_clip(p2, 1, 255);
   41
   42                 // (p1 * (256 - update_factor) + p2 * update_factor + 128) >> 8
   43     715843      *p = p1 + (((p2 - p1) * update_factor + 128) >> 8);
   44             }
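
The update at line 43 (documented by the comment at line 42) is a fixed-point blend of the previous probability p1 with the empirical probability p2 derived from the counts, weighted by update_factor/256; update_factor is first scaled down when fewer than max_count symbols were observed. Below is a minimal standalone sketch of the same rule, assuming only the C standard library: the helper name adapt_prob_sketch and the counts in main() are made up for illustration, and FFmpeg's FASTDIV, FFMIN and av_clip are replaced by plain C equivalents.

#include <stdint.h>
#include <stdio.h>

static void adapt_prob_sketch(uint8_t *p, unsigned ct0, unsigned ct1,
                              int max_count, int update_factor)
{
    unsigned ct = ct0 + ct1;
    int p1, p2;

    if (!ct)
        return;                               /* no observations: keep the old probability */

    /* scale the blend weight by how much evidence was actually collected */
    update_factor = update_factor *
                    (int)(ct < (unsigned)max_count ? ct : (unsigned)max_count) / max_count;

    p1 = *p;                                             /* previous probability (1..255)  */
    p2 = (int)((((int64_t)ct0 << 8) + (ct >> 1)) / ct);  /* empirical probability, rounded */
    p2 = p2 < 1 ? 1 : p2 > 255 ? 255 : p2;               /* clamp to the legal range       */

    /* blend: p = p1 + round((p2 - p1) * update_factor / 256) */
    *p = p1 + (((p2 - p1) * update_factor + 128) >> 8);
}

int main(void)
{
    uint8_t p = 128;                          /* prior: symbol "0" assumed 50% likely      */
    adapt_prob_sketch(&p, 30, 10, 24, 112);   /* 30 observed "0" symbols vs 10 "1" symbols */
    printf("%d\n", p);                        /* prints 156                                */
    return 0;
}

With a prior of 128 and counts 30/10, the empirical probability is 192 and the blend with update_factor 112 lands at 156, i.e. the stored probability moves 7/16 of the way toward the observed statistics.
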
   45
   46       2233  void ff_vp9_adapt_probs(VP9Context *s)
   47             {
   48                 int i, j, k, l, m;
   49       2233      ProbContext *p = &s->prob_ctx[s->s.h.framectxid].p;
   50       2233      int uf = (s->s.h.keyframe || s->s.h.intraonly || !s->last_keyframe) ? 112 : 128;
   51
   52                 // coefficients
   53      11165      for (i = 0; i < 4; i++)
   54      26796          for (j = 0; j < 2; j++)
   55      53592              for (k = 0; k < 2; k++)
   56     250096                  for (l = 0; l < 6; l++)
   57    1393392                      for (m = 0; m < 6; m++) {
   58    1214752                          uint8_t *pp = s->prob_ctx[s->s.h.framectxid].coef[i][j][k][l][m];
   59    1214752                          unsigned *e = s->td[0].counts.eob[i][j][k][l][m];
   60    1214752                          unsigned *c = s->td[0].counts.coef[i][j][k][l][m];
   61
   62    1214752                          if (l == 0 && m >= 3) // dc only has 3 pt
   63      35728                              break;
   64
   65    1179024                          adapt_prob(&pp[0], e[0], e[1], 24, uf);
   66    1179024                          adapt_prob(&pp[1], c[0], c[1] + c[2], 24, uf);
   67    1179024                          adapt_prob(&pp[2], c[1], c[2], 24, uf);
   68                                 }
   69
   70       2233      if (s->s.h.keyframe || s->s.h.intraonly) {
   71        466          memcpy(p->skip,  s->prob.p.skip,  sizeof(p->skip));
   72        466          memcpy(p->tx32p, s->prob.p.tx32p, sizeof(p->tx32p));
   73        466          memcpy(p->tx16p, s->prob.p.tx16p, sizeof(p->tx16p));
   74        466          memcpy(p->tx8p,  s->prob.p.tx8p,  sizeof(p->tx8p));
   75        466          return;
   76                 }
   77
   78                 // skip flag
   79       7068      for (i = 0; i < 3; i++)
   80       5301          adapt_prob(&p->skip[i], s->td[0].counts.skip[i][0],
   81       5301                     s->td[0].counts.skip[i][1], 20, 128);
   82
   83                 // intra/inter flag
   84       8835      for (i = 0; i < 4; i++)
   85       7068          adapt_prob(&p->intra[i], s->td[0].counts.intra[i][0],
   86       7068                     s->td[0].counts.intra[i][1], 20, 128);
   87
   88                 // comppred flag
   89       1767      if (s->s.h.comppredmode == PRED_SWITCHABLE) {
   90       1800          for (i = 0; i < 5; i++)
   91       1500              adapt_prob(&p->comp[i], s->td[0].counts.comp[i][0],
   92       1500                         s->td[0].counts.comp[i][1], 20, 128);
   93                 }
   94
   95                 // reference frames
   96       1767      if (s->s.h.comppredmode != PRED_SINGLEREF) {
   97       1812          for (i = 0; i < 5; i++)
   98       1510              adapt_prob(&p->comp_ref[i], s->td[0].counts.comp_ref[i][0],
   99       1510                         s->td[0].counts.comp_ref[i][1], 20, 128);
  100                 }
  101
  102       1767      if (s->s.h.comppredmode != PRED_COMPREF) {
  103      10590          for (i = 0; i < 5; i++) {
  104       8825              uint8_t *pp = p->single_ref[i];
  105       8825              unsigned (*c)[2] = s->td[0].counts.single_ref[i];
  106
  107       8825              adapt_prob(&pp[0], c[0][0], c[0][1], 20, 128);
  108       8825              adapt_prob(&pp[1], c[1][0], c[1][1], 20, 128);
  109                     }
  110                 }
  111
  112                 // block partitioning
  113       8835      for (i = 0; i < 4; i++)
  114      35340          for (j = 0; j < 4; j++) {
  115      28272              uint8_t *pp = p->partition[i][j];
  116      28272              unsigned *c = s->td[0].counts.partition[i][j];
  117
  118      28272              adapt_prob(&pp[0], c[0], c[1] + c[2] + c[3], 20, 128);
  119      28272              adapt_prob(&pp[1], c[1], c[2] + c[3], 20, 128);
  120      28272              adapt_prob(&pp[2], c[2], c[3], 20, 128);
  121                     }
  122
  123                 // tx size
  124       1767      if (s->s.h.txfmmode == TX_SWITCHABLE) {
  125       3891          for (i = 0; i < 2; i++) {
  126       2594              unsigned *c16 = s->td[0].counts.tx16p[i], *c32 = s->td[0].counts.tx32p[i];
  127
  128       2594              adapt_prob(&p->tx8p[i], s->td[0].counts.tx8p[i][0],
  129       2594                         s->td[0].counts.tx8p[i][1], 20, 128);
  130       2594              adapt_prob(&p->tx16p[i][0], c16[0], c16[1] + c16[2], 20, 128);
  131       2594              adapt_prob(&p->tx16p[i][1], c16[1], c16[2], 20, 128);
  132       2594              adapt_prob(&p->tx32p[i][0], c32[0], c32[1] + c32[2] + c32[3], 20, 128);
  133       2594              adapt_prob(&p->tx32p[i][1], c32[1], c32[2] + c32[3], 20, 128);
  134       2594              adapt_prob(&p->tx32p[i][2], c32[2], c32[3], 20, 128);
  135                     }
  136                 }
  137
  138                 // interpolation filter
  139       1767      if (s->s.h.filtermode == FILTER_SWITCHABLE) {
  140       6495          for (i = 0; i < 4; i++) {
  141       5196              uint8_t *pp = p->filter[i];
  142       5196              unsigned *c = s->td[0].counts.filter[i];
  143
  144       5196              adapt_prob(&pp[0], c[0], c[1] + c[2], 20, 128);
  145       5196              adapt_prob(&pp[1], c[1], c[2], 20, 128);
  146                     }
  147                 }
  148
  149                 // inter modes
  150      14136      for (i = 0; i < 7; i++) {
  151      12369          uint8_t *pp = p->mv_mode[i];
  152      12369          unsigned *c = s->td[0].counts.mv_mode[i];
  153
  154      12369          adapt_prob(&pp[0], c[2], c[1] + c[0] + c[3], 20, 128);
  155      12369          adapt_prob(&pp[1], c[0], c[1] + c[3], 20, 128);
  156      12369          adapt_prob(&pp[2], c[1], c[3], 20, 128);
  157                 }
  158
  159                 // mv joints
  160                 {
  161       1767          uint8_t *pp = p->mv_joint;
  162       1767          unsigned *c = s->td[0].counts.mv_joint;
  163
  164       1767          adapt_prob(&pp[0], c[0], c[1] + c[2] + c[3], 20, 128);
  165       1767          adapt_prob(&pp[1], c[1], c[2] + c[3], 20, 128);
  166       1767          adapt_prob(&pp[2], c[2], c[3], 20, 128);
  167                 }
  168
  169                 // mv components
  170       5301      for (i = 0; i < 2; i++) {
  171                     uint8_t *pp;
  172                     unsigned *c, (*c2)[2], sum;
  173
  174       3534          adapt_prob(&p->mv_comp[i].sign, s->td[0].counts.mv_comp[i].sign[0],
  175       3534                     s->td[0].counts.mv_comp[i].sign[1], 20, 128);
  176
  177       3534          pp  = p->mv_comp[i].classes;
  178       3534          c   = s->td[0].counts.mv_comp[i].classes;
  179       3534          sum = c[1] + c[2] + c[3] + c[4] + c[5] +
  180       3534                c[6] + c[7] + c[8] + c[9] + c[10];
  181       3534          adapt_prob(&pp[0], c[0], sum, 20, 128);
  182       3534          sum -= c[1];
  183       3534          adapt_prob(&pp[1], c[1], sum, 20, 128);
  184       3534          sum -= c[2] + c[3];
  185       3534          adapt_prob(&pp[2], c[2] + c[3], sum, 20, 128);
  186       3534          adapt_prob(&pp[3], c[2], c[3], 20, 128);
  187       3534          sum -= c[4] + c[5];
  188       3534          adapt_prob(&pp[4], c[4] + c[5], sum, 20, 128);
  189       3534          adapt_prob(&pp[5], c[4], c[5], 20, 128);
  190       3534          sum -= c[6];
  191       3534          adapt_prob(&pp[6], c[6], sum, 20, 128);
  192       3534          adapt_prob(&pp[7], c[7] + c[8], c[9] + c[10], 20, 128);
  193       3534          adapt_prob(&pp[8], c[7], c[8], 20, 128);
  194       3534          adapt_prob(&pp[9], c[9], c[10], 20, 128);
  195
  196       3534          adapt_prob(&p->mv_comp[i].class0, s->td[0].counts.mv_comp[i].class0[0],
  197       3534                     s->td[0].counts.mv_comp[i].class0[1], 20, 128);
  198       3534          pp = p->mv_comp[i].bits;
  199       3534          c2 = s->td[0].counts.mv_comp[i].bits;
  200      38874          for (j = 0; j < 10; j++)
  201      35340              adapt_prob(&pp[j], c2[j][0], c2[j][1], 20, 128);
  202
  203      10602          for (j = 0; j < 2; j++) {
  204       7068              pp = p->mv_comp[i].class0_fp[j];
  205       7068              c  = s->td[0].counts.mv_comp[i].class0_fp[j];
  206       7068              adapt_prob(&pp[0], c[0], c[1] + c[2] + c[3], 20, 128);
  207       7068              adapt_prob(&pp[1], c[1], c[2] + c[3], 20, 128);
  208       7068              adapt_prob(&pp[2], c[2], c[3], 20, 128);
  209                     }
  210       3534          pp = p->mv_comp[i].fp;
  211       3534          c  = s->td[0].counts.mv_comp[i].fp;
  212       3534          adapt_prob(&pp[0], c[0], c[1] + c[2] + c[3], 20, 128);
  213       3534          adapt_prob(&pp[1], c[1], c[2] + c[3], 20, 128);
  214       3534          adapt_prob(&pp[2], c[2], c[3], 20, 128);
  215
  216       3534          if (s->s.h.highprecisionmvs) {
  217       2788              adapt_prob(&p->mv_comp[i].class0_hp,
  218       2788                         s->td[0].counts.mv_comp[i].class0_hp[0],
  219       2788                         s->td[0].counts.mv_comp[i].class0_hp[1], 20, 128);
  220       2788              adapt_prob(&p->mv_comp[i].hp, s->td[0].counts.mv_comp[i].hp[0],
  221       2788                         s->td[0].counts.mv_comp[i].hp[1], 20, 128);
  222                     }
  223                 }
  224
  225                 // y intra modes
  226       8835      for (i = 0; i < 4; i++) {
  227       7068          uint8_t *pp = p->y_mode[i];
  228       7068          unsigned *c = s->td[0].counts.y_mode[i], sum, s2;
  229
  230       7068          sum = c[0] + c[1] + c[3] + c[4] + c[5] + c[6] + c[7] + c[8] + c[9];
  231       7068          adapt_prob(&pp[0], c[DC_PRED], sum, 20, 128);
  232       7068          sum -= c[TM_VP8_PRED];
  233       7068          adapt_prob(&pp[1], c[TM_VP8_PRED], sum, 20, 128);
  234       7068          sum -= c[VERT_PRED];
  235       7068          adapt_prob(&pp[2], c[VERT_PRED], sum, 20, 128);
  236       7068          s2   = c[HOR_PRED] + c[DIAG_DOWN_RIGHT_PRED] + c[VERT_RIGHT_PRED];
  237       7068          sum -= s2;
  238       7068          adapt_prob(&pp[3], s2, sum, 20, 128);
  239       7068          s2 -= c[HOR_PRED];
  240       7068          adapt_prob(&pp[4], c[HOR_PRED], s2, 20, 128);
  241       7068          adapt_prob(&pp[5], c[DIAG_DOWN_RIGHT_PRED], c[VERT_RIGHT_PRED],
  242                                20, 128);
  243       7068          sum -= c[DIAG_DOWN_LEFT_PRED];
  244       7068          adapt_prob(&pp[6], c[DIAG_DOWN_LEFT_PRED], sum, 20, 128);
  245       7068          sum -= c[VERT_LEFT_PRED];
  246       7068          adapt_prob(&pp[7], c[VERT_LEFT_PRED], sum, 20, 128);
  247       7068          adapt_prob(&pp[8], c[HOR_DOWN_PRED], c[HOR_UP_PRED], 20, 128);
  248                 }
  249
  250                 // uv intra modes
  251      19437      for (i = 0; i < 10; i++) {
  252      17670          uint8_t *pp = p->uv_mode[i];
  253      17670          unsigned *c = s->td[0].counts.uv_mode[i], sum, s2;
  254
  255      17670          sum = c[0] + c[1] + c[3] + c[4] + c[5] + c[6] + c[7] + c[8] + c[9];
  256      17670          adapt_prob(&pp[0], c[DC_PRED], sum, 20, 128);
  257      17670          sum -= c[TM_VP8_PRED];
  258      17670          adapt_prob(&pp[1], c[TM_VP8_PRED], sum, 20, 128);
  259      17670          sum -= c[VERT_PRED];
  260      17670          adapt_prob(&pp[2], c[VERT_PRED], sum, 20, 128);
  261      17670          s2   = c[HOR_PRED] + c[DIAG_DOWN_RIGHT_PRED] + c[VERT_RIGHT_PRED];
  262      17670          sum -= s2;
  263      17670          adapt_prob(&pp[3], s2, sum, 20, 128);
  264      17670          s2 -= c[HOR_PRED];
  265      17670          adapt_prob(&pp[4], c[HOR_PRED], s2, 20, 128);
  266      17670          adapt_prob(&pp[5], c[DIAG_DOWN_RIGHT_PRED], c[VERT_RIGHT_PRED],
  267                                20, 128);
  268      17670          sum -= c[DIAG_DOWN_LEFT_PRED];
  269      17670          adapt_prob(&pp[6], c[DIAG_DOWN_LEFT_PRED], sum, 20, 128);
  270      17670          sum -= c[VERT_LEFT_PRED];
  271      17670          adapt_prob(&pp[7], c[VERT_LEFT_PRED], sum, 20, 128);
  272      17670          adapt_prob(&pp[8], c[HOR_DOWN_PRED], c[HOR_UP_PRED], 20, 128);
  273                 }
  274             }
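
One pattern recurs throughout ff_vp9_adapt_probs: each multi-symbol syntax element is coded as a binary tree, so every node probability is adapted from the count of its left branch versus the summed counts of everything to its right (see e.g. lines 164-166 for the mv joints or lines 212-214 for the fp values). The snippet below replays that pattern for a single 4-symbol tree; merge() is a simplified, hypothetical stand-in for adapt_prob() with max_count = 20 and update_factor = 128 baked in, and the counts are invented.

#include <stdint.h>
#include <stdio.h>

/* simplified stand-in for adapt_prob() with max_count = 20, update_factor = 128 */
static uint8_t merge(uint8_t p, unsigned ct0, unsigned ct1)
{
    unsigned ct = ct0 + ct1;
    int uf, p2;

    if (!ct)
        return p;
    uf = 128 * (int)(ct < 20 ? ct : 20) / 20;
    p2 = (int)((((int64_t)ct0 << 8) + (ct >> 1)) / ct);
    p2 = p2 < 1 ? 1 : p2 > 255 ? 255 : p2;
    return p + (((p2 - p) * uf + 128) >> 8);
}

int main(void)
{
    uint8_t pp[3] = { 128, 128, 128 };  /* node probabilities of a 4-symbol binary tree */
    unsigned c[4] = { 60, 20, 15, 5 };  /* invented per-symbol counts                   */

    /* same shape as: adapt_prob(&pp[0], c[0], c[1] + c[2] + c[3], 20, 128); ...        */
    pp[0] = merge(pp[0], c[0], c[1] + c[2] + c[3]);  /* symbol 0 vs all later symbols   */
    pp[1] = merge(pp[1], c[1], c[2] + c[3]);         /* symbol 1 vs symbols 2 and 3     */
    pp[2] = merge(pp[2], c[2], c[3]);                /* symbol 2 vs symbol 3            */

    printf("%d %d %d\n", pp[0], pp[1], pp[2]);       /* prints 141 128 160              */
    return 0;
}

Each adapted node probability is the probability of taking the left branch at that node, conditioned on the node being reached, which is what the decoder's tree walk consumes when it reads the next symbol.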