GCC Code Coverage Report

Line | Branch     | Exec      | Source
-----+------------+-----------+--------------------------------------------------------------------------
   1 |            |           | /*
   2 |            |           |  * Copyright (c) 2002 Michael Niedermayer <michaelni@gmx.at>
   3 |            |           |  * Copyright (c) 2011 Stefano Sabatini
   4 |            |           |  *
   5 |            |           |  * This file is part of FFmpeg.
   6 |            |           |  *
   7 |            |           |  * FFmpeg is free software; you can redistribute it and/or modify
   8 |            |           |  * it under the terms of the GNU General Public License as published by
   9 |            |           |  * the Free Software Foundation; either version 2 of the License, or
  10 |            |           |  * (at your option) any later version.
  11 |            |           |  *
  12 |            |           |  * FFmpeg is distributed in the hope that it will be useful,
  13 |            |           |  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14 |            |           |  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  15 |            |           |  * GNU General Public License for more details.
  16 |            |           |  *
  17 |            |           |  * You should have received a copy of the GNU General Public License along
  18 |            |           |  * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
  19 |            |           |  * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
  20 |            |           |  */
  21 |            |           |
  22 |            |           | /**
  23 |            |           |  * @file
  24 |            |           |  * Apply a boxblur filter to the input video.
  25 |            |           |  * Ported from MPlayer libmpcodecs/vf_boxblur.c.
  26 |            |           |  */
  27 |            |           |
  28 |            |           | #include "libavutil/avstring.h"
  29 |            |           | #include "libavutil/common.h"
  30 |            |           | #include "libavutil/opt.h"
  31 |            |           | #include "avfilter.h"
  32 |            |           | #include "formats.h"
  33 |            |           | #include "internal.h"
  34 |            |           | #include "video.h"
  35 |            |           | #include "boxblur.h"
  36 |            |           |
  37 |            |           |
  38 |            |           | typedef struct BoxBlurContext {
  39 |            |           |     const AVClass *class;
  40 |            |           |     FilterParam luma_param;
  41 |            |           |     FilterParam chroma_param;
  42 |            |           |     FilterParam alpha_param;
  43 |            |           |
  44 |            |           |     int hsub, vsub;
  45 |            |           |     int radius[4];
  46 |            |           |     int power[4];
  47 |            |           |     uint8_t *temp[2];   ///< temporary buffer used in blur_power()
  48 |            |           | } BoxBlurContext;
  49 |            |           |
  50 |            |         1 | static av_cold void uninit(AVFilterContext *ctx)
  51 |            |           | {
  52 |            |         1 |     BoxBlurContext *s = ctx->priv;
  53 |            |           |
  54 |            |         1 |     av_freep(&s->temp[0]);
  55 |            |         1 |     av_freep(&s->temp[1]);
  56 |            |         1 | }
  57 |            |           |
  58 |            |         1 | static int query_formats(AVFilterContext *ctx)
  59 |            |           | {
  60 |            |         1 |     AVFilterFormats *formats = NULL;
  61 |            |           |     int fmt, ret;
  62 |            |           |
  63 | ✓✓         |       199 |     for (fmt = 0; av_pix_fmt_desc_get(fmt); fmt++) {
  64 |            |       198 |         const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(fmt);
  65 | ✓✓         |       198 |         if (!(desc->flags & (AV_PIX_FMT_FLAG_HWACCEL | AV_PIX_FMT_FLAG_BITSTREAM | AV_PIX_FMT_FLAG_PAL)) &&
  66 | ✓✓✓✓       |       177 |             (desc->flags & AV_PIX_FMT_FLAG_PLANAR || desc->nb_components == 1) &&
  67 | ✓✓✗✓ ✗✓    |       185 |             (!(desc->flags & AV_PIX_FMT_FLAG_BE) == !HAVE_BIGENDIAN || desc->comp[0].depth == 8) &&
  68 |            |        69 |             (ret = ff_add_format(&formats, fmt)) < 0)
  69 |            |           |             return ret;
  70 |            |           |     }
  71 |            |           |
  72 |            |         1 |     return ff_set_common_formats(ctx, formats);
  73 |            |           | }
  74 |            |           |
  75 |            |         1 | static int config_input(AVFilterLink *inlink)
  76 |            |           | {
  77 |            |         1 |     const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
  78 |            |         1 |     AVFilterContext *ctx = inlink->dst;
  79 |            |         1 |     BoxBlurContext *s = ctx->priv;
  80 |            |         1 |     int w = inlink->w, h = inlink->h;
  81 |            |           |     int ret;
  82 |            |           |
  83 | ✓✗         |         1 |     if (!(s->temp[0] = av_malloc(2*FFMAX(w, h))) ||
  84 | ✗✓         |         1 |         !(s->temp[1] = av_malloc(2*FFMAX(w, h))))
  85 |            |           |         return AVERROR(ENOMEM);
  86 |            |           |
  87 |            |         1 |     s->hsub = desc->log2_chroma_w;
  88 |            |         1 |     s->vsub = desc->log2_chroma_h;
  89 |            |           |
  90 |            |         1 |     ret = ff_boxblur_eval_filter_params(inlink,
  91 |            |           |                                         &s->luma_param,
  92 |            |           |                                         &s->chroma_param,
  93 |            |           |                                         &s->alpha_param);
  94 |            |           |
  95 | ✗✓         |         1 |     if (ret != 0) {
  96 |            |           |         av_log(ctx, AV_LOG_ERROR, "Failed to evaluate "
  97 |            |           |                "filter params: %d.\n", ret);
  98 |            |           |         return ret;
  99 |            |           |     }
 100 |            |           |
 101 |            |         1 |     s->radius[Y] = s->luma_param.radius;
 102 |            |         1 |     s->radius[U] = s->radius[V] = s->chroma_param.radius;
 103 |            |         1 |     s->radius[A] = s->alpha_param.radius;
 104 |            |           |
 105 |            |         1 |     s->power[Y] = s->luma_param.power;
 106 |            |         1 |     s->power[U] = s->power[V] = s->chroma_param.power;
 107 |            |         1 |     s->power[A] = s->alpha_param.power;
 108 |            |           |
 109 |            |         1 |     return 0;
 110 |            |           | }
 111 |            |           |
 112 |            |           | /* Naive boxblur would sum source pixels from x-radius .. x+radius
 113 |            |           |  * for destination pixel x. That would be O(radius*width).
 114 |            |           |  * If you now look at what source pixels represent 2 consecutive
 115 |            |           |  * output pixels, then you see they are almost identical and only
 116 |            |           |  * differ by 2 pixels, like:
 117 |            |           |  * src0       111111111
 118 |            |           |  * dst0           1
 119 |            |           |  * src1        111111111
 120 |            |           |  * dst1            1
 121 |            |           |  * src0-src1  1       -1
 122 |            |           |  * so when you know one output pixel you can find the next by just adding
 123 |            |           |  * and subtracting 1 input pixel.
 124 |            |           |  * The following code adopts this faster variant.
 125 |            |           |  */
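The sliding-window idea described in the comment above can be sketched in a few lines of plain C. The snippet below is illustrative only: it works on int buffers, clamps at the borders instead of mirroring the way the blur##depth() macro below does, skips the fixed-point scaling, and is single-pass; the function and variable names are invented for this example, not part of the filter.

    #include <stdio.h>

    /* One-dimensional box blur using a running window sum:
     * each output sample costs one addition and one subtraction,
     * independent of the radius. */
    static void sliding_box_blur(const int *src, int *dst, int len, int radius)
    {
        int sum = 0;

        /* Seed the window for the first output sample (clamp at the left edge). */
        for (int i = -radius; i <= radius; i++)
            sum += src[i < 0 ? 0 : i];

        for (int x = 0; x < len; x++) {
            dst[x] = sum / (2*radius + 1);
            /* Slide the window by one: add the sample entering on the right,
             * drop the sample leaving on the left (clamped at the borders). */
            int add  = x + radius + 1;
            int drop = x - radius;
            sum += src[add  >= len ? len - 1 : add]
                 - src[drop <  0   ? 0       : drop];
        }
    }

    int main(void)
    {
        int src[8] = { 0, 0, 0, 90, 90, 90, 0, 0 };
        int dst[8];

        sliding_box_blur(src, dst, 8, 1);
        for (int x = 0; x < 8; x++)
            printf("%d ", dst[x]);    /* prints: 0 0 30 60 90 60 30 0 */
        printf("\n");
        return 0;
    }

This is the O(1)-per-pixel behaviour the comment refers to, in contrast to the O(radius*width) cost of summing the whole window for every output pixel.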
 126 |            |           | #define BLUR(type, depth)                                                    \
 127 |            |           | static inline void blur ## depth(type *dst, int dst_step, const type *src,  \
 128 |            |           |                                  int src_step, int len, int radius)          \
 129 |            |           | {                                                                            \
 130 |            |           |     const int length = radius*2 + 1;                                         \
 131 |            |           |     const int inv = ((1<<16) + length/2)/length;                             \
 132 |            |           |     int x, sum = src[radius*src_step];                                       \
 133 |            |           |                                                                              \
 134 |            |           |     for (x = 0; x < radius; x++)                                             \
 135 |            |           |         sum += src[x*src_step]<<1;                                           \
 136 |            |           |                                                                              \
 137 |            |           |     sum = sum*inv + (1<<15);                                                 \
 138 |            |           |                                                                              \
 139 |            |           |     for (x = 0; x <= radius; x++) {                                          \
 140 |            |           |         sum += (src[(radius+x)*src_step] - src[(radius-x)*src_step])*inv;    \
 141 |            |           |         dst[x*dst_step] = sum>>16;                                           \
 142 |            |           |     }                                                                        \
 143 |            |           |                                                                              \
 144 |            |           |     for (; x < len-radius; x++) {                                            \
 145 |            |           |         sum += (src[(radius+x)*src_step] - src[(x-radius-1)*src_step])*inv;  \
 146 |            |           |         dst[x*dst_step] = sum >>16;                                          \
 147 |            |           |     }                                                                        \
 148 |            |           |                                                                              \
 149 |            |           |     for (; x < len; x++) {                                                   \
 150 |            |           |         sum += (src[(2*len-radius-x-1)*src_step] - src[(x-radius-1)*src_step])*inv; \
 151 |            |           |         dst[x*dst_step] = sum>>16;                                           \
 152 |            |           |     }                                                                        \
 153 |            |           | }
 154 |            |           |
 155 | ✓✓✓✓ ✓✓✓✓  |  15398400 | BLUR(uint8_t,   8)
 156 |            |           | BLUR(uint16_t, 16)
 157 |            |           |
 158 |            |           | #undef BLUR
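A quick worked example of the fixed-point normalization used in the macro above (the numbers here are chosen for illustration, not taken from this coverage run): for radius 2 the window length is 5, so inv = ((1<<16) + 2)/5 = 13107, and multiplying a window sum by inv then shifting right by 16 approximates division by 5 with only a rounding-level error. The small standalone check below, with hypothetical values, shows that a flat 8-bit line of 255s still comes out as 255:

    #include <assert.h>
    #include <stdio.h>

    int main(void)
    {
        const int radius = 2;
        const int length = radius*2 + 1;                 /* 5 */
        const int inv    = ((1<<16) + length/2)/length;  /* 13107 */

        /* Seed exactly as blur##depth() does for a flat line of 255s:
         * centre pixel plus twice the 'radius' pixels to its left. */
        int sum = 255 + 2*(255 + 255);                   /* 1275 */
        sum = sum*inv + (1<<15);                         /* scale and add rounding bias */

        assert((sum >> 16) == 255);   /* no visible bias from the fixed-point divide */
        printf("inv = %d, first output sample = %d\n", inv, sum >> 16);
        return 0;
    }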
 159 |            |           |
 160 |            |     64000 | static inline void blur(uint8_t *dst, int dst_step, const uint8_t *src, int src_step,
 161 |            |           |                         int len, int radius, int pixsize)
 162 |            |           | {
 163 | ✓✗         |     64000 |     if (pixsize == 1) blur8 (dst, dst_step   , src, src_step   , len, radius);
 164 |            |           |     else              blur16((uint16_t*)dst, dst_step>>1, (const uint16_t*)src, src_step>>1, len, radius);
 165 |            |     64000 | }
 166 |            |           |
 167 |            |     64000 | static inline void blur_power(uint8_t *dst, int dst_step, const uint8_t *src, int src_step,
 168 |            |           |                               int len, int radius, int power, uint8_t *temp[2], int pixsize)
 169 |            |           | {
 170 |            |     64000 |     uint8_t *a = temp[0], *b = temp[1];
 171 |            |           |
 172 | ✓✗✓✗       |     64000 |     if (radius && power) {
 173 |            |     64000 |         blur(a, pixsize, src, src_step, len, radius, pixsize);
 174 | ✗✓         |     64000 |         for (; power > 2; power--) {
 175 |            |           |             uint8_t *c;
 176 |            |           |             blur(b, pixsize, a, pixsize, len, radius, pixsize);
 177 |            |           |             c = a; a = b; b = c;
 178 |            |           |         }
 179 | ✗✓         |     64000 |         if (power > 1) {
 180 |            |           |             blur(dst, dst_step, a, pixsize, len, radius, pixsize);
 181 |            |           |         } else {
 182 |            |           |             int i;
 183 | ✓✗         |     64000 |             if (pixsize == 1) {
 184 | ✓✓         |  15270400 |                 for (i = 0; i < len; i++)
 185 |            |  15206400 |                     dst[i*dst_step] = a[i];
 186 |            |           |             } else
 187 |            |           |                 for (i = 0; i < len; i++)
 188 |            |           |                     *(uint16_t*)(dst + i*dst_step) = ((uint16_t*)a)[i];
 189 |            |           |         }
 190 |            |           |     } else {
 191 |            |           |         int i;
 192 |            |           |         if (pixsize == 1) {
 193 |            |           |             for (i = 0; i < len; i++)
 194 |            |           |                 dst[i*dst_step] = src[i*src_step];
 195 |            |           |         } else
 196 |            |           |             for (i = 0; i < len; i++)
 197 |            |           |                 *(uint16_t*)(dst + i*dst_step) = *(uint16_t*)(src + i*src_step);
 198 |            |           |     }
 199 |            |     64000 | }
 200 |            |           |
 201 |            |       150 | static void hblur(uint8_t *dst, int dst_linesize, const uint8_t *src, int src_linesize,
 202 |            |           |                   int w, int h, int radius, int power, uint8_t *temp[2], int pixsize)
 203 |            |           | {
 204 |            |           |     int y;
 205 |            |           |
 206 | ✗✓✗✗       |       150 |     if (radius == 0 && dst == src)
 207 |            |           |         return;
 208 |            |           |
 209 | ✓✓         |     28950 |     for (y = 0; y < h; y++)
 210 |            |     28800 |         blur_power(dst + y*dst_linesize, pixsize, src + y*src_linesize, pixsize,
 211 |            |           |                    w, radius, power, temp, pixsize);
 212 |            |           | }
 213 |            |           |
 214 |            |       150 | static void vblur(uint8_t *dst, int dst_linesize, const uint8_t *src, int src_linesize,
 215 |            |           |                   int w, int h, int radius, int power, uint8_t *temp[2], int pixsize)
 216 |            |           | {
 217 |            |           |     int x;
 218 |            |           |
 219 | ✗✓✗✗       |       150 |     if (radius == 0 && dst == src)
 220 |            |           |         return;
 221 |            |           |
 222 | ✓✓         |     35350 |     for (x = 0; x < w; x++)
 223 |            |     35200 |         blur_power(dst + x*pixsize, dst_linesize, src + x*pixsize, src_linesize,
 224 |            |           |                    h, radius, power, temp, pixsize);
 225 |            |           | }
 226 |            |           |
 227 |            |        50 | static int filter_frame(AVFilterLink *inlink, AVFrame *in)
 228 |            |           | {
 229 |            |        50 |     AVFilterContext *ctx = inlink->dst;
 230 |            |        50 |     BoxBlurContext *s = ctx->priv;
 231 |            |        50 |     AVFilterLink *outlink = inlink->dst->outputs[0];
 232 |            |           |     AVFrame *out;
 233 |            |           |     int plane;
 234 |            |        50 |     int cw = AV_CEIL_RSHIFT(inlink->w, s->hsub), ch = AV_CEIL_RSHIFT(in->height, s->vsub);
 235 |            |        50 |     int w[4] = { inlink->w, cw, cw, inlink->w };
 236 |            |        50 |     int h[4] = { in->height, ch, ch, in->height };
 237 |            |        50 |     const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
 238 |            |        50 |     const int depth = desc->comp[0].depth;
 239 |            |        50 |     const int pixsize = (depth+7)/8;
 240 |            |           |
 241 |            |        50 |     out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
 242 | ✗✓         |        50 |     if (!out) {
 243 |            |           |         av_frame_free(&in);
 244 |            |           |         return AVERROR(ENOMEM);
 245 |            |           |     }
 246 |            |        50 |     av_frame_copy_props(out, in);
 247 |            |           |
 248 | ✓✗✓✓ ✓✗    |       200 |     for (plane = 0; plane < 4 && in->data[plane] && in->linesize[plane]; plane++)
 249 |            |       150 |         hblur(out->data[plane], out->linesize[plane],
 250 |            |       150 |               in ->data[plane], in ->linesize[plane],
 251 |            |           |               w[plane], h[plane], s->radius[plane], s->power[plane],
 252 |            |       150 |               s->temp, pixsize);
 253 |            |           |
 254 | ✓✗✓✓ ✓✗    |       200 |     for (plane = 0; plane < 4 && in->data[plane] && in->linesize[plane]; plane++)
 255 |            |       150 |         vblur(out->data[plane], out->linesize[plane],
 256 |            |       150 |               out->data[plane], out->linesize[plane],
 257 |            |           |               w[plane], h[plane], s->radius[plane], s->power[plane],
 258 |            |       150 |               s->temp, pixsize);
 259 |            |           |
 260 |            |        50 |     av_frame_free(&in);
 261 |            |           |
 262 |            |        50 |     return ff_filter_frame(outlink, out);
 263 |            |           | }
 264 |            |           |
 265 |            |           | #define OFFSET(x) offsetof(BoxBlurContext, x)
 266 |            |           | #define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
 267 |            |           |
 268 |            |           | static const AVOption boxblur_options[] = {
 269 |            |           |     { "luma_radius",  "Radius of the luma blurring box", OFFSET(luma_param.radius_expr), AV_OPT_TYPE_STRING, {.str="2"}, .flags = FLAGS },
 270 |            |           |     { "lr",           "Radius of the luma blurring box", OFFSET(luma_param.radius_expr), AV_OPT_TYPE_STRING, {.str="2"}, .flags = FLAGS },
 271 |            |           |     { "luma_power",   "How many times should the boxblur be applied to luma", OFFSET(luma_param.power), AV_OPT_TYPE_INT, {.i64=2}, 0, INT_MAX, .flags = FLAGS },
 272 |            |           |     { "lp",           "How many times should the boxblur be applied to luma", OFFSET(luma_param.power), AV_OPT_TYPE_INT, {.i64=2}, 0, INT_MAX, .flags = FLAGS },
 273 |            |           |
 274 |            |           |     { "chroma_radius", "Radius of the chroma blurring box", OFFSET(chroma_param.radius_expr), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
 275 |            |           |     { "cr",            "Radius of the chroma blurring box", OFFSET(chroma_param.radius_expr), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
 276 |            |           |     { "chroma_power",  "How many times should the boxblur be applied to chroma", OFFSET(chroma_param.power), AV_OPT_TYPE_INT, {.i64=-1}, -1, INT_MAX, .flags = FLAGS },
 277 |            |           |     { "cp",            "How many times should the boxblur be applied to chroma", OFFSET(chroma_param.power), AV_OPT_TYPE_INT, {.i64=-1}, -1, INT_MAX, .flags = FLAGS },
 278 |            |           |
 279 |            |           |     { "alpha_radius", "Radius of the alpha blurring box", OFFSET(alpha_param.radius_expr), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
 280 |            |           |     { "ar",           "Radius of the alpha blurring box", OFFSET(alpha_param.radius_expr), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
 281 |            |           |     { "alpha_power",  "How many times should the boxblur be applied to alpha", OFFSET(alpha_param.power), AV_OPT_TYPE_INT, {.i64=-1}, -1, INT_MAX, .flags = FLAGS },
 282 |            |           |     { "ap",           "How many times should the boxblur be applied to alpha", OFFSET(alpha_param.power), AV_OPT_TYPE_INT, {.i64=-1}, -1, INT_MAX, .flags = FLAGS },
 283 |            |           |
 284 |            |           |     { NULL }
 285 |            |           | };
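For context, the AVOptions above are what a user sets when instantiating the filter. As a purely illustrative example (file names and values are placeholders, not something exercised by this coverage run), an invocation from the ffmpeg command line could look like:

    ffmpeg -i input.mp4 -vf "boxblur=luma_radius=2:luma_power=1:chroma_radius=1" output.mp4

The short aliases (lr, lp, cr, cp, ar, ap) accept the same values as their long forms.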
 286 |            |           |
 287 |            |           | AVFILTER_DEFINE_CLASS(boxblur);
 288 |            |           |
 289 |            |           | static const AVFilterPad avfilter_vf_boxblur_inputs[] = {
 290 |            |           |     {
 291 |            |           |         .name         = "default",
 292 |            |           |         .type         = AVMEDIA_TYPE_VIDEO,
 293 |            |           |         .config_props = config_input,
 294 |            |           |         .filter_frame = filter_frame,
 295 |            |           |     },
 296 |            |           |     { NULL }
 297 |            |           | };
 298 |            |           |
 299 |            |           | static const AVFilterPad avfilter_vf_boxblur_outputs[] = {
 300 |            |           |     {
 301 |            |           |         .name = "default",
 302 |            |           |         .type = AVMEDIA_TYPE_VIDEO,
 303 |            |           |     },
 304 |            |           |     { NULL }
 305 |            |           | };
 306 |            |           |
 307 |            |           | AVFilter ff_vf_boxblur = {
 308 |            |           |     .name          = "boxblur",
 309 |            |           |     .description   = NULL_IF_CONFIG_SMALL("Blur the input."),
 310 |            |           |     .priv_size     = sizeof(BoxBlurContext),
 311 |            |           |     .priv_class    = &boxblur_class,
 312 |            |           |     .uninit        = uninit,
 313 |            |           |     .query_formats = query_formats,
 314 |            |           |     .inputs        = avfilter_vf_boxblur_inputs,
 315 |            |           |     .outputs       = avfilter_vf_boxblur_outputs,
 316 |            |           |     .flags         = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
 317 |            |           | };

Generated by: GCOVR (Version 4.2)