/*
 * Copyright (c) 2021 Paul Buxton
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Color correction filter based on
 * https://www.researchgate.net/publication/275213614_A_New_Color_Correction_Method_for_Underwater_Imaging
 */

#include "libavutil/imgutils.h"
#include "libavutil/mem.h"

#include "avfilter.h"
#include "filters.h"
#include "video.h"

typedef struct ThreadData {
    AVFrame *in, *out;
    float l_avg;
    float a_avg;
    float b_avg;
} ThreadData;

typedef struct GrayWorldContext {
    float *tmpplab;
    int *line_count_pels;
    float *line_sum;
} GrayWorldContext;

static void apply_matrix(const float matrix[3][3], const float input[3], float output[3])
{
    output[0] = matrix[0][0] * input[0] + matrix[0][1] * input[1] + matrix[0][2] * input[2];
    output[1] = matrix[1][0] * input[0] + matrix[1][1] * input[1] + matrix[1][2] * input[2];
    output[2] = matrix[2][0] * input[0] + matrix[2][1] * input[1] + matrix[2][2] * input[2];
}

static const float lms2lab[3][3] = {
    {0.5774, 0.5774, 0.5774},
    {0.40825, 0.40825, -0.816458},
    {0.707, -0.707, 0}
};

static const float lab2lms[3][3] = {
    {0.57735, 0.40825, 0.707},
    {0.57735, 0.40825, -0.707},
    {0.57735, -0.8165, 0}
};

static const float rgb2lms[3][3] = {
    {0.3811, 0.5783, 0.0402},
    {0.1967, 0.7244, 0.0782},
    {0.0241, 0.1288, 0.8444}
};

static const float lms2rgb[3][3] = {
    {4.4679, -3.5873, 0.1193},
    {-1.2186, 2.3809, -0.1624},
    {0.0497, -0.2439, 1.2045}
};
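
/*
 * Note on the constants above: lms2lab/lab2lms are the orthogonal
 * 1/sqrt(3), 1/sqrt(6), 1/sqrt(2) decorrelation matrices of the log-LMS
 * ("l-alpha-beta") space, so lab2lms is, up to rounding, the transpose and
 * hence the inverse of lms2lab; rgb2lms and lms2rgb are the matching
 * forward/inverse RGB<->LMS pair from the color transfer paper mentioned in
 * convert_frame()'s documentation below. A quick round-trip sanity check
 * (illustrative only, results are approximate because the constants are
 * rounded):
 *
 *   float lab[3], back[3];
 *   apply_matrix(lms2lab, (const float[3]){1.f, 2.f, 3.f}, lab);
 *   apply_matrix(lab2lms, lab, back);   // back ~= {1, 2, 3}
 */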

/**
 * Convert from Linear RGB to logspace LAB
 *
 * @param rgb Input array of rgb components
 * @param lab output array of lab components
 */
static void rgb2lab(const float rgb[3], float lab[3])
{
    float lms[3];

    apply_matrix(rgb2lms, rgb, lms);
    lms[0] = lms[0] > 0.f ? logf(lms[0]) : -1024.f;
    lms[1] = lms[1] > 0.f ? logf(lms[1]) : -1024.f;
    lms[2] = lms[2] > 0.f ? logf(lms[2]) : -1024.f;
    apply_matrix(lms2lab, lms, lab);
}

/**
 * Convert from Logspace LAB to Linear RGB
 *
 * @param lab input array of lab components
 * @param rgb output array of rgb components
 */
static void lab2rgb(const float lab[3], float rgb[3])
{
    float lms[3];

    apply_matrix(lab2lms, lab, lms);
    lms[0] = expf(lms[0]);
    lms[1] = expf(lms[1]);
    lms[2] = expf(lms[2]);
    apply_matrix(lms2rgb, lms, rgb);
}
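
/*
 * Worked example (illustrative only, values approximate): a neutral pixel
 * rgb = {0.5, 0.5, 0.5} maps through rgb2lms to roughly {0.500, 0.500, 0.499},
 * whose logs are all close to logf(0.5) ~= -0.693, so
 *
 *   float rgb[3] = {0.5f, 0.5f, 0.5f}, lab[3];
 *   rgb2lab(rgb, lab);   // lab ~= {-1.20f, 0.00f, 0.00f}
 *
 * i.e. neutral input lands near a = b = 0. That is what makes the gray world
 * step meaningful: subtracting the frame's mean a/b pushes the average chroma
 * toward neutral while leaving L untouched.
 */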

/**
 * Convert a frame from linear RGB to logspace LAB, and accumulate channel totals for each row
 * Convert from RGB -> lms using equation 4 in color transfer paper.
 *
 * @param ctx Filter context
 * @param arg Thread data pointer
 * @param jobnr job number
 * @param nb_jobs number of jobs
 */
static int convert_frame(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    GrayWorldContext *s = ctx->priv;
    ThreadData *td = arg;
    AVFrame *in = td->in;
    AVFrame *out = td->out;
    AVFilterLink *outlink = ctx->outputs[0];
    const int slice_start = (out->height * jobnr) / nb_jobs;
    const int slice_end = (out->height * (jobnr + 1)) / nb_jobs;
    float rgb[3], lab[3];

    for (int i = slice_start; i < slice_end; i++) {
        float *b_in_row = (float *)(in->data[1] + i * in->linesize[1]);
        float *g_in_row = (float *)(in->data[0] + i * in->linesize[0]);
        float *r_in_row = (float *)(in->data[2] + i * in->linesize[2]);
        float *acur = s->tmpplab + i * outlink->w + outlink->w * outlink->h;
        float *bcur = s->tmpplab + i * outlink->w + 2 * outlink->w * outlink->h;
        float *lcur = s->tmpplab + i * outlink->w;

        s->line_sum[i] = 0.f;
        s->line_sum[i + outlink->h] = 0.f;
        s->line_count_pels[i] = 0;

        for (int j = 0; j < outlink->w; j++) {
            rgb[0] = r_in_row[j];
            rgb[1] = g_in_row[j];
            rgb[2] = b_in_row[j];
            rgb2lab(rgb, lab);
            *(lcur++) = lab[0];
            *(acur++) = lab[1];
            *(bcur++) = lab[2];
            s->line_sum[i] += lab[1];
            s->line_sum[i + outlink->h] += lab[2];
            s->line_count_pels[i]++;
        }
    }
    return 0;
}
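
/*
 * Threading note: convert_frame() runs as a slice job, so each thread writes
 * only the rows of its own slice. The per-row accumulators line_sum[] (a sums
 * at [i], b sums at [i + h]) and line_count_pels[] avoid any shared-counter
 * races; compute_correction() below then reduces them serially on the
 * filtering thread.
 */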

/**
 * Sum the channel totals and compute the mean for each channel
 *
 * @param s Filter private context
 * @param td Thread data
 */
static void compute_correction(GrayWorldContext *s, ThreadData *td)
{
    float asum = 0.f, bsum = 0.f;
    int pixels = 0;

    for (int y = 0; y < td->out->height; y++) {
        asum += s->line_sum[y];
        bsum += s->line_sum[y + td->out->height];
        pixels += s->line_count_pels[y];
    }

    td->a_avg = asum / pixels;
    td->b_avg = bsum / pixels;
}

/**
 * Subtract the mean logspace AB values from each pixel.
 *
 * @param ctx Filter context
 * @param arg Thread data pointer
 * @param jobnr job number
 * @param nb_jobs number of jobs
 */
static int correct_frame(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    GrayWorldContext *s = ctx->priv;
    ThreadData *td = arg;
    AVFrame *out = td->out;
    AVFilterLink *outlink = ctx->outputs[0];
    const int slice_start = (out->height * jobnr) / nb_jobs;
    const int slice_end = (out->height * (jobnr + 1)) / nb_jobs;
    float rgb[3], lab[3];

    for (int i = slice_start; i < slice_end; i++) {
        float *g_out_row = (float *)(out->data[0] + i * out->linesize[0]);
        float *b_out_row = (float *)(out->data[1] + i * out->linesize[1]);
        float *r_out_row = (float *)(out->data[2] + i * out->linesize[2]);
        float *lcur = s->tmpplab + i * outlink->w;
        float *acur = s->tmpplab + i * outlink->w + outlink->w * outlink->h;
        float *bcur = s->tmpplab + i * outlink->w + 2 * outlink->w * outlink->h;

        for (int j = 0; j < outlink->w; j++) {
            lab[0] = *lcur++;
            lab[1] = *acur++;
            lab[2] = *bcur++;

            // subtract the average for the color channels
            lab[1] -= td->a_avg;
            lab[2] -= td->b_avg;

            // convert back to linear rgb
            lab2rgb(lab, rgb);
            r_out_row[j] = rgb[0];
            g_out_row[j] = rgb[1];
            b_out_row[j] = rgb[2];
        }
    }
    return 0;
}

static int config_input(AVFilterLink *inlink)
{
    GrayWorldContext *s = inlink->dst->priv;

    FF_ALLOC_TYPED_ARRAY(s->tmpplab, inlink->h * inlink->w * 3);
    FF_ALLOC_TYPED_ARRAY(s->line_count_pels, inlink->h);
    FF_ALLOC_TYPED_ARRAY(s->line_sum, inlink->h * 2);
    if (!s->tmpplab || !s->line_count_pels || !s->line_sum)
        return AVERROR(ENOMEM);

    return 0;
}

static av_cold void uninit(AVFilterContext *ctx)
{
    GrayWorldContext *s = ctx->priv;

    av_freep(&s->tmpplab);
    av_freep(&s->line_count_pels);
    av_freep(&s->line_sum);
}

static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    GrayWorldContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    ThreadData td;
    AVFrame *out;

    if (av_frame_is_writable(in)) {
        out = in;
    } else {
        out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out, in);
    }
    /* input and output transfer will be linear */
    if (in->color_trc == AVCOL_TRC_UNSPECIFIED) {
        av_log(ctx, AV_LOG_WARNING, "Untagged transfer, assuming linear light.\n");
        out->color_trc = AVCOL_TRC_LINEAR;
    } else if (in->color_trc != AVCOL_TRC_LINEAR) {
        av_log(ctx, AV_LOG_WARNING, "Gray world color correction works on linear light only.\n");
    }

    td.in = in;
    td.out = out;

    ff_filter_execute(ctx, convert_frame, &td, NULL, FFMIN(outlink->h, ff_filter_get_nb_threads(ctx)));
    compute_correction(s, &td);
    ff_filter_execute(ctx, correct_frame, &td, NULL, FFMIN(outlink->h, ff_filter_get_nb_threads(ctx)));

    if (in != out) {
        av_image_copy_plane(out->data[3], out->linesize[3],
                            in->data[3], in->linesize[3], outlink->w * 4, outlink->h);
        av_frame_free(&in);
    }

    return ff_filter_frame(outlink, out);
}

static const AVFilterPad grayworld_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
        .config_props = config_input,
    }
};

const AVFilter ff_vf_grayworld = {
    .name          = "grayworld",
    .description   = NULL_IF_CONFIG_SMALL("Adjust white balance using LAB gray world algorithm"),
    .priv_size     = sizeof(GrayWorldContext),
    FILTER_INPUTS(grayworld_inputs),
    FILTER_OUTPUTS(ff_video_default_filterpad),
    FILTER_PIXFMTS(AV_PIX_FMT_GBRPF32, AV_PIX_FMT_GBRAPF32),
    .flags         = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC | AVFILTER_FLAG_SLICE_THREADS,
    .uninit        = uninit,
};
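
/*
 * Usage sketch (not part of the filter; the file names and the explicit
 * format conversion are illustrative assumptions): the filter only accepts
 * planar float RGB frames, so a typical command either lets libavfilter
 * insert the conversion automatically or requests it explicitly, e.g.
 *
 *   ffmpeg -i input.mp4 -vf "format=gbrpf32le,grayworld" output.mp4
 */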