/*
 * Audio Processing Technology codec for Bluetooth (aptX)
 *
 * Copyright (C) 2017 Aurelien Jacobs <aurel@gnuage.org>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "config_components.h"

#include "libavutil/channel_layout.h"
#include "aptx.h"
#include "codec_internal.h"
#include "encode.h"

/*
 * Half-band QMF analysis filter realized with a polyphase FIR filter.
 * Split into 2 subbands and downsample by 2.
 * So for each pair of samples that goes in, one sample goes out,
 * split into 2 separate subbands.
 */
av_always_inline
static void aptx_qmf_polyphase_analysis(FilterSignal signal[NB_FILTERS],
                                        const int32_t coeffs[NB_FILTERS][FILTER_TAPS],
                                        int shift,
                                        int32_t samples[NB_FILTERS],
                                        int32_t *low_subband_output,
                                        int32_t *high_subband_output)
{
    int32_t subbands[NB_FILTERS];
    int i;

    for (i = 0; i < NB_FILTERS; i++) {
        aptx_qmf_filter_signal_push(&signal[i], samples[NB_FILTERS-1-i]);
        subbands[i] = aptx_qmf_convolution(&signal[i], coeffs[i], shift);
    }

    *low_subband_output  = av_clip_intp2(subbands[0] + subbands[1], 23);
    *high_subband_output = av_clip_intp2(subbands[0] - subbands[1], 23);
}

/*
 * Two stage QMF analysis tree.
 * Split 4 input samples into 4 subbands and downsample by 4.
 * So for each group of 4 samples that goes in, one sample goes out,
 * split into 4 separate subbands.
 */
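/* The outer filter splits the input into a low and a high half; each half is
 * then split again by an inner filter, so subband_samples[0..3] end up in
 * low-low, low-high, high-low, high-high order. */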
static void aptx_qmf_tree_analysis(QMFAnalysis *qmf,
                                   int32_t samples[4],
                                   int32_t subband_samples[4])
{
    int32_t intermediate_samples[4];
    int i;

    /* Split 4 input samples into 2 intermediate subbands downsampled to 2 samples */
    for (i = 0; i < 2; i++)
        aptx_qmf_polyphase_analysis(qmf->outer_filter_signal,
                                    aptx_qmf_outer_coeffs, 23,
                                    &samples[2*i],
                                    &intermediate_samples[0+i],
                                    &intermediate_samples[2+i]);

    /* Split 2 intermediate subband samples into 4 final subbands downsampled to 1 sample */
    for (i = 0; i < 2; i++)
        aptx_qmf_polyphase_analysis(qmf->inner_filter_signal[i],
                                    aptx_qmf_inner_coeffs, 23,
                                    &intermediate_samples[2*i],
                                    &subband_samples[2*i+0],
                                    &subband_samples[2*i+1]);
}
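
/*
 * Binary search over the sorted quantization intervals table: the step is
 * halved on every iteration and added to idx whenever
 * factor * intervals[idx + i] still fits below value << 24.
 */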
av_always_inline
static int32_t aptx_bin_search(int32_t value, int32_t factor,
                               const int32_t *intervals, int32_t nb_intervals)
{
    int32_t idx = 0;
    int i;

    for (i = nb_intervals >> 1; i > 0; i >>= 1)
        if (MUL64(factor, intervals[idx + i]) <= ((int64_t)value << 24))
            idx += i;

    return idx;
}
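
/*
 * Quantize the prediction difference of one subband. The magnitude of the
 * difference selects a quantization interval, a dither-dependent offset is
 * applied, and the resulting quantization error is stored with the code.
 * quantized_sample_parity_change holds the neighbouring code (offset by one)
 * so aptx_insert_sync() can force the codeword parity by switching to it.
 */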
static void aptx_quantize_difference(Quantize *quantize,
                                     int32_t sample_difference,
                                     int32_t dither,
                                     int32_t quantization_factor,
                                     ConstTables *tables)
{
    const int32_t *intervals = tables->quantize_intervals;
    int32_t quantized_sample, dithered_sample, parity_change;
    int32_t d, mean, interval, inv, sample_difference_abs;
    int64_t error;

    sample_difference_abs = FFABS(sample_difference);
    sample_difference_abs = FFMIN(sample_difference_abs, (1 << 23) - 1);

    quantized_sample = aptx_bin_search(sample_difference_abs >> 4,
                                       quantization_factor,
                                       intervals, tables->tables_size);

    d = rshift32_clip24(MULH(dither, dither), 7) - (1 << 23);
    d = rshift64(MUL64(d, tables->quantize_dither_factors[quantized_sample]), 23);

    intervals += quantized_sample;
    mean = (intervals[1] + intervals[0]) / 2;
    interval = (intervals[1] - intervals[0]) * (-(sample_difference < 0) | 1);

    dithered_sample = rshift64_clip24(MUL64(dither, interval) + ((int64_t)av_clip_intp2(mean + d, 23) << 32), 32);
    error = ((int64_t)sample_difference_abs << 20) - MUL64(dithered_sample, quantization_factor);
    quantize->error = FFABS(rshift64(error, 23));

    parity_change = quantized_sample;
    if (error < 0)
        quantized_sample--;
    else
        parity_change--;

    inv = -(sample_difference < 0);
    quantize->quantized_sample = quantized_sample ^ inv;
    quantize->quantized_sample_parity_change = parity_change ^ inv;
}
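
/*
 * Encode 4 PCM samples of one channel: run the QMF analysis to get one
 * sample per subband, generate fresh dither values, and quantize the
 * difference between each subband sample and its predicted value.
 */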
static void aptx_encode_channel(Channel *channel, int32_t samples[4], int hd)
{
    int32_t subband_samples[4];
    int subband;
    aptx_qmf_tree_analysis(&channel->qmf, samples, subband_samples);
    ff_aptx_generate_dither(channel);
    for (subband = 0; subband < NB_SUBBANDS; subband++) {
        int32_t diff = av_clip_intp2(subband_samples[subband] - channel->prediction[subband].predicted_sample, 23);
        aptx_quantize_difference(&channel->quantize[subband], diff,
                                 channel->dither[subband],
                                 channel->invert_quantize[subband].quantization_factor,
                                 &ff_aptx_quant_tables[hd][subband]);
    }
}
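
/*
 * The aptX sync mechanism constrains the parity of the quantized codes.
 * Whenever aptx_check_parity() asks for the parity to be flipped, the code
 * with the smallest quantization error across both channels and all subbands
 * is replaced by its parity-change alternative computed in
 * aptx_quantize_difference().
 */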
static void aptx_insert_sync(Channel channels[NB_CHANNELS], int32_t *idx)
{
    if (aptx_check_parity(channels, idx)) {
        int i;
        Channel *c;
        static const int map[] = { 1, 2, 0, 3 };
        Quantize *min = &channels[NB_CHANNELS-1].quantize[map[0]];
        for (c = &channels[NB_CHANNELS-1]; c >= channels; c--)
            for (i = 0; i < NB_SUBBANDS; i++)
                if (c->quantize[map[i]].error < min->error)
                    min = &c->quantize[map[i]];

        /* Forcing the desired parity is done by offsetting by 1 the quantized
         * sample from the subband featuring the smallest quantization error. */
        min->quantized_sample = min->quantized_sample_parity_change;
    }
}
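
/*
 * Pack the four quantized codes of one channel into a 16-bit aptX codeword:
 * 7 bits for subband 0, 4 bits for subband 1, 2 bits for subband 2 and
 * 3 bits for subband 3, whose least significant bit is replaced by the
 * parity/sync bit.
 */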
static uint16_t aptx_pack_codeword(Channel *channel)
{
    int32_t parity = aptx_quantized_parity(channel);
    return (((channel->quantize[3].quantized_sample & 0x06) | parity) << 13)
         | (((channel->quantize[2].quantized_sample & 0x03)         ) << 11)
         | (((channel->quantize[1].quantized_sample & 0x0F)         ) <<  7)
         | (((channel->quantize[0].quantized_sample & 0x7F)         ) <<  0);
}
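
/*
 * Same layout for aptX HD, but as a 24-bit codeword: 9 bits for subband 0,
 * 6 bits for subband 1, 4 bits for subband 2 and 5 bits for subband 3,
 * again with its least significant bit replaced by the parity/sync bit.
 */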
static uint32_t aptxhd_pack_codeword(Channel *channel)
{
    int32_t parity = aptx_quantized_parity(channel);
    return (((channel->quantize[3].quantized_sample & 0x01E) | parity) << 19)
         | (((channel->quantize[2].quantized_sample & 0x00F)         ) << 15)
         | (((channel->quantize[1].quantized_sample & 0x03F)         ) <<  9)
         | (((channel->quantize[0].quantized_sample & 0x1FF)         ) <<  0);
}
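
/*
 * Encode one group of 4 samples per channel into one codeword per channel.
 * The encoder runs the same inverse quantization and prediction update as
 * the decoder so that both stay in sync, then writes 2 bytes (aptX) or
 * 3 bytes (aptX HD) per channel in big-endian order.
 */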
static void aptx_encode_samples(AptXContext *ctx,
                                int32_t samples[NB_CHANNELS][4],
                                uint8_t *output)
{
    int channel;
    for (channel = 0; channel < NB_CHANNELS; channel++)
        aptx_encode_channel(&ctx->channels[channel], samples[channel], ctx->hd);

    aptx_insert_sync(ctx->channels, &ctx->sync_idx);

    for (channel = 0; channel < NB_CHANNELS; channel++) {
        ff_aptx_invert_quantize_and_prediction(&ctx->channels[channel], ctx->hd);
        if (ctx->hd)
            AV_WB24(output + 3*channel,
                    aptxhd_pack_codeword(&ctx->channels[channel]));
        else
            AV_WB16(output + 2*channel,
                    aptx_pack_codeword(&ctx->channels[channel]));
    }
}
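
/*
 * Encode a whole frame: every group of 4 input samples per channel produces
 * one block of s->block_size bytes in the output packet. Input samples are
 * planar 32-bit integers of which only the 24 most significant bits are used.
 */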
static int aptx_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
                             const AVFrame *frame, int *got_packet_ptr)
{
    AptXContext *s = avctx->priv_data;
    int pos, ipos, channel, sample, output_size, ret;

    if ((ret = ff_af_queue_add(&s->afq, frame)) < 0)
        return ret;

    output_size = s->block_size * frame->nb_samples/4;
    if ((ret = ff_get_encode_buffer(avctx, avpkt, output_size, 0)) < 0)
        return ret;

    for (pos = 0, ipos = 0; pos < output_size; pos += s->block_size, ipos += 4) {
        int32_t samples[NB_CHANNELS][4];

        for (channel = 0; channel < NB_CHANNELS; channel++)
            for (sample = 0; sample < 4; sample++)
                samples[channel][sample] = (int32_t)AV_RN32A(&frame->data[channel][4*(ipos+sample)]) >> 8;

        aptx_encode_samples(s, samples, avpkt->data + pos);
    }

    ff_af_queue_remove(&s->afq, frame->nb_samples, &avpkt->pts, &avpkt->duration);
    *got_packet_ptr = 1;
    return 0;
}

static av_cold int aptx_close(AVCodecContext *avctx)
{
    AptXContext *s = avctx->priv_data;
    ff_af_queue_close(&s->afq);
    return 0;
}
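
/*
 * The aptX and aptX HD encoders share the implementation above: the two
 * FFCodec definitions below differ only in their identifying fields, and
 * HD-specific behaviour is selected at runtime through ctx->hd.
 */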
#if CONFIG_APTX_ENCODER
const FFCodec ff_aptx_encoder = {
    .p.name                = "aptx",
    .p.long_name           = NULL_IF_CONFIG_SMALL("aptX (Audio Processing Technology for Bluetooth)"),
    .p.type                = AVMEDIA_TYPE_AUDIO,
    .p.id                  = AV_CODEC_ID_APTX,
    .p.capabilities        = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_SMALL_LAST_FRAME,
    .priv_data_size        = sizeof(AptXContext),
    .init                  = ff_aptx_init,
    FF_CODEC_ENCODE_CB(aptx_encode_frame),
    .close                 = aptx_close,
    .caps_internal         = FF_CODEC_CAP_INIT_THREADSAFE,
#if FF_API_OLD_CHANNEL_LAYOUT
    .p.channel_layouts     = (const uint64_t[]) { AV_CH_LAYOUT_STEREO, 0},
#endif
    .p.ch_layouts          = (const AVChannelLayout[]) { AV_CHANNEL_LAYOUT_STEREO, { 0 } },
    .p.sample_fmts         = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_S32P,
                                                             AV_SAMPLE_FMT_NONE },
    .p.supported_samplerates = (const int[]) {8000, 16000, 24000, 32000, 44100, 48000, 0},
};
#endif

#if CONFIG_APTX_HD_ENCODER
const FFCodec ff_aptx_hd_encoder = {
    .p.name                = "aptx_hd",
    .p.long_name           = NULL_IF_CONFIG_SMALL("aptX HD (Audio Processing Technology for Bluetooth)"),
    .p.type                = AVMEDIA_TYPE_AUDIO,
    .p.id                  = AV_CODEC_ID_APTX_HD,
    .p.capabilities        = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_SMALL_LAST_FRAME,
    .priv_data_size        = sizeof(AptXContext),
    .init                  = ff_aptx_init,
    FF_CODEC_ENCODE_CB(aptx_encode_frame),
    .close                 = aptx_close,
    .caps_internal         = FF_CODEC_CAP_INIT_THREADSAFE,
#if FF_API_OLD_CHANNEL_LAYOUT
    .p.channel_layouts     = (const uint64_t[]) { AV_CH_LAYOUT_STEREO, 0},
#endif
    .p.ch_layouts          = (const AVChannelLayout[]) { AV_CHANNEL_LAYOUT_STEREO, { 0 } },
    .p.sample_fmts         = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_S32P,
                                                             AV_SAMPLE_FMT_NONE },
    .p.supported_samplerates = (const int[]) {8000, 16000, 24000, 32000, 44100, 48000, 0},
};
#endif