Line | Branch | Exec | Source |
---|---|---|---|
1 | /* | ||
2 | * Inter-thread scheduling/synchronization. | ||
3 | * Copyright (c) 2023 Anton Khirnov | ||
4 | * | ||
5 | * This file is part of FFmpeg. | ||
6 | * | ||
7 | * FFmpeg is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU Lesser General Public | ||
9 | * License as published by the Free Software Foundation; either | ||
10 | * version 2.1 of the License, or (at your option) any later version. | ||
11 | * | ||
12 | * FFmpeg is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
15 | * Lesser General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU Lesser General Public | ||
18 | * License along with FFmpeg; if not, write to the Free Software | ||
19 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA | ||
20 | */ | ||
21 | |||
22 | #include <stdatomic.h> | ||
23 | #include <stddef.h> | ||
24 | #include <stdint.h> | ||
25 | |||
26 | #include "cmdutils.h" | ||
27 | #include "ffmpeg_sched.h" | ||
28 | #include "ffmpeg_utils.h" | ||
29 | #include "sync_queue.h" | ||
30 | #include "thread_queue.h" | ||
31 | |||
32 | #include "libavcodec/packet.h" | ||
33 | |||
34 | #include "libavutil/avassert.h" | ||
35 | #include "libavutil/error.h" | ||
36 | #include "libavutil/fifo.h" | ||
37 | #include "libavutil/frame.h" | ||
38 | #include "libavutil/mem.h" | ||
39 | #include "libavutil/thread.h" | ||
40 | #include "libavutil/threadmessage.h" | ||
41 | #include "libavutil/time.h" | ||
42 | |||
43 | // 100 ms | ||
44 | // FIXME: some other value? make this dynamic? | ||
45 | #define SCHEDULE_TOLERANCE (100 * 1000) | ||
46 | |||
47 | enum QueueType { | ||
48 | QUEUE_PACKETS, | ||
49 | QUEUE_FRAMES, | ||
50 | }; | ||
51 | |||
52 | typedef struct SchWaiter { | ||
53 | pthread_mutex_t lock; | ||
54 | pthread_cond_t cond; | ||
55 | atomic_int choked; | ||
56 | |||
57 | // the following are internal state of schedule_update_locked() and must not | ||
58 | // be accessed outside of it | ||
59 | int choked_prev; | ||
60 | int choked_next; | ||
61 | } SchWaiter; | ||
62 | |||
63 | typedef struct SchTask { | ||
64 | Scheduler *parent; | ||
65 | SchedulerNode node; | ||
66 | |||
67 | SchThreadFunc func; | ||
68 | void *func_arg; | ||
69 | |||
70 | pthread_t thread; | ||
71 | int thread_running; | ||
72 | } SchTask; | ||
73 | |||
74 | typedef struct SchDecOutput { | ||
75 | SchedulerNode *dst; | ||
76 | uint8_t *dst_finished; | ||
77 | unsigned nb_dst; | ||
78 | } SchDecOutput; | ||
79 | |||
80 | typedef struct SchDec { | ||
81 | const AVClass *class; | ||
82 | |||
83 | SchedulerNode src; | ||
84 | |||
85 | SchDecOutput *outputs; | ||
86 | unsigned nb_outputs; | ||
87 | |||
88 | SchTask task; | ||
89 | // Queue for receiving input packets, one stream. | ||
90 | ThreadQueue *queue; | ||
91 | |||
92 | // Queue for sending post-flush end timestamps back to the source | ||
93 | AVThreadMessageQueue *queue_end_ts; | ||
94 | int expect_end_ts; | ||
95 | |||
96 | // temporary storage used by sch_dec_send() | ||
97 | AVFrame *send_frame; | ||
98 | } SchDec; | ||
99 | |||
100 | typedef struct SchSyncQueue { | ||
101 | SyncQueue *sq; | ||
102 | AVFrame *frame; | ||
103 | pthread_mutex_t lock; | ||
104 | |||
105 | unsigned *enc_idx; | ||
106 | unsigned nb_enc_idx; | ||
107 | } SchSyncQueue; | ||
108 | |||
109 | typedef struct SchEnc { | ||
110 | const AVClass *class; | ||
111 | |||
112 | SchedulerNode src; | ||
113 | SchedulerNode *dst; | ||
114 | uint8_t *dst_finished; | ||
115 | unsigned nb_dst; | ||
116 | |||
117 | // [0] - index of the sync queue in Scheduler.sq_enc, | ||
118 | // [1] - index of this encoder in the sq | ||
119 | int sq_idx[2]; | ||
120 | |||
121 | /* Opening encoders is somewhat nontrivial due to their interaction with | ||
122 | * sync queues, which are (among other things) responsible for maintaining | ||
123 | * constant audio frame size, when it is required by the encoder. | ||
124 | * | ||
125 | * Opening the encoder requires stream parameters, obtained from the first | ||
126 | * frame. However, that frame cannot be properly chunked by the sync queue | ||
127 | * without knowing the required frame size, which is only available after | ||
128 | * opening the encoder. | ||
129 | * | ||
130 | * This apparent circular dependency is resolved in the following way: | ||
131 | * - the caller creating the encoder gives us a callback which opens the | ||
132 | * encoder and returns the required frame size (if any) | ||
133 | * - when the first frame is sent to the encoder, the sending thread | ||
134 | * - calls this callback, opening the encoder | ||
135 | * - passes the returned frame size to the sync queue | ||
136 | */ | ||
137 | int (*open_cb)(void *opaque, const AVFrame *frame); | ||
138 | int opened; | ||
139 | |||
140 | SchTask task; | ||
141 | // Queue for receiving input frames, one stream. | ||
142 | ThreadQueue *queue; | ||
143 | // tq_send() to queue returned EOF | ||
144 | int in_finished; | ||
145 | |||
146 | // temporary storage used by sch_enc_send() | ||
147 | AVPacket *send_pkt; | ||
148 | } SchEnc; | ||
149 | |||
150 | typedef struct SchDemuxStream { | ||
151 | SchedulerNode *dst; | ||
152 | uint8_t *dst_finished; | ||
153 | unsigned nb_dst; | ||
154 | } SchDemuxStream; | ||
155 | |||
156 | typedef struct SchDemux { | ||
157 | const AVClass *class; | ||
158 | |||
159 | SchDemuxStream *streams; | ||
160 | unsigned nb_streams; | ||
161 | |||
162 | SchTask task; | ||
163 | SchWaiter waiter; | ||
164 | |||
165 | // temporary storage used by sch_demux_send() | ||
166 | AVPacket *send_pkt; | ||
167 | |||
168 | // protected by schedule_lock | ||
169 | int task_exited; | ||
170 | } SchDemux; | ||
171 | |||
172 | typedef struct PreMuxQueue { | ||
173 | /** | ||
174 | * Queue for buffering the packets before the muxer task can be started. | ||
175 | */ | ||
176 | AVFifo *fifo; | ||
177 | /** | ||
178 | * Maximum number of packets in fifo. | ||
179 | */ | ||
180 | int max_packets; | ||
181 | /* | ||
182 | * The size of the AVPackets' buffers in queue. | ||
183 | * Updated when a packet is either pushed or pulled from the queue. | ||
184 | */ | ||
185 | size_t data_size; | ||
186 | /* Threshold after which max_packets will be in effect */ | ||
187 | size_t data_threshold; | ||
188 | } PreMuxQueue; | ||
189 | |||
190 | typedef struct SchMuxStream { | ||
191 | SchedulerNode src; | ||
192 | SchedulerNode src_sched; | ||
193 | |||
194 | unsigned *sub_heartbeat_dst; | ||
195 | unsigned nb_sub_heartbeat_dst; | ||
196 | |||
197 | PreMuxQueue pre_mux_queue; | ||
198 | |||
199 | // an EOF was generated while flushing the pre-mux queue | ||
200 | int init_eof; | ||
201 | |||
202 | //////////////////////////////////////////////////////////// | ||
203 | // The following are protected by Scheduler.schedule_lock // | ||
204 | |||
205 | /* dts+duration of the last packet sent to this stream | ||
206 | in AV_TIME_BASE_Q */ | ||
207 | int64_t last_dts; | ||
208 | // this stream no longer accepts input | ||
209 | int source_finished; | ||
210 | //////////////////////////////////////////////////////////// | ||
211 | } SchMuxStream; | ||
212 | |||
213 | typedef struct SchMux { | ||
214 | const AVClass *class; | ||
215 | |||
216 | SchMuxStream *streams; | ||
217 | unsigned nb_streams; | ||
218 | unsigned nb_streams_ready; | ||
219 | |||
220 | int (*init)(void *arg); | ||
221 | |||
222 | SchTask task; | ||
223 | /** | ||
224 | * Set to 1 after starting the muxer task and flushing the | ||
225 | * pre-muxing queues. | ||
226 | * Set either before any tasks have started, or with | ||
227 | * Scheduler.mux_ready_lock held. | ||
228 | */ | ||
229 | atomic_int mux_started; | ||
230 | ThreadQueue *queue; | ||
231 | unsigned queue_size; | ||
232 | |||
233 | AVPacket *sub_heartbeat_pkt; | ||
234 | } SchMux; | ||
235 | |||
236 | typedef struct SchFilterIn { | ||
237 | SchedulerNode src; | ||
238 | SchedulerNode src_sched; | ||
239 | int send_finished; | ||
240 | int receive_finished; | ||
241 | } SchFilterIn; | ||
242 | |||
243 | typedef struct SchFilterOut { | ||
244 | SchedulerNode dst; | ||
245 | } SchFilterOut; | ||
246 | |||
247 | typedef struct SchFilterGraph { | ||
248 | const AVClass *class; | ||
249 | |||
250 | SchFilterIn *inputs; | ||
251 | unsigned nb_inputs; | ||
252 | atomic_uint nb_inputs_finished_send; | ||
253 | unsigned nb_inputs_finished_receive; | ||
254 | |||
255 | SchFilterOut *outputs; | ||
256 | unsigned nb_outputs; | ||
257 | |||
258 | SchTask task; | ||
259 | // input queue, nb_inputs+1 streams | ||
260 | // last stream is control | ||
261 | ThreadQueue *queue; | ||
262 | SchWaiter waiter; | ||
263 | |||
264 | // protected by schedule_lock | ||
265 | unsigned best_input; | ||
266 | int task_exited; | ||
267 | } SchFilterGraph; | ||
268 | |||
269 | enum SchedulerState { | ||
270 | SCH_STATE_UNINIT, | ||
271 | SCH_STATE_STARTED, | ||
272 | SCH_STATE_STOPPED, | ||
273 | }; | ||
274 | |||
275 | struct Scheduler { | ||
276 | const AVClass *class; | ||
277 | |||
278 | SchDemux *demux; | ||
279 | unsigned nb_demux; | ||
280 | |||
281 | SchMux *mux; | ||
282 | unsigned nb_mux; | ||
283 | |||
284 | unsigned nb_mux_ready; | ||
285 | pthread_mutex_t mux_ready_lock; | ||
286 | |||
287 | unsigned nb_mux_done; | ||
288 | unsigned task_failed; | ||
289 | pthread_mutex_t finish_lock; | ||
290 | pthread_cond_t finish_cond; | ||
291 | |||
292 | |||
293 | SchDec *dec; | ||
294 | unsigned nb_dec; | ||
295 | |||
296 | SchEnc *enc; | ||
297 | unsigned nb_enc; | ||
298 | |||
299 | SchSyncQueue *sq_enc; | ||
300 | unsigned nb_sq_enc; | ||
301 | |||
302 | SchFilterGraph *filters; | ||
303 | unsigned nb_filters; | ||
304 | |||
305 | char *sdp_filename; | ||
306 | int sdp_auto; | ||
307 | |||
308 | enum SchedulerState state; | ||
309 | atomic_int terminate; | ||
310 | |||
311 | pthread_mutex_t schedule_lock; | ||
312 | |||
313 | atomic_int_least64_t last_dts; | ||
314 | }; | ||
315 | |||
316 | /** | ||
317 | * Wait until this task is allowed to proceed. | ||
318 | * | ||
319 | * @retval 0 the caller should proceed | ||
320 | * @retval 1 the caller should terminate | ||
321 | */ | ||
322 | 492639 | static int waiter_wait(Scheduler *sch, SchWaiter *w) | |
323 | { | ||
324 | int terminate; | ||
325 | |||
326 | 2/2 ✓ Branch 0 taken 492371 times. ✓ Branch 1 taken 268 times. | 492639 | if (!atomic_load(&w->choked)) |
327 | 492371 | return 0; | |
328 | |||
329 | 268 | pthread_mutex_lock(&w->lock); | |
330 | |||
331 | 4/4 ✓ Branch 0 taken 277 times. ✓ Branch 1 taken 227 times. ✓ Branch 2 taken 236 times. ✓ Branch 3 taken 41 times. | 504 | while (atomic_load(&w->choked) && !atomic_load(&sch->terminate)) |
332 | 236 | pthread_cond_wait(&w->cond, &w->lock); | |
333 | |||
334 | 268 | terminate = atomic_load(&sch->terminate); | |
335 | |||
336 | 268 | pthread_mutex_unlock(&w->lock); | |
337 | |||
338 | 268 | return terminate; | |
339 | } | ||
340 | |||
341 | 32987 | static void waiter_set(SchWaiter *w, int choked) | |
342 | { | ||
343 | 32987 | pthread_mutex_lock(&w->lock); | |
344 | |||
345 | 32987 | atomic_store(&w->choked, choked); | |
346 | 32987 | pthread_cond_signal(&w->cond); | |
347 | |||
348 | 32987 | pthread_mutex_unlock(&w->lock); | |
349 | 32987 | } | |
350 | |||
351 | 14715 | static int waiter_init(SchWaiter *w) | |
352 | { | ||
353 | int ret; | ||
354 | |||
355 | 14715 | atomic_init(&w->choked, 0); | |
356 | |||
357 | 14715 | ret = pthread_mutex_init(&w->lock, NULL); | |
358 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 14715 times. | 14715 | if (ret) |
359 | ✗ | return AVERROR(ret); | |
360 | |||
361 | 14715 | ret = pthread_cond_init(&w->cond, NULL); | |
362 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 14715 times. | 14715 | if (ret) |
363 | ✗ | return AVERROR(ret); | |
364 | |||
365 | 14715 | return 0; | |
366 | } | ||
367 | |||
368 | 14715 | static void waiter_uninit(SchWaiter *w) | |
369 | { | ||
370 | 14715 | pthread_mutex_destroy(&w->lock); | |
371 | 14715 | pthread_cond_destroy(&w->cond); | |
372 | 14715 | } | |
373 | |||
374 | 29936 | static int queue_alloc(ThreadQueue **ptq, unsigned nb_streams, unsigned queue_size, | |
375 | enum QueueType type) | ||
376 | { | ||
377 | ThreadQueue *tq; | ||
378 | |||
379 | 1/2 ✓ Branch 0 taken 29936 times. ✗ Branch 1 not taken. | 29936 | if (queue_size <= 0) { |
380 | 2/2 ✓ Branch 0 taken 15252 times. ✓ Branch 1 taken 14684 times. | 29936 | if (type == QUEUE_FRAMES) |
381 | 15252 | queue_size = DEFAULT_FRAME_THREAD_QUEUE_SIZE; | |
382 | else | ||
383 | 14684 | queue_size = DEFAULT_PACKET_THREAD_QUEUE_SIZE; | |
384 | } | ||
385 | |||
386 | 2/2 ✓ Branch 0 taken 15252 times. ✓ Branch 1 taken 14684 times. | 29936 | if (type == QUEUE_FRAMES) { |
387 | // This queue length is used in the decoder code to ensure that | ||
388 | // there are enough entries in fixed-size frame pools to account | ||
389 | // for frames held in queues inside the ffmpeg utility. If this | ||
390 | // can ever dynamically change then the corresponding decode | ||
391 | // code needs to be updated as well. | ||
392 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 15252 times. | 15252 | av_assert0(queue_size == DEFAULT_FRAME_THREAD_QUEUE_SIZE); |
393 | } | ||
394 | |||
395 | 29936 | tq = tq_alloc(nb_streams, queue_size, | |
396 | (type == QUEUE_PACKETS) ? THREAD_QUEUE_PACKETS : THREAD_QUEUE_FRAMES); | ||
397 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 29936 times. | 29936 | if (!tq) |
398 | ✗ | return AVERROR(ENOMEM); | |
399 | |||
400 | 29936 | *ptq = tq; | |
401 | 29936 | return 0; | |
402 | } | ||
403 | |||
404 | static void *task_wrapper(void *arg); | ||
405 | |||
406 | 37095 | static int task_start(SchTask *task) | |
407 | { | ||
408 | int ret; | ||
409 | |||
410 | 37095 | av_log(task->func_arg, AV_LOG_VERBOSE, "Starting thread...\n"); | |
411 | |||
412 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 37095 times. | 37095 | av_assert0(!task->thread_running); |
413 | |||
414 | 37095 | ret = pthread_create(&task->thread, NULL, task_wrapper, task); | |
415 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 37095 times. | 37095 | if (ret) { |
416 | ✗ | av_log(task->func_arg, AV_LOG_ERROR, "pthread_create() failed: %s\n", | |
417 | strerror(ret)); | ||
418 | ✗ | return AVERROR(ret); | |
419 | } | ||
420 | |||
421 | 37095 | task->thread_running = 1; | |
422 | 37095 | return 0; | |
423 | } | ||
424 | |||
425 | 37109 | static void task_init(Scheduler *sch, SchTask *task, enum SchedulerNodeType type, unsigned idx, | |
426 | SchThreadFunc func, void *func_arg) | ||
427 | { | ||
428 | 37109 | task->parent = sch; | |
429 | |||
430 | 37109 | task->node.type = type; | |
431 | 37109 | task->node.idx = idx; | |
432 | |||
433 | 37109 | task->func = func; | |
434 | 37109 | task->func_arg = func_arg; | |
435 | 37109 | } | |
436 | |||
437 | 538084 | static int64_t trailing_dts(const Scheduler *sch, int count_finished) | |
438 | { | ||
439 | 538084 | int64_t min_dts = INT64_MAX; | |
440 | |||
441 | 2/2 ✓ Branch 0 taken 538340 times. ✓ Branch 1 taken 523075 times. | 1061415 | for (unsigned i = 0; i < sch->nb_mux; i++) { |
442 | 538340 | const SchMux *mux = &sch->mux[i]; | |
443 | |||
444 | 2/2 ✓ Branch 0 taken 595283 times. ✓ Branch 1 taken 523331 times. | 1118614 | for (unsigned j = 0; j < mux->nb_streams; j++) { |
445 | 595283 | const SchMuxStream *ms = &mux->streams[j]; | |
446 | |||
447 | 4/4 ✓ Branch 0 taken 46010 times. ✓ Branch 1 taken 549273 times. ✓ Branch 2 taken 37644 times. ✓ Branch 3 taken 8366 times. | 595283 | if (ms->source_finished && !count_finished) |
448 | 37644 | continue; | |
449 | 2/2 ✓ Branch 0 taken 15009 times. ✓ Branch 1 taken 542630 times. | 557639 | if (ms->last_dts == AV_NOPTS_VALUE) |
450 | 15009 | return AV_NOPTS_VALUE; | |
451 | |||
452 | 542630 | min_dts = FFMIN(min_dts, ms->last_dts); | |
453 | } | ||
454 | } | ||
455 | |||
456 | 2/2 ✓ Branch 0 taken 494810 times. ✓ Branch 1 taken 28265 times. | 523075 | return min_dts == INT64_MAX ? AV_NOPTS_VALUE : min_dts; |
457 | } | ||
458 | |||
459 | 7925 | void sch_free(Scheduler **psch) | |
460 | { | ||
461 | 7925 | Scheduler *sch = *psch; | |
462 | |||
463 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 7925 times. | 7925 | if (!sch) |
464 | ✗ | return; | |
465 | |||
466 | 7925 | sch_stop(sch, NULL); | |
467 | |||
468 | 2/2 ✓ Branch 0 taken 7173 times. ✓ Branch 1 taken 7925 times. | 15098 | for (unsigned i = 0; i < sch->nb_demux; i++) { |
469 | 7173 | SchDemux *d = &sch->demux[i]; | |
470 | |||
471 | 2/2 ✓ Branch 0 taken 7426 times. ✓ Branch 1 taken 7173 times. | 14599 | for (unsigned j = 0; j < d->nb_streams; j++) { |
472 | 7426 | SchDemuxStream *ds = &d->streams[j]; | |
473 | 7426 | av_freep(&ds->dst); | |
474 | 7426 | av_freep(&ds->dst_finished); | |
475 | } | ||
476 | 7173 | av_freep(&d->streams); | |
477 | |||
478 | 7173 | av_packet_free(&d->send_pkt); | |
479 | |||
480 | 7173 | waiter_uninit(&d->waiter); | |
481 | } | ||
482 | 7925 | av_freep(&sch->demux); | |
483 | |||
484 | 2/2 ✓ Branch 0 taken 7926 times. ✓ Branch 1 taken 7925 times. | 15851 | for (unsigned i = 0; i < sch->nb_mux; i++) { |
485 | 7926 | SchMux *mux = &sch->mux[i]; | |
486 | |||
487 | 2/2 ✓ Branch 0 taken 8402 times. ✓ Branch 1 taken 7926 times. | 16328 | for (unsigned j = 0; j < mux->nb_streams; j++) { |
488 | 8402 | SchMuxStream *ms = &mux->streams[j]; | |
489 | |||
490 | 1/2 ✓ Branch 0 taken 8402 times. ✗ Branch 1 not taken. | 8402 | if (ms->pre_mux_queue.fifo) { |
491 | AVPacket *pkt; | ||
492 | 1/2 ✗ Branch 1 not taken. ✓ Branch 2 taken 8402 times. | 8402 | while (av_fifo_read(ms->pre_mux_queue.fifo, &pkt, 1) >= 0) |
493 | ✗ | av_packet_free(&pkt); | |
494 | 8402 | av_fifo_freep2(&ms->pre_mux_queue.fifo); | |
495 | } | ||
496 | |||
497 | 8402 | av_freep(&ms->sub_heartbeat_dst); | |
498 | } | ||
499 | 7926 | av_freep(&mux->streams); | |
500 | |||
501 | 7926 | av_packet_free(&mux->sub_heartbeat_pkt); | |
502 | |||
503 | 7926 | tq_free(&mux->queue); | |
504 | } | ||
505 | 7925 | av_freep(&sch->mux); | |
506 | |||
507 | 2/2 ✓ Branch 0 taken 6758 times. ✓ Branch 1 taken 7925 times. | 14683 | for (unsigned i = 0; i < sch->nb_dec; i++) { |
508 | 6758 | SchDec *dec = &sch->dec[i]; | |
509 | |||
510 | 6758 | tq_free(&dec->queue); | |
511 | |||
512 | 6758 | av_thread_message_queue_free(&dec->queue_end_ts); | |
513 | |||
514 | 2/2 ✓ Branch 0 taken 6764 times. ✓ Branch 1 taken 6758 times. | 13522 | for (unsigned j = 0; j < dec->nb_outputs; j++) { |
515 | 6764 | SchDecOutput *o = &dec->outputs[j]; | |
516 | |||
517 | 6764 | av_freep(&o->dst); | |
518 | 6764 | av_freep(&o->dst_finished); | |
519 | } | ||
520 | |||
521 | 6758 | av_freep(&dec->outputs); | |
522 | |||
523 | 6758 | av_frame_free(&dec->send_frame); | |
524 | } | ||
525 | 7925 | av_freep(&sch->dec); | |
526 | |||
527 | 2/2 ✓ Branch 0 taken 7710 times. ✓ Branch 1 taken 7925 times. | 15635 | for (unsigned i = 0; i < sch->nb_enc; i++) { |
528 | 7710 | SchEnc *enc = &sch->enc[i]; | |
529 | |||
530 | 7710 | tq_free(&enc->queue); | |
531 | |||
532 | 7710 | av_packet_free(&enc->send_pkt); | |
533 | |||
534 | 7710 | av_freep(&enc->dst); | |
535 | 7710 | av_freep(&enc->dst_finished); | |
536 | } | ||
537 | 7925 | av_freep(&sch->enc); | |
538 | |||
539 | 2/2 ✓ Branch 0 taken 3069 times. ✓ Branch 1 taken 7925 times. | 10994 | for (unsigned i = 0; i < sch->nb_sq_enc; i++) { |
540 | 3069 | SchSyncQueue *sq = &sch->sq_enc[i]; | |
541 | 3069 | sq_free(&sq->sq); | |
542 | 3069 | av_frame_free(&sq->frame); | |
543 | 3069 | pthread_mutex_destroy(&sq->lock); | |
544 | 3069 | av_freep(&sq->enc_idx); | |
545 | } | ||
546 | 7925 | av_freep(&sch->sq_enc); | |
547 | |||
548 | 2/2 ✓ Branch 0 taken 7542 times. ✓ Branch 1 taken 7925 times. | 15467 | for (unsigned i = 0; i < sch->nb_filters; i++) { |
549 | 7542 | SchFilterGraph *fg = &sch->filters[i]; | |
550 | |||
551 | 7542 | tq_free(&fg->queue); | |
552 | |||
553 | 7542 | av_freep(&fg->inputs); | |
554 | 7542 | av_freep(&fg->outputs); | |
555 | |||
556 | 7542 | waiter_uninit(&fg->waiter); | |
557 | } | ||
558 | 7925 | av_freep(&sch->filters); | |
559 | |||
560 | 7925 | av_freep(&sch->sdp_filename); | |
561 | |||
562 | 7925 | pthread_mutex_destroy(&sch->schedule_lock); | |
563 | |||
564 | 7925 | pthread_mutex_destroy(&sch->mux_ready_lock); | |
565 | |||
566 | 7925 | pthread_mutex_destroy(&sch->finish_lock); | |
567 | 7925 | pthread_cond_destroy(&sch->finish_cond); | |
568 | |||
569 | 7925 | av_freep(psch); | |
570 | } | ||
571 | |||
572 | static const AVClass scheduler_class = { | ||
573 | .class_name = "Scheduler", | ||
574 | .version = LIBAVUTIL_VERSION_INT, | ||
575 | }; | ||
576 | |||
577 | 7925 | Scheduler *sch_alloc(void) | |
578 | { | ||
579 | Scheduler *sch; | ||
580 | int ret; | ||
581 | |||
582 | 7925 | sch = av_mallocz(sizeof(*sch)); | |
583 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 7925 times. | 7925 | if (!sch) |
584 | ✗ | return NULL; | |
585 | |||
586 | 7925 | sch->class = &scheduler_class; | |
587 | 7925 | sch->sdp_auto = 1; | |
588 | |||
589 | 7925 | ret = pthread_mutex_init(&sch->schedule_lock, NULL); | |
590 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 7925 times. | 7925 | if (ret) |
591 | ✗ | goto fail; | |
592 | |||
593 | 7925 | ret = pthread_mutex_init(&sch->mux_ready_lock, NULL); | |
594 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 7925 times. | 7925 | if (ret) |
595 | ✗ | goto fail; | |
596 | |||
597 | 7925 | ret = pthread_mutex_init(&sch->finish_lock, NULL); | |
598 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 7925 times. | 7925 | if (ret) |
599 | ✗ | goto fail; | |
600 | |||
601 | 7925 | ret = pthread_cond_init(&sch->finish_cond, NULL); | |
602 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 7925 times. | 7925 | if (ret) |
603 | ✗ | goto fail; | |
604 | |||
605 | 7925 | return sch; | |
606 | ✗ | fail: | |
607 | ✗ | sch_free(&sch); | |
608 | ✗ | return NULL; | |
609 | } | ||
610 | |||
611 | ✗ | int sch_sdp_filename(Scheduler *sch, const char *sdp_filename) | |
612 | { | ||
613 | ✗ | av_freep(&sch->sdp_filename); | |
614 | ✗ | sch->sdp_filename = av_strdup(sdp_filename); | |
615 | ✗ | return sch->sdp_filename ? 0 : AVERROR(ENOMEM); | |
616 | } | ||
617 | |||
618 | static const AVClass sch_mux_class = { | ||
619 | .class_name = "SchMux", | ||
620 | .version = LIBAVUTIL_VERSION_INT, | ||
621 | .parent_log_context_offset = offsetof(SchMux, task.func_arg), | ||
622 | }; | ||
623 | |||
624 | 7926 | int sch_add_mux(Scheduler *sch, SchThreadFunc func, int (*init)(void *), | |
625 | void *arg, int sdp_auto, unsigned thread_queue_size) | ||
626 | { | ||
627 | 7926 | const unsigned idx = sch->nb_mux; | |
628 | |||
629 | SchMux *mux; | ||
630 | int ret; | ||
631 | |||
632 | 7926 | ret = GROW_ARRAY(sch->mux, sch->nb_mux); | |
633 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 7926 times. | 7926 | if (ret < 0) |
634 | ✗ | return ret; | |
635 | |||
636 | 7926 | mux = &sch->mux[idx]; | |
637 | 7926 | mux->class = &sch_mux_class; | |
638 | 7926 | mux->init = init; | |
639 | 7926 | mux->queue_size = thread_queue_size; | |
640 | |||
641 | 7926 | task_init(sch, &mux->task, SCH_NODE_TYPE_MUX, idx, func, arg); | |
642 | |||
643 | 7926 | sch->sdp_auto &= sdp_auto; | |
644 | |||
645 | 7926 | return idx; | |
646 | } | ||
647 | |||
648 | 8402 | int sch_add_mux_stream(Scheduler *sch, unsigned mux_idx) | |
649 | { | ||
650 | SchMux *mux; | ||
651 | SchMuxStream *ms; | ||
652 | unsigned stream_idx; | ||
653 | int ret; | ||
654 | |||
655 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 8402 times. | 8402 | av_assert0(mux_idx < sch->nb_mux); |
656 | 8402 | mux = &sch->mux[mux_idx]; | |
657 | |||
658 | 8402 | ret = GROW_ARRAY(mux->streams, mux->nb_streams); | |
659 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 8402 times. | 8402 | if (ret < 0) |
660 | ✗ | return ret; | |
661 | 8402 | stream_idx = mux->nb_streams - 1; | |
662 | |||
663 | 8402 | ms = &mux->streams[stream_idx]; | |
664 | |||
665 | 8402 | ms->pre_mux_queue.fifo = av_fifo_alloc2(8, sizeof(AVPacket*), 0); | |
666 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 8402 times. | 8402 | if (!ms->pre_mux_queue.fifo) |
667 | ✗ | return AVERROR(ENOMEM); | |
668 | |||
669 | 8402 | ms->last_dts = AV_NOPTS_VALUE; | |
670 | |||
671 | 8402 | return stream_idx; | |
672 | } | ||
673 | |||
674 | static const AVClass sch_demux_class = { | ||
675 | .class_name = "SchDemux", | ||
676 | .version = LIBAVUTIL_VERSION_INT, | ||
677 | .parent_log_context_offset = offsetof(SchDemux, task.func_arg), | ||
678 | }; | ||
679 | |||
680 | 7173 | int sch_add_demux(Scheduler *sch, SchThreadFunc func, void *ctx) | |
681 | { | ||
682 | 7173 | const unsigned idx = sch->nb_demux; | |
683 | |||
684 | SchDemux *d; | ||
685 | int ret; | ||
686 | |||
687 | 7173 | ret = GROW_ARRAY(sch->demux, sch->nb_demux); | |
688 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 7173 times. | 7173 | if (ret < 0) |
689 | ✗ | return ret; | |
690 | |||
691 | 7173 | d = &sch->demux[idx]; | |
692 | |||
693 | 7173 | task_init(sch, &d->task, SCH_NODE_TYPE_DEMUX, idx, func, ctx); | |
694 | |||
695 | 7173 | d->class = &sch_demux_class; | |
696 | 7173 | d->send_pkt = av_packet_alloc(); | |
697 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 7173 times. | 7173 | if (!d->send_pkt) |
698 | ✗ | return AVERROR(ENOMEM); | |
699 | |||
700 | 7173 | ret = waiter_init(&d->waiter); | |
701 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 7173 times. | 7173 | if (ret < 0) |
702 | ✗ | return ret; | |
703 | |||
704 | 7173 | return idx; | |
705 | } | ||
706 | |||
707 | 7426 | int sch_add_demux_stream(Scheduler *sch, unsigned demux_idx) | |
708 | { | ||
709 | SchDemux *d; | ||
710 | int ret; | ||
711 | |||
712 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 7426 times. | 7426 | av_assert0(demux_idx < sch->nb_demux); |
713 | 7426 | d = &sch->demux[demux_idx]; | |
714 | |||
715 | 7426 | ret = GROW_ARRAY(d->streams, d->nb_streams); | |
716 | 1/2 ✓ Branch 0 taken 7426 times. ✗ Branch 1 not taken. | 7426 | return ret < 0 ? ret : d->nb_streams - 1; |
717 | } | ||
718 | |||
719 | 6764 | int sch_add_dec_output(Scheduler *sch, unsigned dec_idx) | |
720 | { | ||
721 | SchDec *dec; | ||
722 | int ret; | ||
723 | |||
724 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 6764 times.
|
6764 | av_assert0(dec_idx < sch->nb_dec); |
725 | 6764 | dec = &sch->dec[dec_idx]; | |
726 | |||
727 | 6764 | ret = GROW_ARRAY(dec->outputs, dec->nb_outputs); | |
728 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 6764 times.
|
6764 | if (ret < 0) |
729 | ✗ | return ret; | |
730 | |||
731 | 6764 | return dec->nb_outputs - 1; | |
732 | } | ||
733 | |||
734 | static const AVClass sch_dec_class = { | ||
735 | .class_name = "SchDec", | ||
736 | .version = LIBAVUTIL_VERSION_INT, | ||
737 | .parent_log_context_offset = offsetof(SchDec, task.func_arg), | ||
738 | }; | ||
739 | |||
740 | 6758 | int sch_add_dec(Scheduler *sch, SchThreadFunc func, void *ctx, int send_end_ts) | |
741 | { | ||
742 | 6758 | const unsigned idx = sch->nb_dec; | |
743 | |||
744 | SchDec *dec; | ||
745 | int ret; | ||
746 | |||
747 | 6758 | ret = GROW_ARRAY(sch->dec, sch->nb_dec); | |
748 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 6758 times.
|
6758 | if (ret < 0) |
749 | ✗ | return ret; | |
750 | |||
751 | 6758 | dec = &sch->dec[idx]; | |
752 | |||
753 | 6758 | task_init(sch, &dec->task, SCH_NODE_TYPE_DEC, idx, func, ctx); | |
754 | |||
755 | 6758 | dec->class = &sch_dec_class; | |
756 | 6758 | dec->send_frame = av_frame_alloc(); | |
757 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 6758 times.
|
6758 | if (!dec->send_frame) |
758 | ✗ | return AVERROR(ENOMEM); | |
759 | |||
760 | 6758 | ret = sch_add_dec_output(sch, idx); | |
761 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 6758 times.
|
6758 | if (ret < 0) |
762 | ✗ | return ret; | |
763 | |||
764 | 6758 | ret = queue_alloc(&dec->queue, 1, 0, QUEUE_PACKETS); | |
765 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 6758 times.
|
6758 | if (ret < 0) |
766 | ✗ | return ret; | |
767 | |||
768 |
2/2✓ Branch 0 taken 1 times.
✓ Branch 1 taken 6757 times.
|
6758 | if (send_end_ts) { |
769 | 1 | ret = av_thread_message_queue_alloc(&dec->queue_end_ts, 1, sizeof(Timestamp)); | |
770 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 1 times.
|
1 | if (ret < 0) |
771 | ✗ | return ret; | |
772 | } | ||
773 | |||
774 | 6758 | return idx; | |
775 | } | ||
776 | |||
777 | static const AVClass sch_enc_class = { | ||
778 | .class_name = "SchEnc", | ||
779 | .version = LIBAVUTIL_VERSION_INT, | ||
780 | .parent_log_context_offset = offsetof(SchEnc, task.func_arg), | ||
781 | }; | ||
782 | |||
783 | 7710 | int sch_add_enc(Scheduler *sch, SchThreadFunc func, void *ctx, | |
784 | int (*open_cb)(void *opaque, const AVFrame *frame)) | ||
785 | { | ||
786 | 7710 | const unsigned idx = sch->nb_enc; | |
787 | |||
788 | SchEnc *enc; | ||
789 | int ret; | ||
790 | |||
791 | 7710 | ret = GROW_ARRAY(sch->enc, sch->nb_enc); | |
792 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 7710 times.
|
7710 | if (ret < 0) |
793 | ✗ | return ret; | |
794 | |||
795 | 7710 | enc = &sch->enc[idx]; | |
796 | |||
797 | 7710 | enc->class = &sch_enc_class; | |
798 | 7710 | enc->open_cb = open_cb; | |
799 | 7710 | enc->sq_idx[0] = -1; | |
800 | 7710 | enc->sq_idx[1] = -1; | |
801 | |||
802 | 7710 | task_init(sch, &enc->task, SCH_NODE_TYPE_ENC, idx, func, ctx); | |
803 | |||
804 | 7710 | enc->send_pkt = av_packet_alloc(); | |
805 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 7710 times.
|
7710 | if (!enc->send_pkt) |
806 | ✗ | return AVERROR(ENOMEM); | |
807 | |||
808 | 7710 | ret = queue_alloc(&enc->queue, 1, 0, QUEUE_FRAMES); | |
809 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 7710 times.
|
7710 | if (ret < 0) |
810 | ✗ | return ret; | |
811 | |||
812 | 7710 | return idx; | |
813 | } | ||
814 | |||
815 | static const AVClass sch_fg_class = { | ||
816 | .class_name = "SchFilterGraph", | ||
817 | .version = LIBAVUTIL_VERSION_INT, | ||
818 | .parent_log_context_offset = offsetof(SchFilterGraph, task.func_arg), | ||
819 | }; | ||
820 | |||
821 | 7542 | int sch_add_filtergraph(Scheduler *sch, unsigned nb_inputs, unsigned nb_outputs, | |
822 | SchThreadFunc func, void *ctx) | ||
823 | { | ||
824 | 7542 | const unsigned idx = sch->nb_filters; | |
825 | |||
826 | SchFilterGraph *fg; | ||
827 | int ret; | ||
828 | |||
829 | 7542 | ret = GROW_ARRAY(sch->filters, sch->nb_filters); | |
830 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 7542 times.
|
7542 | if (ret < 0) |
831 | ✗ | return ret; | |
832 | 7542 | fg = &sch->filters[idx]; | |
833 | |||
834 | 7542 | fg->class = &sch_fg_class; | |
835 | |||
836 | 7542 | task_init(sch, &fg->task, SCH_NODE_TYPE_FILTER_IN, idx, func, ctx); | |
837 | |||
838 |
2/2✓ Branch 0 taken 6702 times.
✓ Branch 1 taken 840 times.
|
7542 | if (nb_inputs) { |
839 | 6702 | fg->inputs = av_calloc(nb_inputs, sizeof(*fg->inputs)); | |
840 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 6702 times.
|
6702 | if (!fg->inputs) |
841 | ✗ | return AVERROR(ENOMEM); | |
842 | 6702 | fg->nb_inputs = nb_inputs; | |
843 | } | ||
844 | |||
845 |
1/2✓ Branch 0 taken 7542 times.
✗ Branch 1 not taken.
|
7542 | if (nb_outputs) { |
846 | 7542 | fg->outputs = av_calloc(nb_outputs, sizeof(*fg->outputs)); | |
847 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 7542 times.
|
7542 | if (!fg->outputs) |
848 | ✗ | return AVERROR(ENOMEM); | |
849 | 7542 | fg->nb_outputs = nb_outputs; | |
850 | } | ||
851 | |||
852 | 7542 | ret = waiter_init(&fg->waiter); | |
853 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 7542 times.
|
7542 | if (ret < 0) |
854 | ✗ | return ret; | |
855 | |||
856 | 7542 | ret = queue_alloc(&fg->queue, fg->nb_inputs + 1, 0, QUEUE_FRAMES); | |
857 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 7542 times.
|
7542 | if (ret < 0) |
858 | ✗ | return ret; | |
859 | |||
860 | 7542 | return idx; | |
861 | } | ||
862 | |||
863 | 3069 | int sch_add_sq_enc(Scheduler *sch, uint64_t buf_size_us, void *logctx) | |
864 | { | ||
865 | SchSyncQueue *sq; | ||
866 | int ret; | ||
867 | |||
868 | 3069 | ret = GROW_ARRAY(sch->sq_enc, sch->nb_sq_enc); | |
869 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 3069 times.
|
3069 | if (ret < 0) |
870 | ✗ | return ret; | |
871 | 3069 | sq = &sch->sq_enc[sch->nb_sq_enc - 1]; | |
872 | |||
873 | 3069 | sq->sq = sq_alloc(SYNC_QUEUE_FRAMES, buf_size_us, logctx); | |
874 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 3069 times.
|
3069 | if (!sq->sq) |
875 | ✗ | return AVERROR(ENOMEM); | |
876 | |||
877 | 3069 | sq->frame = av_frame_alloc(); | |
878 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 3069 times.
|
3069 | if (!sq->frame) |
879 | ✗ | return AVERROR(ENOMEM); | |
880 | |||
881 | 3069 | ret = pthread_mutex_init(&sq->lock, NULL); | |
882 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 3069 times.
|
3069 | if (ret) |
883 | ✗ | return AVERROR(ret); | |
884 | |||
885 | 3069 | return sq - sch->sq_enc; | |
886 | } | ||
887 | |||
888 | 3131 | int sch_sq_add_enc(Scheduler *sch, unsigned sq_idx, unsigned enc_idx, | |
889 | int limiting, uint64_t max_frames) | ||
890 | { | ||
891 | SchSyncQueue *sq; | ||
892 | SchEnc *enc; | ||
893 | int ret; | ||
894 | |||
895 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 3131 times.
|
3131 | av_assert0(sq_idx < sch->nb_sq_enc); |
896 | 3131 | sq = &sch->sq_enc[sq_idx]; | |
897 | |||
898 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 3131 times.
|
3131 | av_assert0(enc_idx < sch->nb_enc); |
899 | 3131 | enc = &sch->enc[enc_idx]; | |
900 | |||
901 | 3131 | ret = GROW_ARRAY(sq->enc_idx, sq->nb_enc_idx); | |
902 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 3131 times.
|
3131 | if (ret < 0) |
903 | ✗ | return ret; | |
904 | 3131 | sq->enc_idx[sq->nb_enc_idx - 1] = enc_idx; | |
905 | |||
906 | 3131 | ret = sq_add_stream(sq->sq, limiting); | |
907 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 3131 times.
|
3131 | if (ret < 0) |
908 | ✗ | return ret; | |
909 | |||
910 | 3131 | enc->sq_idx[0] = sq_idx; | |
911 | 3131 | enc->sq_idx[1] = ret; | |
912 | |||
913 |
2/2✓ Branch 0 taken 2942 times.
✓ Branch 1 taken 189 times.
|
3131 | if (max_frames != INT64_MAX) |
914 | 2942 | sq_limit_frames(sq->sq, enc->sq_idx[1], max_frames); | |
915 | |||
916 | 3131 | return 0; | |
917 | } | ||
918 | |||
919 | 29657 | int sch_connect(Scheduler *sch, SchedulerNode src, SchedulerNode dst) | |
920 | { | ||
921 | int ret; | ||
922 | |||
923 |
4/5✓ Branch 0 taken 7449 times.
✓ Branch 1 taken 6825 times.
✓ Branch 2 taken 7672 times.
✓ Branch 3 taken 7711 times.
✗ Branch 4 not taken.
|
29657 | switch (src.type) { |
924 | 7449 | case SCH_NODE_TYPE_DEMUX: { | |
925 | SchDemuxStream *ds; | ||
926 | |||
927 |
2/4✓ Branch 0 taken 7449 times.
✗ Branch 1 not taken.
✗ Branch 2 not taken.
✓ Branch 3 taken 7449 times.
|
7449 | av_assert0(src.idx < sch->nb_demux && |
928 | src.idx_stream < sch->demux[src.idx].nb_streams); | ||
929 | 7449 | ds = &sch->demux[src.idx].streams[src.idx_stream]; | |
930 | |||
931 | 7449 | ret = GROW_ARRAY(ds->dst, ds->nb_dst); | |
932 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 7449 times.
|
7449 | if (ret < 0) |
933 | ✗ | return ret; | |
934 | |||
935 | 7449 | ds->dst[ds->nb_dst - 1] = dst; | |
936 | |||
937 | // demuxed packets go to decoding or streamcopy | ||
938 |
2/3✓ Branch 0 taken 6757 times.
✓ Branch 1 taken 692 times.
✗ Branch 2 not taken.
|
7449 | switch (dst.type) { |
939 | 6757 | case SCH_NODE_TYPE_DEC: { | |
940 | SchDec *dec; | ||
941 | |||
942 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 6757 times.
|
6757 | av_assert0(dst.idx < sch->nb_dec); |
943 | 6757 | dec = &sch->dec[dst.idx]; | |
944 | |||
945 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 6757 times.
|
6757 | av_assert0(!dec->src.type); |
946 | 6757 | dec->src = src; | |
947 | 6757 | break; | |
948 | } | ||
949 | 692 | case SCH_NODE_TYPE_MUX: { | |
950 | SchMuxStream *ms; | ||
951 | |||
952 |
2/4✓ Branch 0 taken 692 times.
✗ Branch 1 not taken.
✗ Branch 2 not taken.
✓ Branch 3 taken 692 times.
|
692 | av_assert0(dst.idx < sch->nb_mux && |
953 | dst.idx_stream < sch->mux[dst.idx].nb_streams); | ||
954 | 692 | ms = &sch->mux[dst.idx].streams[dst.idx_stream]; | |
955 | |||
956 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 692 times.
|
692 | av_assert0(!ms->src.type); |
957 | 692 | ms->src = src; | |
958 | |||
959 | 692 | break; | |
960 | } | ||
961 | ✗ | default: av_assert0(0); | |
962 | } | ||
963 | |||
964 | 7449 | break; | |
965 | } | ||
966 | 6825 | case SCH_NODE_TYPE_DEC: { | |
967 | SchDec *dec; | ||
968 | SchDecOutput *o; | ||
969 | |||
970 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 6825 times.
|
6825 | av_assert0(src.idx < sch->nb_dec); |
971 | 6825 | dec = &sch->dec[src.idx]; | |
972 | |||
973 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 6825 times.
|
6825 | av_assert0(src.idx_stream < dec->nb_outputs); |
974 | 6825 | o = &dec->outputs[src.idx_stream]; | |
975 | |||
976 | 6825 | ret = GROW_ARRAY(o->dst, o->nb_dst); | |
977 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 6825 times.
|
6825 | if (ret < 0) |
978 | ✗ | return ret; | |
979 | |||
980 | 6825 | o->dst[o->nb_dst - 1] = dst; | |
981 | |||
982 | // decoded frames go to filters or encoding | ||
983 |
2/3✓ Branch 0 taken 6787 times.
✓ Branch 1 taken 38 times.
✗ Branch 2 not taken.
|
6825 | switch (dst.type) { |
984 | 6787 | case SCH_NODE_TYPE_FILTER_IN: { | |
985 | SchFilterIn *fi; | ||
986 | |||
987 |
2/4✓ Branch 0 taken 6787 times.
✗ Branch 1 not taken.
✗ Branch 2 not taken.
✓ Branch 3 taken 6787 times.
|
6787 | av_assert0(dst.idx < sch->nb_filters && |
988 | dst.idx_stream < sch->filters[dst.idx].nb_inputs); | ||
989 | 6787 | fi = &sch->filters[dst.idx].inputs[dst.idx_stream]; | |
990 | |||
991 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 6787 times.
|
6787 | av_assert0(!fi->src.type); |
992 | 6787 | fi->src = src; | |
993 | 6787 | break; | |
994 | } | ||
995 | 38 | case SCH_NODE_TYPE_ENC: { | |
996 | SchEnc *enc; | ||
997 | |||
998 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 38 times.
|
38 | av_assert0(dst.idx < sch->nb_enc); |
999 | 38 | enc = &sch->enc[dst.idx]; | |
1000 | |||
1001 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 38 times.
|
38 | av_assert0(!enc->src.type); |
1002 | 38 | enc->src = src; | |
1003 | 38 | break; | |
1004 | } | ||
1005 | ✗ | default: av_assert0(0); | |
1006 | } | ||
1007 | |||
1008 | 6825 | break; | |
1009 | } | ||
1010 | 7672 | case SCH_NODE_TYPE_FILTER_OUT: { | |
1011 | SchFilterOut *fo; | ||
1012 | |||
1013 |
2/4✓ Branch 0 taken 7672 times.
✗ Branch 1 not taken.
✗ Branch 2 not taken.
✓ Branch 3 taken 7672 times.
|
7672 | av_assert0(src.idx < sch->nb_filters && |
1014 | src.idx_stream < sch->filters[src.idx].nb_outputs); | ||
1015 | 7672 | fo = &sch->filters[src.idx].outputs[src.idx_stream]; | |
1016 | |||
1017 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 7672 times.
|
7672 | av_assert0(!fo->dst.type); |
1018 | 7672 | fo->dst = dst; | |
1019 | |||
1020 | // filtered frames go to encoding or another filtergraph | ||
1021 |
1/3✓ Branch 0 taken 7672 times.
✗ Branch 1 not taken.
✗ Branch 2 not taken.
|
7672 | switch (dst.type) { |
1022 | 7672 | case SCH_NODE_TYPE_ENC: { | |
1023 | SchEnc *enc; | ||
1024 | |||
1025 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 7672 times.
|
7672 | av_assert0(dst.idx < sch->nb_enc); |
1026 | 7672 | enc = &sch->enc[dst.idx]; | |
1027 | |||
1028 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 7672 times.
|
7672 | av_assert0(!enc->src.type); |
1029 | 7672 | enc->src = src; | |
1030 | 7672 | break; | |
1031 | } | ||
1032 | ✗ | case SCH_NODE_TYPE_FILTER_IN: { | |
1033 | SchFilterIn *fi; | ||
1034 | |||
1035 | ✗ | av_assert0(dst.idx < sch->nb_filters && | |
1036 | dst.idx_stream < sch->filters[dst.idx].nb_inputs); | ||
1037 | ✗ | fi = &sch->filters[dst.idx].inputs[dst.idx_stream]; | |
1038 | |||
1039 | ✗ | av_assert0(!fi->src.type); | |
1040 | ✗ | fi->src = src; | |
1041 | ✗ | break; | |
1042 | } | ||
1043 | ✗ | default: av_assert0(0); | |
1044 | } | ||
1045 | |||
1046 | |||
1047 | 7672 | break; | |
1048 | } | ||
1049 | 7711 | case SCH_NODE_TYPE_ENC: { | |
1050 | SchEnc *enc; | ||
1051 | |||
1052 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 7711 times.
|
7711 | av_assert0(src.idx < sch->nb_enc); |
1053 | 7711 | enc = &sch->enc[src.idx]; | |
1054 | |||
1055 | 7711 | ret = GROW_ARRAY(enc->dst, enc->nb_dst); | |
1056 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 7711 times.
|
7711 | if (ret < 0) |
1057 | ✗ | return ret; | |
1058 | |||
1059 | 7711 | enc->dst[enc->nb_dst - 1] = dst; | |
1060 | |||
1061 | // encoding packets go to muxing or decoding | ||
1062 |
2/3✓ Branch 0 taken 7710 times.
✓ Branch 1 taken 1 times.
✗ Branch 2 not taken.
|
7711 | switch (dst.type) { |
1063 | 7710 | case SCH_NODE_TYPE_MUX: { | |
1064 | SchMuxStream *ms; | ||
1065 | |||
1066 |
2/4✓ Branch 0 taken 7710 times.
✗ Branch 1 not taken.
✗ Branch 2 not taken.
✓ Branch 3 taken 7710 times.
|
7710 | av_assert0(dst.idx < sch->nb_mux && |
1067 | dst.idx_stream < sch->mux[dst.idx].nb_streams); | ||
1068 | 7710 | ms = &sch->mux[dst.idx].streams[dst.idx_stream]; | |
1069 | |||
1070 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 7710 times.
|
7710 | av_assert0(!ms->src.type); |
1071 | 7710 | ms->src = src; | |
1072 | |||
1073 | 7710 | break; | |
1074 | } | ||
1075 | 1 | case SCH_NODE_TYPE_DEC: { | |
1076 | SchDec *dec; | ||
1077 | |||
1078 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 1 times.
|
1 | av_assert0(dst.idx < sch->nb_dec); |
1079 | 1 | dec = &sch->dec[dst.idx]; | |
1080 | |||
1081 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 1 times.
|
1 | av_assert0(!dec->src.type); |
1082 | 1 | dec->src = src; | |
1083 | |||
1084 | 1 | break; | |
1085 | } | ||
1086 | ✗ | default: av_assert0(0); | |
1087 | } | ||
1088 | |||
1089 | 7711 | break; | |
1090 | } | ||
1091 | ✗ | default: av_assert0(0); | |
1092 | } | ||
1093 | |||
1094 | 29657 | return 0; | |
1095 | } | ||
1096 | |||
1097 | 7926 | static int mux_task_start(SchMux *mux) | |
1098 | { | ||
1099 | 7926 | int ret = 0; | |
1100 | |||
1101 | 7926 | ret = task_start(&mux->task); | |
1102 |
1/2✓ Branch 0 taken 7926 times.
✗ Branch 1 not taken.
|
7926 | if (ret < 0) |
1103 | ✗ | return ret; | |
1104 | |||
1105 | /* flush the pre-muxing queues */ | ||
1106 | 3396 | while (1) { | |
1107 | 11322 | int min_stream = -1; | |
1108 | 11322 | Timestamp min_ts = { .ts = AV_NOPTS_VALUE }; | |
1109 | |||
1110 | AVPacket *pkt; | ||
1111 | |||
1112 | // find the stream with the earliest dts or EOF in pre-muxing queue | ||
1113 |
2/2✓ Branch 0 taken 21322 times.
✓ Branch 1 taken 11231 times.
|
32553 | for (unsigned i = 0; i < mux->nb_streams; i++) { |
1114 | 21322 | SchMuxStream *ms = &mux->streams[i]; | |
1115 | |||
1116 |
2/2✓ Branch 1 taken 15705 times.
✓ Branch 2 taken 5617 times.
|
21322 | if (av_fifo_peek(ms->pre_mux_queue.fifo, &pkt, 1, 0) < 0) |
1117 | 15705 | continue; | |
1118 | |||
1119 |
4/4✓ Branch 0 taken 5539 times.
✓ Branch 1 taken 78 times.
✓ Branch 2 taken 13 times.
✓ Branch 3 taken 5526 times.
|
5617 | if (!pkt || pkt->dts == AV_NOPTS_VALUE) { |
1120 | 91 | min_stream = i; | |
1121 | 91 | break; | |
1122 | } | ||
1123 | |||
1124 |
4/4✓ Branch 0 taken 2194 times.
✓ Branch 1 taken 3332 times.
✓ Branch 2 taken 32 times.
✓ Branch 3 taken 2162 times.
|
7720 | if (min_ts.ts == AV_NOPTS_VALUE || |
1125 | 2194 | av_compare_ts(min_ts.ts, min_ts.tb, pkt->dts, pkt->time_base) > 0) { | |
1126 | 3364 | min_stream = i; | |
1127 | 3364 | min_ts = (Timestamp){ .ts = pkt->dts, .tb = pkt->time_base }; | |
1128 | } | ||
1129 | } | ||
1130 | |||
1131 |
2/2✓ Branch 0 taken 3396 times.
✓ Branch 1 taken 7926 times.
|
11322 | if (min_stream >= 0) { |
1132 | 3396 | SchMuxStream *ms = &mux->streams[min_stream]; | |
1133 | |||
1134 | 3396 | ret = av_fifo_read(ms->pre_mux_queue.fifo, &pkt, 1); | |
1135 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 3396 times.
|
3396 | av_assert0(ret >= 0); |
1136 | |||
1137 |
2/2✓ Branch 0 taken 3318 times.
✓ Branch 1 taken 78 times.
|
3396 | if (pkt) { |
1138 |
2/2✓ Branch 0 taken 3308 times.
✓ Branch 1 taken 10 times.
|
3318 | if (!ms->init_eof) |
1139 | 3308 | ret = tq_send(mux->queue, min_stream, pkt); | |
1140 | 3318 | av_packet_free(&pkt); | |
1141 |
2/2✓ Branch 0 taken 1 times.
✓ Branch 1 taken 3317 times.
|
3318 | if (ret == AVERROR_EOF) |
1142 | 1 | ms->init_eof = 1; | |
1143 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 3317 times.
|
3317 | else if (ret < 0) |
1144 | ✗ | return ret; | |
1145 | } else | ||
1146 | 78 | tq_send_finish(mux->queue, min_stream); | |
1147 | |||
1148 | 3396 | continue; | |
1149 | } | ||
1150 | |||
1151 | 7926 | break; | |
1152 | } | ||
1153 | |||
1154 | 7926 | atomic_store(&mux->mux_started, 1); | |
1155 | |||
1156 | 7926 | return 0; | |
1157 | } | ||
1158 | |||
1159 | int print_sdp(const char *filename); | ||
1160 | |||
1161 | 7926 | static int mux_init(Scheduler *sch, SchMux *mux) | |
1162 | { | ||
1163 | int ret; | ||
1164 | |||
1165 | 7926 | ret = mux->init(mux->task.func_arg); | |
1166 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 7926 times.
|
7926 | if (ret < 0) |
1167 | ✗ | return ret; | |
1168 | |||
1169 | 7926 | sch->nb_mux_ready++; | |
1170 | |||
1171 |
2/4✓ Branch 0 taken 7926 times.
✗ Branch 1 not taken.
✗ Branch 2 not taken.
✓ Branch 3 taken 7926 times.
|
7926 | if (sch->sdp_filename || sch->sdp_auto) { |
1172 | ✗ | if (sch->nb_mux_ready < sch->nb_mux) | |
1173 | ✗ | return 0; | |
1174 | |||
1175 | ✗ | ret = print_sdp(sch->sdp_filename); | |
1176 | ✗ | if (ret < 0) { | |
1177 | ✗ | av_log(sch, AV_LOG_ERROR, "Error writing the SDP.\n"); | |
1178 | ✗ | return ret; | |
1179 | } | ||
1180 | |||
1181 | /* SDP is written only after all the muxers are ready, so now we | ||
1182 | * start ALL the threads */ | ||
1183 | ✗ | for (unsigned i = 0; i < sch->nb_mux; i++) { | |
1184 | ✗ | ret = mux_task_start(&sch->mux[i]); | |
1185 | ✗ | if (ret < 0) | |
1186 | ✗ | return ret; | |
1187 | } | ||
1188 | } else { | ||
1189 | 7926 | ret = mux_task_start(mux); | |
1190 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 7926 times.
|
7926 | if (ret < 0) |
1191 | ✗ | return ret; | |
1192 | } | ||
1193 | |||
1194 | 7926 | return 0; | |
1195 | } | ||
1196 | |||
1197 | 8402 | void sch_mux_stream_buffering(Scheduler *sch, unsigned mux_idx, unsigned stream_idx, | |
1198 | size_t data_threshold, int max_packets) | ||
1199 | { | ||
1200 | SchMux *mux; | ||
1201 | SchMuxStream *ms; | ||
1202 | |||
1203 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 8402 times.
|
8402 | av_assert0(mux_idx < sch->nb_mux); |
1204 | 8402 | mux = &sch->mux[mux_idx]; | |
1205 | |||
1206 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 8402 times.
|
8402 | av_assert0(stream_idx < mux->nb_streams); |
1207 | 8402 | ms = &mux->streams[stream_idx]; | |
1208 | |||
1209 | 8402 | ms->pre_mux_queue.max_packets = max_packets; | |
1210 | 8402 | ms->pre_mux_queue.data_threshold = data_threshold; | |
1211 | 8402 | } | |
1212 | |||
1213 | 8402 | int sch_mux_stream_ready(Scheduler *sch, unsigned mux_idx, unsigned stream_idx) | |
1214 | { | ||
1215 | SchMux *mux; | ||
1216 | 8402 | int ret = 0; | |
1217 | |||
1218 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 8402 times.
|
8402 | av_assert0(mux_idx < sch->nb_mux); |
1219 | 8402 | mux = &sch->mux[mux_idx]; | |
1220 | |||
1221 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 8402 times.
|
8402 | av_assert0(stream_idx < mux->nb_streams); |
1222 | |||
1223 | 8402 | pthread_mutex_lock(&sch->mux_ready_lock); | |
1224 | |||
1225 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 8402 times.
|
8402 | av_assert0(mux->nb_streams_ready < mux->nb_streams); |
1226 | |||
1227 | // this may be called during initialization - do not start | ||
1228 | // threads before sch_start() is called | ||
1229 |
2/2✓ Branch 0 taken 7926 times.
✓ Branch 1 taken 476 times.
|
8402 | if (++mux->nb_streams_ready == mux->nb_streams && |
1230 |
2/2✓ Branch 0 taken 7467 times.
✓ Branch 1 taken 459 times.
|
7926 | sch->state >= SCH_STATE_STARTED) |
1231 | 7467 | ret = mux_init(sch, mux); | |
1232 | |||
1233 | 8402 | pthread_mutex_unlock(&sch->mux_ready_lock); | |
1234 | |||
1235 | 8402 | return ret; | |
1236 | } | ||
1237 | |||
1238 | 1 | int sch_mux_sub_heartbeat_add(Scheduler *sch, unsigned mux_idx, unsigned stream_idx, | |
1239 | unsigned dec_idx) | ||
1240 | { | ||
1241 | SchMux *mux; | ||
1242 | SchMuxStream *ms; | ||
1243 | 1 | int ret = 0; | |
1244 | |||
1245 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 1 times.
|
1 | av_assert0(mux_idx < sch->nb_mux); |
1246 | 1 | mux = &sch->mux[mux_idx]; | |
1247 | |||
1248 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 1 times.
|
1 | av_assert0(stream_idx < mux->nb_streams); |
1249 | 1 | ms = &mux->streams[stream_idx]; | |
1250 | |||
1251 | 1 | ret = GROW_ARRAY(ms->sub_heartbeat_dst, ms->nb_sub_heartbeat_dst); | |
1252 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 1 times.
|
1 | if (ret < 0) |
1253 | ✗ | return ret; | |
1254 | |||
1255 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 1 times.
|
1 | av_assert0(dec_idx < sch->nb_dec); |
1256 | 1 | ms->sub_heartbeat_dst[ms->nb_sub_heartbeat_dst - 1] = dec_idx; | |
1257 | |||
1258 |
1/2✓ Branch 0 taken 1 times.
✗ Branch 1 not taken.
|
1 | if (!mux->sub_heartbeat_pkt) { |
1259 | 1 | mux->sub_heartbeat_pkt = av_packet_alloc(); | |
1260 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 1 times.
|
1 | if (!mux->sub_heartbeat_pkt) |
1261 | ✗ | return AVERROR(ENOMEM); | |
1262 | } | ||
1263 | |||
1264 | 1 | return 0; | |
1265 | } | ||
1266 | |||
1267 | 532311 | static void unchoke_for_stream(Scheduler *sch, SchedulerNode src) | |
1268 | { | ||
1269 | 429786 | while (1) { | |
1270 | SchFilterGraph *fg; | ||
1271 | |||
1272 | // fed directly by a demuxer (i.e. not through a filtergraph) | ||
1273 |
2/2✓ Branch 0 taken 507044 times.
✓ Branch 1 taken 455053 times.
|
962097 | if (src.type == SCH_NODE_TYPE_DEMUX) { |
1274 | 507044 | sch->demux[src.idx].waiter.choked_next = 0; | |
1275 | 507044 | return; | |
1276 | } | ||
1277 | |||
1278 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 455053 times.
|
455053 | av_assert0(src.type == SCH_NODE_TYPE_FILTER_OUT); |
1279 | 455053 | fg = &sch->filters[src.idx]; | |
1280 | |||
1281 | // the filtergraph contains internal sources and | ||
1282 | // requested to be scheduled directly | ||
1283 |
2/2✓ Branch 0 taken 25267 times.
✓ Branch 1 taken 429786 times.
|
455053 | if (fg->best_input == fg->nb_inputs) { |
1284 | 25267 | fg->waiter.choked_next = 0; | |
1285 | 25267 | return; | |
1286 | } | ||
1287 | |||
1288 | 429786 | src = fg->inputs[fg->best_input].src_sched; | |
1289 | } | ||
1290 | } | ||
1291 | |||
1292 | 534224 | static void schedule_update_locked(Scheduler *sch) | |
1293 | { | ||
1294 | int64_t dts; | ||
1295 | 534224 | int have_unchoked = 0; | |
1296 | |||
1297 | // on termination request all waiters are choked, | ||
1298 | // we are not to unchoke them | ||
1299 |
2/2✓ Branch 0 taken 4064 times.
✓ Branch 1 taken 530160 times.
|
534224 | if (atomic_load(&sch->terminate)) |
1300 | 4064 | return; | |
1301 | |||
1302 | 530160 | dts = trailing_dts(sch, 0); | |
1303 | |||
1304 | 530160 | atomic_store(&sch->last_dts, dts); | |
1305 | |||
1306 | // initialize our internal state | ||
1307 |
2/2✓ Branch 0 taken 1060320 times.
✓ Branch 1 taken 530160 times.
|
1590480 | for (unsigned type = 0; type < 2; type++) |
1308 |
4/4✓ Branch 0 taken 1007955 times.
✓ Branch 1 taken 1042398 times.
✓ Branch 2 taken 990033 times.
✓ Branch 3 taken 1060320 times.
|
2050353 | for (unsigned i = 0; i < (type ? sch->nb_filters : sch->nb_demux); i++) { |
1309 |
2/2✓ Branch 0 taken 477795 times.
✓ Branch 1 taken 512238 times.
|
990033 | SchWaiter *w = type ? &sch->filters[i].waiter : &sch->demux[i].waiter; |
1310 | 990033 | w->choked_prev = atomic_load(&w->choked); | |
1311 | 990033 | w->choked_next = 1; | |
1312 | } | ||
1313 | |||
1314 | // figure out the sources that are allowed to proceed | ||
1315 |
2/2✓ Branch 0 taken 530427 times.
✓ Branch 1 taken 530160 times.
|
1060587 | for (unsigned i = 0; i < sch->nb_mux; i++) { |
1316 | 530427 | SchMux *mux = &sch->mux[i]; | |
1317 | |||
1318 |
2/2✓ Branch 0 taken 596574 times.
✓ Branch 1 taken 530427 times.
|
1127001 | for (unsigned j = 0; j < mux->nb_streams; j++) { |
1319 | 596574 | SchMuxStream *ms = &mux->streams[j]; | |
1320 | |||
1321 | // unblock sources for output streams that are not finished | ||
1322 | // and not too far ahead of the trailing stream | ||
1323 |
2/2✓ Branch 0 taken 37851 times.
✓ Branch 1 taken 558723 times.
|
596574 | if (ms->source_finished) |
1324 | 37851 | continue; | |
1325 |
4/4✓ Branch 0 taken 29501 times.
✓ Branch 1 taken 529222 times.
✓ Branch 2 taken 9176 times.
✓ Branch 3 taken 20325 times.
|
558723 | if (dts == AV_NOPTS_VALUE && ms->last_dts != AV_NOPTS_VALUE) |
1326 | 9176 | continue; | |
1327 |
4/4✓ Branch 0 taken 529222 times.
✓ Branch 1 taken 20325 times.
✓ Branch 2 taken 17236 times.
✓ Branch 3 taken 511986 times.
|
549547 | if (dts != AV_NOPTS_VALUE && ms->last_dts - dts >= SCHEDULE_TOLERANCE) |
1328 | 17236 | continue; | |
1329 | |||
1330 | // resolve the source to unchoke | ||
1331 | 532311 | unchoke_for_stream(sch, ms->src_sched); | |
1332 | 532311 | have_unchoked = 1; | |
1333 | } | ||
1334 | } | ||
1335 | |||
1336 | // make sure to unchoke at least one source, if still available | ||
1337 |
4/4✓ Branch 0 taken 61971 times.
✓ Branch 1 taken 516277 times.
✓ Branch 2 taken 48088 times.
✓ Branch 3 taken 13883 times.
|
578248 | for (unsigned type = 0; !have_unchoked && type < 2; type++) |
1338 |
4/4✓ Branch 0 taken 33082 times.
✓ Branch 1 taken 46911 times.
✓ Branch 2 taken 46287 times.
✓ Branch 3 taken 33706 times.
|
79993 | for (unsigned i = 0; i < (type ? sch->nb_filters : sch->nb_demux); i++) { |
1339 |
2/2✓ Branch 0 taken 19199 times.
✓ Branch 1 taken 27088 times.
|
46287 | int exited = type ? sch->filters[i].task_exited : sch->demux[i].task_exited; |
1340 |
2/2✓ Branch 0 taken 19199 times.
✓ Branch 1 taken 27088 times.
|
46287 | SchWaiter *w = type ? &sch->filters[i].waiter : &sch->demux[i].waiter; |
1341 |
2/2✓ Branch 0 taken 14382 times.
✓ Branch 1 taken 31905 times.
|
46287 | if (!exited) { |
1342 | 14382 | w->choked_next = 0; | |
1343 | 14382 | have_unchoked = 1; | |
1344 | 14382 | break; | |
1345 | } | ||
1346 | } | ||
1347 | |||
1348 | |||
1349 |
2/2✓ Branch 0 taken 1060320 times.
✓ Branch 1 taken 530160 times.
|
1590480 | for (unsigned type = 0; type < 2; type++) |
1350 |
4/4✓ Branch 0 taken 1007955 times.
✓ Branch 1 taken 1042398 times.
✓ Branch 2 taken 990033 times.
✓ Branch 3 taken 1060320 times.
|
2050353 | for (unsigned i = 0; i < (type ? sch->nb_filters : sch->nb_demux); i++) { |
1351 |
2/2✓ Branch 0 taken 477795 times.
✓ Branch 1 taken 512238 times.
|
990033 | SchWaiter *w = type ? &sch->filters[i].waiter : &sch->demux[i].waiter; |
1352 |
2/2✓ Branch 0 taken 18272 times.
✓ Branch 1 taken 971761 times.
|
990033 | if (w->choked_prev != w->choked_next) |
1353 | 18272 | waiter_set(w, w->choked_next); | |
1354 | } | ||
1355 | |||
1356 | } | ||
1357 | |||
1358 | enum { | ||
1359 | CYCLE_NODE_NEW = 0, | ||
1360 | CYCLE_NODE_STARTED, | ||
1361 | CYCLE_NODE_DONE, | ||
1362 | }; | ||
1363 | |||
1364 | static int | ||
1365 | 7542 | check_acyclic_for_output(const Scheduler *sch, SchedulerNode src, | |
1366 | uint8_t *filters_visited, SchedulerNode *filters_stack) | ||
1367 | { | ||
1368 | 7542 | unsigned nb_filters_stack = 0; | |
1369 | |||
1370 | 7542 | memset(filters_visited, 0, sch->nb_filters * sizeof(*filters_visited)); | |
1371 | |||
1372 | 6789 | while (1) { | |
1373 | 14331 | const SchFilterGraph *fg = &sch->filters[src.idx]; | |
1374 | |||
1375 | 14331 | filters_visited[src.idx] = CYCLE_NODE_STARTED; | |
1376 | |||
1377 | // descend into every input, depth first | ||
1378 |
2/2✓ Branch 0 taken 6788 times.
✓ Branch 1 taken 7543 times.
|
14331 | if (src.idx_stream < fg->nb_inputs) { |
1379 | 6788 | const SchFilterIn *fi = &fg->inputs[src.idx_stream++]; | |
1380 | |||
1381 | // connected to demuxer, no cycles possible | ||
1382 |
2/2✓ Branch 0 taken 6787 times.
✓ Branch 1 taken 1 times.
|
6788 | if (fi->src_sched.type == SCH_NODE_TYPE_DEMUX) |
1383 | 6788 | continue; | |
1384 | |||
1385 | // otherwise connected to another filtergraph | ||
1386 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 1 times.
|
1 | av_assert0(fi->src_sched.type == SCH_NODE_TYPE_FILTER_OUT); |
1387 | |||
1388 | // found a cycle | ||
1389 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 1 times.
|
1 | if (filters_visited[fi->src_sched.idx] == CYCLE_NODE_STARTED) |
1390 | ✗ | return AVERROR(EINVAL); | |
1391 | |||
1392 | // place current position on stack and descend | ||
1393 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 1 times.
|
1 | av_assert0(nb_filters_stack < sch->nb_filters); |
1394 | 1 | filters_stack[nb_filters_stack++] = src; | |
1395 | 1 | src = (SchedulerNode){ .idx = fi->src_sched.idx, .idx_stream = 0 }; | |
1396 | 1 | continue; | |
1397 | } | ||
1398 | |||
1399 | 7543 | filters_visited[src.idx] = CYCLE_NODE_DONE; | |
1400 | |||
1401 | // previous search finished, | ||
1402 |
2/2✓ Branch 0 taken 1 times.
✓ Branch 1 taken 7542 times.
|
7543 | if (nb_filters_stack) { |
1403 | 1 | src = filters_stack[--nb_filters_stack]; | |
1404 | 1 | continue; | |
1405 | } | ||
1406 | 7542 | return 0; | |
1407 | } | ||
1408 | } | ||
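check_acyclic_for_output() above is an iterative depth-first search: every node is marked NEW/STARTED/DONE, reaching a STARTED node again means a back edge (a cycle), and an explicit stack replaces recursion. A self-contained sketch of the same idea on a generic directed graph (the Node type and has_cycle() are illustrative, not scheduler API):

```c
#include <stdlib.h>

enum { NODE_NEW = 0, NODE_STARTED, NODE_DONE };

typedef struct Node {
    const unsigned *edges;   // indices of the nodes this one depends on
    unsigned        nb_edges;
} Node;

// Returns 1 if a cycle is reachable from 'start', 0 if not, -1 on allocation failure.
static int has_cycle(const Node *nodes, unsigned nb_nodes, unsigned start)
{
    unsigned char *state = calloc(nb_nodes, 1);
    unsigned *stack = malloc(nb_nodes * sizeof(*stack)); // nodes we descended from
    unsigned *pos   = malloc(nb_nodes * sizeof(*pos));   // next edge to visit there
    unsigned  depth = 0, cur = start, edge = 0;
    int ret = 0;

    if (!state || !stack || !pos) {
        ret = -1;
        goto end;
    }

    while (1) {
        state[cur] = NODE_STARTED;

        // descend into every edge, depth first
        if (edge < nodes[cur].nb_edges) {
            unsigned next = nodes[cur].edges[edge++];

            if (state[next] == NODE_STARTED) { // reached an in-progress node: cycle
                ret = 1;
                goto end;
            }
            if (state[next] == NODE_DONE)      // already known to be cycle-free
                continue;

            stack[depth] = cur;                // remember where to resume
            pos[depth++] = edge;
            cur  = next;
            edge = 0;
            continue;
        }

        state[cur] = NODE_DONE;

        if (!depth)                            // back at the start: no cycle found
            goto end;
        cur  = stack[--depth];                 // resume the previous search
        edge = pos[depth];
    }

end:
    free(state);
    free(stack);
    free(pos);
    return ret;
}
```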
1409 | |||
1410 | 7924 | static int check_acyclic(Scheduler *sch) | |
1411 | { | ||
1412 | 7924 | uint8_t *filters_visited = NULL; | |
1413 | 7924 | SchedulerNode *filters_stack = NULL; | |
1414 | |||
1415 | 7924 | int ret = 0; | |
1416 | |||
1417 |
2/2✓ Branch 0 taken 494 times.
✓ Branch 1 taken 7430 times.
|
7924 | if (!sch->nb_filters) |
1418 | 494 | return 0; | |
1419 | |||
1420 | 7430 | filters_visited = av_malloc_array(sch->nb_filters, sizeof(*filters_visited)); | |
1421 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 7430 times.
|
7430 | if (!filters_visited) |
1422 | ✗ | return AVERROR(ENOMEM); | |
1423 | |||
1424 | 7430 | filters_stack = av_malloc_array(sch->nb_filters, sizeof(*filters_stack)); | |
1425 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 7430 times.
|
7430 | if (!filters_stack) { |
1426 | ✗ | ret = AVERROR(ENOMEM); | |
1427 | ✗ | goto fail; | |
1428 | } | ||
1429 | |||
1430 | // trace the transcoding graph upstream from every filtergraph | ||
1431 |
2/2✓ Branch 0 taken 7542 times.
✓ Branch 1 taken 7430 times.
|
14972 | for (unsigned i = 0; i < sch->nb_filters; i++) { |
1432 | 7542 | ret = check_acyclic_for_output(sch, (SchedulerNode){ .idx = i }, | |
1433 | filters_visited, filters_stack); | ||
1434 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 7542 times.
|
7542 | if (ret < 0) { |
1435 | ✗ | av_log(&sch->filters[i], AV_LOG_ERROR, "Transcoding graph has a cycle\n"); | |
1436 | ✗ | goto fail; | |
1437 | } | ||
1438 | } | ||
1439 | |||
1440 | 7430 | fail: | |
1441 | 7430 | av_freep(&filters_visited); | |
1442 | 7430 | av_freep(&filters_stack); | |
1443 | 7430 | return ret; | |
1444 | } | ||
1445 | |||
1446 | 7924 | static int start_prepare(Scheduler *sch) | |
1447 | { | ||
1448 | int ret; | ||
1449 | |||
1450 |
2/2✓ Branch 0 taken 7173 times.
✓ Branch 1 taken 7924 times.
|
15097 | for (unsigned i = 0; i < sch->nb_demux; i++) { |
1451 | 7173 | SchDemux *d = &sch->demux[i]; | |
1452 | |||
1453 |
2/2✓ Branch 0 taken 7426 times.
✓ Branch 1 taken 7173 times.
|
14599 | for (unsigned j = 0; j < d->nb_streams; j++) { |
1454 | 7426 | SchDemuxStream *ds = &d->streams[j]; | |
1455 | |||
1456 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 7426 times.
|
7426 | if (!ds->nb_dst) { |
1457 | ✗ | av_log(d, AV_LOG_ERROR, | |
1458 | "Demuxer stream %u not connected to any sink\n", j); | ||
1459 | ✗ | return AVERROR(EINVAL); | |
1460 | } | ||
1461 | |||
1462 | 7426 | ds->dst_finished = av_calloc(ds->nb_dst, sizeof(*ds->dst_finished)); | |
1463 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 7426 times.
|
7426 | if (!ds->dst_finished) |
1464 | ✗ | return AVERROR(ENOMEM); | |
1465 | } | ||
1466 | } | ||
1467 | |||
1468 |
2/2✓ Branch 0 taken 6758 times.
✓ Branch 1 taken 7924 times.
|
14682 | for (unsigned i = 0; i < sch->nb_dec; i++) { |
1469 | 6758 | SchDec *dec = &sch->dec[i]; | |
1470 | |||
1471 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 6758 times.
|
6758 | if (!dec->src.type) { |
1472 | ✗ | av_log(dec, AV_LOG_ERROR, | |
1473 | "Decoder not connected to a source\n"); | ||
1474 | ✗ | return AVERROR(EINVAL); | |
1475 | } | ||
1476 | |||
1477 |
2/2✓ Branch 0 taken 6764 times.
✓ Branch 1 taken 6758 times.
|
13522 | for (unsigned j = 0; j < dec->nb_outputs; j++) { |
1478 | 6764 | SchDecOutput *o = &dec->outputs[j]; | |
1479 | |||
1480 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 6764 times.
|
6764 | if (!o->nb_dst) { |
1481 | ✗ | av_log(dec, AV_LOG_ERROR, | |
1482 | "Decoder output %u not connected to any sink\n", j); | ||
1483 | ✗ | return AVERROR(EINVAL); | |
1484 | } | ||
1485 | |||
1486 | 6764 | o->dst_finished = av_calloc(o->nb_dst, sizeof(*o->dst_finished)); | |
1487 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 6764 times.
|
6764 | if (!o->dst_finished) |
1488 | ✗ | return AVERROR(ENOMEM); | |
1489 | } | ||
1490 | } | ||
1491 | |||
1492 |
2/2✓ Branch 0 taken 7710 times.
✓ Branch 1 taken 7924 times.
|
15634 | for (unsigned i = 0; i < sch->nb_enc; i++) { |
1493 | 7710 | SchEnc *enc = &sch->enc[i]; | |
1494 | |||
1495 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 7710 times.
|
7710 | if (!enc->src.type) { |
1496 | ✗ | av_log(enc, AV_LOG_ERROR, | |
1497 | "Encoder not connected to a source\n"); | ||
1498 | ✗ | return AVERROR(EINVAL); | |
1499 | } | ||
1500 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 7710 times.
|
7710 | if (!enc->nb_dst) { |
1501 | ✗ | av_log(enc, AV_LOG_ERROR, | |
1502 | "Encoder not connected to any sink\n"); | ||
1503 | ✗ | return AVERROR(EINVAL); | |
1504 | } | ||
1505 | |||
1506 | 7710 | enc->dst_finished = av_calloc(enc->nb_dst, sizeof(*enc->dst_finished)); | |
1507 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 7710 times.
|
7710 | if (!enc->dst_finished) |
1508 | ✗ | return AVERROR(ENOMEM); | |
1509 | } | ||
1510 | |||
1511 |
2/2✓ Branch 0 taken 7926 times.
✓ Branch 1 taken 7924 times.
|
15850 | for (unsigned i = 0; i < sch->nb_mux; i++) { |
1512 | 7926 | SchMux *mux = &sch->mux[i]; | |
1513 | |||
1514 |
2/2✓ Branch 0 taken 8402 times.
✓ Branch 1 taken 7926 times.
|
16328 | for (unsigned j = 0; j < mux->nb_streams; j++) { |
1515 | 8402 | SchMuxStream *ms = &mux->streams[j]; | |
1516 | |||
1517 |
2/3✓ Branch 0 taken 7710 times.
✓ Branch 1 taken 692 times.
✗ Branch 2 not taken.
|
8402 | switch (ms->src.type) { |
1518 | 7710 | case SCH_NODE_TYPE_ENC: { | |
1519 | 7710 | SchEnc *enc = &sch->enc[ms->src.idx]; | |
1520 |
2/2✓ Branch 0 taken 38 times.
✓ Branch 1 taken 7672 times.
|
7710 | if (enc->src.type == SCH_NODE_TYPE_DEC) { |
1521 | 38 | ms->src_sched = sch->dec[enc->src.idx].src; | |
1522 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 38 times.
|
38 | av_assert0(ms->src_sched.type == SCH_NODE_TYPE_DEMUX); |
1523 | } else { | ||
1524 | 7672 | ms->src_sched = enc->src; | |
1525 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 7672 times.
|
7672 | av_assert0(ms->src_sched.type == SCH_NODE_TYPE_FILTER_OUT); |
1526 | } | ||
1527 | 7710 | break; | |
1528 | } | ||
1529 | 692 | case SCH_NODE_TYPE_DEMUX: | |
1530 | 692 | ms->src_sched = ms->src; | |
1531 | 692 | break; | |
1532 | ✗ | default: | |
1533 | ✗ | av_log(mux, AV_LOG_ERROR, | |
1534 | "Muxer stream #%u not connected to a source\n", j); | ||
1535 | ✗ | return AVERROR(EINVAL); | |
1536 | } | ||
1537 | } | ||
1538 | |||
1539 | 7926 | ret = queue_alloc(&mux->queue, mux->nb_streams, mux->queue_size, | |
1540 | QUEUE_PACKETS); | ||
1541 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 7926 times.
|
7926 | if (ret < 0) |
1542 | ✗ | return ret; | |
1543 | } | ||
1544 | |||
1545 |
2/2✓ Branch 0 taken 7542 times.
✓ Branch 1 taken 7924 times.
|
15466 | for (unsigned i = 0; i < sch->nb_filters; i++) { |
1546 | 7542 | SchFilterGraph *fg = &sch->filters[i]; | |
1547 | |||
1548 |
2/2✓ Branch 0 taken 6787 times.
✓ Branch 1 taken 7542 times.
|
14329 | for (unsigned j = 0; j < fg->nb_inputs; j++) { |
1549 | 6787 | SchFilterIn *fi = &fg->inputs[j]; | |
1550 | SchDec *dec; | ||
1551 | |||
1552 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 6787 times.
|
6787 | if (!fi->src.type) { |
1553 | ✗ | av_log(fg, AV_LOG_ERROR, | |
1554 | "Filtergraph input %u not connected to a source\n", j); | ||
1555 | ✗ | return AVERROR(EINVAL); | |
1556 | } | ||
1557 | |||
1558 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 6787 times.
|
6787 | if (fi->src.type == SCH_NODE_TYPE_FILTER_OUT) |
1559 | ✗ | fi->src_sched = fi->src; | |
1560 | else { | ||
1561 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 6787 times.
|
6787 | av_assert0(fi->src.type == SCH_NODE_TYPE_DEC); |
1562 | 6787 | dec = &sch->dec[fi->src.idx]; | |
1563 | |||
1564 |
2/3✓ Branch 0 taken 6786 times.
✓ Branch 1 taken 1 times.
✗ Branch 2 not taken.
|
6787 | switch (dec->src.type) { |
1565 | 6786 | case SCH_NODE_TYPE_DEMUX: fi->src_sched = dec->src; break; | |
1566 | 1 | case SCH_NODE_TYPE_ENC: fi->src_sched = sch->enc[dec->src.idx].src; break; | |
1567 | ✗ | default: av_assert0(0); | |
1568 | } | ||
1569 | } | ||
1570 | } | ||
1571 | |||
1572 |
2/2✓ Branch 0 taken 7672 times.
✓ Branch 1 taken 7542 times.
|
15214 | for (unsigned j = 0; j < fg->nb_outputs; j++) { |
1573 | 7672 | SchFilterOut *fo = &fg->outputs[j]; | |
1574 | |||
1575 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 7672 times.
|
7672 | if (!fo->dst.type) { |
1576 | ✗ | av_log(fg, AV_LOG_ERROR, | |
1577 | "Filtergraph %u output %u not connected to a sink\n", i, j); | ||
1578 | ✗ | return AVERROR(EINVAL); | |
1579 | } | ||
1580 | } | ||
1581 | } | ||
1582 | |||
1583 | // Check that the transcoding graph has no cycles. | ||
1584 | 7924 | ret = check_acyclic(sch); | |
1585 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 7924 times.
|
7924 | if (ret < 0) |
1586 | ✗ | return ret; | |
1587 | |||
1588 | 7924 | return 0; | |
1589 | } | ||
1590 | |||
1591 | 7924 | int sch_start(Scheduler *sch) | |
1592 | { | ||
1593 | int ret; | ||
1594 | |||
1595 | 7924 | ret = start_prepare(sch); | |
1596 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 7924 times.
|
7924 | if (ret < 0) |
1597 | ✗ | return ret; | |
1598 | |||
1599 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 7924 times.
|
7924 | av_assert0(sch->state == SCH_STATE_UNINIT); |
1600 | 7924 | sch->state = SCH_STATE_STARTED; | |
1601 | |||
1602 |
2/2✓ Branch 0 taken 7926 times.
✓ Branch 1 taken 7924 times.
|
15850 | for (unsigned i = 0; i < sch->nb_mux; i++) { |
1603 | 7926 | SchMux *mux = &sch->mux[i]; | |
1604 | |||
1605 |
2/2✓ Branch 0 taken 459 times.
✓ Branch 1 taken 7467 times.
|
7926 | if (mux->nb_streams_ready == mux->nb_streams) { |
1606 | 459 | ret = mux_init(sch, mux); | |
1607 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 459 times.
|
459 | if (ret < 0) |
1608 | ✗ | goto fail; | |
1609 | } | ||
1610 | } | ||
1611 | |||
1612 |
2/2✓ Branch 0 taken 7710 times.
✓ Branch 1 taken 7924 times.
|
15634 | for (unsigned i = 0; i < sch->nb_enc; i++) { |
1613 | 7710 | SchEnc *enc = &sch->enc[i]; | |
1614 | |||
1615 | 7710 | ret = task_start(&enc->task); | |
1616 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 7710 times.
|
7710 | if (ret < 0) |
1617 | ✗ | goto fail; | |
1618 | } | ||
1619 | |||
1620 |
2/2✓ Branch 0 taken 7542 times.
✓ Branch 1 taken 7924 times.
|
15466 | for (unsigned i = 0; i < sch->nb_filters; i++) { |
1621 | 7542 | SchFilterGraph *fg = &sch->filters[i]; | |
1622 | |||
1623 | 7542 | ret = task_start(&fg->task); | |
1624 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 7542 times.
|
7542 | if (ret < 0) |
1625 | ✗ | goto fail; | |
1626 | } | ||
1627 | |||
1628 |
2/2✓ Branch 0 taken 6758 times.
✓ Branch 1 taken 7924 times.
|
14682 | for (unsigned i = 0; i < sch->nb_dec; i++) { |
1629 | 6758 | SchDec *dec = &sch->dec[i]; | |
1630 | |||
1631 | 6758 | ret = task_start(&dec->task); | |
1632 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 6758 times.
|
6758 | if (ret < 0) |
1633 | ✗ | goto fail; | |
1634 | } | ||
1635 | |||
1636 |
2/2✓ Branch 0 taken 7173 times.
✓ Branch 1 taken 7924 times.
|
15097 | for (unsigned i = 0; i < sch->nb_demux; i++) { |
1637 | 7173 | SchDemux *d = &sch->demux[i]; | |
1638 | |||
1639 |
2/2✓ Branch 0 taken 14 times.
✓ Branch 1 taken 7159 times.
|
7173 | if (!d->nb_streams) |
1640 | 14 | continue; | |
1641 | |||
1642 | 7159 | ret = task_start(&d->task); | |
1643 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 7159 times.
|
7159 | if (ret < 0) |
1644 | ✗ | goto fail; | |
1645 | } | ||
1646 | |||
1647 | 7924 | pthread_mutex_lock(&sch->schedule_lock); | |
1648 | 7924 | schedule_update_locked(sch); | |
1649 | 7924 | pthread_mutex_unlock(&sch->schedule_lock); | |
1650 | |||
1651 | 7924 | return 0; | |
1652 | ✗ | fail: | |
1653 | ✗ | sch_stop(sch, NULL); | |
1654 | ✗ | return ret; | |
1655 | } | ||
1656 | |||
1657 | 24323 | int sch_wait(Scheduler *sch, uint64_t timeout_us, int64_t *transcode_ts) | |
1658 | { | ||
1659 | int ret; | ||
1660 | |||
1661 | // convert delay to absolute timestamp | ||
1662 | 24323 | timeout_us += av_gettime(); | |
1663 | |||
1664 | 24323 | pthread_mutex_lock(&sch->finish_lock); | |
1665 | |||
1666 |
1/2✓ Branch 0 taken 24323 times.
✗ Branch 1 not taken.
|
24323 | if (sch->nb_mux_done < sch->nb_mux) { |
1667 | 24323 | struct timespec tv = { .tv_sec = timeout_us / 1000000, | |
1668 | 24323 | .tv_nsec = (timeout_us % 1000000) * 1000 }; | |
1669 | 24323 | pthread_cond_timedwait(&sch->finish_cond, &sch->finish_lock, &tv); | |
1670 | } | ||
1671 | |||
1672 | // abort transcoding if any task failed | ||
1673 |
4/4✓ Branch 0 taken 16400 times.
✓ Branch 1 taken 7923 times.
✓ Branch 2 taken 1 times.
✓ Branch 3 taken 16399 times.
|
24323 | ret = sch->nb_mux_done == sch->nb_mux || sch->task_failed; |
1674 | |||
1675 | 24323 | pthread_mutex_unlock(&sch->finish_lock); | |
1676 | |||
1677 | 24323 | *transcode_ts = atomic_load(&sch->last_dts); | |
1678 | |||
1679 | 24323 | return ret; | |
1680 | } | ||
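sch_wait() turns the caller's relative timeout into an absolute deadline because pthread_cond_timedwait() takes an absolute struct timespec. A minimal sketch of that conversion using plain POSIX gettimeofday() in place of av_gettime() (both report wall-clock time in microseconds, matching the default CLOCK_REALTIME of condition variables):

```c
#include <pthread.h>
#include <stdint.h>
#include <sys/time.h>

// Wait on 'cond' (with 'lock' held) for at most timeout_us microseconds.
// Returns 0 if signalled, ETIMEDOUT if the deadline passed.
static int wait_with_timeout(pthread_cond_t *cond, pthread_mutex_t *lock,
                             uint64_t timeout_us)
{
    struct timeval now;
    gettimeofday(&now, NULL);

    // absolute deadline = current wall-clock time + relative timeout
    uint64_t deadline_us = (uint64_t)now.tv_sec * 1000000 + now.tv_usec + timeout_us;
    struct timespec ts = {
        .tv_sec  = deadline_us / 1000000,
        .tv_nsec = (deadline_us % 1000000) * 1000,
    };

    return pthread_cond_timedwait(cond, lock, &ts);
}
```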
1681 | |||
1682 | 7672 | static int enc_open(Scheduler *sch, SchEnc *enc, const AVFrame *frame) | |
1683 | { | ||
1684 | int ret; | ||
1685 | |||
1686 | 7672 | ret = enc->open_cb(enc->task.func_arg, frame); | |
1687 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 7672 times.
|
7672 | if (ret < 0) |
1688 | ✗ | return ret; | |
1689 | |||
1690 | // ret>0 signals audio frame size, which means sync queue must | ||
1691 | // have been enabled during encoder creation | ||
1692 |
2/2✓ Branch 0 taken 173 times.
✓ Branch 1 taken 7499 times.
|
7672 | if (ret > 0) { |
1693 | SchSyncQueue *sq; | ||
1694 | |||
1695 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 173 times.
|
173 | av_assert0(enc->sq_idx[0] >= 0); |
1696 | 173 | sq = &sch->sq_enc[enc->sq_idx[0]]; | |
1697 | |||
1698 | 173 | pthread_mutex_lock(&sq->lock); | |
1699 | |||
1700 | 173 | sq_frame_samples(sq->sq, enc->sq_idx[1], ret); | |
1701 | |||
1702 | 173 | pthread_mutex_unlock(&sq->lock); | |
1703 | } | ||
1704 | |||
1705 | 7672 | return 0; | |
1706 | } | ||
1707 | |||
1708 | 451943 | static int send_to_enc_thread(Scheduler *sch, SchEnc *enc, AVFrame *frame) | |
1709 | { | ||
1710 | int ret; | ||
1711 | |||
1712 |
2/2✓ Branch 0 taken 15645 times.
✓ Branch 1 taken 436298 times.
|
451943 | if (!frame) { |
1713 | 15645 | tq_send_finish(enc->queue, 0); | |
1714 | 15645 | return 0; | |
1715 | } | ||
1716 | |||
1717 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 436298 times.
|
436298 | if (enc->in_finished) |
1718 | ✗ | return AVERROR_EOF; | |
1719 | |||
1720 | 436298 | ret = tq_send(enc->queue, 0, frame); | |
1721 |
2/2✓ Branch 0 taken 1 times.
✓ Branch 1 taken 436297 times.
|
436298 | if (ret < 0) |
1722 | 1 | enc->in_finished = 1; | |
1723 | |||
1724 | 436298 | return ret; | |
1725 | } | ||
1726 | |||
1727 | 35635 | static int send_to_enc_sq(Scheduler *sch, SchEnc *enc, AVFrame *frame) | |
1728 | { | ||
1729 | 35635 | SchSyncQueue *sq = &sch->sq_enc[enc->sq_idx[0]]; | |
1730 | 35635 | int ret = 0; | |
1731 | |||
1732 | // inform the scheduling code that no more input will arrive along this path; | ||
1733 | // this is necessary because the sync queue may not send an EOF downstream | ||
1734 | // until other streams finish | ||
1735 | // TODO: consider a cleaner way of passing this information through | ||
1736 | // the pipeline | ||
1737 |
2/2✓ Branch 0 taken 3319 times.
✓ Branch 1 taken 32316 times.
|
35635 | if (!frame) { |
1738 |
2/2✓ Branch 0 taken 3319 times.
✓ Branch 1 taken 3319 times.
|
6638 | for (unsigned i = 0; i < enc->nb_dst; i++) { |
1739 | SchMux *mux; | ||
1740 | SchMuxStream *ms; | ||
1741 | |||
1742 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 3319 times.
|
3319 | if (enc->dst[i].type != SCH_NODE_TYPE_MUX) |
1743 | ✗ | continue; | |
1744 | |||
1745 | 3319 | mux = &sch->mux[enc->dst[i].idx]; | |
1746 | 3319 | ms = &mux->streams[enc->dst[i].idx_stream]; | |
1747 | |||
1748 | 3319 | pthread_mutex_lock(&sch->schedule_lock); | |
1749 | |||
1750 | 3319 | ms->source_finished = 1; | |
1751 | 3319 | schedule_update_locked(sch); | |
1752 | |||
1753 | 3319 | pthread_mutex_unlock(&sch->schedule_lock); | |
1754 | } | ||
1755 | } | ||
1756 | |||
1757 | 35635 | pthread_mutex_lock(&sq->lock); | |
1758 | |||
1759 | 35635 | ret = sq_send(sq->sq, enc->sq_idx[1], SQFRAME(frame)); | |
1760 |
2/2✓ Branch 0 taken 35633 times.
✓ Branch 1 taken 2 times.
|
35635 | if (ret < 0) |
1761 | 2 | goto finish; | |
1762 | |||
1763 | 83154 | while (1) { | |
1764 | SchEnc *enc; | ||
1765 | |||
1766 | // TODO: the SQ API should be extended to allow returning EOF | ||
1767 | // for individual streams | ||
1768 | 118787 | ret = sq_receive(sq->sq, -1, SQFRAME(sq->frame)); | |
1769 |
2/2✓ Branch 0 taken 35633 times.
✓ Branch 1 taken 83154 times.
|
118787 | if (ret < 0) { |
1770 |
2/2✓ Branch 0 taken 6175 times.
✓ Branch 1 taken 29458 times.
|
35633 | ret = (ret == AVERROR(EAGAIN)) ? 0 : ret; |
1771 | 35633 | break; | |
1772 | } | ||
1773 | |||
1774 | 83154 | enc = &sch->enc[sq->enc_idx[ret]]; | |
1775 | 83154 | ret = send_to_enc_thread(sch, enc, sq->frame); | |
1776 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 83154 times.
|
83154 | if (ret < 0) { |
1777 | ✗ | av_frame_unref(sq->frame); | |
1778 | ✗ | if (ret != AVERROR_EOF) | |
1779 | ✗ | break; | |
1780 | |||
1781 | ✗ | sq_send(sq->sq, enc->sq_idx[1], SQFRAME(NULL)); | |
1782 | ✗ | continue; | |
1783 | } | ||
1784 | } | ||
1785 | |||
1786 |
2/2✓ Branch 0 taken 29458 times.
✓ Branch 1 taken 6175 times.
|
35633 | if (ret < 0) { |
1787 | // close all encoders fed from this sync queue | ||
1788 |
2/2✓ Branch 0 taken 6526 times.
✓ Branch 1 taken 6175 times.
|
12701 | for (unsigned i = 0; i < sq->nb_enc_idx; i++) { |
1789 | 6526 | int err = send_to_enc_thread(sch, &sch->enc[sq->enc_idx[i]], NULL); | |
1790 | |||
1791 | // if the sync queue error is EOF and closing the encoder | ||
1792 | // produces a more serious error, make sure to pick the latter | ||
1793 |
2/4✓ Branch 0 taken 6526 times.
✗ Branch 1 not taken.
✓ Branch 2 taken 6526 times.
✗ Branch 3 not taken.
|
6526 | ret = err_merge((ret == AVERROR_EOF && err < 0) ? 0 : ret, err); |
1794 | } | ||
1795 | } | ||
1796 | |||
1797 | 35633 | finish: | |
1798 | 35635 | pthread_mutex_unlock(&sq->lock); | |
1799 | |||
1800 | 35635 | return ret; | |
1801 | } | ||
1802 | |||
1803 | 397901 | static int send_to_enc(Scheduler *sch, SchEnc *enc, AVFrame *frame) | |
1804 | { | ||
1805 |
6/6✓ Branch 0 taken 396984 times.
✓ Branch 1 taken 917 times.
✓ Branch 2 taken 384584 times.
✓ Branch 3 taken 12400 times.
✓ Branch 4 taken 7672 times.
✓ Branch 5 taken 376912 times.
|
397901 | if (enc->open_cb && frame && !enc->opened) { |
1806 | 7672 | int ret = enc_open(sch, enc, frame); | |
1807 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 7672 times.
|
7672 | if (ret < 0) |
1808 | ✗ | return ret; | |
1809 | 7672 | enc->opened = 1; | |
1810 | |||
1811 | // discard empty frames that only carry encoder init parameters | ||
1812 |
2/2✓ Branch 0 taken 3 times.
✓ Branch 1 taken 7669 times.
|
7672 | if (!frame->buf[0]) { |
1813 | 3 | av_frame_unref(frame); | |
1814 | 3 | return 0; | |
1815 | } | ||
1816 | } | ||
1817 | |||
1818 | 397898 | return (enc->sq_idx[0] >= 0) ? | |
1819 |
2/2✓ Branch 0 taken 35635 times.
✓ Branch 1 taken 362263 times.
|
760161 | send_to_enc_sq (sch, enc, frame) : |
1820 | 362263 | send_to_enc_thread(sch, enc, frame); | |
1821 | } | ||
1822 | |||
1823 | 3396 | static int mux_queue_packet(SchMux *mux, SchMuxStream *ms, AVPacket *pkt) | |
1824 | { | ||
1825 | 3396 | PreMuxQueue *q = &ms->pre_mux_queue; | |
1826 | 3396 | AVPacket *tmp_pkt = NULL; | |
1827 | int ret; | ||
1828 | |||
1829 |
2/2✓ Branch 1 taken 153 times.
✓ Branch 2 taken 3243 times.
|
3396 | if (!av_fifo_can_write(q->fifo)) { |
1830 | 153 | size_t packets = av_fifo_can_read(q->fifo); | |
1831 |
1/2✓ Branch 0 taken 153 times.
✗ Branch 1 not taken.
|
153 | size_t pkt_size = pkt ? pkt->size : 0; |
1832 | 153 | int thresh_reached = (q->data_size + pkt_size) > q->data_threshold; | |
1833 |
2/2✓ Branch 0 taken 148 times.
✓ Branch 1 taken 5 times.
|
153 | size_t max_packets = thresh_reached ? q->max_packets : SIZE_MAX; |
1834 | 153 | size_t new_size = FFMIN(2 * packets, max_packets); | |
1835 | |||
1836 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 153 times.
|
153 | if (new_size <= packets) { |
1837 | ✗ | av_log(mux, AV_LOG_ERROR, | |
1838 | "Too many packets buffered for output stream.\n"); | ||
1839 | ✗ | return AVERROR_BUFFER_TOO_SMALL; | |
1840 | } | ||
1841 | 153 | ret = av_fifo_grow2(q->fifo, new_size - packets); | |
1842 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 153 times.
|
153 | if (ret < 0) |
1843 | ✗ | return ret; | |
1844 | } | ||
1845 | |||
1846 |
2/2✓ Branch 0 taken 3318 times.
✓ Branch 1 taken 78 times.
|
3396 | if (pkt) { |
1847 | 3318 | tmp_pkt = av_packet_alloc(); | |
1848 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 3318 times.
|
3318 | if (!tmp_pkt) |
1849 | ✗ | return AVERROR(ENOMEM); | |
1850 | |||
1851 | 3318 | av_packet_move_ref(tmp_pkt, pkt); | |
1852 | 3318 | q->data_size += tmp_pkt->size; | |
1853 | } | ||
1854 | 3396 | av_fifo_write(q->fifo, &tmp_pkt, 1); | |
1855 | |||
1856 | 3396 | return 0; | |
1857 | } | ||
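The FIFO growth above doubles the packet capacity while the buffered data is small, but caps the capacity at max_packets once the data size crosses the threshold; if the cap prevents any growth, the caller errors out. A small sketch of just that sizing decision (premux_new_capacity() is a hypothetical helper, not part of the scheduler):

```c
#include <stddef.h>
#include <stdint.h>

// Returns the new capacity, or 0 if growing any further is not allowed.
static size_t premux_new_capacity(size_t cur_packets, size_t data_size,
                                  size_t incoming_size, size_t data_threshold,
                                  size_t max_packets)
{
    int    thresh_reached = (data_size + incoming_size) > data_threshold;
    size_t limit          = thresh_reached ? max_packets : SIZE_MAX;
    size_t new_size       = 2 * cur_packets < limit ? 2 * cur_packets : limit;

    // refuse to "grow" to the same or a smaller size; the caller treats this
    // as "too many packets buffered" and aborts
    return new_size > cur_packets ? new_size : 0;
}
```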
1858 | |||
1859 | 507307 | static int send_to_mux(Scheduler *sch, SchMux *mux, unsigned stream_idx, | |
1860 | AVPacket *pkt) | ||
1861 | { | ||
1862 | 507307 | SchMuxStream *ms = &mux->streams[stream_idx]; | |
1863 |
2/2✓ Branch 0 taken 490866 times.
✓ Branch 1 taken 8039 times.
|
498905 | int64_t dts = (pkt && pkt->dts != AV_NOPTS_VALUE) ? |
1864 |
2/2✓ Branch 0 taken 498905 times.
✓ Branch 1 taken 8402 times.
|
1006212 | av_rescale_q(pkt->dts + pkt->duration, pkt->time_base, AV_TIME_BASE_Q) : |
1865 | AV_NOPTS_VALUE; | ||
1866 | |||
1867 | // queue the packet if the muxer cannot be started yet | ||
1868 |
2/2✓ Branch 0 taken 3453 times.
✓ Branch 1 taken 503854 times.
|
507307 | if (!atomic_load(&mux->mux_started)) { |
1869 | 3453 | int queued = 0; | |
1870 | |||
1871 | // the muxer could have started between the above atomic check and | ||
1872 | // locking the mutex, in which case this block falls through to the normal send path | ||
1873 | 3453 | pthread_mutex_lock(&sch->mux_ready_lock); | |
1874 | |||
1875 |
2/2✓ Branch 0 taken 3396 times.
✓ Branch 1 taken 57 times.
|
3453 | if (!atomic_load(&mux->mux_started)) { |
1876 | 3396 | int ret = mux_queue_packet(mux, ms, pkt); | |
1877 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 3396 times.
|
3396 | queued = ret < 0 ? ret : 1; |
1878 | } | ||
1879 | |||
1880 | 3453 | pthread_mutex_unlock(&sch->mux_ready_lock); | |
1881 | |||
1882 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 3453 times.
|
3453 | if (queued < 0) |
1883 | ✗ | return queued; | |
1884 |
2/2✓ Branch 0 taken 3396 times.
✓ Branch 1 taken 57 times.
|
3453 | else if (queued) |
1885 | 3396 | goto update_schedule; | |
1886 | } | ||
1887 | |||
1888 |
2/2✓ Branch 0 taken 495587 times.
✓ Branch 1 taken 8324 times.
|
503911 | if (pkt) { |
1889 | int ret; | ||
1890 | |||
1891 |
2/2✓ Branch 0 taken 1 times.
✓ Branch 1 taken 495586 times.
|
495587 | if (ms->init_eof) |
1892 | 1 | return AVERROR_EOF; | |
1893 | |||
1894 | 495586 | ret = tq_send(mux->queue, stream_idx, pkt); | |
1895 |
2/2✓ Branch 0 taken 64 times.
✓ Branch 1 taken 495522 times.
|
495586 | if (ret < 0) |
1896 | 64 | return ret; | |
1897 | } else | ||
1898 | 8324 | tq_send_finish(mux->queue, stream_idx); | |
1899 | |||
1900 | 507242 | update_schedule: | |
1901 | // TODO: use atomics to check whether this changes trailing dts | ||
1902 | // to avoid locking unnecessarily | ||
1903 |
4/4✓ Branch 0 taken 16441 times.
✓ Branch 1 taken 490801 times.
✓ Branch 2 taken 8402 times.
✓ Branch 3 taken 8039 times.
|
507242 | if (dts != AV_NOPTS_VALUE || !pkt) { |
1904 | 499203 | pthread_mutex_lock(&sch->schedule_lock); | |
1905 | |||
1906 |
2/2✓ Branch 0 taken 490801 times.
✓ Branch 1 taken 8402 times.
|
499203 | if (pkt) ms->last_dts = dts; |
1907 | 8402 | else ms->source_finished = 1; | |
1908 | |||
1909 | 499203 | schedule_update_locked(sch); | |
1910 | |||
1911 | 499203 | pthread_mutex_unlock(&sch->schedule_lock); | |
1912 | } | ||
1913 | |||
1914 | 507242 | return 0; | |
1915 | } | ||
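send_to_mux() uses a double-checked pattern: the started flag is read atomically without a lock on the hot path, and only if it still reads false is the mutex taken and the flag re-checked before queuing. A standalone sketch of that pattern (Sink and its fields are illustrative names, not scheduler API):

```c
#include <pthread.h>
#include <stdatomic.h>

typedef struct Sink {
    atomic_int      started;
    pthread_mutex_t lock;
    int             queued_items;   // stand-in for the pre-start packet queue
} Sink;

static int sink_send(Sink *s, int item)
{
    // fast path: once the sink is running, no locking is needed here
    if (!atomic_load(&s->started)) {
        int queued = 0;

        pthread_mutex_lock(&s->lock);
        // the sink may have started between the check above and taking the lock
        if (!atomic_load(&s->started)) {
            s->queued_items++;      // buffer the item until the sink starts
            queued = 1;
        }
        pthread_mutex_unlock(&s->lock);

        if (queued)
            return 0;
    }

    // normal path: the sink is running, deliver the item directly
    (void)item;
    return 0;
}
```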
1916 | |||
1917 | static int | ||
1918 | 479473 | demux_stream_send_to_dst(Scheduler *sch, const SchedulerNode dst, | |
1919 | uint8_t *dst_finished, AVPacket *pkt, unsigned flags) | ||
1920 | { | ||
1921 | int ret; | ||
1922 | |||
1923 |
2/2✓ Branch 0 taken 3442 times.
✓ Branch 1 taken 476031 times.
|
479473 | if (*dst_finished) |
1924 | 3442 | return AVERROR_EOF; | |
1925 | |||
1926 |
4/4✓ Branch 0 taken 472024 times.
✓ Branch 1 taken 4007 times.
✓ Branch 2 taken 68771 times.
✓ Branch 3 taken 403253 times.
|
476031 | if (pkt && dst.type == SCH_NODE_TYPE_MUX && |
1927 |
2/2✓ Branch 0 taken 2 times.
✓ Branch 1 taken 68769 times.
|
68771 | (flags & DEMUX_SEND_STREAMCOPY_EOF)) { |
1928 | 2 | av_packet_unref(pkt); | |
1929 | 2 | pkt = NULL; | |
1930 | } | ||
1931 | |||
1932 |
2/2✓ Branch 0 taken 4009 times.
✓ Branch 1 taken 472022 times.
|
476031 | if (!pkt) |
1933 | 4009 | goto finish; | |
1934 | |||
1935 | 944044 | ret = (dst.type == SCH_NODE_TYPE_MUX) ? | |
1936 |
2/2✓ Branch 0 taken 68769 times.
✓ Branch 1 taken 403253 times.
|
472022 | send_to_mux(sch, &sch->mux[dst.idx], dst.idx_stream, pkt) : |
1937 | 403253 | tq_send(sch->dec[dst.idx].queue, 0, pkt); | |
1938 |
2/2✓ Branch 0 taken 3440 times.
✓ Branch 1 taken 468582 times.
|
472022 | if (ret == AVERROR_EOF) |
1939 | 3440 | goto finish; | |
1940 | |||
1941 | 468582 | return ret; | |
1942 | |||
1943 | 7449 | finish: | |
1944 |
2/2✓ Branch 0 taken 692 times.
✓ Branch 1 taken 6757 times.
|
7449 | if (dst.type == SCH_NODE_TYPE_MUX) |
1945 | 692 | send_to_mux(sch, &sch->mux[dst.idx], dst.idx_stream, NULL); | |
1946 | else | ||
1947 | 6757 | tq_send_finish(sch->dec[dst.idx].queue, 0); | |
1948 | |||
1949 | 7449 | *dst_finished = 1; | |
1950 | 7449 | return AVERROR_EOF; | |
1951 | } | ||
1952 | |||
1953 | 479010 | static int demux_send_for_stream(Scheduler *sch, SchDemux *d, SchDemuxStream *ds, | |
1954 | AVPacket *pkt, unsigned flags) | ||
1955 | { | ||
1956 | 479010 | unsigned nb_done = 0; | |
1957 | |||
1958 |
2/2✓ Branch 0 taken 479473 times.
✓ Branch 1 taken 479010 times.
|
958483 | for (unsigned i = 0; i < ds->nb_dst; i++) { |
1959 | 479473 | AVPacket *to_send = pkt; | |
1960 | 479473 | uint8_t *finished = &ds->dst_finished[i]; | |
1961 | |||
1962 | int ret; | ||
1963 | |||
1964 | // sending a packet consumes it, so make a temporary reference if needed | ||
1965 |
4/4✓ Branch 0 taken 472024 times.
✓ Branch 1 taken 7449 times.
✓ Branch 2 taken 440 times.
✓ Branch 3 taken 471584 times.
|
479473 | if (pkt && i < ds->nb_dst - 1) { |
1966 | 440 | to_send = d->send_pkt; | |
1967 | |||
1968 | 440 | ret = av_packet_ref(to_send, pkt); | |
1969 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 440 times.
|
440 | if (ret < 0) |
1970 | ✗ | return ret; | |
1971 | } | ||
1972 | |||
1973 | 479473 | ret = demux_stream_send_to_dst(sch, ds->dst[i], finished, to_send, flags); | |
1974 |
2/2✓ Branch 0 taken 472024 times.
✓ Branch 1 taken 7449 times.
|
479473 | if (to_send) |
1975 | 472024 | av_packet_unref(to_send); | |
1976 |
2/2✓ Branch 0 taken 10891 times.
✓ Branch 1 taken 468582 times.
|
479473 | if (ret == AVERROR_EOF) |
1977 | 10891 | nb_done++; | |
1978 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 468582 times.
|
468582 | else if (ret < 0) |
1979 | ✗ | return ret; | |
1980 | } | ||
1981 | |||
1982 |
2/2✓ Branch 0 taken 10867 times.
✓ Branch 1 taken 468143 times.
|
479010 | return (nb_done == ds->nb_dst) ? AVERROR_EOF : 0; |
1983 | } | ||
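Because delivering a packet consumes it, demux_send_for_stream() hands the caller's packet to the last destination only and gives every earlier destination a temporary reference. A reduced sketch of that fan-out (consume_packet() is a hypothetical sink that takes the packet's contents; av_packet_ref()/av_packet_unref() are real libavcodec calls):

```c
#include "libavcodec/packet.h"

// Send one packet to nb_dst sinks; 'tmp' is a preallocated scratch packet.
static int fan_out(AVPacket *pkt, AVPacket *tmp, unsigned nb_dst,
                   int (*consume_packet)(AVPacket *pkt, unsigned dst))
{
    for (unsigned i = 0; i < nb_dst; i++) {
        AVPacket *to_send = pkt;
        int ret;

        // every destination except the last one gets its own reference,
        // so the original packet stays valid for the next iteration
        if (i < nb_dst - 1) {
            ret = av_packet_ref(tmp, pkt);
            if (ret < 0)
                return ret;
            to_send = tmp;
        }

        ret = consume_packet(to_send, i);
        // whether the sink took the data or failed, drop our local reference
        // before reusing 'tmp' or returning
        av_packet_unref(to_send);
        if (ret < 0)
            return ret;
    }
    return 0;
}
```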
1984 | |||
1985 | 11 | static int demux_flush(Scheduler *sch, SchDemux *d, AVPacket *pkt) | |
1986 | { | ||
1987 | 11 | Timestamp max_end_ts = (Timestamp){ .ts = AV_NOPTS_VALUE }; | |
1988 | |||
1989 |
3/6✓ Branch 0 taken 11 times.
✗ Branch 1 not taken.
✓ Branch 2 taken 11 times.
✗ Branch 3 not taken.
✗ Branch 4 not taken.
✓ Branch 5 taken 11 times.
|
11 | av_assert0(!pkt->buf && !pkt->data && !pkt->side_data_elems); |
1990 | |||
1991 |
2/2✓ Branch 0 taken 14 times.
✓ Branch 1 taken 11 times.
|
25 | for (unsigned i = 0; i < d->nb_streams; i++) { |
1992 | 14 | SchDemuxStream *ds = &d->streams[i]; | |
1993 | |||
1994 |
2/2✓ Branch 0 taken 14 times.
✓ Branch 1 taken 14 times.
|
28 | for (unsigned j = 0; j < ds->nb_dst; j++) { |
1995 | 14 | const SchedulerNode *dst = &ds->dst[j]; | |
1996 | SchDec *dec; | ||
1997 | int ret; | ||
1998 | |||
1999 |
3/4✓ Branch 0 taken 14 times.
✗ Branch 1 not taken.
✓ Branch 2 taken 8 times.
✓ Branch 3 taken 6 times.
|
14 | if (ds->dst_finished[j] || dst->type != SCH_NODE_TYPE_DEC) |
2000 | 8 | continue; | |
2001 | |||
2002 | 6 | dec = &sch->dec[dst->idx]; | |
2003 | |||
2004 | 6 | ret = tq_send(dec->queue, 0, pkt); | |
2005 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 6 times.
|
6 | if (ret < 0) |
2006 | ✗ | return ret; | |
2007 | |||
2008 |
2/2✓ Branch 0 taken 3 times.
✓ Branch 1 taken 3 times.
|
6 | if (dec->queue_end_ts) { |
2009 | Timestamp ts; | ||
2010 | 3 | ret = av_thread_message_queue_recv(dec->queue_end_ts, &ts, 0); | |
2011 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 3 times.
|
3 | if (ret < 0) |
2012 | ✗ | return ret; | |
2013 | |||
2014 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 3 times.
|
3 | if (max_end_ts.ts == AV_NOPTS_VALUE || |
2015 | ✗ | (ts.ts != AV_NOPTS_VALUE && | |
2016 | ✗ | av_compare_ts(max_end_ts.ts, max_end_ts.tb, ts.ts, ts.tb) < 0)) | |
2017 | 3 | max_end_ts = ts; | |
2018 | |||
2019 | } | ||
2020 | } | ||
2021 | } | ||
2022 | |||
2023 | 11 | pkt->pts = max_end_ts.ts; | |
2024 | 11 | pkt->time_base = max_end_ts.tb; | |
2025 | |||
2026 | 11 | return 0; | |
2027 | } | ||
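demux_flush() keeps the latest of several end timestamps even though each lives in its own time base, by comparing them with av_compare_ts(). A small sketch of that reduction (the Ts struct is illustrative; av_compare_ts() and AV_NOPTS_VALUE are real libavutil API):

```c
#include <stdint.h>

#include "libavutil/avutil.h"
#include "libavutil/mathematics.h"

typedef struct Ts {
    int64_t    ts;
    AVRational tb;
} Ts;

// Return the later of two timestamps, treating AV_NOPTS_VALUE as "unset".
static Ts ts_max(Ts a, Ts b)
{
    if (a.ts == AV_NOPTS_VALUE)
        return b;
    if (b.ts == AV_NOPTS_VALUE)
        return a;
    // av_compare_ts() < 0 means a is earlier than b
    return av_compare_ts(a.ts, a.tb, b.ts, b.tb) < 0 ? b : a;
}
```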
2028 | |||
2029 | 471636 | int sch_demux_send(Scheduler *sch, unsigned demux_idx, AVPacket *pkt, | |
2030 | unsigned flags) | ||
2031 | { | ||
2032 | SchDemux *d; | ||
2033 | int terminate; | ||
2034 | |||
2035 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 471636 times.
|
471636 | av_assert0(demux_idx < sch->nb_demux); |
2036 | 471636 | d = &sch->demux[demux_idx]; | |
2037 | |||
2038 | 471636 | terminate = waiter_wait(sch, &d->waiter); | |
2039 |
2/2✓ Branch 0 taken 41 times.
✓ Branch 1 taken 471595 times.
|
471636 | if (terminate) |
2040 | 41 | return AVERROR_EXIT; | |
2041 | |||
2042 | // flush the downstreams after seek | ||
2043 |
2/2✓ Branch 0 taken 11 times.
✓ Branch 1 taken 471584 times.
|
471595 | if (pkt->stream_index == -1) |
2044 | 11 | return demux_flush(sch, d, pkt); | |
2045 | |||
2046 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 471584 times.
|
471584 | av_assert0(pkt->stream_index < d->nb_streams); |
2047 | |||
2048 | 471584 | return demux_send_for_stream(sch, d, &d->streams[pkt->stream_index], pkt, flags); | |
2049 | } | ||
2050 | |||
2051 | 7173 | static int demux_done(Scheduler *sch, unsigned demux_idx) | |
2052 | { | ||
2053 | 7173 | SchDemux *d = &sch->demux[demux_idx]; | |
2054 | 7173 | int ret = 0; | |
2055 | |||
2056 |
2/2✓ Branch 0 taken 7426 times.
✓ Branch 1 taken 7173 times.
|
14599 | for (unsigned i = 0; i < d->nb_streams; i++) { |
2057 | 7426 | int err = demux_send_for_stream(sch, d, &d->streams[i], NULL, 0); | |
2058 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 7426 times.
|
7426 | if (err != AVERROR_EOF) |
2059 | ✗ | ret = err_merge(ret, err); | |
2060 | } | ||
2061 | |||
2062 | 7173 | pthread_mutex_lock(&sch->schedule_lock); | |
2063 | |||
2064 | 7173 | d->task_exited = 1; | |
2065 | |||
2066 | 7173 | schedule_update_locked(sch); | |
2067 | |||
2068 | 7173 | pthread_mutex_unlock(&sch->schedule_lock); | |
2069 | |||
2070 | 7173 | return ret; | |
2071 | } | ||
2072 | |||
2073 | 514883 | int sch_mux_receive(Scheduler *sch, unsigned mux_idx, AVPacket *pkt) | |
2074 | { | ||
2075 | SchMux *mux; | ||
2076 | int ret, stream_idx; | ||
2077 | |||
2078 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 514883 times.
|
514883 | av_assert0(mux_idx < sch->nb_mux); |
2079 | 514883 | mux = &sch->mux[mux_idx]; | |
2080 | |||
2081 | 514883 | ret = tq_receive(mux->queue, &stream_idx, pkt); | |
2082 | 514883 | pkt->stream_index = stream_idx; | |
2083 | 514883 | return ret; | |
2084 | } | ||
2085 | |||
2086 | 199 | void sch_mux_receive_finish(Scheduler *sch, unsigned mux_idx, unsigned stream_idx) | |
2087 | { | ||
2088 | SchMux *mux; | ||
2089 | |||
2090 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 199 times.
|
199 | av_assert0(mux_idx < sch->nb_mux); |
2091 | 199 | mux = &sch->mux[mux_idx]; | |
2092 | |||
2093 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 199 times.
|
199 | av_assert0(stream_idx < mux->nb_streams); |
2094 | 199 | tq_receive_finish(mux->queue, stream_idx); | |
2095 | |||
2096 | 199 | pthread_mutex_lock(&sch->schedule_lock); | |
2097 | 199 | mux->streams[stream_idx].source_finished = 1; | |
2098 | |||
2099 | 199 | schedule_update_locked(sch); | |
2100 | |||
2101 | 199 | pthread_mutex_unlock(&sch->schedule_lock); | |
2102 | 199 | } | |
2103 | |||
2104 | 461037 | int sch_mux_sub_heartbeat(Scheduler *sch, unsigned mux_idx, unsigned stream_idx, | |
2105 | const AVPacket *pkt) | ||
2106 | { | ||
2107 | SchMux *mux; | ||
2108 | SchMuxStream *ms; | ||
2109 | |||
2110 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 461037 times.
|
461037 | av_assert0(mux_idx < sch->nb_mux); |
2111 | 461037 | mux = &sch->mux[mux_idx]; | |
2112 | |||
2113 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 461037 times.
|
461037 | av_assert0(stream_idx < mux->nb_streams); |
2114 | 461037 | ms = &mux->streams[stream_idx]; | |
2115 | |||
2116 |
2/2✓ Branch 0 taken 5 times.
✓ Branch 1 taken 461037 times.
|
461042 | for (unsigned i = 0; i < ms->nb_sub_heartbeat_dst; i++) { |
2117 | 5 | SchDec *dst = &sch->dec[ms->sub_heartbeat_dst[i]]; | |
2118 | int ret; | ||
2119 | |||
2120 | 5 | ret = av_packet_copy_props(mux->sub_heartbeat_pkt, pkt); | |
2121 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 5 times.
|
5 | if (ret < 0) |
2122 | ✗ | return ret; | |
2123 | |||
2124 | 5 | tq_send(dst->queue, 0, mux->sub_heartbeat_pkt); | |
2125 | } | ||
2126 | |||
2127 | 461037 | return 0; | |
2128 | } | ||
2129 | |||
2130 | 7926 | static int mux_done(Scheduler *sch, unsigned mux_idx) | |
2131 | { | ||
2132 | 7926 | SchMux *mux = &sch->mux[mux_idx]; | |
2133 | |||
2134 | 7926 | pthread_mutex_lock(&sch->schedule_lock); | |
2135 | |||
2136 |
2/2✓ Branch 0 taken 8402 times.
✓ Branch 1 taken 7926 times.
|
16328 | for (unsigned i = 0; i < mux->nb_streams; i++) { |
2137 | 8402 | tq_receive_finish(mux->queue, i); | |
2138 | 8402 | mux->streams[i].source_finished = 1; | |
2139 | } | ||
2140 | |||
2141 | 7926 | schedule_update_locked(sch); | |
2142 | |||
2143 | 7926 | pthread_mutex_unlock(&sch->schedule_lock); | |
2144 | |||
2145 | 7926 | pthread_mutex_lock(&sch->finish_lock); | |
2146 | |||
2147 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 7926 times.
|
7926 | av_assert0(sch->nb_mux_done < sch->nb_mux); |
2148 | 7926 | sch->nb_mux_done++; | |
2149 | |||
2150 | 7926 | pthread_cond_signal(&sch->finish_cond); | |
2151 | |||
2152 | 7926 | pthread_mutex_unlock(&sch->finish_lock); | |
2153 | |||
2154 | 7926 | return 0; | |
2155 | } | ||
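mux_done() is the producer side of the timed wait in sch_wait(): it bumps the done counter under finish_lock and signals finish_cond so a blocked sch_wait() wakes up before its deadline. A minimal sketch of that signalling (struct and field names are illustrative):

```c
#include <pthread.h>

typedef struct Finish {
    pthread_mutex_t lock;
    pthread_cond_t  cond;
    unsigned        nb_done, nb_total;
} Finish;

static void mark_one_done(Finish *f)
{
    pthread_mutex_lock(&f->lock);

    f->nb_done++;                    // observed by the waiter under the same lock
    pthread_cond_signal(&f->cond);   // wake a thread blocked in timedwait

    pthread_mutex_unlock(&f->lock);
}
```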
2156 | |||
2157 | 376196 | int sch_dec_receive(Scheduler *sch, unsigned dec_idx, AVPacket *pkt) | |
2158 | { | ||
2159 | SchDec *dec; | ||
2160 | int ret, dummy; | ||
2161 | |||
2162 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 376196 times.
|
376196 | av_assert0(dec_idx < sch->nb_dec); |
2163 | 376196 | dec = &sch->dec[dec_idx]; | |
2164 | |||
2165 | // the decoder should have given us the post-flush end timestamp in pkt | ||
2166 |
2/2✓ Branch 0 taken 3 times.
✓ Branch 1 taken 376193 times.
|
376196 | if (dec->expect_end_ts) { |
2167 | 3 | Timestamp ts = (Timestamp){ .ts = pkt->pts, .tb = pkt->time_base }; | |
2168 | 3 | ret = av_thread_message_queue_send(dec->queue_end_ts, &ts, 0); | |
2169 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 3 times.
|
3 | if (ret < 0) |
2170 | ✗ | return ret; | |
2171 | |||
2172 | 3 | dec->expect_end_ts = 0; | |
2173 | } | ||
2174 | |||
2175 | 376196 | ret = tq_receive(dec->queue, &dummy, pkt); | |
2176 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 376196 times.
|
376196 | av_assert0(dummy <= 0); |
2177 | |||
2178 | // got a flush packet; on the next call to this function the decoder | ||
2179 | // will give us the post-flush end timestamp | ||
2180 |
7/8✓ Branch 0 taken 372868 times.
✓ Branch 1 taken 3328 times.
✓ Branch 2 taken 958 times.
✓ Branch 3 taken 371910 times.
✓ Branch 4 taken 958 times.
✗ Branch 5 not taken.
✓ Branch 6 taken 3 times.
✓ Branch 7 taken 955 times.
|
376196 | if (ret >= 0 && !pkt->data && !pkt->side_data_elems && dec->queue_end_ts) |
2181 | 3 | dec->expect_end_ts = 1; | |
2182 | |||
2183 | 376196 | return ret; | |
2184 | } | ||
2185 | |||
2186 | 404958 | static int send_to_filter(Scheduler *sch, SchFilterGraph *fg, | |
2187 | unsigned in_idx, AVFrame *frame) | ||
2188 | { | ||
2189 |
2/2✓ Branch 0 taken 398171 times.
✓ Branch 1 taken 6787 times.
|
404958 | if (frame) |
2190 | 398171 | return tq_send(fg->queue, in_idx, frame); | |
2191 | |||
2192 |
1/2✓ Branch 0 taken 6787 times.
✗ Branch 1 not taken.
|
6787 | if (!fg->inputs[in_idx].send_finished) { |
2193 | 6787 | fg->inputs[in_idx].send_finished = 1; | |
2194 | 6787 | tq_send_finish(fg->queue, in_idx); | |
2195 | |||
2196 | // close the control stream when all actual inputs are done | ||
2197 |
2/2✓ Branch 0 taken 6702 times.
✓ Branch 1 taken 85 times.
|
6787 | if (atomic_fetch_add(&fg->nb_inputs_finished_send, 1) == fg->nb_inputs - 1) |
2198 | 6702 | tq_send_finish(fg->queue, fg->nb_inputs); | |
2199 | } | ||
2200 | 6787 | return 0; | |
2201 | } | ||
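send_to_filter() closes the extra control stream exactly once, from whichever input finishes last: each input sets its own send_finished flag (only its own thread ever touches it), and atomic_fetch_add() lets exactly one caller observe the old counter value nb_inputs - 1. A reduced sketch of that pattern (names are illustrative, not scheduler API):

```c
#include <stdatomic.h>

static void input_finish(atomic_uint *nb_finished, unsigned nb_inputs,
                         int *this_input_done, int *control_closed)
{
    if (*this_input_done)      // this input already reported EOF earlier
        return;
    *this_input_done = 1;      // only ever written by this input's own thread

    // atomic_fetch_add() returns the value before the increment, so exactly
    // one of the nb_inputs callers sees nb_inputs - 1 here
    if (atomic_fetch_add(nb_finished, 1) == nb_inputs - 1)
        *control_closed = 1;   // stand-in for tq_send_finish() on the control stream
}
```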
2202 | |||
2203 | 409379 | static int dec_send_to_dst(Scheduler *sch, const SchedulerNode dst, | |
2204 | uint8_t *dst_finished, AVFrame *frame) | ||
2205 | { | ||
2206 | int ret; | ||
2207 | |||
2208 |
2/2✓ Branch 0 taken 6990 times.
✓ Branch 1 taken 402389 times.
|
409379 | if (*dst_finished) |
2209 | 6990 | return AVERROR_EOF; | |
2210 | |||
2211 |
2/2✓ Branch 0 taken 3339 times.
✓ Branch 1 taken 399050 times.
|
402389 | if (!frame) |
2212 | 3339 | goto finish; | |
2213 | |||
2214 | 798100 | ret = (dst.type == SCH_NODE_TYPE_FILTER_IN) ? | |
2215 |
2/2✓ Branch 0 taken 398171 times.
✓ Branch 1 taken 879 times.
|
399050 | send_to_filter(sch, &sch->filters[dst.idx], dst.idx_stream, frame) : |
2216 | 879 | send_to_enc(sch, &sch->enc[dst.idx], frame); | |
2217 |
2/2✓ Branch 0 taken 3486 times.
✓ Branch 1 taken 395564 times.
|
399050 | if (ret == AVERROR_EOF) |
2218 | 3486 | goto finish; | |
2219 | |||
2220 | 395564 | return ret; | |
2221 | |||
2222 | 6825 | finish: | |
2223 |
2/2✓ Branch 0 taken 6787 times.
✓ Branch 1 taken 38 times.
|
6825 | if (dst.type == SCH_NODE_TYPE_FILTER_IN) |
2224 | 6787 | send_to_filter(sch, &sch->filters[dst.idx], dst.idx_stream, NULL); | |
2225 | else | ||
2226 | 38 | send_to_enc(sch, &sch->enc[dst.idx], NULL); | |
2227 | |||
2228 | 6825 | *dst_finished = 1; | |
2229 | |||
2230 | 6825 | return AVERROR_EOF; | |
2231 | } | ||
2232 | |||
2233 | 400980 | int sch_dec_send(Scheduler *sch, unsigned dec_idx, | |
2234 | unsigned out_idx, AVFrame *frame) | ||
2235 | { | ||
2236 | SchDec *dec; | ||
2237 | SchDecOutput *o; | ||
2238 | int ret; | ||
2239 | 400980 | unsigned nb_done = 0; | |
2240 | |||
2241 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 400980 times.
|
400980 | av_assert0(dec_idx < sch->nb_dec); |
2242 | 400980 | dec = &sch->dec[dec_idx]; | |
2243 | |||
2244 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 400980 times.
|
400980 | av_assert0(out_idx < dec->nb_outputs); |
2245 | 400980 | o = &dec->outputs[out_idx]; | |
2246 | |||
2247 |
2/2✓ Branch 0 taken 402554 times.
✓ Branch 1 taken 400980 times.
|
803534 | for (unsigned i = 0; i < o->nb_dst; i++) { |
2248 | 402554 | uint8_t *finished = &o->dst_finished[i]; | |
2249 | 402554 | AVFrame *to_send = frame; | |
2250 | |||
2251 | // sending a frame consumes it, so make a temporary reference if needed | ||
2252 |
2/2✓ Branch 0 taken 1574 times.
✓ Branch 1 taken 400980 times.
|
402554 | if (i < o->nb_dst - 1) { |
2253 | 1574 | to_send = dec->send_frame; | |
2254 | |||
2255 | // frame may sometimes contain props only, | ||
2256 | // e.g. to signal EOF timestamp | ||
2257 |
2/2✓ Branch 0 taken 1467 times.
✓ Branch 1 taken 107 times.
|
1574 | ret = frame->buf[0] ? av_frame_ref(to_send, frame) : |
2258 | 107 | av_frame_copy_props(to_send, frame); | |
2259 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 1574 times.
|
1574 | if (ret < 0) |
2260 | ✗ | return ret; | |
2261 | } | ||
2262 | |||
2263 | 402554 | ret = dec_send_to_dst(sch, o->dst[i], finished, to_send); | |
2264 |
2/2✓ Branch 0 taken 6990 times.
✓ Branch 1 taken 395564 times.
|
402554 | if (ret < 0) { |
2265 | 6990 | av_frame_unref(to_send); | |
2266 |
1/2✓ Branch 0 taken 6990 times.
✗ Branch 1 not taken.
|
6990 | if (ret == AVERROR_EOF) { |
2267 | 6990 | nb_done++; | |
2268 | 6990 | continue; | |
2269 | } | ||
2270 | ✗ | return ret; | |
2271 | } | ||
2272 | } | ||
2273 | |||
2274 |
2/2✓ Branch 0 taken 6873 times.
✓ Branch 1 taken 394107 times.
|
400980 | return (nb_done == o->nb_dst) ? AVERROR_EOF : 0; |
2275 | } | ||
2276 | |||
2277 | 6758 | static int dec_done(Scheduler *sch, unsigned dec_idx) | |
2278 | { | ||
2279 | 6758 | SchDec *dec = &sch->dec[dec_idx]; | |
2280 | 6758 | int ret = 0; | |
2281 | |||
2282 | 6758 | tq_receive_finish(dec->queue, 0); | |
2283 | |||
2284 | // make sure our source does not get stuck waiting for end timestamps | ||
2285 | // that will never arrive | ||
2286 |
2/2✓ Branch 0 taken 1 times.
✓ Branch 1 taken 6757 times.
|
6758 | if (dec->queue_end_ts) |
2287 | 1 | av_thread_message_queue_set_err_recv(dec->queue_end_ts, AVERROR_EOF); | |
2288 | |||
2289 |
2/2✓ Branch 0 taken 6764 times.
✓ Branch 1 taken 6758 times.
|
13522 | for (unsigned i = 0; i < dec->nb_outputs; i++) { |
2290 | 6764 | SchDecOutput *o = &dec->outputs[i]; | |
2291 | |||
2292 |
2/2✓ Branch 0 taken 6825 times.
✓ Branch 1 taken 6764 times.
|
13589 | for (unsigned j = 0; j < o->nb_dst; j++) { |
2293 | 6825 | int err = dec_send_to_dst(sch, o->dst[j], &o->dst_finished[j], NULL); | |
2294 |
2/4✓ Branch 0 taken 6825 times.
✗ Branch 1 not taken.
✗ Branch 2 not taken.
✓ Branch 3 taken 6825 times.
|
6825 | if (err < 0 && err != AVERROR_EOF) |
2295 | ✗ | ret = err_merge(ret, err); | |
2296 | } | ||
2297 | } | ||
2298 | |||
2299 | 6758 | return ret; | |
2300 | } | ||
2301 | |||
2302 | 444004 | int sch_enc_receive(Scheduler *sch, unsigned enc_idx, AVFrame *frame) | |
2303 | { | ||
2304 | SchEnc *enc; | ||
2305 | int ret, dummy; | ||
2306 | |||
2307 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 444004 times.
|
444004 | av_assert0(enc_idx < sch->nb_enc); |
2308 | 444004 | enc = &sch->enc[enc_idx]; | |
2309 | |||
2310 | 444004 | ret = tq_receive(enc->queue, &dummy, frame); | |
2311 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 444004 times.
|
444004 | av_assert0(dummy <= 0); |
2312 | |||
2313 | 444004 | return ret; | |
2314 | } | ||
2315 | |||
2316 | 437939 | static int enc_send_to_dst(Scheduler *sch, const SchedulerNode dst, | |
2317 | uint8_t *dst_finished, AVPacket *pkt) | ||
2318 | { | ||
2319 | int ret; | ||
2320 | |||
2321 |
2/2✓ Branch 0 taken 44 times.
✓ Branch 1 taken 437895 times.
|
437939 | if (*dst_finished) |
2322 | 44 | return AVERROR_EOF; | |