Line | Branch | Exec | Source |
---|---|---|---|
1 | /* | ||
2 | * Inter-thread scheduling/synchronization. | ||
3 | * Copyright (c) 2023 Anton Khirnov | ||
4 | * | ||
5 | * This file is part of FFmpeg. | ||
6 | * | ||
7 | * FFmpeg is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU Lesser General Public | ||
9 | * License as published by the Free Software Foundation; either | ||
10 | * version 2.1 of the License, or (at your option) any later version. | ||
11 | * | ||
12 | * FFmpeg is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
15 | * Lesser General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU Lesser General Public | ||
18 | * License along with FFmpeg; if not, write to the Free Software | ||
19 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA | ||
20 | */ | ||
21 | |||
22 | #include <stdatomic.h> | ||
23 | #include <stddef.h> | ||
24 | #include <stdint.h> | ||
25 | |||
26 | #include "cmdutils.h" | ||
27 | #include "ffmpeg_sched.h" | ||
28 | #include "ffmpeg_utils.h" | ||
29 | #include "sync_queue.h" | ||
30 | #include "thread_queue.h" | ||
31 | |||
32 | #include "libavcodec/packet.h" | ||
33 | |||
34 | #include "libavutil/avassert.h" | ||
35 | #include "libavutil/error.h" | ||
36 | #include "libavutil/fifo.h" | ||
37 | #include "libavutil/frame.h" | ||
38 | #include "libavutil/mem.h" | ||
39 | #include "libavutil/thread.h" | ||
40 | #include "libavutil/threadmessage.h" | ||
41 | #include "libavutil/time.h" | ||
42 | |||
43 | // 100 ms | ||
44 | // FIXME: some other value? make this dynamic? | ||
45 | #define SCHEDULE_TOLERANCE (100 * 1000) | ||
46 | |||
47 | enum QueueType { | ||
48 | QUEUE_PACKETS, | ||
49 | QUEUE_FRAMES, | ||
50 | }; | ||
51 | |||
52 | typedef struct SchWaiter { | ||
53 | pthread_mutex_t lock; | ||
54 | pthread_cond_t cond; | ||
55 | atomic_int choked; | ||
56 | |||
57 | // the following are internal state of schedule_update_locked() and must not | ||
58 | // be accessed outside of it | ||
59 | int choked_prev; | ||
60 | int choked_next; | ||
61 | } SchWaiter; | ||
62 | |||
63 | typedef struct SchTask { | ||
64 | Scheduler *parent; | ||
65 | SchedulerNode node; | ||
66 | |||
67 | SchThreadFunc func; | ||
68 | void *func_arg; | ||
69 | |||
70 | pthread_t thread; | ||
71 | int thread_running; | ||
72 | } SchTask; | ||
73 | |||
74 | typedef struct SchDecOutput { | ||
75 | SchedulerNode *dst; | ||
76 | uint8_t *dst_finished; | ||
77 | unsigned nb_dst; | ||
78 | } SchDecOutput; | ||
79 | |||
80 | typedef struct SchDec { | ||
81 | const AVClass *class; | ||
82 | |||
83 | SchedulerNode src; | ||
84 | |||
85 | SchDecOutput *outputs; | ||
86 | unsigned nb_outputs; | ||
87 | |||
88 | SchTask task; | ||
89 | // Queue for receiving input packets, one stream. | ||
90 | ThreadQueue *queue; | ||
91 | |||
92 | // Queue for sending post-flush end timestamps back to the source | ||
93 | AVThreadMessageQueue *queue_end_ts; | ||
94 | int expect_end_ts; | ||
95 | |||
96 | // temporary storage used by sch_dec_send() | ||
97 | AVFrame *send_frame; | ||
98 | } SchDec; | ||
99 | |||
100 | typedef struct SchSyncQueue { | ||
101 | SyncQueue *sq; | ||
102 | AVFrame *frame; | ||
103 | pthread_mutex_t lock; | ||
104 | |||
105 | unsigned *enc_idx; | ||
106 | unsigned nb_enc_idx; | ||
107 | } SchSyncQueue; | ||
108 | |||
109 | typedef struct SchEnc { | ||
110 | const AVClass *class; | ||
111 | |||
112 | SchedulerNode src; | ||
113 | SchedulerNode *dst; | ||
114 | uint8_t *dst_finished; | ||
115 | unsigned nb_dst; | ||
116 | |||
117 | // [0] - index of the sync queue in Scheduler.sq_enc, | ||
118 | // [1] - index of this encoder in the sq | ||
119 | int sq_idx[2]; | ||
120 | |||
121 | /* Opening encoders is somewhat nontrivial due to their interaction with | ||
122 | * sync queues, which are (among other things) responsible for maintaining | ||
123 | * constant audio frame size, when it is required by the encoder. | ||
124 | * | ||
125 | * Opening the encoder requires stream parameters, obtained from the first | ||
126 | * frame. However, that frame cannot be properly chunked by the sync queue | ||
127 | * without knowing the required frame size, which is only available after | ||
128 | * opening the encoder. | ||
129 | * | ||
130 | * This apparent circular dependency is resolved in the following way: | ||
131 | * - the caller creating the encoder gives us a callback which opens the | ||
132 | * encoder and returns the required frame size (if any) | ||
133 | * - when the first frame is sent to the encoder, the sending thread | ||
134 | * - calls this callback, opening the encoder | ||
135 | * - passes the returned frame size to the sync queue | ||
136 | */ | ||
137 | int (*open_cb)(void *opaque, const AVFrame *frame); | ||
138 | int opened; | ||
139 | |||
140 | SchTask task; | ||
141 | // Queue for receiving input frames, one stream. | ||
142 | ThreadQueue *queue; | ||
143 | // tq_send() to queue returned EOF | ||
144 | int in_finished; | ||
145 | |||
146 | // temporary storage used by sch_enc_send() | ||
147 | AVPacket *send_pkt; | ||
148 | } SchEnc; | ||
149 | |||
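
Editor's note (not part of the coverage listing): the comment inside SchEnc above describes deferring the actual opening of the encoder until the first frame arrives, so that the frame size it reports can be fed back to the sync queue. The standalone sketch below illustrates only that callback pattern; every name in it is hypothetical.

    #include <stdio.h>

    /* Hypothetical "open on first frame" pattern: the owner installs open_cb;
     * the sender invokes it exactly once, when the first frame shows up, and
     * keeps the returned frame size for whatever does the chunking. */
    typedef struct ToyEnc {
        int (*open_cb)(void *opaque, const void *first_frame); /* >= 0: frame size */
        void *opaque;
        int   opened;
        int   frame_size;
    } ToyEnc;

    static int toy_enc_send(ToyEnc *e, const void *frame)
    {
        if (!e->opened) {
            int ret = e->open_cb(e->opaque, frame); /* open using the first frame */
            if (ret < 0)
                return ret;
            e->frame_size = ret;   /* would be handed on to the sync queue */
            e->opened     = 1;
        }
        /* ...forward the frame for encoding... */
        return 0;
    }

    static int open_dummy(void *opaque, const void *frame) { (void)opaque; (void)frame; return 1024; }

    int main(void)
    {
        ToyEnc e = { .open_cb = open_dummy };
        toy_enc_send(&e, "frame");
        printf("frame size: %d\n", e.frame_size);
        return 0;
    }
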
150 | typedef struct SchDemuxStream { | ||
151 | SchedulerNode *dst; | ||
152 | uint8_t *dst_finished; | ||
153 | unsigned nb_dst; | ||
154 | } SchDemuxStream; | ||
155 | |||
156 | typedef struct SchDemux { | ||
157 | const AVClass *class; | ||
158 | |||
159 | SchDemuxStream *streams; | ||
160 | unsigned nb_streams; | ||
161 | |||
162 | SchTask task; | ||
163 | SchWaiter waiter; | ||
164 | |||
165 | // temporary storage used by sch_demux_send() | ||
166 | AVPacket *send_pkt; | ||
167 | |||
168 | // protected by schedule_lock | ||
169 | int task_exited; | ||
170 | } SchDemux; | ||
171 | |||
172 | typedef struct PreMuxQueue { | ||
173 | /** | ||
174 | * Queue for buffering the packets before the muxer task can be started. | ||
175 | */ | ||
176 | AVFifo *fifo; | ||
177 | /** | ||
178 | * Maximum number of packets in fifo. | ||
179 | */ | ||
180 | int max_packets; | ||
181 | /* | ||
182 | * The size of the AVPackets' buffers in queue. | ||
183 | * Updated when a packet is either pushed or pulled from the queue. | ||
184 | */ | ||
185 | size_t data_size; | ||
186 | /* Threshold after which max_packets will be in effect */ | ||
187 | size_t data_threshold; | ||
188 | } PreMuxQueue; | ||
189 | |||
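
Editor's note (illustrative, not from the source): one reading of the two limits in PreMuxQueue above is that max_packets only starts to constrain the queue once the buffered byte size has crossed data_threshold. A minimal sketch of that assumed rule, with a made-up helper name:

    #include <stddef.h>

    /* Assumed pre-mux buffering rule: grow freely while the buffered bytes are
     * at or under the threshold, then cap the packet count at max_packets. */
    static size_t premux_packet_cap(size_t data_size, size_t data_threshold,
                                    int max_packets)
    {
        if (data_size <= data_threshold)
            return (size_t)-1;          /* effectively unlimited */
        return (size_t)max_packets;
    }
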
190 | typedef struct SchMuxStream { | ||
191 | SchedulerNode src; | ||
192 | SchedulerNode src_sched; | ||
193 | |||
194 | unsigned *sub_heartbeat_dst; | ||
195 | unsigned nb_sub_heartbeat_dst; | ||
196 | |||
197 | PreMuxQueue pre_mux_queue; | ||
198 | |||
199 | // an EOF was generated while flushing the pre-mux queue | ||
200 | int init_eof; | ||
201 | |||
202 | //////////////////////////////////////////////////////////// | ||
203 | // The following are protected by Scheduler.schedule_lock // | ||
204 | |||
205 | /* dts+duration of the last packet sent to this stream | ||
206 | in AV_TIME_BASE_Q */ | ||
207 | int64_t last_dts; | ||
208 | // this stream no longer accepts input | ||
209 | int source_finished; | ||
210 | //////////////////////////////////////////////////////////// | ||
211 | } SchMuxStream; | ||
212 | |||
213 | typedef struct SchMux { | ||
214 | const AVClass *class; | ||
215 | |||
216 | SchMuxStream *streams; | ||
217 | unsigned nb_streams; | ||
218 | unsigned nb_streams_ready; | ||
219 | |||
220 | int (*init)(void *arg); | ||
221 | |||
222 | SchTask task; | ||
223 | /** | ||
224 | * Set to 1 after starting the muxer task and flushing the | ||
225 | * pre-muxing queues. | ||
226 | * Set either before any tasks have started, or with | ||
227 | * Scheduler.mux_ready_lock held. | ||
228 | */ | ||
229 | atomic_int mux_started; | ||
230 | ThreadQueue *queue; | ||
231 | unsigned queue_size; | ||
232 | |||
233 | AVPacket *sub_heartbeat_pkt; | ||
234 | } SchMux; | ||
235 | |||
236 | typedef struct SchFilterIn { | ||
237 | SchedulerNode src; | ||
238 | SchedulerNode src_sched; | ||
239 | int send_finished; | ||
240 | int receive_finished; | ||
241 | } SchFilterIn; | ||
242 | |||
243 | typedef struct SchFilterOut { | ||
244 | SchedulerNode dst; | ||
245 | } SchFilterOut; | ||
246 | |||
247 | typedef struct SchFilterGraph { | ||
248 | const AVClass *class; | ||
249 | |||
250 | SchFilterIn *inputs; | ||
251 | unsigned nb_inputs; | ||
252 | atomic_uint nb_inputs_finished_send; | ||
253 | unsigned nb_inputs_finished_receive; | ||
254 | |||
255 | SchFilterOut *outputs; | ||
256 | unsigned nb_outputs; | ||
257 | |||
258 | SchTask task; | ||
259 | // input queue, nb_inputs+1 streams | ||
260 | // last stream is control | ||
261 | ThreadQueue *queue; | ||
262 | SchWaiter waiter; | ||
263 | |||
264 | // protected by schedule_lock | ||
265 | unsigned best_input; | ||
266 | int task_exited; | ||
267 | } SchFilterGraph; | ||
268 | |||
269 | enum SchedulerState { | ||
270 | SCH_STATE_UNINIT, | ||
271 | SCH_STATE_STARTED, | ||
272 | SCH_STATE_STOPPED, | ||
273 | }; | ||
274 | |||
275 | struct Scheduler { | ||
276 | const AVClass *class; | ||
277 | |||
278 | SchDemux *demux; | ||
279 | unsigned nb_demux; | ||
280 | |||
281 | SchMux *mux; | ||
282 | unsigned nb_mux; | ||
283 | |||
284 | unsigned nb_mux_ready; | ||
285 | pthread_mutex_t mux_ready_lock; | ||
286 | |||
287 | unsigned nb_mux_done; | ||
288 | unsigned task_failed; | ||
289 | pthread_mutex_t finish_lock; | ||
290 | pthread_cond_t finish_cond; | ||
291 | |||
292 | |||
293 | SchDec *dec; | ||
294 | unsigned nb_dec; | ||
295 | |||
296 | SchEnc *enc; | ||
297 | unsigned nb_enc; | ||
298 | |||
299 | SchSyncQueue *sq_enc; | ||
300 | unsigned nb_sq_enc; | ||
301 | |||
302 | SchFilterGraph *filters; | ||
303 | unsigned nb_filters; | ||
304 | |||
305 | char *sdp_filename; | ||
306 | int sdp_auto; | ||
307 | |||
308 | enum SchedulerState state; | ||
309 | atomic_int terminate; | ||
310 | |||
311 | pthread_mutex_t schedule_lock; | ||
312 | |||
313 | atomic_int_least64_t last_dts; | ||
314 | }; | ||
315 | |||
316 | /** | ||
317 | * Wait until this task is allowed to proceed. | ||
318 | * | ||
319 | * @retval 0 the caller should proceed | ||
320 | * @retval 1 the caller should terminate | ||
321 | */ | ||
322 | 492509 | static int waiter_wait(Scheduler *sch, SchWaiter *w) | |
323 | { | ||
324 | int terminate; | ||
325 | |||
326 | 2/2 ✓ Branch 0 taken 492254 times. ✓ Branch 1 taken 255 times. | 492509 | if (!atomic_load(&w->choked)) |
327 | 492254 | return 0; | |
328 | |||
329 | 255 | pthread_mutex_lock(&w->lock); | |
330 | |||
331 | 4/4 ✓ Branch 0 taken 258 times. ✓ Branch 1 taken 204 times. ✓ Branch 2 taken 207 times. ✓ Branch 3 taken 51 times. | 462 | while (atomic_load(&w->choked) && !atomic_load(&sch->terminate)) |
332 | 207 | pthread_cond_wait(&w->cond, &w->lock); | |
333 | |||
334 | 255 | terminate = atomic_load(&sch->terminate); | |
335 | |||
336 | 255 | pthread_mutex_unlock(&w->lock); | |
337 | |||
338 | 255 | return terminate; | |
339 | } | ||
340 | |||
341 | 32899 | static void waiter_set(SchWaiter *w, int choked) | |
342 | { | ||
343 | 32899 | pthread_mutex_lock(&w->lock); | |
344 | |||
345 | 32899 | atomic_store(&w->choked, choked); | |
346 | 32899 | pthread_cond_signal(&w->cond); | |
347 | |||
348 | 32899 | pthread_mutex_unlock(&w->lock); | |
349 | 32899 | } | |
350 | |||
351 | 14713 | static int waiter_init(SchWaiter *w) | |
352 | { | ||
353 | int ret; | ||
354 | |||
355 | 14713 | atomic_init(&w->choked, 0); | |
356 | |||
357 | 14713 | ret = pthread_mutex_init(&w->lock, NULL); | |
358 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 14713 times. | 14713 | if (ret) |
359 | ✗ | return AVERROR(ret); | |
360 | |||
361 | 14713 | ret = pthread_cond_init(&w->cond, NULL); | |
362 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 14713 times. | 14713 | if (ret) |
363 | ✗ | return AVERROR(ret); | |
364 | |||
365 | 14713 | return 0; | |
366 | } | ||
367 | |||
368 | 14713 | static void waiter_uninit(SchWaiter *w) | |
369 | { | ||
370 | 14713 | pthread_mutex_destroy(&w->lock); | |
371 | 14713 | pthread_cond_destroy(&w->cond); | |
372 | 14713 | } | |
373 | |||
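
Editor's note: a self-contained pthreads sketch of the choke/unchoke pattern implemented by waiter_wait()/waiter_set() above, simplified (no termination flag). The point it illustrates is that the unlocked atomic_load() is only a fast path; the flag is re-checked and stored under the mutex so a wakeup cannot be missed. All names are invented for the illustration.

    #include <pthread.h>
    #include <stdatomic.h>

    typedef struct ToyWaiter {
        pthread_mutex_t lock;
        pthread_cond_t  cond;
        atomic_int      choked;
    } ToyWaiter;

    /* Called by a source thread before producing more data; returns when unchoked. */
    static void toy_waiter_wait(ToyWaiter *w)
    {
        if (!atomic_load(&w->choked))       /* fast path: no lock taken */
            return;
        pthread_mutex_lock(&w->lock);
        while (atomic_load(&w->choked))     /* re-check under the lock */
            pthread_cond_wait(&w->cond, &w->lock);
        pthread_mutex_unlock(&w->lock);
    }

    /* Called by the scheduler to pause (1) or resume (0) the source. */
    static void toy_waiter_set(ToyWaiter *w, int choked)
    {
        pthread_mutex_lock(&w->lock);
        atomic_store(&w->choked, choked);   /* stored under the lock so the waiter cannot miss it */
        pthread_cond_signal(&w->cond);
        pthread_mutex_unlock(&w->lock);
    }
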
374 | 29932 | static int queue_alloc(ThreadQueue **ptq, unsigned nb_streams, unsigned queue_size, | |
375 | enum QueueType type) | ||
376 | { | ||
377 | ThreadQueue *tq; | ||
378 | |||
379 | 1/2 ✓ Branch 0 taken 29932 times. ✗ Branch 1 not taken. | 29932 | if (queue_size <= 0) { |
380 | 2/2 ✓ Branch 0 taken 15250 times. ✓ Branch 1 taken 14682 times. | 29932 | if (type == QUEUE_FRAMES) |
381 | 15250 | queue_size = DEFAULT_FRAME_THREAD_QUEUE_SIZE; | |
382 | else | ||
383 | 14682 | queue_size = DEFAULT_PACKET_THREAD_QUEUE_SIZE; | |
384 | } | ||
385 | |||
386 | 2/2 ✓ Branch 0 taken 15250 times. ✓ Branch 1 taken 14682 times. | 29932 | if (type == QUEUE_FRAMES) { |
387 | // This queue length is used in the decoder code to ensure that | ||
388 | // there are enough entries in fixed-size frame pools to account | ||
389 | // for frames held in queues inside the ffmpeg utility. If this | ||
390 | // can ever dynamically change then the corresponding decode | ||
391 | // code needs to be updated as well. | ||
392 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 15250 times. | 15250 | av_assert0(queue_size == DEFAULT_FRAME_THREAD_QUEUE_SIZE); |
393 | } | ||
394 | |||
395 | 29932 | tq = tq_alloc(nb_streams, queue_size, | |
396 | (type == QUEUE_PACKETS) ? THREAD_QUEUE_PACKETS : THREAD_QUEUE_FRAMES); | ||
397 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 29932 times. | 29932 | if (!tq) |
398 | ✗ | return AVERROR(ENOMEM); | |
399 | |||
400 | 29932 | *ptq = tq; | |
401 | 29932 | return 0; | |
402 | } | ||
403 | |||
404 | static void *task_wrapper(void *arg); | ||
405 | |||
406 | 37090 | static int task_start(SchTask *task) | |
407 | { | ||
408 | int ret; | ||
409 | |||
410 | 37090 | av_log(task->func_arg, AV_LOG_VERBOSE, "Starting thread...\n"); | |
411 | |||
412 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 37090 times. | 37090 | av_assert0(!task->thread_running); |
413 | |||
414 | 37090 | ret = pthread_create(&task->thread, NULL, task_wrapper, task); | |
415 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 37090 times. | 37090 | if (ret) { |
416 | ✗ | av_log(task->func_arg, AV_LOG_ERROR, "pthread_create() failed: %s\n", | |
417 | strerror(ret)); | ||
418 | ✗ | return AVERROR(ret); | |
419 | } | ||
420 | |||
421 | 37090 | task->thread_running = 1; | |
422 | 37090 | return 0; | |
423 | } | ||
424 | |||
425 | 37104 | static void task_init(Scheduler *sch, SchTask *task, enum SchedulerNodeType type, unsigned idx, | |
426 | SchThreadFunc func, void *func_arg) | ||
427 | { | ||
428 | 37104 | task->parent = sch; | |
429 | |||
430 | 37104 | task->node.type = type; | |
431 | 37104 | task->node.idx = idx; | |
432 | |||
433 | 37104 | task->func = func; | |
434 | 37104 | task->func_arg = func_arg; | |
435 | 37104 | } | |
436 | |||
437 | 537938 | static int64_t trailing_dts(const Scheduler *sch, int count_finished) | |
438 | { | ||
439 | 537938 | int64_t min_dts = INT64_MAX; | |
440 | |||
441 | 2/2 ✓ Branch 0 taken 538196 times. ✓ Branch 1 taken 522887 times. | 1061083 | for (unsigned i = 0; i < sch->nb_mux; i++) { |
442 | 538196 | const SchMux *mux = &sch->mux[i]; | |
443 | |||
444 | 2/2 ✓ Branch 0 taken 595792 times. ✓ Branch 1 taken 523145 times. | 1118937 | for (unsigned j = 0; j < mux->nb_streams; j++) { |
445 | 595792 | const SchMuxStream *ms = &mux->streams[j]; | |
446 | |||
447 | 4/4 ✓ Branch 0 taken 45478 times. ✓ Branch 1 taken 550314 times. ✓ Branch 2 taken 37113 times. ✓ Branch 3 taken 8365 times. | 595792 | if (ms->source_finished && !count_finished) |
448 | 37113 | continue; | |
449 | 2/2 ✓ Branch 0 taken 15051 times. ✓ Branch 1 taken 543628 times. | 558679 | if (ms->last_dts == AV_NOPTS_VALUE) |
450 | 15051 | return AV_NOPTS_VALUE; | |
451 | |||
452 | 543628 | min_dts = FFMIN(min_dts, ms->last_dts); | |
453 | } | ||
454 | } | ||
455 | |||
456 | 2/2 ✓ Branch 0 taken 494751 times. ✓ Branch 1 taken 28136 times. | 522887 | return min_dts == INT64_MAX ? AV_NOPTS_VALUE : min_dts; |
457 | } | ||
458 | |||
459 | 7924 | void sch_free(Scheduler **psch) | |
460 | { | ||
461 | 7924 | Scheduler *sch = *psch; | |
462 | |||
463 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 7924 times. | 7924 | if (!sch) |
464 | ✗ | return; | |
465 | |||
466 | 7924 | sch_stop(sch, NULL); | |
467 | |||
468 | 2/2 ✓ Branch 0 taken 7172 times. ✓ Branch 1 taken 7924 times. | 15096 | for (unsigned i = 0; i < sch->nb_demux; i++) { |
469 | 7172 | SchDemux *d = &sch->demux[i]; | |
470 | |||
471 | 2/2 ✓ Branch 0 taken 7425 times. ✓ Branch 1 taken 7172 times. | 14597 | for (unsigned j = 0; j < d->nb_streams; j++) { |
472 | 7425 | SchDemuxStream *ds = &d->streams[j]; | |
473 | 7425 | av_freep(&ds->dst); | |
474 | 7425 | av_freep(&ds->dst_finished); | |
475 | } | ||
476 | 7172 | av_freep(&d->streams); | |
477 | |||
478 | 7172 | av_packet_free(&d->send_pkt); | |
479 | |||
480 | 7172 | waiter_uninit(&d->waiter); | |
481 | } | ||
482 | 7924 | av_freep(&sch->demux); | |
483 | |||
484 | 2/2 ✓ Branch 0 taken 7925 times. ✓ Branch 1 taken 7924 times. | 15849 | for (unsigned i = 0; i < sch->nb_mux; i++) { |
485 | 7925 | SchMux *mux = &sch->mux[i]; | |
486 | |||
487 | 2/2 ✓ Branch 0 taken 8401 times. ✓ Branch 1 taken 7925 times. | 16326 | for (unsigned j = 0; j < mux->nb_streams; j++) { |
488 | 8401 | SchMuxStream *ms = &mux->streams[j]; | |
489 | |||
490 | 1/2 ✓ Branch 0 taken 8401 times. ✗ Branch 1 not taken. | 8401 | if (ms->pre_mux_queue.fifo) { |
491 | AVPacket *pkt; | ||
492 | 1/2 ✗ Branch 1 not taken. ✓ Branch 2 taken 8401 times. | 8401 | while (av_fifo_read(ms->pre_mux_queue.fifo, &pkt, 1) >= 0) |
493 | ✗ | av_packet_free(&pkt); | |
494 | 8401 | av_fifo_freep2(&ms->pre_mux_queue.fifo); | |
495 | } | ||
496 | |||
497 | 8401 | av_freep(&ms->sub_heartbeat_dst); | |
498 | } | ||
499 | 7925 | av_freep(&mux->streams); | |
500 | |||
501 | 7925 | av_packet_free(&mux->sub_heartbeat_pkt); | |
502 | |||
503 | 7925 | tq_free(&mux->queue); | |
504 | } | ||
505 | 7924 | av_freep(&sch->mux); | |
506 | |||
507 | 2/2 ✓ Branch 0 taken 6757 times. ✓ Branch 1 taken 7924 times. | 14681 | for (unsigned i = 0; i < sch->nb_dec; i++) { |
508 | 6757 | SchDec *dec = &sch->dec[i]; | |
509 | |||
510 | 6757 | tq_free(&dec->queue); | |
511 | |||
512 | 6757 | av_thread_message_queue_free(&dec->queue_end_ts); | |
513 | |||
514 | 2/2 ✓ Branch 0 taken 6763 times. ✓ Branch 1 taken 6757 times. | 13520 | for (unsigned j = 0; j < dec->nb_outputs; j++) { |
515 | 6763 | SchDecOutput *o = &dec->outputs[j]; | |
516 | |||
517 | 6763 | av_freep(&o->dst); | |
518 | 6763 | av_freep(&o->dst_finished); | |
519 | } | ||
520 | |||
521 | 6757 | av_freep(&dec->outputs); | |
522 | |||
523 | 6757 | av_frame_free(&dec->send_frame); | |
524 | } | ||
525 | 7924 | av_freep(&sch->dec); | |
526 | |||
527 | 2/2 ✓ Branch 0 taken 7709 times. ✓ Branch 1 taken 7924 times. | 15633 | for (unsigned i = 0; i < sch->nb_enc; i++) { |
528 | 7709 | SchEnc *enc = &sch->enc[i]; | |
529 | |||
530 | 7709 | tq_free(&enc->queue); | |
531 | |||
532 | 7709 | av_packet_free(&enc->send_pkt); | |
533 | |||
534 | 7709 | av_freep(&enc->dst); | |
535 | 7709 | av_freep(&enc->dst_finished); | |
536 | } | ||
537 | 7924 | av_freep(&sch->enc); | |
538 | |||
539 | 2/2 ✓ Branch 0 taken 3069 times. ✓ Branch 1 taken 7924 times. | 10993 | for (unsigned i = 0; i < sch->nb_sq_enc; i++) { |
540 | 3069 | SchSyncQueue *sq = &sch->sq_enc[i]; | |
541 | 3069 | sq_free(&sq->sq); | |
542 | 3069 | av_frame_free(&sq->frame); | |
543 | 3069 | pthread_mutex_destroy(&sq->lock); | |
544 | 3069 | av_freep(&sq->enc_idx); | |
545 | } | ||
546 | 7924 | av_freep(&sch->sq_enc); | |
547 | |||
548 | 2/2 ✓ Branch 0 taken 7541 times. ✓ Branch 1 taken 7924 times. | 15465 | for (unsigned i = 0; i < sch->nb_filters; i++) { |
549 | 7541 | SchFilterGraph *fg = &sch->filters[i]; | |
550 | |||
551 | 7541 | tq_free(&fg->queue); | |
552 | |||
553 | 7541 | av_freep(&fg->inputs); | |
554 | 7541 | av_freep(&fg->outputs); | |
555 | |||
556 | 7541 | waiter_uninit(&fg->waiter); | |
557 | } | ||
558 | 7924 | av_freep(&sch->filters); | |
559 | |||
560 | 7924 | av_freep(&sch->sdp_filename); | |
561 | |||
562 | 7924 | pthread_mutex_destroy(&sch->schedule_lock); | |
563 | |||
564 | 7924 | pthread_mutex_destroy(&sch->mux_ready_lock); | |
565 | |||
566 | 7924 | pthread_mutex_destroy(&sch->finish_lock); | |
567 | 7924 | pthread_cond_destroy(&sch->finish_cond); | |
568 | |||
569 | 7924 | av_freep(psch); | |
570 | } | ||
571 | |||
572 | static const AVClass scheduler_class = { | ||
573 | .class_name = "Scheduler", | ||
574 | .version = LIBAVUTIL_VERSION_INT, | ||
575 | }; | ||
576 | |||
577 | 7924 | Scheduler *sch_alloc(void) | |
578 | { | ||
579 | Scheduler *sch; | ||
580 | int ret; | ||
581 | |||
582 | 7924 | sch = av_mallocz(sizeof(*sch)); | |
583 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 7924 times. | 7924 | if (!sch) |
584 | ✗ | return NULL; | |
585 | |||
586 | 7924 | sch->class = &scheduler_class; | |
587 | 7924 | sch->sdp_auto = 1; | |
588 | |||
589 | 7924 | ret = pthread_mutex_init(&sch->schedule_lock, NULL); | |
590 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 7924 times. | 7924 | if (ret) |
591 | ✗ | goto fail; | |
592 | |||
593 | 7924 | ret = pthread_mutex_init(&sch->mux_ready_lock, NULL); | |
594 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 7924 times. | 7924 | if (ret) |
595 | ✗ | goto fail; | |
596 | |||
597 | 7924 | ret = pthread_mutex_init(&sch->finish_lock, NULL); | |
598 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 7924 times. | 7924 | if (ret) |
599 | ✗ | goto fail; | |
600 | |||
601 | 7924 | ret = pthread_cond_init(&sch->finish_cond, NULL); | |
602 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 7924 times. | 7924 | if (ret) |
603 | ✗ | goto fail; | |
604 | |||
605 | 7924 | return sch; | |
606 | ✗ | fail: | |
607 | ✗ | sch_free(&sch); | |
608 | ✗ | return NULL; | |
609 | } | ||
610 | |||
611 | ✗ | int sch_sdp_filename(Scheduler *sch, const char *sdp_filename) | |
612 | { | ||
613 | ✗ | av_freep(&sch->sdp_filename); | |
614 | ✗ | sch->sdp_filename = av_strdup(sdp_filename); | |
615 | ✗ | return sch->sdp_filename ? 0 : AVERROR(ENOMEM); | |
616 | } | ||
617 | |||
618 | static const AVClass sch_mux_class = { | ||
619 | .class_name = "SchMux", | ||
620 | .version = LIBAVUTIL_VERSION_INT, | ||
621 | .parent_log_context_offset = offsetof(SchMux, task.func_arg), | ||
622 | }; | ||
623 | |||
624 | 7925 | int sch_add_mux(Scheduler *sch, SchThreadFunc func, int (*init)(void *), | |
625 | void *arg, int sdp_auto, unsigned thread_queue_size) | ||
626 | { | ||
627 | 7925 | const unsigned idx = sch->nb_mux; | |
628 | |||
629 | SchMux *mux; | ||
630 | int ret; | ||
631 | |||
632 | 7925 | ret = GROW_ARRAY(sch->mux, sch->nb_mux); | |
633 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 7925 times. | 7925 | if (ret < 0) |
634 | ✗ | return ret; | |
635 | |||
636 | 7925 | mux = &sch->mux[idx]; | |
637 | 7925 | mux->class = &sch_mux_class; | |
638 | 7925 | mux->init = init; | |
639 | 7925 | mux->queue_size = thread_queue_size; | |
640 | |||
641 | 7925 | task_init(sch, &mux->task, SCH_NODE_TYPE_MUX, idx, func, arg); | |
642 | |||
643 | 7925 | sch->sdp_auto &= sdp_auto; | |
644 | |||
645 | 7925 | return idx; | |
646 | } | ||
647 | |||
648 | 8401 | int sch_add_mux_stream(Scheduler *sch, unsigned mux_idx) | |
649 | { | ||
650 | SchMux *mux; | ||
651 | SchMuxStream *ms; | ||
652 | unsigned stream_idx; | ||
653 | int ret; | ||
654 | |||
655 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 8401 times. | 8401 | av_assert0(mux_idx < sch->nb_mux); |
656 | 8401 | mux = &sch->mux[mux_idx]; | |
657 | |||
658 | 8401 | ret = GROW_ARRAY(mux->streams, mux->nb_streams); | |
659 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 8401 times. | 8401 | if (ret < 0) |
660 | ✗ | return ret; | |
661 | 8401 | stream_idx = mux->nb_streams - 1; | |
662 | |||
663 | 8401 | ms = &mux->streams[stream_idx]; | |
664 | |||
665 | 8401 | ms->pre_mux_queue.fifo = av_fifo_alloc2(8, sizeof(AVPacket*), 0); | |
666 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 8401 times. | 8401 | if (!ms->pre_mux_queue.fifo) |
667 | ✗ | return AVERROR(ENOMEM); | |
668 | |||
669 | 8401 | ms->last_dts = AV_NOPTS_VALUE; | |
670 | |||
671 | 8401 | return stream_idx; | |
672 | } | ||
673 | |||
674 | static const AVClass sch_demux_class = { | ||
675 | .class_name = "SchDemux", | ||
676 | .version = LIBAVUTIL_VERSION_INT, | ||
677 | .parent_log_context_offset = offsetof(SchDemux, task.func_arg), | ||
678 | }; | ||
679 | |||
680 | 7172 | int sch_add_demux(Scheduler *sch, SchThreadFunc func, void *ctx) | |
681 | { | ||
682 | 7172 | const unsigned idx = sch->nb_demux; | |
683 | |||
684 | SchDemux *d; | ||
685 | int ret; | ||
686 | |||
687 | 7172 | ret = GROW_ARRAY(sch->demux, sch->nb_demux); | |
688 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 7172 times. | 7172 | if (ret < 0) |
689 | ✗ | return ret; | |
690 | |||
691 | 7172 | d = &sch->demux[idx]; | |
692 | |||
693 | 7172 | task_init(sch, &d->task, SCH_NODE_TYPE_DEMUX, idx, func, ctx); | |
694 | |||
695 | 7172 | d->class = &sch_demux_class; | |
696 | 7172 | d->send_pkt = av_packet_alloc(); | |
697 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 7172 times. | 7172 | if (!d->send_pkt) |
698 | ✗ | return AVERROR(ENOMEM); | |
699 | |||
700 | 7172 | ret = waiter_init(&d->waiter); | |
701 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 7172 times. | 7172 | if (ret < 0) |
702 | ✗ | return ret; | |
703 | |||
704 | 7172 | return idx; | |
705 | } | ||
706 | |||
707 | 7425 | int sch_add_demux_stream(Scheduler *sch, unsigned demux_idx) | |
708 | { | ||
709 | SchDemux *d; | ||
710 | int ret; | ||
711 | |||
712 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 7425 times. | 7425 | av_assert0(demux_idx < sch->nb_demux); |
713 | 7425 | d = &sch->demux[demux_idx]; | |
714 | |||
715 | 7425 | ret = GROW_ARRAY(d->streams, d->nb_streams); | |
716 | 1/2 ✓ Branch 0 taken 7425 times. ✗ Branch 1 not taken. | 7425 | return ret < 0 ? ret : d->nb_streams - 1; |
717 | } | ||
718 | |||
719 | 6763 | int sch_add_dec_output(Scheduler *sch, unsigned dec_idx) | |
720 | { | ||
721 | SchDec *dec; | ||
722 | int ret; | ||
723 | |||
724 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 6763 times. | 6763 | av_assert0(dec_idx < sch->nb_dec); |
725 | 6763 | dec = &sch->dec[dec_idx]; | |
726 | |||
727 | 6763 | ret = GROW_ARRAY(dec->outputs, dec->nb_outputs); | |
728 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 6763 times. | 6763 | if (ret < 0) |
729 | ✗ | return ret; | |
730 | |||
731 | 6763 | return dec->nb_outputs - 1; | |
732 | } | ||
733 | |||
734 | static const AVClass sch_dec_class = { | ||
735 | .class_name = "SchDec", | ||
736 | .version = LIBAVUTIL_VERSION_INT, | ||
737 | .parent_log_context_offset = offsetof(SchDec, task.func_arg), | ||
738 | }; | ||
739 | |||
740 | 6757 | int sch_add_dec(Scheduler *sch, SchThreadFunc func, void *ctx, int send_end_ts) | |
741 | { | ||
742 | 6757 | const unsigned idx = sch->nb_dec; | |
743 | |||
744 | SchDec *dec; | ||
745 | int ret; | ||
746 | |||
747 | 6757 | ret = GROW_ARRAY(sch->dec, sch->nb_dec); | |
748 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 6757 times. | 6757 | if (ret < 0) |
749 | ✗ | return ret; | |
750 | |||
751 | 6757 | dec = &sch->dec[idx]; | |
752 | |||
753 | 6757 | task_init(sch, &dec->task, SCH_NODE_TYPE_DEC, idx, func, ctx); | |
754 | |||
755 | 6757 | dec->class = &sch_dec_class; | |
756 | 6757 | dec->send_frame = av_frame_alloc(); | |
757 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 6757 times. | 6757 | if (!dec->send_frame) |
758 | ✗ | return AVERROR(ENOMEM); | |
759 | |||
760 | 6757 | ret = sch_add_dec_output(sch, idx); | |
761 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 6757 times. | 6757 | if (ret < 0) |
762 | ✗ | return ret; | |
763 | |||
764 | 6757 | ret = queue_alloc(&dec->queue, 1, 0, QUEUE_PACKETS); | |
765 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 6757 times. | 6757 | if (ret < 0) |
766 | ✗ | return ret; | |
767 | |||
768 | 2/2 ✓ Branch 0 taken 1 times. ✓ Branch 1 taken 6756 times. | 6757 | if (send_end_ts) { |
769 | 1 | ret = av_thread_message_queue_alloc(&dec->queue_end_ts, 1, sizeof(Timestamp)); | |
770 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 1 times. | 1 | if (ret < 0) |
771 | ✗ | return ret; | |
772 | } | ||
773 | |||
774 | 6757 | return idx; | |
775 | } | ||
776 | |||
777 | static const AVClass sch_enc_class = { | ||
778 | .class_name = "SchEnc", | ||
779 | .version = LIBAVUTIL_VERSION_INT, | ||
780 | .parent_log_context_offset = offsetof(SchEnc, task.func_arg), | ||
781 | }; | ||
782 | |||
783 | 7709 | int sch_add_enc(Scheduler *sch, SchThreadFunc func, void *ctx, | |
784 | int (*open_cb)(void *opaque, const AVFrame *frame)) | ||
785 | { | ||
786 | 7709 | const unsigned idx = sch->nb_enc; | |
787 | |||
788 | SchEnc *enc; | ||
789 | int ret; | ||
790 | |||
791 | 7709 | ret = GROW_ARRAY(sch->enc, sch->nb_enc); | |
792 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 7709 times. | 7709 | if (ret < 0) |
793 | ✗ | return ret; | |
794 | |||
795 | 7709 | enc = &sch->enc[idx]; | |
796 | |||
797 | 7709 | enc->class = &sch_enc_class; | |
798 | 7709 | enc->open_cb = open_cb; | |
799 | 7709 | enc->sq_idx[0] = -1; | |
800 | 7709 | enc->sq_idx[1] = -1; | |
801 | |||
802 | 7709 | task_init(sch, &enc->task, SCH_NODE_TYPE_ENC, idx, func, ctx); | |
803 | |||
804 | 7709 | enc->send_pkt = av_packet_alloc(); | |
805 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 7709 times. | 7709 | if (!enc->send_pkt) |
806 | ✗ | return AVERROR(ENOMEM); | |
807 | |||
808 | 7709 | ret = queue_alloc(&enc->queue, 1, 0, QUEUE_FRAMES); | |
809 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 7709 times. | 7709 | if (ret < 0) |
810 | ✗ | return ret; | |
811 | |||
812 | 7709 | return idx; | |
813 | } | ||
814 | |||
815 | static const AVClass sch_fg_class = { | ||
816 | .class_name = "SchFilterGraph", | ||
817 | .version = LIBAVUTIL_VERSION_INT, | ||
818 | .parent_log_context_offset = offsetof(SchFilterGraph, task.func_arg), | ||
819 | }; | ||
820 | |||
821 | 7541 | int sch_add_filtergraph(Scheduler *sch, unsigned nb_inputs, unsigned nb_outputs, | |
822 | SchThreadFunc func, void *ctx) | ||
823 | { | ||
824 | 7541 | const unsigned idx = sch->nb_filters; | |
825 | |||
826 | SchFilterGraph *fg; | ||
827 | int ret; | ||
828 | |||
829 | 7541 | ret = GROW_ARRAY(sch->filters, sch->nb_filters); | |
830 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 7541 times. | 7541 | if (ret < 0) |
831 | ✗ | return ret; | |
832 | 7541 | fg = &sch->filters[idx]; | |
833 | |||
834 | 7541 | fg->class = &sch_fg_class; | |
835 | |||
836 | 7541 | task_init(sch, &fg->task, SCH_NODE_TYPE_FILTER_IN, idx, func, ctx); | |
837 | |||
838 | 2/2 ✓ Branch 0 taken 6701 times. ✓ Branch 1 taken 840 times. | 7541 | if (nb_inputs) { |
839 | 6701 | fg->inputs = av_calloc(nb_inputs, sizeof(*fg->inputs)); | |
840 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 6701 times. | 6701 | if (!fg->inputs) |
841 | ✗ | return AVERROR(ENOMEM); | |
842 | 6701 | fg->nb_inputs = nb_inputs; | |
843 | } | ||
844 | |||
845 | 1/2 ✓ Branch 0 taken 7541 times. ✗ Branch 1 not taken. | 7541 | if (nb_outputs) { |
846 | 7541 | fg->outputs = av_calloc(nb_outputs, sizeof(*fg->outputs)); | |
847 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 7541 times. | 7541 | if (!fg->outputs) |
848 | ✗ | return AVERROR(ENOMEM); | |
849 | 7541 | fg->nb_outputs = nb_outputs; | |
850 | } | ||
851 | |||
852 | 7541 | ret = waiter_init(&fg->waiter); | |
853 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 7541 times. | 7541 | if (ret < 0) |
854 | ✗ | return ret; | |
855 | |||
856 | 7541 | ret = queue_alloc(&fg->queue, fg->nb_inputs + 1, 0, QUEUE_FRAMES); | |
857 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 7541 times. | 7541 | if (ret < 0) |
858 | ✗ | return ret; | |
859 | |||
860 | 7541 | return idx; | |
861 | } | ||
862 | |||
863 | 3069 | int sch_add_sq_enc(Scheduler *sch, uint64_t buf_size_us, void *logctx) | |
864 | { | ||
865 | SchSyncQueue *sq; | ||
866 | int ret; | ||
867 | |||
868 | 3069 | ret = GROW_ARRAY(sch->sq_enc, sch->nb_sq_enc); | |
869 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 3069 times. | 3069 | if (ret < 0) |
870 | ✗ | return ret; | |
871 | 3069 | sq = &sch->sq_enc[sch->nb_sq_enc - 1]; | |
872 | |||
873 | 3069 | sq->sq = sq_alloc(SYNC_QUEUE_FRAMES, buf_size_us, logctx); | |
874 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 3069 times. | 3069 | if (!sq->sq) |
875 | ✗ | return AVERROR(ENOMEM); | |
876 | |||
877 | 3069 | sq->frame = av_frame_alloc(); | |
878 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 3069 times. | 3069 | if (!sq->frame) |
879 | ✗ | return AVERROR(ENOMEM); | |
880 | |||
881 | 3069 | ret = pthread_mutex_init(&sq->lock, NULL); | |
882 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 3069 times. | 3069 | if (ret) |
883 | ✗ | return AVERROR(ret); | |
884 | |||
885 | 3069 | return sq - sch->sq_enc; | |
886 | } | ||
887 | |||
888 | 3131 | int sch_sq_add_enc(Scheduler *sch, unsigned sq_idx, unsigned enc_idx, | |
889 | int limiting, uint64_t max_frames) | ||
890 | { | ||
891 | SchSyncQueue *sq; | ||
892 | SchEnc *enc; | ||
893 | int ret; | ||
894 | |||
895 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 3131 times. | 3131 | av_assert0(sq_idx < sch->nb_sq_enc); |
896 | 3131 | sq = &sch->sq_enc[sq_idx]; | |
897 | |||
898 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 3131 times. | 3131 | av_assert0(enc_idx < sch->nb_enc); |
899 | 3131 | enc = &sch->enc[enc_idx]; | |
900 | |||
901 | 3131 | ret = GROW_ARRAY(sq->enc_idx, sq->nb_enc_idx); | |
902 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 3131 times. | 3131 | if (ret < 0) |
903 | ✗ | return ret; | |
904 | 3131 | sq->enc_idx[sq->nb_enc_idx - 1] = enc_idx; | |
905 | |||
906 | 3131 | ret = sq_add_stream(sq->sq, limiting); | |
907 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 3131 times. | 3131 | if (ret < 0) |
908 | ✗ | return ret; | |
909 | |||
910 | 3131 | enc->sq_idx[0] = sq_idx; | |
911 | 3131 | enc->sq_idx[1] = ret; | |
912 | |||
913 | 2/2 ✓ Branch 0 taken 2942 times. ✓ Branch 1 taken 189 times. | 3131 | if (max_frames != INT64_MAX) |
914 | 2942 | sq_limit_frames(sq->sq, enc->sq_idx[1], max_frames); | |
915 | |||
916 | 3131 | return 0; | |
917 | } | ||
918 | |||
919 | 29653 | int sch_connect(Scheduler *sch, SchedulerNode src, SchedulerNode dst) | |
920 | { | ||
921 | int ret; | ||
922 | |||
923 | 4/5 ✓ Branch 0 taken 7448 times. ✓ Branch 1 taken 6824 times. ✓ Branch 2 taken 7671 times. ✓ Branch 3 taken 7710 times. ✗ Branch 4 not taken. | 29653 | switch (src.type) { |
924 | 7448 | case SCH_NODE_TYPE_DEMUX: { | |
925 | SchDemuxStream *ds; | ||
926 | |||
927 | 2/4 ✓ Branch 0 taken 7448 times. ✗ Branch 1 not taken. ✗ Branch 2 not taken. ✓ Branch 3 taken 7448 times. | 7448 | av_assert0(src.idx < sch->nb_demux && |
928 | src.idx_stream < sch->demux[src.idx].nb_streams); | ||
929 | 7448 | ds = &sch->demux[src.idx].streams[src.idx_stream]; | |
930 | |||
931 | 7448 | ret = GROW_ARRAY(ds->dst, ds->nb_dst); | |
932 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 7448 times. | 7448 | if (ret < 0) |
933 | ✗ | return ret; | |
934 | |||
935 | 7448 | ds->dst[ds->nb_dst - 1] = dst; | |
936 | |||
937 | // demuxed packets go to decoding or streamcopy | ||
938 | 2/3 ✓ Branch 0 taken 6756 times. ✓ Branch 1 taken 692 times. ✗ Branch 2 not taken. | 7448 | switch (dst.type) { |
939 | 6756 | case SCH_NODE_TYPE_DEC: { | |
940 | SchDec *dec; | ||
941 | |||
942 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 6756 times. | 6756 | av_assert0(dst.idx < sch->nb_dec); |
943 | 6756 | dec = &sch->dec[dst.idx]; | |
944 | |||
945 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 6756 times. | 6756 | av_assert0(!dec->src.type); |
946 | 6756 | dec->src = src; | |
947 | 6756 | break; | |
948 | } | ||
949 | 692 | case SCH_NODE_TYPE_MUX: { | |
950 | SchMuxStream *ms; | ||
951 | |||
952 | 2/4 ✓ Branch 0 taken 692 times. ✗ Branch 1 not taken. ✗ Branch 2 not taken. ✓ Branch 3 taken 692 times. | 692 | av_assert0(dst.idx < sch->nb_mux && |
953 | dst.idx_stream < sch->mux[dst.idx].nb_streams); | ||
954 | 692 | ms = &sch->mux[dst.idx].streams[dst.idx_stream]; | |
955 | |||
956 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 692 times. | 692 | av_assert0(!ms->src.type); |
957 | 692 | ms->src = src; | |
958 | |||
959 | 692 | break; | |
960 | } | ||
961 | ✗ | default: av_assert0(0); | |
962 | } | ||
963 | |||
964 | 7448 | break; | |
965 | } | ||
966 | 6824 | case SCH_NODE_TYPE_DEC: { | |
967 | SchDec *dec; | ||
968 | SchDecOutput *o; | ||
969 | |||
970 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 6824 times. | 6824 | av_assert0(src.idx < sch->nb_dec); |
971 | 6824 | dec = &sch->dec[src.idx]; | |
972 | |||
973 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 6824 times. | 6824 | av_assert0(src.idx_stream < dec->nb_outputs); |
974 | 6824 | o = &dec->outputs[src.idx_stream]; | |
975 | |||
976 | 6824 | ret = GROW_ARRAY(o->dst, o->nb_dst); | |
977 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 6824 times. | 6824 | if (ret < 0) |
978 | ✗ | return ret; | |
979 | |||
980 | 6824 | o->dst[o->nb_dst - 1] = dst; | |
981 | |||
982 | // decoded frames go to filters or encoding | ||
983 | 2/3 ✓ Branch 0 taken 6786 times. ✓ Branch 1 taken 38 times. ✗ Branch 2 not taken. | 6824 | switch (dst.type) { |
984 | 6786 | case SCH_NODE_TYPE_FILTER_IN: { | |
985 | SchFilterIn *fi; | ||
986 | |||
987 | 2/4 ✓ Branch 0 taken 6786 times. ✗ Branch 1 not taken. ✗ Branch 2 not taken. ✓ Branch 3 taken 6786 times. | 6786 | av_assert0(dst.idx < sch->nb_filters && |
988 | dst.idx_stream < sch->filters[dst.idx].nb_inputs); | ||
989 | 6786 | fi = &sch->filters[dst.idx].inputs[dst.idx_stream]; | |
990 | |||
991 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 6786 times. | 6786 | av_assert0(!fi->src.type); |
992 | 6786 | fi->src = src; | |
993 | 6786 | break; | |
994 | } | ||
995 | 38 | case SCH_NODE_TYPE_ENC: { | |
996 | SchEnc *enc; | ||
997 | |||
998 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 38 times. | 38 | av_assert0(dst.idx < sch->nb_enc); |
999 | 38 | enc = &sch->enc[dst.idx]; | |
1000 | |||
1001 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 38 times. | 38 | av_assert0(!enc->src.type); |
1002 | 38 | enc->src = src; | |
1003 | 38 | break; | |
1004 | } | ||
1005 | ✗ | default: av_assert0(0); | |
1006 | } | ||
1007 | |||
1008 | 6824 | break; | |
1009 | } | ||
1010 | 7671 | case SCH_NODE_TYPE_FILTER_OUT: { | |
1011 | SchFilterOut *fo; | ||
1012 | |||
1013 | 2/4 ✓ Branch 0 taken 7671 times. ✗ Branch 1 not taken. ✗ Branch 2 not taken. ✓ Branch 3 taken 7671 times. | 7671 | av_assert0(src.idx < sch->nb_filters && |
1014 | src.idx_stream < sch->filters[src.idx].nb_outputs); | ||
1015 | 7671 | fo = &sch->filters[src.idx].outputs[src.idx_stream]; | |
1016 | |||
1017 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 7671 times. | 7671 | av_assert0(!fo->dst.type); |
1018 | 7671 | fo->dst = dst; | |
1019 | |||
1020 | // filtered frames go to encoding or another filtergraph | ||
1021 | 1/3 ✓ Branch 0 taken 7671 times. ✗ Branch 1 not taken. ✗ Branch 2 not taken. | 7671 | switch (dst.type) { |
1022 | 7671 | case SCH_NODE_TYPE_ENC: { | |
1023 | SchEnc *enc; | ||
1024 | |||
1025 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 7671 times. | 7671 | av_assert0(dst.idx < sch->nb_enc); |
1026 | 7671 | enc = &sch->enc[dst.idx]; | |
1027 | |||
1028 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 7671 times. | 7671 | av_assert0(!enc->src.type); |
1029 | 7671 | enc->src = src; | |
1030 | 7671 | break; | |
1031 | } | ||
1032 | ✗ | case SCH_NODE_TYPE_FILTER_IN: { | |
1033 | SchFilterIn *fi; | ||
1034 | |||
1035 | ✗ | av_assert0(dst.idx < sch->nb_filters && | |
1036 | dst.idx_stream < sch->filters[dst.idx].nb_inputs); | ||
1037 | ✗ | fi = &sch->filters[dst.idx].inputs[dst.idx_stream]; | |
1038 | |||
1039 | ✗ | av_assert0(!fi->src.type); | |
1040 | ✗ | fi->src = src; | |
1041 | ✗ | break; | |
1042 | } | ||
1043 | ✗ | default: av_assert0(0); | |
1044 | } | ||
1045 | |||
1046 | |||
1047 | 7671 | break; | |
1048 | } | ||
1049 | 7710 | case SCH_NODE_TYPE_ENC: { | |
1050 | SchEnc *enc; | ||
1051 | |||
1052 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 7710 times. | 7710 | av_assert0(src.idx < sch->nb_enc); |
1053 | 7710 | enc = &sch->enc[src.idx]; | |
1054 | |||
1055 | 7710 | ret = GROW_ARRAY(enc->dst, enc->nb_dst); | |
1056 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 7710 times. | 7710 | if (ret < 0) |
1057 | ✗ | return ret; | |
1058 | |||
1059 | 7710 | enc->dst[enc->nb_dst - 1] = dst; | |
1060 | |||
1061 | // encoding packets go to muxing or decoding | ||
1062 | 2/3 ✓ Branch 0 taken 7709 times. ✓ Branch 1 taken 1 times. ✗ Branch 2 not taken. | 7710 | switch (dst.type) { |
1063 | 7709 | case SCH_NODE_TYPE_MUX: { | |
1064 | SchMuxStream *ms; | ||
1065 | |||
1066 | 2/4 ✓ Branch 0 taken 7709 times. ✗ Branch 1 not taken. ✗ Branch 2 not taken. ✓ Branch 3 taken 7709 times. | 7709 | av_assert0(dst.idx < sch->nb_mux && |
1067 | dst.idx_stream < sch->mux[dst.idx].nb_streams); | ||
1068 | 7709 | ms = &sch->mux[dst.idx].streams[dst.idx_stream]; | |
1069 | |||
1070 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 7709 times. | 7709 | av_assert0(!ms->src.type); |
1071 | 7709 | ms->src = src; | |
1072 | |||
1073 | 7709 | break; | |
1074 | } | ||
1075 | 1 | case SCH_NODE_TYPE_DEC: { | |
1076 | SchDec *dec; | ||
1077 | |||
1078 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 1 times. | 1 | av_assert0(dst.idx < sch->nb_dec); |
1079 | 1 | dec = &sch->dec[dst.idx]; | |
1080 | |||
1081 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 1 times. | 1 | av_assert0(!dec->src.type); |
1082 | 1 | dec->src = src; | |
1083 | |||
1084 | 1 | break; | |
1085 | } | ||
1086 | ✗ | default: av_assert0(0); | |
1087 | } | ||
1088 | |||
1089 | 7710 | break; | |
1090 | } | ||
1091 | ✗ | default: av_assert0(0); | |
1092 | } | ||
1093 | |||
1094 | 29653 | return 0; | |
1095 | } | ||
1096 | |||
1097 | 7925 | static int mux_task_start(SchMux *mux) | |
1098 | { | ||
1099 | 7925 | int ret = 0; | |
1100 | |||
1101 | 7925 | ret = task_start(&mux->task); | |
1102 | 1/2 ✓ Branch 0 taken 7925 times. ✗ Branch 1 not taken. | 7925 | if (ret < 0) |
1103 | ✗ | return ret; | |
1104 | |||
1105 | /* flush the pre-muxing queues */ | ||
1106 | 3421 | while (1) { | |
1107 | 11346 | int min_stream = -1; | |
1108 | 11346 | Timestamp min_ts = { .ts = AV_NOPTS_VALUE }; | |
1109 | |||
1110 | AVPacket *pkt; | ||
1111 | |||
1112 | // find the stream with the earliest dts or EOF in pre-muxing queue | ||
1113 | 2/2 ✓ Branch 0 taken 22042 times. ✓ Branch 1 taken 11236 times. | 33278 | for (unsigned i = 0; i < mux->nb_streams; i++) { |
1114 | 22042 | SchMuxStream *ms = &mux->streams[i]; | |
1115 | |||
1116 | 2/2 ✓ Branch 1 taken 16307 times. ✓ Branch 2 taken 5735 times. | 22042 | if (av_fifo_peek(ms->pre_mux_queue.fifo, &pkt, 1, 0) < 0) |
1117 | 16307 | continue; | |
1118 | |||
1119 | 4/4 ✓ Branch 0 taken 5638 times. ✓ Branch 1 taken 97 times. ✓ Branch 2 taken 13 times. ✓ Branch 3 taken 5625 times. | 5735 | if (!pkt || pkt->dts == AV_NOPTS_VALUE) { |
1120 | 110 | min_stream = i; | |
1121 | 110 | break; | |
1122 | } | ||
1123 | |||
1124 | 4/4 ✓ Branch 0 taken 2287 times. ✓ Branch 1 taken 3338 times. ✓ Branch 2 taken 48 times. ✓ Branch 3 taken 2239 times. | 7912 | if (min_ts.ts == AV_NOPTS_VALUE || |
1125 | 2287 | av_compare_ts(min_ts.ts, min_ts.tb, pkt->dts, pkt->time_base) > 0) { | |
1126 | 3386 | min_stream = i; | |
1127 | 3386 | min_ts = (Timestamp){ .ts = pkt->dts, .tb = pkt->time_base }; | |
1128 | } | ||
1129 | } | ||
1130 | |||
1131 | 2/2 ✓ Branch 0 taken 3421 times. ✓ Branch 1 taken 7925 times. | 11346 | if (min_stream >= 0) { |
1132 | 3421 | SchMuxStream *ms = &mux->streams[min_stream]; | |
1133 | |||
1134 | 3421 | ret = av_fifo_read(ms->pre_mux_queue.fifo, &pkt, 1); | |
1135 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 3421 times. | 3421 | av_assert0(ret >= 0); |
1136 | |||
1137 | 2/2 ✓ Branch 0 taken 3324 times. ✓ Branch 1 taken 97 times. | 3421 | if (pkt) { |
1138 | 2/2 ✓ Branch 0 taken 3307 times. ✓ Branch 1 taken 17 times. | 3324 | if (!ms->init_eof) |
1139 | 3307 | ret = tq_send(mux->queue, min_stream, pkt); | |
1140 | 3324 | av_packet_free(&pkt); | |
1141 | 2/2 ✓ Branch 0 taken 1 times. ✓ Branch 1 taken 3323 times. | 3324 | if (ret == AVERROR_EOF) |
1142 | 1 | ms->init_eof = 1; | |
1143 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 3323 times. | 3323 | else if (ret < 0) |
1144 | ✗ | return ret; | |
1145 | } else | ||
1146 | 97 | tq_send_finish(mux->queue, min_stream); | |
1147 | |||
1148 | 3421 | continue; | |
1149 | } | ||
1150 | |||
1151 | 7925 | break; | |
1152 | } | ||
1153 | |||
1154 | 7925 | atomic_store(&mux->mux_started, 1); | |
1155 | |||
1156 | 7925 | return 0; | |
1157 | } | ||
1158 | |||
1159 | int print_sdp(const char *filename); | ||
1160 | |||
1161 | 7925 | static int mux_init(Scheduler *sch, SchMux *mux) | |
1162 | { | ||
1163 | int ret; | ||
1164 | |||
1165 | 7925 | ret = mux->init(mux->task.func_arg); | |
1166 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 7925 times. | 7925 | if (ret < 0) |
1167 | ✗ | return ret; | |
1168 | |||
1169 | 7925 | sch->nb_mux_ready++; | |
1170 | |||
1171 | 2/4 ✓ Branch 0 taken 7925 times. ✗ Branch 1 not taken. ✗ Branch 2 not taken. ✓ Branch 3 taken 7925 times. | 7925 | if (sch->sdp_filename || sch->sdp_auto) { |
1172 | ✗ | if (sch->nb_mux_ready < sch->nb_mux) | |
1173 | ✗ | return 0; | |
1174 | |||
1175 | ✗ | ret = print_sdp(sch->sdp_filename); | |
1176 | ✗ | if (ret < 0) { | |
1177 | ✗ | av_log(sch, AV_LOG_ERROR, "Error writing the SDP.\n"); | |
1178 | ✗ | return ret; | |
1179 | } | ||
1180 | |||
1181 | /* SDP is written only after all the muxers are ready, so now we | ||
1182 | * start ALL the threads */ | ||
1183 | ✗ | for (unsigned i = 0; i < sch->nb_mux; i++) { | |
1184 | ✗ | ret = mux_task_start(&sch->mux[i]); | |
1185 | ✗ | if (ret < 0) | |
1186 | ✗ | return ret; | |
1187 | } | ||
1188 | } else { | ||
1189 | 7925 | ret = mux_task_start(mux); | |
1190 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 7925 times. | 7925 | if (ret < 0) |
1191 | ✗ | return ret; | |
1192 | } | ||
1193 | |||
1194 | 7925 | return 0; | |
1195 | } | ||
1196 | |||
1197 | 8401 | void sch_mux_stream_buffering(Scheduler *sch, unsigned mux_idx, unsigned stream_idx, | |
1198 | size_t data_threshold, int max_packets) | ||
1199 | { | ||
1200 | SchMux *mux; | ||
1201 | SchMuxStream *ms; | ||
1202 | |||
1203 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 8401 times. | 8401 | av_assert0(mux_idx < sch->nb_mux); |
1204 | 8401 | mux = &sch->mux[mux_idx]; | |
1205 | |||
1206 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 8401 times. | 8401 | av_assert0(stream_idx < mux->nb_streams); |
1207 | 8401 | ms = &mux->streams[stream_idx]; | |
1208 | |||
1209 | 8401 | ms->pre_mux_queue.max_packets = max_packets; | |
1210 | 8401 | ms->pre_mux_queue.data_threshold = data_threshold; | |
1211 | 8401 | } | |
1212 | |||
1213 | 8401 | int sch_mux_stream_ready(Scheduler *sch, unsigned mux_idx, unsigned stream_idx) | |
1214 | { | ||
1215 | SchMux *mux; | ||
1216 | 8401 | int ret = 0; | |
1217 | |||
1218 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 8401 times. | 8401 | av_assert0(mux_idx < sch->nb_mux); |
1219 | 8401 | mux = &sch->mux[mux_idx]; | |
1220 | |||
1221 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 8401 times. | 8401 | av_assert0(stream_idx < mux->nb_streams); |
1222 | |||
1223 | 8401 | pthread_mutex_lock(&sch->mux_ready_lock); | |
1224 | |||
1225 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 8401 times. | 8401 | av_assert0(mux->nb_streams_ready < mux->nb_streams); |
1226 | |||
1227 | // this may be called during initialization - do not start | ||
1228 | // threads before sch_start() is called | ||
1229 | 2/2 ✓ Branch 0 taken 7925 times. ✓ Branch 1 taken 476 times. | 8401 | if (++mux->nb_streams_ready == mux->nb_streams && |
1230 | 2/2 ✓ Branch 0 taken 7466 times. ✓ Branch 1 taken 459 times. | 7925 | sch->state >= SCH_STATE_STARTED) |
1231 | 7466 | ret = mux_init(sch, mux); | |
1232 | |||
1233 | 8401 | pthread_mutex_unlock(&sch->mux_ready_lock); | |
1234 | |||
1235 | 8401 | return ret; | |
1236 | } | ||
1237 | |||
1238 | 1 | int sch_mux_sub_heartbeat_add(Scheduler *sch, unsigned mux_idx, unsigned stream_idx, | |
1239 | unsigned dec_idx) | ||
1240 | { | ||
1241 | SchMux *mux; | ||
1242 | SchMuxStream *ms; | ||
1243 | 1 | int ret = 0; | |
1244 | |||
1245 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 1 times. | 1 | av_assert0(mux_idx < sch->nb_mux); |
1246 | 1 | mux = &sch->mux[mux_idx]; | |
1247 | |||
1248 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 1 times. | 1 | av_assert0(stream_idx < mux->nb_streams); |
1249 | 1 | ms = &mux->streams[stream_idx]; | |
1250 | |||
1251 | 1 | ret = GROW_ARRAY(ms->sub_heartbeat_dst, ms->nb_sub_heartbeat_dst); | |
1252 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 1 times. | 1 | if (ret < 0) |
1253 | ✗ | return ret; | |
1254 | |||
1255 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 1 times. | 1 | av_assert0(dec_idx < sch->nb_dec); |
1256 | 1 | ms->sub_heartbeat_dst[ms->nb_sub_heartbeat_dst - 1] = dec_idx; | |
1257 | |||
1258 | 1/2 ✓ Branch 0 taken 1 times. ✗ Branch 1 not taken. | 1 | if (!mux->sub_heartbeat_pkt) { |
1259 | 1 | mux->sub_heartbeat_pkt = av_packet_alloc(); | |
1260 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 1 times. | 1 | if (!mux->sub_heartbeat_pkt) |
1261 | ✗ | return AVERROR(ENOMEM); | |
1262 | } | ||
1263 | |||
1264 | 1 | return 0; | |
1265 | } | ||
1266 | |||
1267 | 530588 | static void unchoke_for_stream(Scheduler *sch, SchedulerNode src) | |
1268 | { | ||
1269 | 428821 | while (1) { | |
1270 | SchFilterGraph *fg; | ||
1271 | |||
1272 | // fed directly by a demuxer (i.e. not through a filtergraph) | ||
1273 | 2/2 ✓ Branch 0 taken 506026 times. ✓ Branch 1 taken 453383 times. | 959409 | if (src.type == SCH_NODE_TYPE_DEMUX) { |
1274 | 506026 | sch->demux[src.idx].waiter.choked_next = 0; | |
1275 | 506026 | return; | |
1276 | } | ||
1277 | |||
1278 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 453383 times. | 453383 | av_assert0(src.type == SCH_NODE_TYPE_FILTER_OUT); |
1279 | 453383 | fg = &sch->filters[src.idx]; | |
1280 | |||
1281 | // the filtergraph contains internal sources and | ||
1282 | // requested to be scheduled directly | ||
1283 | 2/2 ✓ Branch 0 taken 24562 times. ✓ Branch 1 taken 428821 times. | 453383 | if (fg->best_input == fg->nb_inputs) { |
1284 | 24562 | fg->waiter.choked_next = 0; | |
1285 | 24562 | return; | |
1286 | } | ||
1287 | |||
1288 | 428821 | src = fg->inputs[fg->best_input].src_sched; | |
1289 | } | ||
1290 | } | ||
1291 | |||
1292 | 534119 | static void schedule_update_locked(Scheduler *sch) | |
1293 | { | ||
1294 | int64_t dts; | ||
1295 | 534119 | int have_unchoked = 0; | |
1296 | |||
1297 | // on termination request all waiters are choked, | ||
1298 | // we are not to unchoke them | ||
1299 | 2/2 ✓ Branch 0 taken 4104 times. ✓ Branch 1 taken 530015 times. | 534119 | if (atomic_load(&sch->terminate)) |
1300 | 4104 | return; | |
1301 | |||
1302 | 530015 | dts = trailing_dts(sch, 0); | |
1303 | |||
1304 | 530015 | atomic_store(&sch->last_dts, dts); | |
1305 | |||
1306 | // initialize our internal state | ||
1307 | 2/2 ✓ Branch 0 taken 1060030 times. ✓ Branch 1 taken 530015 times. | 1590045 | for (unsigned type = 0; type < 2; type++) |
1308 | 4/4 ✓ Branch 0 taken 1007695 times. ✓ Branch 1 taken 1042104 times. ✓ Branch 2 taken 989769 times. ✓ Branch 3 taken 1060030 times. | 2049799 | for (unsigned i = 0; i < (type ? sch->nb_filters : sch->nb_demux); i++) { |
1309 | 2/2 ✓ Branch 0 taken 477680 times. ✓ Branch 1 taken 512089 times. | 989769 | SchWaiter *w = type ? &sch->filters[i].waiter : &sch->demux[i].waiter; |
1310 | 989769 | w->choked_prev = atomic_load(&w->choked); | |
1311 | 989769 | w->choked_next = 1; | |
1312 | } | ||
1313 | |||
1314 | // figure out the sources that are allowed to proceed | ||
1315 | 2/2 ✓ Branch 0 taken 530283 times. ✓ Branch 1 taken 530015 times. | 1060298 | for (unsigned i = 0; i < sch->nb_mux; i++) { |
1316 | 530283 | SchMux *mux = &sch->mux[i]; | |
1317 | |||
1318 | 2/2 ✓ Branch 0 taken 596333 times. ✓ Branch 1 taken 530283 times. | 1126616 | for (unsigned j = 0; j < mux->nb_streams; j++) { |
1319 | 596333 | SchMuxStream *ms = &mux->streams[j]; | |
1320 | |||
1321 | // unblock sources for output streams that are not finished | ||
1322 | // and not too far ahead of the trailing stream | ||
1323 | 2/2 ✓ Branch 0 taken 37306 times. ✓ Branch 1 taken 559027 times. | 596333 | if (ms->source_finished) |
1324 | 37306 | continue; | |
1325 | 4/4 ✓ Branch 0 taken 30578 times. ✓ Branch 1 taken 528449 times. ✓ Branch 2 taken 10190 times. ✓ Branch 3 taken 20388 times. | 559027 | if (dts == AV_NOPTS_VALUE && ms->last_dts != AV_NOPTS_VALUE) |
1326 | 10190 | continue; | |
1327 | 4/4 ✓ Branch 0 taken 528449 times. ✓ Branch 1 taken 20388 times. ✓ Branch 2 taken 18249 times. ✓ Branch 3 taken 510200 times. | 548837 | if (dts != AV_NOPTS_VALUE && ms->last_dts - dts >= SCHEDULE_TOLERANCE) |
1328 | 18249 | continue; | |
1329 | |||
1330 | // resolve the source to unchoke | ||
1331 | 530588 | unchoke_for_stream(sch, ms->src_sched); | |
1332 | 530588 | have_unchoked = 1; | |
1333 | } | ||
1334 | } | ||
1335 | |||
1336 | // make sure to unchoke at least one source, if still available | ||
1337 | 4/4 ✓ Branch 0 taken 61538 times. ✓ Branch 1 taken 516273 times. ✓ Branch 2 taken 47796 times. ✓ Branch 3 taken 13742 times. | 577811 | for (unsigned type = 0; !have_unchoked && type < 2; type++) |
1338 | 4/4 ✓ Branch 0 taken 32776 times. ✓ Branch 1 taken 46589 times. ✓ Branch 2 taken 45963 times. ✓ Branch 3 taken 33402 times. | 79365 | for (unsigned i = 0; i < (type ? sch->nb_filters : sch->nb_demux); i++) { |
1339 | 2/2 ✓ Branch 0 taken 19034 times. ✓ Branch 1 taken 26929 times. | 45963 | int exited = type ? sch->filters[i].task_exited : sch->demux[i].task_exited; |
1340 | 2/2 ✓ Branch 0 taken 19034 times. ✓ Branch 1 taken 26929 times. | 45963 | SchWaiter *w = type ? &sch->filters[i].waiter : &sch->demux[i].waiter; |
1341 | 2/2 ✓ Branch 0 taken 14394 times. ✓ Branch 1 taken 31569 times. | 45963 | if (!exited) { |
1342 | 14394 | w->choked_next = 0; | |
1343 | 14394 | have_unchoked = 1; | |
1344 | 14394 | break; | |
1345 | } | ||
1346 | } | ||
1347 | |||
1348 | |||
1349 | 2/2 ✓ Branch 0 taken 1060030 times. ✓ Branch 1 taken 530015 times. | 1590045 | for (unsigned type = 0; type < 2; type++) |
1350 | 4/4 ✓ Branch 0 taken 1007695 times. ✓ Branch 1 taken 1042104 times. ✓ Branch 2 taken 989769 times. ✓ Branch 3 taken 1060030 times. | 2049799 | for (unsigned i = 0; i < (type ? sch->nb_filters : sch->nb_demux); i++) { |
1351 | 2/2 ✓ Branch 0 taken 477680 times. ✓ Branch 1 taken 512089 times. | 989769 | SchWaiter *w = type ? &sch->filters[i].waiter : &sch->demux[i].waiter; |
1352 | 2/2 ✓ Branch 0 taken 18186 times. ✓ Branch 1 taken 971583 times. | 989769 | if (w->choked_prev != w->choked_next) |
1353 | 18186 | waiter_set(w, w->choked_next); | |
1354 | } | ||
1355 | |||
1356 | } | ||
1357 | |||
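
Editor's note: the loop in schedule_update_locked() above admits more input for an output stream only while that stream is not too far ahead of the slowest ("trailing") stream, with SCHEDULE_TOLERANCE (100 ms in AV_TIME_BASE units) as the allowed lead. Below is a hedged restatement of that rule, relying only on the fact that AV_NOPTS_VALUE equals INT64_MIN; the helper and macro names are made up. With a trailing dts of 1.00 s, a stream whose last dts is 1.05 s stays unchoked, while one at 1.20 s is choked until the others catch up.

    #include <stdint.h>

    #define TOY_NOPTS     INT64_MIN        /* same value as AV_NOPTS_VALUE */
    #define TOY_TOLERANCE (100 * 1000)     /* 100 ms in microseconds */

    /* May this stream's source be unchoked, given the trailing (minimum) dts? */
    static int toy_stream_may_proceed(int64_t last_dts, int64_t trailing_dts)
    {
        if (trailing_dts == TOY_NOPTS)             /* nothing muxed anywhere yet */
            return last_dts == TOY_NOPTS;          /* only streams with no output yet go ahead */
        return last_dts == TOY_NOPTS ||
               last_dts - trailing_dts < TOY_TOLERANCE;
    }
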
1358 | enum { | ||
1359 | CYCLE_NODE_NEW = 0, | ||
1360 | CYCLE_NODE_STARTED, | ||
1361 | CYCLE_NODE_DONE, | ||
1362 | }; | ||
1363 | |||
1364 | static int | ||
1365 | 7541 | check_acyclic_for_output(const Scheduler *sch, SchedulerNode src, | |
1366 | uint8_t *filters_visited, SchedulerNode *filters_stack) | ||
1367 | { | ||
1368 | 7541 | unsigned nb_filters_stack = 0; | |
1369 | |||
1370 | 7541 | memset(filters_visited, 0, sch->nb_filters * sizeof(*filters_visited)); | |
1371 | |||
1372 | 6788 | while (1) { | |
1373 | 14329 | const SchFilterGraph *fg = &sch->filters[src.idx]; | |
1374 | |||
1375 | 14329 | filters_visited[src.idx] = CYCLE_NODE_STARTED; | |
1376 | |||
1377 | // descend into every input, depth first | ||
1378 | 2/2 ✓ Branch 0 taken 6787 times. ✓ Branch 1 taken 7542 times. | 14329 | if (src.idx_stream < fg->nb_inputs) { |
1379 | 6787 | const SchFilterIn *fi = &fg->inputs[src.idx_stream++]; | |
1380 | |||
1381 | // connected to demuxer, no cycles possible | ||
1382 | 2/2 ✓ Branch 0 taken 6786 times. ✓ Branch 1 taken 1 times. | 6787 | if (fi->src_sched.type == SCH_NODE_TYPE_DEMUX) |
1383 | 6787 | continue; | |
1384 | |||
1385 | // otherwise connected to another filtergraph | ||
1386 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 1 times. | 1 | av_assert0(fi->src_sched.type == SCH_NODE_TYPE_FILTER_OUT); |
1387 | |||
1388 | // found a cycle | ||
1389 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 1 times. | 1 | if (filters_visited[fi->src_sched.idx] == CYCLE_NODE_STARTED) |
1390 | ✗ | return AVERROR(EINVAL); | |
1391 | |||
1392 | // place current position on stack and descend | ||
1393 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 1 times. | 1 | av_assert0(nb_filters_stack < sch->nb_filters); |
1394 | 1 | filters_stack[nb_filters_stack++] = src; | |
1395 | 1 | src = (SchedulerNode){ .idx = fi->src_sched.idx, .idx_stream = 0 }; | |
1396 | 1 | continue; | |
1397 | } | ||
1398 | |||
1399 | 7542 | filters_visited[src.idx] = CYCLE_NODE_DONE; | |
1400 | |||
1401 | // previous search finished, | ||
1402 | 2/2 ✓ Branch 0 taken 1 times. ✓ Branch 1 taken 7541 times. | 7542 | if (nb_filters_stack) { |
1403 | 1 | src = filters_stack[--nb_filters_stack]; | |
1404 | 1 | continue; | |
1405 | } | ||
1406 | 7541 | return 0; | |
1407 | } | ||
1408 | } | ||
1409 | |||
1410 | 7923 | static int check_acyclic(Scheduler *sch) | |
1411 | { | ||
1412 | 7923 | uint8_t *filters_visited = NULL; | |
1413 | 7923 | SchedulerNode *filters_stack = NULL; | |
1414 | |||
1415 | 7923 | int ret = 0; | |
1416 | |||
1417 |
2/2✓ Branch 0 taken 494 times.
✓ Branch 1 taken 7429 times.
|
7923 | if (!sch->nb_filters) |
1418 | 494 | return 0; | |
1419 | |||
1420 | 7429 | filters_visited = av_malloc_array(sch->nb_filters, sizeof(*filters_visited)); | |
1421 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 7429 times.
|
7429 | if (!filters_visited) |
1422 | ✗ | return AVERROR(ENOMEM); | |
1423 | |||
1424 | 7429 | filters_stack = av_malloc_array(sch->nb_filters, sizeof(*filters_stack)); | |
1425 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 7429 times.
|
7429 | if (!filters_stack) { |
1426 | ✗ | ret = AVERROR(ENOMEM); | |
1427 | ✗ | goto fail; | |
1428 | } | ||
1429 | |||
1430 | // trace the transcoding graph upstream from every filtergraph | ||
1431 |
2/2✓ Branch 0 taken 7541 times.
✓ Branch 1 taken 7429 times.
|
14970 | for (unsigned i = 0; i < sch->nb_filters; i++) { |
1432 | 7541 | ret = check_acyclic_for_output(sch, (SchedulerNode){ .idx = i }, | |
1433 | filters_visited, filters_stack); | ||
1434 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 7541 times.
|
7541 | if (ret < 0) { |
1435 | ✗ | av_log(&sch->filters[i], AV_LOG_ERROR, "Transcoding graph has a cycle\n"); | |
1436 | ✗ | goto fail; | |
1437 | } | ||
1438 | } | ||
1439 | |||
1440 | 7429 | fail: | |
1441 | 7429 | av_freep(&filters_visited); | |
1442 | 7429 | av_freep(&filters_stack); | |
1443 | 7429 | return ret; | |
1444 | } | ||
1445 | |||
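check_acyclic_for_output() below is an iterative depth-first search over the filtergraph connections: every node is marked NEW, STARTED or DONE, the current path is kept on an explicit stack (with idx_stream doubling as the resume position), and revisiting a STARTED node means a cycle. The following is a minimal, self-contained sketch of the same technique over a generic adjacency list; the Graph type, field names and has_cycle_from() are illustrative, not the scheduler's data model.

    #include <stdint.h>
    #include <string.h>

    enum { NODE_NEW = 0, NODE_STARTED, NODE_DONE };

    // Hypothetical upstream-link graph, for illustration only.
    typedef struct Graph {
        unsigned         nb_nodes;
        const unsigned  *nb_edges;  // nb_edges[i]: number of upstream links of node i
        const unsigned **edges;     // edges[i][j]: j-th upstream node of node i
    } Graph;

    // Returns 1 if a cycle is reachable from 'start', 0 otherwise.
    // 'visited' has nb_nodes entries; 'stack_node'/'stack_edge' hold suspended positions.
    static int has_cycle_from(const Graph *g, unsigned start, uint8_t *visited,
                              unsigned *stack_node, unsigned *stack_edge)
    {
        unsigned sp = 0, node = start, edge = 0;

        memset(visited, NODE_NEW, g->nb_nodes);

        while (1) {
            visited[node] = NODE_STARTED;

            // descend into every upstream link, depth first
            if (edge < g->nb_edges[node]) {
                unsigned next = g->edges[node][edge++];

                if (visited[next] == NODE_STARTED)
                    return 1;               // back-edge: found a cycle
                if (visited[next] == NODE_DONE)
                    continue;               // already fully explored

                // suspend the current position on the stack and descend
                stack_node[sp] = node;
                stack_edge[sp] = edge;
                sp++;
                node = next;
                edge = 0;
                continue;
            }

            visited[node] = NODE_DONE;

            // this node is fully explored; resume a suspended search, if any
            if (sp) {
                --sp;
                node = stack_node[sp];
                edge = stack_edge[sp];
                continue;
            }
            return 0;
        }
    }
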
1446 | 7923 | static int start_prepare(Scheduler *sch) | |
1447 | { | ||
1448 | int ret; | ||
1449 | |||
1450 |
2/2✓ Branch 0 taken 7172 times.
✓ Branch 1 taken 7923 times.
|
15095 | for (unsigned i = 0; i < sch->nb_demux; i++) { |
1451 | 7172 | SchDemux *d = &sch->demux[i]; | |
1452 | |||
1453 |
2/2✓ Branch 0 taken 7425 times.
✓ Branch 1 taken 7172 times.
|
14597 | for (unsigned j = 0; j < d->nb_streams; j++) { |
1454 | 7425 | SchDemuxStream *ds = &d->streams[j]; | |
1455 | |||
1456 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 7425 times.
|
7425 | if (!ds->nb_dst) { |
1457 | ✗ | av_log(d, AV_LOG_ERROR, | |
1458 | "Demuxer stream %u not connected to any sink\n", j); | ||
1459 | ✗ | return AVERROR(EINVAL); | |
1460 | } | ||
1461 | |||
1462 | 7425 | ds->dst_finished = av_calloc(ds->nb_dst, sizeof(*ds->dst_finished)); | |
1463 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 7425 times.
|
7425 | if (!ds->dst_finished) |
1464 | ✗ | return AVERROR(ENOMEM); | |
1465 | } | ||
1466 | } | ||
1467 | |||
1468 |
2/2✓ Branch 0 taken 6757 times.
✓ Branch 1 taken 7923 times.
|
14680 | for (unsigned i = 0; i < sch->nb_dec; i++) { |
1469 | 6757 | SchDec *dec = &sch->dec[i]; | |
1470 | |||
1471 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 6757 times.
|
6757 | if (!dec->src.type) { |
1472 | ✗ | av_log(dec, AV_LOG_ERROR, | |
1473 | "Decoder not connected to a source\n"); | ||
1474 | ✗ | return AVERROR(EINVAL); | |
1475 | } | ||
1476 | |||
1477 |
2/2✓ Branch 0 taken 6763 times.
✓ Branch 1 taken 6757 times.
|
13520 | for (unsigned j = 0; j < dec->nb_outputs; j++) { |
1478 | 6763 | SchDecOutput *o = &dec->outputs[j]; | |
1479 | |||
1480 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 6763 times.
|
6763 | if (!o->nb_dst) { |
1481 | ✗ | av_log(dec, AV_LOG_ERROR, | |
1482 | "Decoder output %u not connected to any sink\n", j); | ||
1483 | ✗ | return AVERROR(EINVAL); | |
1484 | } | ||
1485 | |||
1486 | 6763 | o->dst_finished = av_calloc(o->nb_dst, sizeof(*o->dst_finished)); | |
1487 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 6763 times.
|
6763 | if (!o->dst_finished) |
1488 | ✗ | return AVERROR(ENOMEM); | |
1489 | } | ||
1490 | } | ||
1491 | |||
1492 |
2/2✓ Branch 0 taken 7709 times.
✓ Branch 1 taken 7923 times.
|
15632 | for (unsigned i = 0; i < sch->nb_enc; i++) { |
1493 | 7709 | SchEnc *enc = &sch->enc[i]; | |
1494 | |||
1495 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 7709 times.
|
7709 | if (!enc->src.type) { |
1496 | ✗ | av_log(enc, AV_LOG_ERROR, | |
1497 | "Encoder not connected to a source\n"); | ||
1498 | ✗ | return AVERROR(EINVAL); | |
1499 | } | ||
1500 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 7709 times.
|
7709 | if (!enc->nb_dst) { |
1501 | ✗ | av_log(enc, AV_LOG_ERROR, | |
1502 | "Encoder not connected to any sink\n"); | ||
1503 | ✗ | return AVERROR(EINVAL); | |
1504 | } | ||
1505 | |||
1506 | 7709 | enc->dst_finished = av_calloc(enc->nb_dst, sizeof(*enc->dst_finished)); | |
1507 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 7709 times.
|
7709 | if (!enc->dst_finished) |
1508 | ✗ | return AVERROR(ENOMEM); | |
1509 | } | ||
1510 | |||
1511 |
2/2✓ Branch 0 taken 7925 times.
✓ Branch 1 taken 7923 times.
|
15848 | for (unsigned i = 0; i < sch->nb_mux; i++) { |
1512 | 7925 | SchMux *mux = &sch->mux[i]; | |
1513 | |||
1514 |
2/2✓ Branch 0 taken 8401 times.
✓ Branch 1 taken 7925 times.
|
16326 | for (unsigned j = 0; j < mux->nb_streams; j++) { |
1515 | 8401 | SchMuxStream *ms = &mux->streams[j]; | |
1516 | |||
1517 |
2/3✓ Branch 0 taken 7709 times.
✓ Branch 1 taken 692 times.
✗ Branch 2 not taken.
|
8401 | switch (ms->src.type) { |
1518 | 7709 | case SCH_NODE_TYPE_ENC: { | |
1519 | 7709 | SchEnc *enc = &sch->enc[ms->src.idx]; | |
1520 |
2/2✓ Branch 0 taken 38 times.
✓ Branch 1 taken 7671 times.
|
7709 | if (enc->src.type == SCH_NODE_TYPE_DEC) { |
1521 | 38 | ms->src_sched = sch->dec[enc->src.idx].src; | |
1522 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 38 times.
|
38 | av_assert0(ms->src_sched.type == SCH_NODE_TYPE_DEMUX); |
1523 | } else { | ||
1524 | 7671 | ms->src_sched = enc->src; | |
1525 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 7671 times.
|
7671 | av_assert0(ms->src_sched.type == SCH_NODE_TYPE_FILTER_OUT); |
1526 | } | ||
1527 | 7709 | break; | |
1528 | } | ||
1529 | 692 | case SCH_NODE_TYPE_DEMUX: | |
1530 | 692 | ms->src_sched = ms->src; | |
1531 | 692 | break; | |
1532 | ✗ | default: | |
1533 | ✗ | av_log(mux, AV_LOG_ERROR, | |
1534 | "Muxer stream #%u not connected to a source\n", j); | ||
1535 | ✗ | return AVERROR(EINVAL); | |
1536 | } | ||
1537 | } | ||
1538 | |||
1539 | 7925 | ret = queue_alloc(&mux->queue, mux->nb_streams, mux->queue_size, | |
1540 | QUEUE_PACKETS); | ||
1541 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 7925 times.
|
7925 | if (ret < 0) |
1542 | ✗ | return ret; | |
1543 | } | ||
1544 | |||
1545 |
2/2✓ Branch 0 taken 7541 times.
✓ Branch 1 taken 7923 times.
|
15464 | for (unsigned i = 0; i < sch->nb_filters; i++) { |
1546 | 7541 | SchFilterGraph *fg = &sch->filters[i]; | |
1547 | |||
1548 |
2/2✓ Branch 0 taken 6786 times.
✓ Branch 1 taken 7541 times.
|
14327 | for (unsigned j = 0; j < fg->nb_inputs; j++) { |
1549 | 6786 | SchFilterIn *fi = &fg->inputs[j]; | |
1550 | SchDec *dec; | ||
1551 | |||
1552 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 6786 times.
|
6786 | if (!fi->src.type) { |
1553 | ✗ | av_log(fg, AV_LOG_ERROR, | |
1554 | "Filtergraph input %u not connected to a source\n", j); | ||
1555 | ✗ | return AVERROR(EINVAL); | |
1556 | } | ||
1557 | |||
1558 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 6786 times.
|
6786 | if (fi->src.type == SCH_NODE_TYPE_FILTER_OUT) |
1559 | ✗ | fi->src_sched = fi->src; | |
1560 | else { | ||
1561 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 6786 times.
|
6786 | av_assert0(fi->src.type == SCH_NODE_TYPE_DEC); |
1562 | 6786 | dec = &sch->dec[fi->src.idx]; | |
1563 | |||
1564 |
2/3✓ Branch 0 taken 6785 times.
✓ Branch 1 taken 1 times.
✗ Branch 2 not taken.
|
6786 | switch (dec->src.type) { |
1565 | 6785 | case SCH_NODE_TYPE_DEMUX: fi->src_sched = dec->src; break; | |
1566 | 1 | case SCH_NODE_TYPE_ENC: fi->src_sched = sch->enc[dec->src.idx].src; break; | |
1567 | ✗ | default: av_assert0(0); | |
1568 | } | ||
1569 | } | ||
1570 | } | ||
1571 | |||
1572 |
2/2✓ Branch 0 taken 7671 times.
✓ Branch 1 taken 7541 times.
|
15212 | for (unsigned j = 0; j < fg->nb_outputs; j++) { |
1573 | 7671 | SchFilterOut *fo = &fg->outputs[j]; | |
1574 | |||
1575 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 7671 times.
|
7671 | if (!fo->dst.type) { |
1576 | ✗ | av_log(fg, AV_LOG_ERROR, | |
1577 | "Filtergraph %u output %u not connected to a sink\n", i, j); | ||
1578 | ✗ | return AVERROR(EINVAL); | |
1579 | } | ||
1580 | } | ||
1581 | } | ||
1582 | |||
1583 | // Check that the transcoding graph has no cycles. | ||
1584 | 7923 | ret = check_acyclic(sch); | |
1585 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 7923 times.
|
7923 | if (ret < 0) |
1586 | ✗ | return ret; | |
1587 | |||
1588 | 7923 | return 0; | |
1589 | } | ||
1590 | |||
1591 | 7923 | int sch_start(Scheduler *sch) | |
1592 | { | ||
1593 | int ret; | ||
1594 | |||
1595 | 7923 | ret = start_prepare(sch); | |
1596 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 7923 times.
|
7923 | if (ret < 0) |
1597 | ✗ | return ret; | |
1598 | |||
1599 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 7923 times.
|
7923 | av_assert0(sch->state == SCH_STATE_UNINIT); |
1600 | 7923 | sch->state = SCH_STATE_STARTED; | |
1601 | |||
1602 |
2/2✓ Branch 0 taken 7925 times.
✓ Branch 1 taken 7923 times.
|
15848 | for (unsigned i = 0; i < sch->nb_mux; i++) { |
1603 | 7925 | SchMux *mux = &sch->mux[i]; | |
1604 | |||
1605 |
2/2✓ Branch 0 taken 459 times.
✓ Branch 1 taken 7466 times.
|
7925 | if (mux->nb_streams_ready == mux->nb_streams) { |
1606 | 459 | ret = mux_init(sch, mux); | |
1607 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 459 times.
|
459 | if (ret < 0) |
1608 | ✗ | goto fail; | |
1609 | } | ||
1610 | } | ||
1611 | |||
1612 |
2/2✓ Branch 0 taken 7709 times.
✓ Branch 1 taken 7923 times.
|
15632 | for (unsigned i = 0; i < sch->nb_enc; i++) { |
1613 | 7709 | SchEnc *enc = &sch->enc[i]; | |
1614 | |||
1615 | 7709 | ret = task_start(&enc->task); | |
1616 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 7709 times.
|
7709 | if (ret < 0) |
1617 | ✗ | goto fail; | |
1618 | } | ||
1619 | |||
1620 |
2/2✓ Branch 0 taken 7541 times.
✓ Branch 1 taken 7923 times.
|
15464 | for (unsigned i = 0; i < sch->nb_filters; i++) { |
1621 | 7541 | SchFilterGraph *fg = &sch->filters[i]; | |
1622 | |||
1623 | 7541 | ret = task_start(&fg->task); | |
1624 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 7541 times.
|
7541 | if (ret < 0) |
1625 | ✗ | goto fail; | |
1626 | } | ||
1627 | |||
1628 |
2/2✓ Branch 0 taken 6757 times.
✓ Branch 1 taken 7923 times.
|
14680 | for (unsigned i = 0; i < sch->nb_dec; i++) { |
1629 | 6757 | SchDec *dec = &sch->dec[i]; | |
1630 | |||
1631 | 6757 | ret = task_start(&dec->task); | |
1632 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 6757 times.
|
6757 | if (ret < 0) |
1633 | ✗ | goto fail; | |
1634 | } | ||
1635 | |||
1636 |
2/2✓ Branch 0 taken 7172 times.
✓ Branch 1 taken 7923 times.
|
15095 | for (unsigned i = 0; i < sch->nb_demux; i++) { |
1637 | 7172 | SchDemux *d = &sch->demux[i]; | |
1638 | |||
1639 |
2/2✓ Branch 0 taken 14 times.
✓ Branch 1 taken 7158 times.
|
7172 | if (!d->nb_streams) |
1640 | 14 | continue; | |
1641 | |||
1642 | 7158 | ret = task_start(&d->task); | |
1643 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 7158 times.
|
7158 | if (ret < 0) |
1644 | ✗ | goto fail; | |
1645 | } | ||
1646 | |||
1647 | 7923 | pthread_mutex_lock(&sch->schedule_lock); | |
1648 | 7923 | schedule_update_locked(sch); | |
1649 | 7923 | pthread_mutex_unlock(&sch->schedule_lock); | |
1650 | |||
1651 | 7923 | return 0; | |
1652 | ✗ | fail: | |
1653 | ✗ | sch_stop(sch, NULL); | |
1654 | ✗ | return ret; | |
1655 | } | ||
1656 | |||
1657 | 24572 | int sch_wait(Scheduler *sch, uint64_t timeout_us, int64_t *transcode_ts) | |
1658 | { | ||
1659 | int ret; | ||
1660 | |||
1661 | // convert delay to absolute timestamp | ||
1662 | 24572 | timeout_us += av_gettime(); | |
1663 | |||
1664 | 24572 | pthread_mutex_lock(&sch->finish_lock); | |
1665 | |||
1666 |
1/2✓ Branch 0 taken 24572 times.
✗ Branch 1 not taken.
|
24572 | if (sch->nb_mux_done < sch->nb_mux) { |
1667 | 24572 | struct timespec tv = { .tv_sec = timeout_us / 1000000, | |
1668 | 24572 | .tv_nsec = (timeout_us % 1000000) * 1000 }; | |
1669 | 24572 | pthread_cond_timedwait(&sch->finish_cond, &sch->finish_lock, &tv); | |
1670 | } | ||
1671 | |||
1672 | // abort transcoding if any task failed | ||
1673 |
4/4✓ Branch 0 taken 16650 times.
✓ Branch 1 taken 7922 times.
✓ Branch 2 taken 1 times.
✓ Branch 3 taken 16649 times.
|
24572 | ret = sch->nb_mux_done == sch->nb_mux || sch->task_failed; |
1674 | |||
1675 | 24572 | pthread_mutex_unlock(&sch->finish_lock); | |
1676 | |||
1677 | 24572 | *transcode_ts = atomic_load(&sch->last_dts); | |
1678 | |||
1679 | 24572 | return ret; | |
1680 | } | ||
1681 | |||
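sch_wait() above first turns the relative timeout into an absolute wall-clock deadline, because pthread_cond_timedwait() expects an absolute struct timespec on the clock the condition variable uses (CLOCK_REALTIME by default). Below is a standalone sketch of that conversion, using gettimeofday() where the code above uses av_gettime(); the helper name is made up for illustration, and the caller must hold 'lock' just as with a plain pthread_cond_wait().

    #include <pthread.h>
    #include <stdint.h>
    #include <sys/time.h>

    // Wait on 'cond' for at most 'timeout_us' microseconds.
    static int cond_wait_relative(pthread_cond_t *cond, pthread_mutex_t *lock,
                                  uint64_t timeout_us)
    {
        struct timeval now;
        gettimeofday(&now, NULL);

        // convert the delay to an absolute timestamp in microseconds...
        uint64_t deadline_us = (uint64_t)now.tv_sec * 1000000 + now.tv_usec + timeout_us;

        // ...then split it into the seconds/nanoseconds pair timedwait expects
        struct timespec deadline = { .tv_sec  = deadline_us / 1000000,
                                     .tv_nsec = (deadline_us % 1000000) * 1000 };

        return pthread_cond_timedwait(cond, lock, &deadline);
    }
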
1682 | 7671 | static int enc_open(Scheduler *sch, SchEnc *enc, const AVFrame *frame) | |
1683 | { | ||
1684 | int ret; | ||
1685 | |||
1686 | 7671 | ret = enc->open_cb(enc->task.func_arg, frame); | |
1687 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 7671 times.
|
7671 | if (ret < 0) |
1688 | ✗ | return ret; | |
1689 | |||
1690 | // ret>0 signals audio frame size, which means sync queue must | ||
1691 | // have been enabled during encoder creation | ||
1692 |
2/2✓ Branch 0 taken 173 times.
✓ Branch 1 taken 7498 times.
|
7671 | if (ret > 0) { |
1693 | SchSyncQueue *sq; | ||
1694 | |||
1695 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 173 times.
|
173 | av_assert0(enc->sq_idx[0] >= 0); |
1696 | 173 | sq = &sch->sq_enc[enc->sq_idx[0]]; | |
1697 | |||
1698 | 173 | pthread_mutex_lock(&sq->lock); | |
1699 | |||
1700 | 173 | sq_frame_samples(sq->sq, enc->sq_idx[1], ret); | |
1701 | |||
1702 | 173 | pthread_mutex_unlock(&sq->lock); | |
1703 | } | ||
1704 | |||
1705 | 7671 | return 0; | |
1706 | } | ||
1707 | |||
1708 | 451876 | static int send_to_enc_thread(Scheduler *sch, SchEnc *enc, AVFrame *frame) | |
1709 | { | ||
1710 | int ret; | ||
1711 | |||
1712 |
2/2✓ Branch 0 taken 15643 times.
✓ Branch 1 taken 436233 times.
|
451876 | if (!frame) { |
1713 | 15643 | tq_send_finish(enc->queue, 0); | |
1714 | 15643 | return 0; | |
1715 | } | ||
1716 | |||
1717 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 436233 times.
|
436233 | if (enc->in_finished) |
1718 | ✗ | return AVERROR_EOF; | |
1719 | |||
1720 | 436233 | ret = tq_send(enc->queue, 0, frame); | |
1721 |
2/2✓ Branch 0 taken 1 times.
✓ Branch 1 taken 436232 times.
|
436233 | if (ret < 0) |
1722 | 1 | enc->in_finished = 1; | |
1723 | |||
1724 | 436233 | return ret; | |
1725 | } | ||
1726 | |||
1727 | 35632 | static int send_to_enc_sq(Scheduler *sch, SchEnc *enc, AVFrame *frame) | |
1728 | { | ||
1729 | 35632 | SchSyncQueue *sq = &sch->sq_enc[enc->sq_idx[0]]; | |
1730 | 35632 | int ret = 0; | |
1731 | |||
1732 | // inform the scheduling code that no more input will arrive along this path; | ||
1733 | // this is necessary because the sync queue may not send an EOF downstream | ||
1734 | // until other streams finish | ||
1735 | // TODO: consider a cleaner way of passing this information through | ||
1736 | // the pipeline | ||
1737 |
2/2✓ Branch 0 taken 3319 times.
✓ Branch 1 taken 32313 times.
|
35632 | if (!frame) { |
1738 |
2/2✓ Branch 0 taken 3319 times.
✓ Branch 1 taken 3319 times.
|
6638 | for (unsigned i = 0; i < enc->nb_dst; i++) { |
1739 | SchMux *mux; | ||
1740 | SchMuxStream *ms; | ||
1741 | |||
1742 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 3319 times.
|
3319 | if (enc->dst[i].type != SCH_NODE_TYPE_MUX) |
1743 | ✗ | continue; | |
1744 | |||
1745 | 3319 | mux = &sch->mux[enc->dst[i].idx]; | |
1746 | 3319 | ms = &mux->streams[enc->dst[i].idx_stream]; | |
1747 | |||
1748 | 3319 | pthread_mutex_lock(&sch->schedule_lock); | |
1749 | |||
1750 | 3319 | ms->source_finished = 1; | |
1751 | 3319 | schedule_update_locked(sch); | |
1752 | |||
1753 | 3319 | pthread_mutex_unlock(&sch->schedule_lock); | |
1754 | } | ||
1755 | } | ||
1756 | |||
1757 | 35632 | pthread_mutex_lock(&sq->lock); | |
1758 | |||
1759 | 35632 | ret = sq_send(sq->sq, enc->sq_idx[1], SQFRAME(frame)); | |
1760 |
2/2✓ Branch 0 taken 35630 times.
✓ Branch 1 taken 2 times.
|
35632 | if (ret < 0) |
1761 | 2 | goto finish; | |
1762 | |||
1763 | 83152 | while (1) { | |
1764 | SchEnc *enc; | ||
1765 | |||
1766 | // TODO: the SQ API should be extended to allow returning EOF | ||
1767 | // for individual streams | ||
1768 | 118782 | ret = sq_receive(sq->sq, -1, SQFRAME(sq->frame)); | |
1769 |
2/2✓ Branch 0 taken 35630 times.
✓ Branch 1 taken 83152 times.
|
118782 | if (ret < 0) { |
1770 |
2/2✓ Branch 0 taken 6175 times.
✓ Branch 1 taken 29455 times.
|
35630 | ret = (ret == AVERROR(EAGAIN)) ? 0 : ret; |
1771 | 35630 | break; | |
1772 | } | ||
1773 | |||
1774 | 83152 | enc = &sch->enc[sq->enc_idx[ret]]; | |
1775 | 83152 | ret = send_to_enc_thread(sch, enc, sq->frame); | |
1776 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 83152 times.
|
83152 | if (ret < 0) { |
1777 | ✗ | av_frame_unref(sq->frame); | |
1778 | ✗ | if (ret != AVERROR_EOF) | |
1779 | ✗ | break; | |
1780 | |||
1781 | ✗ | sq_send(sq->sq, enc->sq_idx[1], SQFRAME(NULL)); | |
1782 | ✗ | continue; | |
1783 | } | ||
1784 | } | ||
1785 | |||
1786 |
2/2✓ Branch 0 taken 29455 times.
✓ Branch 1 taken 6175 times.
|
35630 | if (ret < 0) { |
1787 | // close all encoders fed from this sync queue | ||
1788 |
2/2✓ Branch 0 taken 6526 times.
✓ Branch 1 taken 6175 times.
|
12701 | for (unsigned i = 0; i < sq->nb_enc_idx; i++) { |
1789 | 6526 | int err = send_to_enc_thread(sch, &sch->enc[sq->enc_idx[i]], NULL); | |
1790 | |||
1791 | // if the sync queue error is EOF and closing the encoder | ||
1792 | // produces a more serious error, make sure to pick the latter | ||
1793 |
2/4✓ Branch 0 taken 6526 times.
✗ Branch 1 not taken.
✓ Branch 2 taken 6526 times.
✗ Branch 3 not taken.
|
6526 | ret = err_merge((ret == AVERROR_EOF && err < 0) ? 0 : ret, err); |
1794 | } | ||
1795 | } | ||
1796 | |||
1797 | 35630 | finish: | |
1798 | 35632 | pthread_mutex_unlock(&sq->lock); | |
1799 | |||
1800 | 35632 | return ret; | |
1801 | } | ||
1802 | |||
1803 | 397833 | static int send_to_enc(Scheduler *sch, SchEnc *enc, AVFrame *frame) | |
1804 | { | ||
1805 |
6/6✓ Branch 0 taken 396916 times.
✓ Branch 1 taken 917 times.
✓ Branch 2 taken 384518 times.
✓ Branch 3 taken 12398 times.
✓ Branch 4 taken 7671 times.
✓ Branch 5 taken 376847 times.
|
397833 | if (enc->open_cb && frame && !enc->opened) { |
1806 | 7671 | int ret = enc_open(sch, enc, frame); | |
1807 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 7671 times.
|
7671 | if (ret < 0) |
1808 | ✗ | return ret; | |
1809 | 7671 | enc->opened = 1; | |
1810 | |||
1811 | // discard empty frames that only carry encoder init parameters | ||
1812 |
2/2✓ Branch 0 taken 3 times.
✓ Branch 1 taken 7668 times.
|
7671 | if (!frame->buf[0]) { |
1813 | 3 | av_frame_unref(frame); | |
1814 | 3 | return 0; | |
1815 | } | ||
1816 | } | ||
1817 | |||
1818 | 397830 | return (enc->sq_idx[0] >= 0) ? | |
1819 |
2/2✓ Branch 0 taken 35632 times.
✓ Branch 1 taken 362198 times.
|
760028 | send_to_enc_sq (sch, enc, frame) : |
1820 | 362198 | send_to_enc_thread(sch, enc, frame); | |
1821 | } | ||
1822 | |||
1823 | 3421 | static int mux_queue_packet(SchMux *mux, SchMuxStream *ms, AVPacket *pkt) | |
1824 | { | ||
1825 | 3421 | PreMuxQueue *q = &ms->pre_mux_queue; | |
1826 | 3421 | AVPacket *tmp_pkt = NULL; | |
1827 | int ret; | ||
1828 | |||
1829 |
2/2✓ Branch 1 taken 154 times.
✓ Branch 2 taken 3267 times.
|
3421 | if (!av_fifo_can_write(q->fifo)) { |
1830 | 154 | size_t packets = av_fifo_can_read(q->fifo); | |
1831 |
1/2✓ Branch 0 taken 154 times.
✗ Branch 1 not taken.
|
154 | size_t pkt_size = pkt ? pkt->size : 0; |
1832 | 154 | int thresh_reached = (q->data_size + pkt_size) > q->data_threshold; | |
1833 |
2/2✓ Branch 0 taken 149 times.
✓ Branch 1 taken 5 times.
|
154 | size_t max_packets = thresh_reached ? q->max_packets : SIZE_MAX; |
1834 | 154 | size_t new_size = FFMIN(2 * packets, max_packets); | |
1835 | |||
1836 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 154 times.
|
154 | if (new_size <= packets) { |
1837 | ✗ | av_log(mux, AV_LOG_ERROR, | |
1838 | "Too many packets buffered for output stream.\n"); | ||
1839 | ✗ | return AVERROR_BUFFER_TOO_SMALL; | |
1840 | } | ||
1841 | 154 | ret = av_fifo_grow2(q->fifo, new_size - packets); | |
1842 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 154 times.
|
154 | if (ret < 0) |
1843 | ✗ | return ret; | |
1844 | } | ||
1845 | |||
1846 |
2/2✓ Branch 0 taken 3324 times.
✓ Branch 1 taken 97 times.
|
3421 | if (pkt) { |
1847 | 3324 | tmp_pkt = av_packet_alloc(); | |
1848 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 3324 times.
|
3324 | if (!tmp_pkt) |
1849 | ✗ | return AVERROR(ENOMEM); | |
1850 | |||
1851 | 3324 | av_packet_move_ref(tmp_pkt, pkt); | |
1852 | 3324 | q->data_size += tmp_pkt->size; | |
1853 | } | ||
1854 | 3421 | av_fifo_write(q->fifo, &tmp_pkt, 1); | |
1855 | |||
1856 | 3421 | return 0; | |
1857 | } | ||
1858 | |||
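The growth policy in mux_queue_packet() above is geometric with a cap: the FIFO doubles whenever it is full, but once the buffered byte count would exceed data_threshold the element count may not grow past max_packets, and hitting that cap is reported as an error. A condensed sketch of just that policy follows; the helper itself is illustrative, while the libavutil FIFO calls are the same ones used above.

    #include <stddef.h>
    #include <stdint.h>

    #include <libavutil/error.h>
    #include <libavutil/fifo.h>
    #include <libavutil/macros.h>

    // Make room for one more element in 'fifo', doubling its size, but never
    // beyond 'max_packets' once more than 'data_threshold' bytes are buffered.
    static int pre_mux_fifo_make_room(AVFifo *fifo, size_t data_size, size_t pkt_size,
                                      size_t data_threshold, size_t max_packets)
    {
        size_t packets, max_allowed, new_size;

        if (av_fifo_can_write(fifo))
            return 0;

        packets     = av_fifo_can_read(fifo);
        max_allowed = (data_size + pkt_size > data_threshold) ? max_packets : SIZE_MAX;
        new_size    = FFMIN(2 * packets, max_allowed);

        if (new_size <= packets)
            return AVERROR_BUFFER_TOO_SMALL;

        return av_fifo_grow2(fifo, new_size - packets);
    }
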
1859 | 507222 | static int send_to_mux(Scheduler *sch, SchMux *mux, unsigned stream_idx, | |
1860 | AVPacket *pkt) | ||
1861 | { | ||
1862 | 507222 | SchMuxStream *ms = &mux->streams[stream_idx]; | |
1863 |
2/2✓ Branch 0 taken 490782 times.
✓ Branch 1 taken 8039 times.
|
498821 | int64_t dts = (pkt && pkt->dts != AV_NOPTS_VALUE) ? |
1864 |
2/2✓ Branch 0 taken 498821 times.
✓ Branch 1 taken 8401 times.
|
1006043 | av_rescale_q(pkt->dts + pkt->duration, pkt->time_base, AV_TIME_BASE_Q) : |
1865 | AV_NOPTS_VALUE; | ||
1866 | |||
1867 | // queue the packet if the muxer cannot be started yet | ||
1868 |
2/2✓ Branch 0 taken 3460 times.
✓ Branch 1 taken 503762 times.
|
507222 | if (!atomic_load(&mux->mux_started)) { |
1869 | 3460 | int queued = 0; | |
1870 | |||
1871 | // the muxer could have started between the above atomic check and | ||
1872 | // locking the mutex, in which case this block falls through to the normal send path | ||
1873 | 3460 | pthread_mutex_lock(&sch->mux_ready_lock); | |
1874 | |||
1875 |
2/2✓ Branch 0 taken 3421 times.
✓ Branch 1 taken 39 times.
|
3460 | if (!atomic_load(&mux->mux_started)) { |
1876 | 3421 | int ret = mux_queue_packet(mux, ms, pkt); | |
1877 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 3421 times.
|
3421 | queued = ret < 0 ? ret : 1; |
1878 | } | ||
1879 | |||
1880 | 3460 | pthread_mutex_unlock(&sch->mux_ready_lock); | |
1881 | |||
1882 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 3460 times.
|
3460 | if (queued < 0) |
1883 | ✗ | return queued; | |
1884 |
2/2✓ Branch 0 taken 3421 times.
✓ Branch 1 taken 39 times.
|
3460 | else if (queued) |
1885 | 3421 | goto update_schedule; | |
1886 | } | ||
1887 | |||
1888 |
2/2✓ Branch 0 taken 495497 times.
✓ Branch 1 taken 8304 times.
|
503801 | if (pkt) { |
1889 | int ret; | ||
1890 | |||
1891 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 495497 times.
|
495497 | if (ms->init_eof) |
1892 | ✗ | return AVERROR_EOF; | |
1893 | |||
1894 | 495497 | ret = tq_send(mux->queue, stream_idx, pkt); | |
1895 |
2/2✓ Branch 0 taken 59 times.
✓ Branch 1 taken 495438 times.
|
495497 | if (ret < 0) |
1896 | 59 | return ret; | |
1897 | } else | ||
1898 | 8304 | tq_send_finish(mux->queue, stream_idx); | |
1899 | |||
1900 | 507163 | update_schedule: | |
1901 | // TODO: use atomics to check whether this changes trailing dts | ||
1902 | // to avoid locking unnecessarily | ||
1903 |
4/4✓ Branch 0 taken 16440 times.
✓ Branch 1 taken 490723 times.
✓ Branch 2 taken 8401 times.
✓ Branch 3 taken 8039 times.
|
507163 | if (dts != AV_NOPTS_VALUE || !pkt) { |
1904 | 499124 | pthread_mutex_lock(&sch->schedule_lock); | |
1905 | |||
1906 |
2/2✓ Branch 0 taken 490723 times.
✓ Branch 1 taken 8401 times.
|
499124 | if (pkt) ms->last_dts = dts; |
1907 | 8401 | else ms->source_finished = 1; | |
1908 | |||
1909 | 499124 | schedule_update_locked(sch); | |
1910 | |||
1911 | 499124 | pthread_mutex_unlock(&sch->schedule_lock); | |
1912 | } | ||
1913 | |||
1914 | 507163 | return 0; | |
1915 | } | ||
1916 | |||
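send_to_mux() above tracks, per muxer stream, the timestamp up to which data has been sent: the packet's end timestamp (dts + duration) rescaled from the packet's own time base into microseconds (AV_TIME_BASE_Q), which is what the scheduling code compares across streams. A small sketch of just that computation; the helper name is illustrative.

    #include <stdint.h>

    #include <libavcodec/packet.h>

    #include <libavutil/avutil.h>
    #include <libavutil/mathematics.h>

    // End timestamp of a packet in microseconds, or AV_NOPTS_VALUE if unknown.
    static int64_t packet_end_dts_us(const AVPacket *pkt)
    {
        if (!pkt || pkt->dts == AV_NOPTS_VALUE)
            return AV_NOPTS_VALUE;

        return av_rescale_q(pkt->dts + pkt->duration,
                            pkt->time_base, AV_TIME_BASE_Q);
    }
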
1917 | static int | ||
1918 | 479330 | demux_stream_send_to_dst(Scheduler *sch, const SchedulerNode dst, | |
1919 | uint8_t *dst_finished, AVPacket *pkt, unsigned flags) | ||
1920 | { | ||
1921 | int ret; | ||
1922 | |||
1923 |
2/2✓ Branch 0 taken 3428 times.
✓ Branch 1 taken 475902 times.
|
479330 | if (*dst_finished) |
1924 | 3428 | return AVERROR_EOF; | |
1925 | |||
1926 |
4/4✓ Branch 0 taken 471882 times.
✓ Branch 1 taken 4020 times.
✓ Branch 2 taken 68749 times.
✓ Branch 3 taken 403133 times.
|
475902 | if (pkt && dst.type == SCH_NODE_TYPE_MUX && |
1927 |
2/2✓ Branch 0 taken 2 times.
✓ Branch 1 taken 68747 times.
|
68749 | (flags & DEMUX_SEND_STREAMCOPY_EOF)) { |
1928 | 2 | av_packet_unref(pkt); | |
1929 | 2 | pkt = NULL; | |
1930 | } | ||
1931 | |||
1932 |
2/2✓ Branch 0 taken 4022 times.
✓ Branch 1 taken 471880 times.
|
475902 | if (!pkt) |
1933 | 4022 | goto finish; | |
1934 | |||
1935 | 943760 | ret = (dst.type == SCH_NODE_TYPE_MUX) ? | |
1936 |
2/2✓ Branch 0 taken 68747 times.
✓ Branch 1 taken 403133 times.
|
471880 | send_to_mux(sch, &sch->mux[dst.idx], dst.idx_stream, pkt) : |
1937 | 403133 | tq_send(sch->dec[dst.idx].queue, 0, pkt); | |
1938 |
2/2✓ Branch 0 taken 3426 times.
✓ Branch 1 taken 468454 times.
|
471880 | if (ret == AVERROR_EOF) |
1939 | 3426 | goto finish; | |
1940 | |||
1941 | 468454 | return ret; | |
1942 | |||
1943 | 7448 | finish: | |
1944 |
2/2✓ Branch 0 taken 692 times.
✓ Branch 1 taken 6756 times.
|
7448 | if (dst.type == SCH_NODE_TYPE_MUX) |
1945 | 692 | send_to_mux(sch, &sch->mux[dst.idx], dst.idx_stream, NULL); | |
1946 | else | ||
1947 | 6756 | tq_send_finish(sch->dec[dst.idx].queue, 0); | |
1948 | |||
1949 | 7448 | *dst_finished = 1; | |
1950 | 7448 | return AVERROR_EOF; | |
1951 | } | ||
1952 | |||
1953 | 478869 | static int demux_send_for_stream(Scheduler *sch, SchDemux *d, SchDemuxStream *ds, | |
1954 | AVPacket *pkt, unsigned flags) | ||
1955 | { | ||
1956 | 478869 | unsigned nb_done = 0; | |
1957 | |||
1958 |
2/2✓ Branch 0 taken 479330 times.
✓ Branch 1 taken 478869 times.
|
958199 | for (unsigned i = 0; i < ds->nb_dst; i++) { |
1959 | 479330 | AVPacket *to_send = pkt; | |
1960 | 479330 | uint8_t *finished = &ds->dst_finished[i]; | |
1961 | |||
1962 | int ret; | ||
1963 | |||
1964 | // sending a packet consumes it, so make a temporary reference if needed | ||
1965 |
4/4✓ Branch 0 taken 471882 times.
✓ Branch 1 taken 7448 times.
✓ Branch 2 taken 438 times.
✓ Branch 3 taken 471444 times.
|
479330 | if (pkt && i < ds->nb_dst - 1) { |
1966 | 438 | to_send = d->send_pkt; | |
1967 | |||
1968 | 438 | ret = av_packet_ref(to_send, pkt); | |
1969 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 438 times.
|
438 | if (ret < 0) |
1970 | ✗ | return ret; | |
1971 | } | ||
1972 | |||
1973 | 479330 | ret = demux_stream_send_to_dst(sch, ds->dst[i], finished, to_send, flags); | |
1974 |
2/2✓ Branch 0 taken 471882 times.
✓ Branch 1 taken 7448 times.
|
479330 | if (to_send) |
1975 | 471882 | av_packet_unref(to_send); | |
1976 |
2/2✓ Branch 0 taken 10876 times.
✓ Branch 1 taken 468454 times.
|
479330 | if (ret == AVERROR_EOF) |
1977 | 10876 | nb_done++; | |
1978 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 468454 times.
|
468454 | else if (ret < 0) |
1979 | ✗ | return ret; | |
1980 | } | ||
1981 | |||
1982 |
2/2✓ Branch 0 taken 10853 times.
✓ Branch 1 taken 468016 times.
|
478869 | return (nb_done == ds->nb_dst) ? AVERROR_EOF : 0; |
1983 | } | ||
1984 | |||
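demux_send_for_stream() above (and, in the same spirit, sch_dec_send() and sch_enc_send() further down) fans a single packet out to several destinations. Since sending consumes the packet, every destination except the last one is given a temporary reference, and only the final send consumes the caller's packet. A generic sketch of the pattern follows; the callback and parameter names are illustrative, and 'send' is assumed to consume the packet on success and leave it untouched on failure.

    #include <libavcodec/packet.h>

    // Send 'pkt' to nb_dst consumers; 'tmp' is caller-provided scratch storage.
    static int fanout_packet(AVPacket *pkt, AVPacket *tmp,
                             int (*send)(void *dst, AVPacket *pkt),
                             void **dst, unsigned nb_dst)
    {
        for (unsigned i = 0; i < nb_dst; i++) {
            AVPacket *to_send = pkt;
            int ret;

            // all destinations but the last get a temporary reference, so the
            // caller's packet stays valid for the remaining iterations
            if (i < nb_dst - 1) {
                ret = av_packet_ref(tmp, pkt);
                if (ret < 0)
                    return ret;
                to_send = tmp;
            }

            ret = send(dst[i], to_send);
            if (ret < 0) {
                av_packet_unref(to_send);
                return ret;
            }
        }

        return 0;
    }
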
1985 | 11 | static int demux_flush(Scheduler *sch, SchDemux *d, AVPacket *pkt) | |
1986 | { | ||
1987 | 11 | Timestamp max_end_ts = (Timestamp){ .ts = AV_NOPTS_VALUE }; | |
1988 | |||
1989 |
3/6✓ Branch 0 taken 11 times.
✗ Branch 1 not taken.
✓ Branch 2 taken 11 times.
✗ Branch 3 not taken.
✗ Branch 4 not taken.
✓ Branch 5 taken 11 times.
|
11 | av_assert0(!pkt->buf && !pkt->data && !pkt->side_data_elems); |
1990 | |||
1991 |
2/2✓ Branch 0 taken 14 times.
✓ Branch 1 taken 11 times.
|
25 | for (unsigned i = 0; i < d->nb_streams; i++) { |
1992 | 14 | SchDemuxStream *ds = &d->streams[i]; | |
1993 | |||
1994 |
2/2✓ Branch 0 taken 14 times.
✓ Branch 1 taken 14 times.
|
28 | for (unsigned j = 0; j < ds->nb_dst; j++) { |
1995 | 14 | const SchedulerNode *dst = &ds->dst[j]; | |
1996 | SchDec *dec; | ||
1997 | int ret; | ||
1998 | |||
1999 |
3/4✓ Branch 0 taken 14 times.
✗ Branch 1 not taken.
✓ Branch 2 taken 8 times.
✓ Branch 3 taken 6 times.
|
14 | if (ds->dst_finished[j] || dst->type != SCH_NODE_TYPE_DEC) |
2000 | 8 | continue; | |
2001 | |||
2002 | 6 | dec = &sch->dec[dst->idx]; | |
2003 | |||
2004 | 6 | ret = tq_send(dec->queue, 0, pkt); | |
2005 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 6 times.
|
6 | if (ret < 0) |
2006 | ✗ | return ret; | |
2007 | |||
2008 |
2/2✓ Branch 0 taken 3 times.
✓ Branch 1 taken 3 times.
|
6 | if (dec->queue_end_ts) { |
2009 | Timestamp ts; | ||
2010 | 3 | ret = av_thread_message_queue_recv(dec->queue_end_ts, &ts, 0); | |
2011 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 3 times.
|
3 | if (ret < 0) |
2012 | ✗ | return ret; | |
2013 | |||
2014 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 3 times.
|
3 | if (max_end_ts.ts == AV_NOPTS_VALUE || |
2015 | ✗ | (ts.ts != AV_NOPTS_VALUE && | |
2016 | ✗ | av_compare_ts(max_end_ts.ts, max_end_ts.tb, ts.ts, ts.tb) < 0)) | |
2017 | 3 | max_end_ts = ts; | |
2018 | |||
2019 | } | ||
2020 | } | ||
2021 | } | ||
2022 | |||
2023 | 11 | pkt->pts = max_end_ts.ts; | |
2024 | 11 | pkt->time_base = max_end_ts.tb; | |
2025 | |||
2026 | 11 | return 0; | |
2027 | } | ||
2028 | |||
2029 | 471506 | int sch_demux_send(Scheduler *sch, unsigned demux_idx, AVPacket *pkt, | |
2030 | unsigned flags) | ||
2031 | { | ||
2032 | SchDemux *d; | ||
2033 | int terminate; | ||
2034 | |||
2035 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 471506 times.
|
471506 | av_assert0(demux_idx < sch->nb_demux); |
2036 | 471506 | d = &sch->demux[demux_idx]; | |
2037 | |||
2038 | 471506 | terminate = waiter_wait(sch, &d->waiter); | |
2039 |
2/2✓ Branch 0 taken 51 times.
✓ Branch 1 taken 471455 times.
|
471506 | if (terminate) |
2040 | 51 | return AVERROR_EXIT; | |
2041 | |||
2042 | // flush the downstreams after seek | ||
2043 |
2/2✓ Branch 0 taken 11 times.
✓ Branch 1 taken 471444 times.
|
471455 | if (pkt->stream_index == -1) |
2044 | 11 | return demux_flush(sch, d, pkt); | |
2045 | |||
2046 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 471444 times.
|
471444 | av_assert0(pkt->stream_index < d->nb_streams); |
2047 | |||
2048 | 471444 | return demux_send_for_stream(sch, d, &d->streams[pkt->stream_index], pkt, flags); | |
2049 | } | ||
2050 | |||
2051 | 7172 | static int demux_done(Scheduler *sch, unsigned demux_idx) | |
2052 | { | ||
2053 | 7172 | SchDemux *d = &sch->demux[demux_idx]; | |
2054 | 7172 | int ret = 0; | |
2055 | |||
2056 |
2/2✓ Branch 0 taken 7425 times.
✓ Branch 1 taken 7172 times.
|
14597 | for (unsigned i = 0; i < d->nb_streams; i++) { |
2057 | 7425 | int err = demux_send_for_stream(sch, d, &d->streams[i], NULL, 0); | |
2058 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 7425 times.
|
7425 | if (err != AVERROR_EOF) |
2059 | ✗ | ret = err_merge(ret, err); | |
2060 | } | ||
2061 | |||
2062 | 7172 | pthread_mutex_lock(&sch->schedule_lock); | |
2063 | |||
2064 | 7172 | d->task_exited = 1; | |
2065 | |||
2066 | 7172 | schedule_update_locked(sch); | |
2067 | |||
2068 | 7172 | pthread_mutex_unlock(&sch->schedule_lock); | |
2069 | |||
2070 | 7172 | return ret; | |
2071 | } | ||
2072 | |||
2073 | 514816 | int sch_mux_receive(Scheduler *sch, unsigned mux_idx, AVPacket *pkt) | |
2074 | { | ||
2075 | SchMux *mux; | ||
2076 | int ret, stream_idx; | ||
2077 | |||
2078 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 514816 times.
|
514816 | av_assert0(mux_idx < sch->nb_mux); |
2079 | 514816 | mux = &sch->mux[mux_idx]; | |
2080 | |||
2081 | 514816 | ret = tq_receive(mux->queue, &stream_idx, pkt); | |
2082 | 514816 | pkt->stream_index = stream_idx; | |
2083 | 514816 | return ret; | |
2084 | } | ||
2085 | |||
2086 | 199 | void sch_mux_receive_finish(Scheduler *sch, unsigned mux_idx, unsigned stream_idx) | |
2087 | { | ||
2088 | SchMux *mux; | ||
2089 | |||
2090 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 199 times.
|
199 | av_assert0(mux_idx < sch->nb_mux); |
2091 | 199 | mux = &sch->mux[mux_idx]; | |
2092 | |||
2093 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 199 times.
|
199 | av_assert0(stream_idx < mux->nb_streams); |
2094 | 199 | tq_receive_finish(mux->queue, stream_idx); | |
2095 | |||
2096 | 199 | pthread_mutex_lock(&sch->schedule_lock); | |
2097 | 199 | mux->streams[stream_idx].source_finished = 1; | |
2098 | |||
2099 | 199 | schedule_update_locked(sch); | |
2100 | |||
2101 | 199 | pthread_mutex_unlock(&sch->schedule_lock); | |
2102 | 199 | } | |
2103 | |||
2104 | 460972 | int sch_mux_sub_heartbeat(Scheduler *sch, unsigned mux_idx, unsigned stream_idx, | |
2105 | const AVPacket *pkt) | ||
2106 | { | ||
2107 | SchMux *mux; | ||
2108 | SchMuxStream *ms; | ||
2109 | |||
2110 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 460972 times.
|
460972 | av_assert0(mux_idx < sch->nb_mux); |
2111 | 460972 | mux = &sch->mux[mux_idx]; | |
2112 | |||
2113 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 460972 times.
|
460972 | av_assert0(stream_idx < mux->nb_streams); |
2114 | 460972 | ms = &mux->streams[stream_idx]; | |
2115 | |||
2116 |
2/2✓ Branch 0 taken 5 times.
✓ Branch 1 taken 460972 times.
|
460977 | for (unsigned i = 0; i < ms->nb_sub_heartbeat_dst; i++) { |
2117 | 5 | SchDec *dst = &sch->dec[ms->sub_heartbeat_dst[i]]; | |
2118 | int ret; | ||
2119 | |||
2120 | 5 | ret = av_packet_copy_props(mux->sub_heartbeat_pkt, pkt); | |
2121 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 5 times.
|
5 | if (ret < 0) |
2122 | ✗ | return ret; | |
2123 | |||
2124 | 5 | tq_send(dst->queue, 0, mux->sub_heartbeat_pkt); | |
2125 | } | ||
2126 | |||
2127 | 460972 | return 0; | |
2128 | } | ||
2129 | |||
2130 | 7925 | static int mux_done(Scheduler *sch, unsigned mux_idx) | |
2131 | { | ||
2132 | 7925 | SchMux *mux = &sch->mux[mux_idx]; | |
2133 | |||
2134 | 7925 | pthread_mutex_lock(&sch->schedule_lock); | |
2135 | |||
2136 |
2/2✓ Branch 0 taken 8401 times.
✓ Branch 1 taken 7925 times.
|
16326 | for (unsigned i = 0; i < mux->nb_streams; i++) { |
2137 | 8401 | tq_receive_finish(mux->queue, i); | |
2138 | 8401 | mux->streams[i].source_finished = 1; | |
2139 | } | ||
2140 | |||
2141 | 7925 | schedule_update_locked(sch); | |
2142 | |||
2143 | 7925 | pthread_mutex_unlock(&sch->schedule_lock); | |
2144 | |||
2145 | 7925 | pthread_mutex_lock(&sch->finish_lock); | |
2146 | |||
2147 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 7925 times.
|
7925 | av_assert0(sch->nb_mux_done < sch->nb_mux); |
2148 | 7925 | sch->nb_mux_done++; | |
2149 | |||
2150 | 7925 | pthread_cond_signal(&sch->finish_cond); | |
2151 | |||
2152 | 7925 | pthread_mutex_unlock(&sch->finish_lock); | |
2153 | |||
2154 | 7925 | return 0; | |
2155 | } | ||
2156 | |||
2157 | 376070 | int sch_dec_receive(Scheduler *sch, unsigned dec_idx, AVPacket *pkt) | |
2158 | { | ||
2159 | SchDec *dec; | ||
2160 | int ret, dummy; | ||
2161 | |||
2162 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 376070 times.
|
376070 | av_assert0(dec_idx < sch->nb_dec); |
2163 | 376070 | dec = &sch->dec[dec_idx]; | |
2164 | |||
2165 | // the decoder should have given us the post-flush end timestamp in pkt | ||
2166 |
2/2✓ Branch 0 taken 3 times.
✓ Branch 1 taken 376067 times.
|
376070 | if (dec->expect_end_ts) { |
2167 | 3 | Timestamp ts = (Timestamp){ .ts = pkt->pts, .tb = pkt->time_base }; | |
2168 | 3 | ret = av_thread_message_queue_send(dec->queue_end_ts, &ts, 0); | |
2169 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 3 times.
|
3 | if (ret < 0) |
2170 | ✗ | return ret; | |
2171 | |||
2172 | 3 | dec->expect_end_ts = 0; | |
2173 | } | ||
2174 | |||
2175 | 376070 | ret = tq_receive(dec->queue, &dummy, pkt); | |
2176 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 376070 times.
|
376070 | av_assert0(dummy <= 0); |
2177 | |||
2178 | // got a flush packet; on the next call to this function the decoder | ||
2179 | // will give us the post-flush end timestamp | ||
2180 |
7/8✓ Branch 0 taken 372747 times.
✓ Branch 1 taken 3323 times.
✓ Branch 2 taken 958 times.
✓ Branch 3 taken 371789 times.
✓ Branch 4 taken 958 times.
✗ Branch 5 not taken.
✓ Branch 6 taken 3 times.
✓ Branch 7 taken 955 times.
|
376070 | if (ret >= 0 && !pkt->data && !pkt->side_data_elems && dec->queue_end_ts) |
2181 | 3 | dec->expect_end_ts = 1; | |
2182 | |||
2183 | 376070 | return ret; | |
2184 | } | ||
2185 | |||
2186 | 404830 | static int send_to_filter(Scheduler *sch, SchFilterGraph *fg, | |
2187 | unsigned in_idx, AVFrame *frame) | ||
2188 | { | ||
2189 |
2/2✓ Branch 0 taken 398044 times.
✓ Branch 1 taken 6786 times.
|
404830 | if (frame) |
2190 | 398044 | return tq_send(fg->queue, in_idx, frame); | |
2191 | |||
2192 |
1/2✓ Branch 0 taken 6786 times.
✗ Branch 1 not taken.
|
6786 | if (!fg->inputs[in_idx].send_finished) { |
2193 | 6786 | fg->inputs[in_idx].send_finished = 1; | |
2194 | 6786 | tq_send_finish(fg->queue, in_idx); | |
2195 | |||
2196 | // close the control stream when all actual inputs are done | ||
2197 |
2/2✓ Branch 0 taken 6701 times.
✓ Branch 1 taken 85 times.
|
6786 | if (atomic_fetch_add(&fg->nb_inputs_finished_send, 1) == fg->nb_inputs - 1) |
2198 | 6701 | tq_send_finish(fg->queue, fg->nb_inputs); | |
2199 | } | ||
2200 | 6786 | return 0; | |
2201 | } | ||
2202 | |||
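send_to_filter() above closes the filtergraph's extra control stream exactly once, from whichever input happens to finish last: atomic_fetch_add() returns the counter's previous value, so only the caller that raises the count from nb_inputs - 1 to nb_inputs sees the comparison succeed. A tiny sketch of that idiom with an illustrative helper name:

    #include <stdatomic.h>

    // Returns 1 for exactly one caller: the one whose increment completes the
    // set of nb_inputs finished inputs. That caller closes the shared stream.
    static int last_input_finished(atomic_int *nb_finished, int nb_inputs)
    {
        return atomic_fetch_add(nb_finished, 1) == nb_inputs - 1;
    }
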
2203 | 409248 | static int dec_send_to_dst(Scheduler *sch, const SchedulerNode dst, | |
2204 | uint8_t *dst_finished, AVFrame *frame) | ||
2205 | { | ||
2206 | int ret; | ||
2207 | |||
2208 |
2/2✓ Branch 0 taken 6988 times.
✓ Branch 1 taken 402260 times.
|
409248 | if (*dst_finished) |
2209 | 6988 | return AVERROR_EOF; | |
2210 | |||
2211 |
2/2✓ Branch 0 taken 3337 times.
✓ Branch 1 taken 398923 times.
|
402260 | if (!frame) |
2212 | 3337 | goto finish; | |
2213 | |||
2214 | 797846 | ret = (dst.type == SCH_NODE_TYPE_FILTER_IN) ? | |
2215 |
2/2✓ Branch 0 taken 398044 times.
✓ Branch 1 taken 879 times.
|
398923 | send_to_filter(sch, &sch->filters[dst.idx], dst.idx_stream, frame) : |
2216 | 879 | send_to_enc(sch, &sch->enc[dst.idx], frame); | |
2217 |
2/2✓ Branch 0 taken 3487 times.
✓ Branch 1 taken 395436 times.
|
398923 | if (ret == AVERROR_EOF) |
2218 | 3487 | goto finish; | |
2219 | |||
2220 | 395436 | return ret; | |
2221 | |||
2222 | 6824 | finish: | |
2223 |
2/2✓ Branch 0 taken 6786 times.
✓ Branch 1 taken 38 times.
|
6824 | if (dst.type == SCH_NODE_TYPE_FILTER_IN) |
2224 | 6786 | send_to_filter(sch, &sch->filters[dst.idx], dst.idx_stream, NULL); | |
2225 | else | ||
2226 | 38 | send_to_enc(sch, &sch->enc[dst.idx], NULL); | |
2227 | |||
2228 | 6824 | *dst_finished = 1; | |
2229 | |||
2230 | 6824 | return AVERROR_EOF; | |
2231 | } | ||
2232 | |||
2233 | 400857 | int sch_dec_send(Scheduler *sch, unsigned dec_idx, | |
2234 | unsigned out_idx, AVFrame *frame) | ||
2235 | { | ||
2236 | SchDec *dec; | ||
2237 | SchDecOutput *o; | ||
2238 | int ret; | ||
2239 | 400857 | unsigned nb_done = 0; | |
2240 | |||
2241 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 400857 times.
|
400857 | av_assert0(dec_idx < sch->nb_dec); |
2242 | 400857 | dec = &sch->dec[dec_idx]; | |
2243 | |||
2244 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 400857 times.
|
400857 | av_assert0(out_idx < dec->nb_outputs); |
2245 | 400857 | o = &dec->outputs[out_idx]; | |
2246 | |||
2247 |
2/2✓ Branch 0 taken 402424 times.
✓ Branch 1 taken 400857 times.
|
803281 | for (unsigned i = 0; i < o->nb_dst; i++) { |
2248 | 402424 | uint8_t *finished = &o->dst_finished[i]; | |
2249 | 402424 | AVFrame *to_send = frame; | |
2250 | |||
2251 | // sending a frame consumes it, so make a temporary reference if needed | ||
2252 |
2/2✓ Branch 0 taken 1567 times.
✓ Branch 1 taken 400857 times.
|
402424 | if (i < o->nb_dst - 1) { |
2253 | 1567 | to_send = dec->send_frame; | |
2254 | |||
2255 | // frame may sometimes contain props only, | ||
2256 | // e.g. to signal EOF timestamp | ||
2257 |
2/2✓ Branch 0 taken 1460 times.
✓ Branch 1 taken 107 times.
|
1567 | ret = frame->buf[0] ? av_frame_ref(to_send, frame) : |
2258 | 107 | av_frame_copy_props(to_send, frame); | |
2259 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 1567 times.
|
1567 | if (ret < 0) |
2260 | ✗ | return ret; | |
2261 | } | ||
2262 | |||
2263 | 402424 | ret = dec_send_to_dst(sch, o->dst[i], finished, to_send); | |
2264 |
2/2✓ Branch 0 taken 6988 times.
✓ Branch 1 taken 395436 times.
|
402424 | if (ret < 0) { |
2265 | 6988 | av_frame_unref(to_send); | |
2266 |
1/2✓ Branch 0 taken 6988 times.
✗ Branch 1 not taken.
|
6988 | if (ret == AVERROR_EOF) { |
2267 | 6988 | nb_done++; | |
2268 | 6988 | continue; | |
2269 | } | ||
2270 | ✗ | return ret; | |
2271 | } | ||
2272 | } | ||
2273 | |||
2274 |
2/2✓ Branch 0 taken 6878 times.
✓ Branch 1 taken 393979 times.
|
400857 | return (nb_done == o->nb_dst) ? AVERROR_EOF : 0; |
2275 | } | ||
2276 | |||
2277 | 6757 | static int dec_done(Scheduler *sch, unsigned dec_idx) | |
2278 | { | ||
2279 | 6757 | SchDec *dec = &sch->dec[dec_idx]; | |
2280 | 6757 | int ret = 0; | |
2281 | |||
2282 | 6757 | tq_receive_finish(dec->queue, 0); | |
2283 | |||
2284 | // make sure our source does not get stuck waiting for end timestamps | ||
2285 | // that will never arrive | ||
2286 |
2/2✓ Branch 0 taken 1 times.
✓ Branch 1 taken 6756 times.
|
6757 | if (dec->queue_end_ts) |
2287 | 1 | av_thread_message_queue_set_err_recv(dec->queue_end_ts, AVERROR_EOF); | |
2288 | |||
2289 |
2/2✓ Branch 0 taken 6763 times.
✓ Branch 1 taken 6757 times.
|
13520 | for (unsigned i = 0; i < dec->nb_outputs; i++) { |
2290 | 6763 | SchDecOutput *o = &dec->outputs[i]; | |
2291 | |||
2292 |
2/2✓ Branch 0 taken 6824 times.
✓ Branch 1 taken 6763 times.
|
13587 | for (unsigned j = 0; j < o->nb_dst; j++) { |
2293 | 6824 | int err = dec_send_to_dst(sch, o->dst[j], &o->dst_finished[j], NULL); | |
2294 |
2/4✓ Branch 0 taken 6824 times.
✗ Branch 1 not taken.
✗ Branch 2 not taken.
✓ Branch 3 taken 6824 times.
|
6824 | if (err < 0 && err != AVERROR_EOF) |
2295 | ✗ | ret = err_merge(ret, err); | |
2296 | } | ||
2297 | } | ||
2298 | |||
2299 | 6757 | return ret; | |
2300 | } | ||
2301 | |||
2302 | 443936 | int sch_enc_receive(Scheduler *sch, unsigned enc_idx, AVFrame *frame) | |
2303 | { | ||
2304 | SchEnc *enc; | ||
2305 | int ret, dummy; | ||
2306 | |||
2307 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 443936 times.
|
443936 | av_assert0(enc_idx < sch->nb_enc); |
2308 | 443936 | enc = &sch->enc[enc_idx]; | |
2309 | |||
2310 | 443936 | ret = tq_receive(enc->queue, &dummy, frame); | |
2311 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 443936 times.
|
443936 | av_assert0(dummy <= 0); |
2312 | |||
2313 | 443936 | return ret; | |
2314 | } | ||
2315 | |||
2316 | 437871 | static int enc_send_to_dst(Scheduler *sch, const SchedulerNode dst, | |
2317 | uint8_t *dst_finished, AVPacket *pkt) | ||
2318 | { | ||
2319 | int ret; | ||
2320 | |||
2321 |
2/2✓ Branch 0 taken 39 times.
✓ Branch 1 taken 437832 times.
|
437871 | if (*dst_finished) |
2322 | 39 | return AVERROR_EOF; | |
2323 | |||
2324 |
2/2✓ Branch 0 taken 7708 times.
✓ Branch 1 taken 430124 times.
|
437832 | if (!pkt) |
2325 | 7708 | goto finish; | |
2326 | |||
2327 | 860248 | ret = (dst.type == SCH_NODE_TYPE_MUX) ? | |
2328 |
2/2✓ Branch 0 taken 430074 times.
✓ Branch 1 taken 50 times.
|
430124 | send_to_mux(sch, &sch->mux[dst.idx], dst.idx_stream, pkt) : |
2329 | 50 | tq_send(sch->dec[dst.idx].queue, 0, pkt); | |
2330 |
2/2✓ Branch 0 taken 2 times.
✓ Branch 1 taken 430122 times.
|
430124 | if (ret == AVERROR_EOF) |
2331 | 2 | goto finish; | |
2332 | |||
2333 | 430122 | return ret; | |
2334 | |||
2335 | 7710 | finish: | |
2336 |
2/2✓ Branch 0 taken 7709 times.
✓ Branch 1 taken 1 times.
|
7710 | if (dst.type == SCH_NODE_TYPE_MUX) |
2337 | 7709 | send_to_mux(sch, &sch->mux[dst.idx], dst.idx_stream, NULL); | |
2338 | else | ||
2339 | 1 | tq_send_finish(sch->dec[dst.idx].queue, 0); | |
2340 | |||
2341 | 7710 | *dst_finished = 1; | |
2342 | |||
2343 | 7710 | return AVERROR_EOF; | |
2344 | } | ||
2345 | |||
2346 | 430111 | int sch_enc_send(Scheduler *sch, unsigned enc_idx, AVPacket *pkt) | |
2347 | { | ||
2348 | SchEnc *enc; | ||
2349 | int ret; | ||
2350 | |||
2351 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 430111 times.
|
430111 | av_assert0(enc_idx < sch->nb_enc); |
2352 | 430111 | enc = &sch->enc[enc_idx]; | |
2353 | |||
2354 |
2/2✓ Branch 0 taken 430161 times.
✓ Branch 1 taken 430111 times.
|
860272 | for (unsigned i = 0; i < enc->nb_dst; i++) { |
2355 | 430161 | uint8_t *finished = &enc->dst_finished[i]; | |
2356 | 430161 | AVPacket *to_send = pkt; | |
2357 | |||
2358 | // sending a packet consumes it, so make a temporary reference if needed | ||
2359 |
2/2✓ Branch 0 taken 50 times.
✓ Branch 1 taken 430111 times.
|
430161 | if (i < enc->nb_dst - 1) { |
2360 | 50 | to_send = enc->send_pkt; | |
2361 | |||
2362 | 50 | ret = av_packet_ref(to_send, pkt); | |
2363 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 50 times.
|
50 | if (ret < 0) |
2364 | ✗ | return ret; | |
2365 | } | ||
2366 | |||
2367 | 430161 | ret = enc_send_to_dst(sch, enc->dst[i], finished, to_send); | |
2368 |
2/2✓ Branch 0 taken 39 times.
✓ Branch 1 taken 430122 times.
|
430161 | if (ret < 0) { |
2369 | 39 | av_packet_unref(to_send); | |
2370 |
1/2✓ Branch 0 taken 39 times.
✗ Branch 1 not taken.
|
39 | if (ret == AVERROR_EOF) |
2371 | 39 | continue; | |
2372 | ✗ | return ret; | |
2373 | } | ||
2374 | } | ||
2375 | |||
2376 | 430111 | return 0; | |
2377 | } | ||
2378 | |||
2379 | 7709 | static int enc_done(Scheduler *sch, unsigned enc_idx) | |
2380 | { | ||
2381 | 7709 | SchEnc *enc = &sch->enc[enc_idx]; | |
2382 | 7709 | int ret = 0; | |
2383 | |||
2384 | 7709 | tq_receive_finish(enc->queue, 0); | |
2385 | |||
2386 |
2/2✓ Branch 0 taken 7710 times.
✓ Branch 1 taken 7709 times.
|
15419 | for (unsigned i = 0; i < enc->nb_dst; i++) { |
2387 | 7710 | int err = enc_send_to_dst(sch, enc->dst[i], &enc->dst_finished[i], NULL); | |
2388 |
2/4✓ Branch 0 taken 7710 times.
✗ Branch 1 not taken.
✗ Branch 2 not taken.
✓ Branch 3 taken 7710 times.
|
7710 | if (err < 0 && err != AVERROR_EOF) |
2389 | ✗ | ret = err_merge(ret, err); | |
2390 | } | ||
2391 | |||
2392 | 7709 | return ret; | |
2393 | } | ||
2394 | |||
2395 | 388807 | int sch_filter_receive(Scheduler *sch, unsigned fg_idx, | |
2396 | unsigned *in_idx, AVFrame *frame) | ||
2397 | { | ||
2398 | SchFilterGraph *fg; | ||
2399 | |||
2400 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 388807 times.
|
388807 | av_assert0(fg_idx < sch->nb_filters); |
2401 | 388807 | fg = &sch->filters[fg_idx]; | |
2402 | |||
2403 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 388807 times.
|
388807 | av_assert0(*in_idx <= fg->nb_inputs); |
2404 | |||
2405 | // update scheduling to account for desired input stream, if it changed | ||
2406 | // | ||
2407 | // this check needs no locking because only the filtering thread | ||
2408 | // updates this value | ||
2409 |
2/2✓ Branch 0 taken 916 times.
✓ Branch 1 taken 387891 times.
|
388807 | if (*in_idx != fg->best_input) { |
2410 | 916 | pthread_mutex_lock(&sch->schedule_lock); | |
2411 | |||
2412 | 916 | fg->best_input = *in_idx; | |
2413 | 916 | schedule_update_locked(sch); | |
2414 | |||
2415 | 916 | pthread_mutex_unlock(&sch->schedule_lock); | |
2416 | } | ||
2417 | |||
2418 |
2/2✓ Branch 0 taken 367804 times.
✓ Branch 1 taken 21003 times.
|
388807 | if (*in_idx == fg->nb_inputs) { |
2419 | 21003 | int terminate = waiter_wait(sch, &fg->waiter); | |
2420 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 21003 times.
|
21003 | return terminate ? AVERROR_EOF : AVERROR(EAGAIN); |
2421 | } | ||
2422 | |||
2423 | 26 | while (1) { | |
2424 | int ret, idx; | ||
2425 | |||
2426 | 367830 | ret = tq_receive(fg->queue, &idx, frame); | |
2427 |
2/2✓ Branch 0 taken 6 times.
✓ Branch 1 taken 367824 times.
|
367830 | if (idx < 0) |
2428 | 367804 | return AVERROR_EOF; | |
2429 |
2/2✓ Branch 0 taken 367798 times.
✓ Branch 1 taken 26 times.
|
367824 | else if (ret >= 0) { |
2430 | 367798 | *in_idx = idx; | |
2431 | 367798 | return 0; | |
2432 | } | ||
2433 | |||
2434 | // disregard EOFs for specific streams - they should always be | ||
2435 | // preceded by an EOF frame | ||
2436 | } | ||
2437 | } | ||
2438 | |||
2439 | 1 | void sch_filter_receive_finish(Scheduler *sch, unsigned fg_idx, unsigned in_idx) | |
2440 | { | ||
2441 | SchFilterGraph *fg; | ||
2442 | SchFilterIn *fi; | ||
2443 | |||
2444 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 1 times.
|
1 | av_assert0(fg_idx < sch->nb_filters); |
2445 | 1 | fg = &sch->filters[fg_idx]; | |
2446 | |||
2447 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 1 times.
|
1 | av_assert0(in_idx < fg->nb_inputs); |
2448 | 1 | fi = &fg->inputs[in_idx]; | |
2449 | |||
2450 |
1/2✓ Branch 0 taken 1 times.
✗ Branch 1 not taken.
|
1 | if (!fi->receive_finished) { |
2451 | 1 | fi->receive_finished = 1; | |
2452 | 1 | tq_receive_finish(fg->queue, in_idx); | |
2453 | |||
2454 | // close the control stream when all actual inputs are done | ||
2455 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 1 times.
|
1 | if (++fg->nb_inputs_finished_receive == fg->nb_inputs) |
2456 | ✗ | tq_receive_finish(fg->queue, fg->nb_inputs); | |
2457 | } | ||
2458 | 1 | } | |
2459 | |||
2460 | 389245 | int sch_filter_send(Scheduler *sch, unsigned fg_idx, unsigned out_idx, AVFrame *frame) | |
2461 | { | ||
2462 | SchFilterGraph *fg; | ||
2463 | SchedulerNode dst; | ||
2464 | |||
2465 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 389245 times.
|
389245 | av_assert0(fg_idx < sch->nb_filters); |
2466 | 389245 | fg = &sch->filters[fg_idx]; | |
2467 | |||
2468 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 389245 times.
|
389245 | av_assert0(out_idx < fg->nb_outputs); |
2469 | 389245 | dst = fg->outputs[out_idx].dst; | |
2470 | |||
2471 | 389245 | return (dst.type == SCH_NODE_TYPE_ENC) ? | |
2472 |
1/2✓ Branch 0 taken 389245 times.
✗ Branch 1 not taken.
|
389245 | send_to_enc (sch, &sch->enc[dst.idx], frame) : |
2473 | ✗ | send_to_filter(sch, &sch->filters[dst.idx], dst.idx_stream, frame); | |
2474 | } | ||
2475 | |||
2476 | 7541 | static int filter_done(Scheduler *sch, unsigned fg_idx) | |
2477 | { | ||
2478 | 7541 | SchFilterGraph *fg = &sch->filters[fg_idx]; | |
2479 | 7541 | int ret = 0; | |
2480 | |||
2481 |
2/2✓ Branch 0 taken 14327 times.
✓ Branch 1 taken 7541 times.
|
21868 | for (unsigned i = 0; i <= fg->nb_inputs; i++) |
2482 | 14327 | tq_receive_finish(fg->queue, i); | |
2483 | |||
2484 |
2/2✓ Branch 0 taken 7671 times.
✓ Branch 1 taken 7541 times.
|
15212 | for (unsigned i = 0; i < fg->nb_outputs; i++) { |
2485 | 7671 | SchedulerNode dst = fg->outputs[i].dst; | |
2486 | 15342 | int err = (dst.type == SCH_NODE_TYPE_ENC) ? | |
2487 |
1/2✓ Branch 0 taken 7671 times.
✗ Branch 1 not taken.
|
7671 | send_to_enc (sch, &sch->enc[dst.idx], NULL) : |
2488 | ✗ | send_to_filter(sch, &sch->filters[dst.idx], dst.idx_stream, NULL); | |
2489 | |||
2490 |
3/4✓ Branch 0 taken 3106 times.
✓ Branch 1 taken 4565 times.
✗ Branch 2 not taken.
✓ Branch 3 taken 3106 times.
|
7671 | if (err < 0 && err != AVERROR_EOF) |
2491 | ✗ | ret = err_merge(ret, err); | |
2492 | } | ||
2493 | |||
2494 | 7541 | pthread_mutex_lock(&sch->schedule_lock); | |
2495 | |||
2496 | 7541 | fg->task_exited = 1; | |
2497 | |||
2498 | 7541 | schedule_update_locked(sch); | |
2499 | |||
2500 | 7541 | pthread_mutex_unlock(&sch->schedule_lock); | |
2501 | |||
2502 | 7541 | return ret; | |
2503 | } | ||
2504 | |||
2505 | ✗ | int sch_filter_command(Scheduler *sch, unsigned fg_idx, AVFrame *frame) | |
2506 | { | ||
2507 | SchFilterGraph *fg; | ||
2508 | |||
2509 | ✗ | av_assert0(fg_idx < sch->nb_filters); | |
2510 | ✗ | fg = &sch->filters[fg_idx]; | |
2511 | |||
2512 | ✗ | return send_to_filter(sch, fg, fg->nb_inputs, frame); | |
2513 | } | ||
2514 | |||
2515 | 37104 | static int task_cleanup(Scheduler *sch, SchedulerNode node) | |
2516 | { | ||
2517 |
5/6✓ Branch 0 taken 7172 times.
✓ Branch 1 taken 7925 times.
✓ Branch 2 taken 6757 times.
✓ Branch 3 taken 7709 times.
✓ Branch 4 taken 7541 times.
✗ Branch 5 not taken.
|
37104 | switch (node.type) { |
2518 | 7172 | case SCH_NODE_TYPE_DEMUX: return demux_done (sch, node.idx); | |
2519 | 7925 | case SCH_NODE_TYPE_MUX: return mux_done (sch, node.idx); | |
2520 | 6757 | case SCH_NODE_TYPE_DEC: return dec_done (sch, node.idx); | |
2521 | 7709 | case SCH_NODE_TYPE_ENC: return enc_done (sch, node.idx); | |
2522 | 7541 | case SCH_NODE_TYPE_FILTER_IN: return filter_done(sch, node.idx); | |
2523 | ✗ | default: av_assert0(0); | |
2524 | } | ||
2525 | } | ||
2526 | |||
2527 | 37090 | static void *task_wrapper(void *arg) | |
2528 | { | ||
2529 | 37090 | SchTask *task = arg; | |
2530 | 37090 | Scheduler *sch = task->parent; | |
2531 | int ret; | ||
2532 | 37090 | int err = 0; | |
2533 | |||
2534 | 37090 | ret = task->func(task->func_arg); | |
2535 |
2/2✓ Branch 0 taken 1 times.
✓ Branch 1 taken 37089 times.
|
37090 | if (ret < 0) |
2536 | 1 | av_log(task->func_arg, AV_LOG_ERROR, | |
2537 | 1 | "Task finished with error code: %d (%s)\n", ret, av_err2str(ret)); | |
2538 | |||
2539 | 37090 | err = task_cleanup(sch, task->node); | |
2540 | 37090 | ret = err_merge(ret, err); | |
2541 | |||
2542 | // EOF is considered normal termination | ||
2543 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 37090 times.
|
37090 | if (ret == AVERROR_EOF) |
2544 | ✗ | ret = 0; | |
2545 |
2/2✓ Branch 0 taken 1 times.
✓ Branch 1 taken 37089 times.
|
37090 | if (ret < 0) { |
2546 | 1 | pthread_mutex_lock(&sch->finish_lock); | |
2547 | 1 | sch->task_failed = 1; | |
2548 | 1 | pthread_cond_signal(&sch->finish_cond); | |
2549 | 1 | pthread_mutex_unlock(&sch->finish_lock); | |
2550 | } | ||
2551 | |||
2552 |
4/4✓ Branch 0 taken 1 times.
✓ Branch 1 taken 37089 times.
✓ Branch 2 taken 1 times.
✓ Branch 3 taken 37089 times.
|
37091 | av_log(task->func_arg, ret < 0 ? AV_LOG_ERROR : AV_LOG_VERBOSE, |
2553 | "Terminating thread with return code %d (%s)\n", ret, | ||
2554 | 1 | ret < 0 ? av_err2str(ret) : "success"); | |
2555 | |||
2556 | 37090 | return (void*)(intptr_t)ret; | |
2557 | } | ||
2558 | |||
2559 | 37104 | static int task_stop(Scheduler *sch, SchTask *task) | |
2560 | { | ||
2561 | int ret; | ||
2562 | void *thread_ret; | ||
2563 | |||
2564 |
2/2✓ Branch 0 taken 14 times.
✓ Branch 1 taken 37090 times.
|
37104 | if (!task->thread_running) |
2565 | 14 | return task_cleanup(sch, task->node); | |
2566 | |||
2567 | 37090 | ret = pthread_join(task->thread, &thread_ret); | |
2568 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 37090 times.
|
37090 | av_assert0(ret == 0); |
2569 | |||
2570 | 37090 | task->thread_running = 0; | |
2571 | |||
2572 | 37090 | return (intptr_t)thread_ret; | |
2573 | } | ||
2574 | |||
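task_wrapper() and task_stop() above pass an int status code through pthread's void * thread result by round-tripping it through intptr_t: the worker encodes its return code into the pointer it returns, and the joining thread decodes it after pthread_join(). A self-contained sketch of that idiom; do_work() is a stand-in for the real task function.

    #include <pthread.h>
    #include <stdint.h>

    static int do_work(void *arg)
    {
        (void)arg;
        return 0;   // stand-in for the real task body
    }

    // thread entry point: encode the status code in the pointer-sized result
    static void *worker(void *arg)
    {
        int ret = do_work(arg);
        return (void *)(intptr_t)ret;
    }

    // join the thread and recover the status code
    static int join_worker(pthread_t thread)
    {
        void *thread_ret;
        int err = pthread_join(thread, &thread_ret);
        if (err)
            return -err;    // pthread_join returns an errno value on failure
        return (int)(intptr_t)thread_ret;
    }
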
2575 | 15847 | int sch_stop(Scheduler *sch, int64_t *finish_ts) | |
2576 | { | ||
2577 | 15847 | int ret = 0, err; | |
2578 | |||
2579 |
2/2✓ Branch 0 taken 7924 times.
✓ Branch 1 taken 7923 times.
|
15847 | if (sch->state != SCH_STATE_STARTED) |
2580 | 7924 | return 0; | |
2581 | |||
2582 | 7923 | atomic_store(&sch->terminate, 1); | |
2583 | |||
2584 |
2/2✓ Branch 0 taken 15846 times.
✓ Branch 1 taken 7923 times.
|
23769 | for (unsigned type = 0; type < 2; type++) |
2585 |
4/4✓ Branch 0 taken 15095 times.
✓ Branch 1 taken 15464 times.
✓ Branch 2 taken 14713 times.
✓ Branch 3 taken 15846 times.
|
30559 | for (unsigned i = 0; i < (type ? sch->nb_demux : sch->nb_filters); i++) { |
2586 |
2/2✓ Branch 0 taken 7172 times.
✓ Branch 1 taken 7541 times.
|
14713 | SchWaiter *w = type ? &sch->demux[i].waiter : &sch->filters[i].waiter; |
2587 | 14713 | waiter_set(w, 1); | |
2588 | } | ||
2589 | |||
2590 |
2/2✓ Branch 0 taken 7172 times.
✓ Branch 1 taken 7923 times.
|
15095 | for (unsigned i = 0; i < sch->nb_demux; i++) { |
2591 | 7172 | SchDemux *d = &sch->demux[i]; | |
2592 | |||
2593 | 7172 | err = task_stop(sch, &d->task); | |
2594 | 7172 | ret = err_merge(ret, err); | |
2595 | } | ||
2596 | |||
2597 |
2/2✓ Branch 0 taken 6757 times.
✓ Branch 1 taken 7923 times.
|
14680 | for (unsigned i = 0; i < sch->nb_dec; i++) { |
2598 | 6757 | SchDec *dec = &sch->dec[i]; | |
2599 | |||
2600 | 6757 | err = task_stop(sch, &dec->task); | |
2601 | 6757 | ret = err_merge(ret, err); | |
2602 | } | ||
2603 | |||
2604 |
2/2✓ Branch 0 taken 7541 times.
✓ Branch 1 taken 7923 times.
|
15464 | for (unsigned i = 0; i < sch->nb_filters; i++) { |
2605 | 7541 | SchFilterGraph *fg = &sch->filters[i]; | |
2606 | |||
2607 | 7541 | err = task_stop(sch, &fg->task); | |
2608 | 7541 | ret = err_merge(ret, err); | |
2609 | } | ||
2610 | |||
2611 |
2/2✓ Branch 0 taken 7709 times.
✓ Branch 1 taken 7923 times.
|
15632 | for (unsigned i = 0; i < sch->nb_enc; i++) { |
2612 | 7709 | SchEnc *enc = &sch->enc[i]; | |
2613 | |||
2614 | 7709 | err = task_stop(sch, &enc->task); | |
2615 | 7709 | ret = err_merge(ret, err); | |
2616 | } | ||
2617 | |||
2618 |
2/2✓ Branch 0 taken 7925 times.
✓ Branch 1 taken 7923 times.
|
15848 | for (unsigned i = 0; i < sch->nb_mux; i++) { |
2619 | 7925 | SchMux *mux = &sch->mux[i]; | |
2620 | |||
2621 | 7925 | err = task_stop(sch, &mux->task); | |
2622 | 7925 | ret = err_merge(ret, err); | |
2623 | } | ||
2624 | |||
2625 |
1/2✓ Branch 0 taken 7923 times.
✗ Branch 1 not taken.
|
7923 | if (finish_ts) |
2626 | 7923 | *finish_ts = trailing_dts(sch, 1); | |
2627 | |||
2628 | 7923 | sch->state = SCH_STATE_STOPPED; | |
2629 | |||
2630 | 7923 | return ret; | |
2631 | } | ||
2632 |