FFmpeg coverage


Directory: ../../../ffmpeg/
File: src/libavcodec/refstruct.c
Date: 2024-07-26 21:54:09
Exec Total Coverage
Lines: 150 163 92.0%
Functions: 20 20 100.0%
Branches: 46 58 79.3%

Line Branch Exec Source
1 /*
2 * This file is part of FFmpeg.
3 *
4 * FFmpeg is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU Lesser General Public
6 * License as published by the Free Software Foundation; either
7 * version 2.1 of the License, or (at your option) any later version.
8 *
9 * FFmpeg is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * Lesser General Public License for more details.
13 *
14 * You should have received a copy of the GNU Lesser General Public
15 * License along with FFmpeg; if not, write to the Free Software
16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17 */
18
19 #include <stdatomic.h>
20 #include <stdint.h>
21 #include <string.h>
22
23 #include "refstruct.h"
24
25 #include "libavutil/avassert.h"
26 #include "libavutil/error.h"
27 #include "libavutil/macros.h"
28 #include "libavutil/mem.h"
29 #include "libavutil/mem_internal.h"
30 #include "libavutil/thread.h"
31
/* Enable consistency checks (cookie validation) when assertions are on,
 * unless the build overrides REFSTRUCT_CHECKED explicitly. */
#ifndef REFSTRUCT_CHECKED
#ifndef ASSERT_LEVEL
#define ASSERT_LEVEL 0
#endif
#define REFSTRUCT_CHECKED (ASSERT_LEVEL >= 1)
#endif

/* ff_assert() is a hard assert in checked builds and a no-op otherwise. */
#if REFSTRUCT_CHECKED
#define ff_assert(cond) av_assert0(cond)
#else
#define ff_assert(cond) ((void)0)
#endif

/* Magic value ("RefStruc" in native byte order) stored in every RefCount
 * header in checked builds to catch pointers that were not allocated here. */
#define REFSTRUCT_COOKIE AV_NE((uint64_t)MKBETAG('R', 'e', 'f', 'S') << 32 | MKBETAG('t', 'r', 'u', 'c'), \
                               MKTAG('R', 'e', 'f', 'S') | (uint64_t)MKTAG('t', 'r', 'u', 'c') << 32)

/* Distance from the start of the allocation (the RefCount header) to the
 * user data; padded so the user data meets the strictest alignment we know. */
#if __STDC_VERSION__ >= 201112L && !defined(_MSC_VER)
#define REFCOUNT_OFFSET FFALIGN(sizeof(RefCount), FFMAX(ALIGN_64, _Alignof(max_align_t)))
#else
#define REFCOUNT_OFFSET FFALIGN(sizeof(RefCount), ALIGN_64)
#endif
53
/**
 * Bookkeeping header placed immediately before every refcounted object.
 */
typedef struct RefCount {
    /**
     * An uintptr_t is big enough to hold the address of every reference,
     * so no overflow can happen when incrementing the refcount as long as
     * the user does not throw away references.
     */
    atomic_uintptr_t refcount;
    /** User-provided opaque, forwarded to free_cb on destruction. */
    FFRefStructOpaque opaque;
    /** Optional user callback invoked right before the object is freed. */
    void (*free_cb)(FFRefStructOpaque opaque, void *obj);
    /** Deallocator for the whole allocation; av_free by default,
     *  pool_return_entry for pooled entries. */
    void (*free)(void *ref);

#if REFSTRUCT_CHECKED
    /** Set to REFSTRUCT_COOKIE; validated in checked builds. */
    uint64_t cookie;
#endif
} RefCount;
69
70 3442381 static RefCount *get_refcount(void *obj)
71 {
72 3442381 RefCount *ref = (RefCount*)((char*)obj - REFCOUNT_OFFSET);
73 ff_assert(ref->cookie == REFSTRUCT_COOKIE);
74 3442381 return ref;
75 }
76
77 307 static const RefCount *cget_refcount(const void *obj)
78 {
79 307 const RefCount *ref = (const RefCount*)((const char*)obj - REFCOUNT_OFFSET);
80 ff_assert(ref->cookie == REFSTRUCT_COOKIE);
81 307 return ref;
82 }
83
84 2525999 static void *get_userdata(void *buf)
85 {
86 2525999 return (char*)buf + REFCOUNT_OFFSET;
87 }
88
89 269404 static void refcount_init(RefCount *ref, FFRefStructOpaque opaque,
90 void (*free_cb)(FFRefStructOpaque opaque, void *obj))
91 {
92 269404 atomic_init(&ref->refcount, 1);
93 269404 ref->opaque = opaque;
94 269404 ref->free_cb = free_cb;
95 269404 ref->free = av_free;
96
97 #if REFSTRUCT_CHECKED
98 ref->cookie = REFSTRUCT_COOKIE;
99 #endif
100 269404 }
101
102 269404 void *ff_refstruct_alloc_ext_c(size_t size, unsigned flags, FFRefStructOpaque opaque,
103 void (*free_cb)(FFRefStructOpaque opaque, void *obj))
104 {
105 void *buf, *obj;
106
107
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 269404 times.
269404 if (size > SIZE_MAX - REFCOUNT_OFFSET)
108 return NULL;
109 269404 buf = av_malloc(size + REFCOUNT_OFFSET);
110
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 269404 times.
269404 if (!buf)
111 return NULL;
112 269404 refcount_init(buf, opaque, free_cb);
113 269404 obj = get_userdata(buf);
114
2/2
✓ Branch 0 taken 260189 times.
✓ Branch 1 taken 9215 times.
269404 if (!(flags & FF_REFSTRUCT_FLAG_NO_ZEROING))
115 260189 memset(obj, 0, size);
116
117 269404 return obj;
118 }
119
120 5304277 void ff_refstruct_unref(void *objp)
121 {
122 void *obj;
123 RefCount *ref;
124
125 5304277 memcpy(&obj, objp, sizeof(obj));
126
2/2
✓ Branch 0 taken 2416769 times.
✓ Branch 1 taken 2887508 times.
5304277 if (!obj)
127 2416769 return;
128 2887508 memcpy(objp, &(void *){ NULL }, sizeof(obj));
129
130 2887508 ref = get_refcount(obj);
131
2/2
✓ Branch 0 taken 2511960 times.
✓ Branch 1 taken 375548 times.
2887508 if (atomic_fetch_sub_explicit(&ref->refcount, 1, memory_order_acq_rel) == 1) {
132
2/2
✓ Branch 0 taken 127098 times.
✓ Branch 1 taken 2384862 times.
2511960 if (ref->free_cb)
133 127098 ref->free_cb(ref->opaque, obj);
134 2511960 ref->free(ref);
135 }
136
137 2887508 return;
138 }
139
140 40386 void *ff_refstruct_ref(void *obj)
141 {
142 40386 RefCount *ref = get_refcount(obj);
143
144 40386 atomic_fetch_add_explicit(&ref->refcount, 1, memory_order_relaxed);
145
146 40386 return obj;
147 }
148
149 335162 const void *ff_refstruct_ref_c(const void *obj)
150 {
151 /* Casting const away here is fine, as it is only supposed
152 * to apply to the user's data and not our bookkeeping data. */
153 335162 RefCount *ref = get_refcount((void*)obj);
154
155 335162 atomic_fetch_add_explicit(&ref->refcount, 1, memory_order_relaxed);
156
157 335162 return obj;
158 }
159
/**
 * Make *dstp refer to src: drop the old reference in *dstp (if any)
 * and acquire a new reference to src (if non-NULL).
 * A no-op when *dstp already equals src.
 */
void ff_refstruct_replace(void *dstp, const void *src)
{
    const void *cur;

    memcpy(&cur, dstp, sizeof(cur));
    if (cur == src)
        return;

    ff_refstruct_unref(dstp);
    if (src) {
        cur = ff_refstruct_ref_c(src);
        memcpy(dstp, &cur, sizeof(cur));
    }
}
173
174 307 int ff_refstruct_exclusive(const void *obj)
175 {
176 307 const RefCount *ref = cget_refcount(obj);
177 /* Casting const away here is safe, because it is a load.
178 * It is necessary because atomic_load_explicit() does not
179 * accept const atomics in C11 (see also N1807). */
180 307 return atomic_load_explicit((atomic_uintptr_t*)&ref->refcount, memory_order_acquire) == 1;
181 }
182
/**
 * A pool of refstruct entries: freed entries are returned to the pool's
 * free list instead of being deallocated, and reused on the next get.
 */
struct FFRefStructPool {
    size_t size;                 ///< size of each entry's user data
    FFRefStructOpaque opaque;    ///< forwarded to the user callbacks below
    int  (*init_cb)(FFRefStructOpaque opaque, void *obj);   ///< run once per freshly allocated entry
    void (*reset_cb)(FFRefStructOpaque opaque, void *obj);  ///< run when an entry is returned to the pool
    void (*free_entry_cb)(FFRefStructOpaque opaque, void *obj); ///< run when an entry is actually freed
    void (*free_cb)(FFRefStructOpaque opaque);              ///< run when the pool itself is freed

    int uninited;                ///< set once the pool has been uninitialized; entries are then freed, not cached
    unsigned entry_flags;        ///< FF_REFSTRUCT_FLAG_* applied to each entry allocation
    unsigned pool_flags;         ///< FF_REFSTRUCT_POOL_FLAG_* controlling pool behavior

    /** The number of outstanding entries not in available_entries. */
    atomic_uintptr_t refcount;
    /**
     * This is a linked list of available entries;
     * the RefCount's opaque pointer is used as next pointer
     * for available entries.
     * While the entries are in use, the opaque is a pointer
     * to the corresponding FFRefStructPool.
     */
    RefCount *available_entries;
    AVMutex mutex;               ///< protects uninited and available_entries
};
207
/**
 * Actually free the pool object itself. Called once the pool's
 * refcount (outstanding entries plus the user's pool reference)
 * has dropped to zero.
 */
static void pool_free(FFRefStructPool *pool)
{
    ff_mutex_destroy(&pool->mutex);
    if (pool->free_cb)
        pool->free_cb(pool->opaque);
    /* The pool itself is a refstruct object; free its whole allocation. */
    av_free(get_refcount(pool));
}
215
/**
 * Actually deallocate a pool entry (as opposed to returning it
 * to the free list), running the user's free_entry_cb first.
 */
static void pool_free_entry(FFRefStructPool *pool, RefCount *ref)
{
    if (pool->free_entry_cb)
        pool->free_entry_cb(pool->opaque, get_userdata(ref));
    av_free(ref);
}
222
/**
 * Deallocator installed on pooled entries: instead of freeing the
 * entry, put it back on its pool's free list — unless the pool has
 * already been uninitialized, in which case the entry is freed for
 * real. Finally drop the entry's reference to the pool itself.
 */
static void pool_return_entry(void *ref_)
{
    RefCount *ref = ref_;
    FFRefStructPool *pool = ref->opaque.nc;

    ff_mutex_lock(&pool->mutex);
    if (!pool->uninited) {
        /* Prepend to the free list; opaque doubles as the next pointer. */
        ref->opaque.nc = pool->available_entries;
        pool->available_entries = ref;
        ref = NULL;
    }
    ff_mutex_unlock(&pool->mutex);

    /* Pool already uninitialized: free the entry outside the lock. */
    if (ref)
        pool_free_entry(pool, ref);

    /* Drop this entry's reference to the pool; last one frees the pool. */
    if (atomic_fetch_sub_explicit(&pool->refcount, 1, memory_order_acq_rel) == 1)
        pool_free(pool);
}
242
243 37162 static void pool_reset_entry(FFRefStructOpaque opaque, void *entry)
244 {
245 37162 FFRefStructPool *pool = opaque.nc;
246
247 37162 pool->reset_cb(pool->opaque, entry);
248 37162 }
249
/**
 * Get an entry from the pool: reuse one from the free list if
 * available, otherwise allocate a fresh one (running init_cb on it).
 *
 * @param datap receives the entry pointer, or NULL on failure
 * @param pool  the pool to draw from
 * @return 0 on success, a negative AVERROR code on failure
 */
static int refstruct_pool_get_ext(void *datap, FFRefStructPool *pool)
{
    void *ret = NULL;

    memcpy(datap, &(void *){ NULL }, sizeof(void*));

    ff_mutex_lock(&pool->mutex);
    ff_assert(!pool->uninited);
    if (pool->available_entries) {
        /* Pop the head of the free list and re-arm its refcount. */
        RefCount *ref = pool->available_entries;
        ret = get_userdata(ref);
        pool->available_entries = ref->opaque.nc;
        /* While in use, opaque points back to the owning pool. */
        ref->opaque.nc = pool;
        atomic_init(&ref->refcount, 1);
    }
    ff_mutex_unlock(&pool->mutex);

    if (!ret) {
        /* Free list empty: allocate a brand-new entry. */
        RefCount *ref;
        ret = ff_refstruct_alloc_ext(pool->size, pool->entry_flags, pool,
                                     pool->reset_cb ? pool_reset_entry : NULL);
        if (!ret)
            return AVERROR(ENOMEM);
        ref = get_refcount(ret);
        /* Unref returns the entry to the pool instead of freeing it. */
        ref->free = pool_return_entry;
        if (pool->init_cb) {
            int err = pool->init_cb(pool->opaque, ret);
            if (err < 0) {
                /* init failed: optionally reset/free the user data,
                 * then free the raw allocation directly. */
                if (pool->pool_flags & FF_REFSTRUCT_POOL_FLAG_RESET_ON_INIT_ERROR)
                    pool->reset_cb(pool->opaque, ret);
                if (pool->pool_flags & FF_REFSTRUCT_POOL_FLAG_FREE_ON_INIT_ERROR)
                    pool->free_entry_cb(pool->opaque, ret);
                av_free(ref);
                return err;
            }
        }
    }
    /* The entry holds a reference to the pool while outstanding. */
    atomic_fetch_add_explicit(&pool->refcount, 1, memory_order_relaxed);

    if (pool->pool_flags & FF_REFSTRUCT_POOL_FLAG_ZERO_EVERY_TIME)
        memset(ret, 0, pool->size);

    memcpy(datap, &ret, sizeof(ret));

    return 0;
}
296
297 2401615 void *ff_refstruct_pool_get(FFRefStructPool *pool)
298 {
299 void *ret;
300 2401615 refstruct_pool_get_ext(&ret, pool);
301 2401615 return ret;
302 }
303
304 /**
305 * Hint: The content of pool_unref() and refstruct_pool_uninit()
306 * could currently be merged; they are only separate functions
307 * in case we would ever introduce weak references.
308 */
309 10133 static void pool_unref(void *ref)
310 {
311 10133 FFRefStructPool *pool = get_userdata(ref);
312
2/2
✓ Branch 0 taken 4815 times.
✓ Branch 1 taken 5318 times.
10133 if (atomic_fetch_sub_explicit(&pool->refcount, 1, memory_order_acq_rel) == 1)
313 4815 pool_free(pool);
314 10133 }
315
/**
 * free_cb of the pool object itself: marks the pool as uninitialized
 * so returning entries are freed rather than cached, detaches the free
 * list under the lock, and frees the cached entries outside the lock.
 * Outstanding entries keep the pool alive via its refcount.
 */
static void refstruct_pool_uninit(FFRefStructOpaque unused, void *obj)
{
    FFRefStructPool *pool = obj;
    RefCount *entry;

    ff_mutex_lock(&pool->mutex);
    ff_assert(!pool->uninited);
    pool->uninited = 1;
    /* Take ownership of the whole free list while holding the lock. */
    entry = pool->available_entries;
    pool->available_entries = NULL;
    ff_mutex_unlock(&pool->mutex);

    while (entry) {
        /* opaque serves as the next pointer for cached entries. */
        void *next = entry->opaque.nc;
        pool_free_entry(pool, entry);
        entry = next;
    }
}
334
/**
 * Allocate a pool without any callbacks or opaque; equivalent to
 * ff_refstruct_pool_alloc_ext() with all callbacks NULL.
 */
FFRefStructPool *ff_refstruct_pool_alloc(size_t size, unsigned flags)
{
    return ff_refstruct_pool_alloc_ext(size, flags, NULL, NULL, NULL, NULL, NULL);
}
339
/**
 * Allocate a pool whose entries have the given size, with optional
 * per-entry init/reset/free callbacks and a pool-level free callback.
 *
 * @param size          size of each entry's user data
 * @param flags         FF_REFSTRUCT_POOL_FLAG_* flags
 * @param opaque        forwarded to all callbacks
 * @param init_cb       run once when an entry is first allocated
 * @param reset_cb      run whenever an entry is returned to the pool
 * @param free_entry_cb run when an entry is actually freed
 * @param free_cb       run when the pool itself is freed
 * @return the new pool, or NULL on failure
 */
FFRefStructPool *ff_refstruct_pool_alloc_ext_c(size_t size, unsigned flags,
                                               FFRefStructOpaque opaque,
                                               int  (*init_cb)(FFRefStructOpaque opaque, void *obj),
                                               void (*reset_cb)(FFRefStructOpaque opaque, void *obj),
                                               void (*free_entry_cb)(FFRefStructOpaque opaque, void *obj),
                                               void (*free_cb)(FFRefStructOpaque opaque))
{
    /* The pool is itself a refstruct object; its free_cb tears down
     * the cached entries. */
    FFRefStructPool *pool = ff_refstruct_alloc_ext(sizeof(*pool), 0, NULL,
                                                   refstruct_pool_uninit);
    int err;

    if (!pool)
        return NULL;
    /* Replace the default deallocator so the pool outlives its last
     * outstanding entry, not just the user's reference. */
    get_refcount(pool)->free = pool_unref;

    pool->size          = size;
    pool->opaque        = opaque;
    pool->init_cb       = init_cb;
    pool->reset_cb      = reset_cb;
    pool->free_entry_cb = free_entry_cb;
    pool->free_cb       = free_cb;
/* Flags shared between pool flags and per-entry allocation flags. */
#define COMMON_FLAGS FF_REFSTRUCT_POOL_FLAG_NO_ZEROING
    pool->entry_flags   = flags & COMMON_FLAGS;
    // Filter out nonsense combinations to avoid checks later.
    if (!pool->reset_cb)
        flags &= ~FF_REFSTRUCT_POOL_FLAG_RESET_ON_INIT_ERROR;
    if (!pool->free_entry_cb)
        flags &= ~FF_REFSTRUCT_POOL_FLAG_FREE_ON_INIT_ERROR;
    pool->pool_flags    = flags;

    if (flags & FF_REFSTRUCT_POOL_FLAG_ZERO_EVERY_TIME) {
        // We will zero the buffer before every use, so zeroing
        // upon allocating the buffer is unnecessary.
        pool->entry_flags |= FF_REFSTRUCT_FLAG_NO_ZEROING;
    }

    /* The user's reference is the pool's initial refcount. */
    atomic_init(&pool->refcount, 1);

    err = ff_mutex_init(&pool->mutex, NULL);
    if (err) {
        // Don't call ff_refstruct_uninit() on pool, as it hasn't been properly
        // set up and is just a POD right now.
        av_free(get_refcount(pool));
        return NULL;
    }
    return pool;
}
387