GCC Code Coverage Report
Directory: ../../../ffmpeg/
File:      src/libavcodec/x86/mathops.h
Date:      2020-08-14 10:39:37

           Exec    Total   Coverage
Lines:     12      12      100.0 %
Branches:  0       0         -

Source (the 12 instrumented lines fall in mid_pred, NEG_SSR32 and NEG_USR32; their execution counts are noted as comments on those functions):
/*
 * simple math operations
 * Copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at> et al
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef AVCODEC_X86_MATHOPS_H
#define AVCODEC_X86_MATHOPS_H

#include "config.h"

#include "libavutil/common.h"
#include "libavutil/x86/asm.h"

#if HAVE_INLINE_ASM

#if ARCH_X86_32

#define MULL MULL
static av_always_inline av_const int MULL(int a, int b, unsigned shift)
{
    int rt, dummy;
    __asm__ (
        "imull %3               \n\t"
        "shrdl %4, %%edx, %%eax \n\t"
        :"=a"(rt), "=d"(dummy)
        :"a"(a), "rm"(b), "ci"((uint8_t)shift)
    );
    return rt;
}
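
imull leaves the full 64-bit product in edx:eax, and shrdl shifts the low word right while pulling bits in from the high word. A portable C sketch of what MULL computes, assuming shift < 32 (x86 masks shift counts to 5 bits); the helper name mull_c is hypothetical, not part of this file:

    #include <stdint.h>

    /* Signed 64-bit product of a and b, arithmetically shifted right:
     * the same result the imull + shrdl pair produces above. */
    static inline int mull_c(int a, int b, unsigned shift)
    {
        return (int)(((int64_t)a * b) >> shift);
    }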

#define MULH MULH
static av_always_inline av_const int MULH(int a, int b)
{
    int rt, dummy;
    __asm__ (
        "imull %3"
        :"=d"(rt), "=a"(dummy)
        :"a"(a), "rm"(b)
    );
    return rt;
}
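
MULH keeps only the high half of the product, which imull leaves in edx (the "=d" output). A portable equivalent (hypothetical name mulh_c):

    #include <stdint.h>

    /* High 32 bits of the signed 64-bit product of a and b. */
    static inline int mulh_c(int a, int b)
    {
        return (int)(((int64_t)a * b) >> 32);
    }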

#define MUL64 MUL64
static av_always_inline av_const int64_t MUL64(int a, int b)
{
    int64_t rt;
    __asm__ (
        "imull %2"
        :"=A"(rt)
        :"a"(a), "rm"(b)
    );
    return rt;
}
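
Here the "=A" constraint binds the edx:eax register pair, so a single imull yields the complete 64-bit product. The portable form (hypothetical name mul64_c):

    #include <stdint.h>

    static inline int64_t mul64_c(int a, int b)
    {
        return (int64_t)a * b;  /* promote before multiplying to keep all 64 bits */
    }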

#endif /* ARCH_X86_32 */

#if HAVE_I686
/* median of 3 */
#define mid_pred mid_pred
static inline av_const int mid_pred(int a, int b, int c) /* executed 754,545,018 times */
{
    int i = b;
    __asm__ (
        "cmp    %2, %1 \n\t"
        "cmovg  %1, %0 \n\t"
        "cmovg  %2, %1 \n\t"
        "cmp    %3, %1 \n\t"
        "cmovl  %3, %1 \n\t"
        "cmp    %1, %0 \n\t"
        "cmovg  %1, %0 \n\t"
        :"+&r"(i), "+&r"(a)
        :"r"(b), "r"(c)
    );
    return i;
}
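
The cmov sequence picks the median of three values without a single branch, which matters for the hot prediction loops behind the execution count above. A branchy but readable C sketch of the same result (hypothetical name mid_pred_c):

    static inline int mid_pred_c(int a, int b, int c)
    {
        if (a > b) { int t = a; a = b; b = t; }  /* ensure a <= b            */
        if (b > c) b = c;                        /* b = min(b, c)            */
        return a > b ? a : b;                    /* max(a, min(b, c)) = median */
    }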

#if HAVE_6REGS
#define COPY3_IF_LT(x, y, a, b, c, d)\
__asm__ volatile(\
    "cmpl  %0, %3       \n\t"\
    "cmovl %3, %0       \n\t"\
    "cmovl %4, %1       \n\t"\
    "cmovl %5, %2       \n\t"\
    : "+&r" (x), "+&r" (a), "+r" (c)\
    : "r" (y), "r" (b), "r" (d)\
);
#endif /* HAVE_6REGS */
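
COPY3_IF_LT conditionally copies three values with cmov instead of a branch: if y < x, it replaces x with y and carries b and d along into a and c. In C terms (hypothetical name COPY3_IF_LT_C), roughly:

    #define COPY3_IF_LT_C(x, y, a, b, c, d) do {  \
        if ((y) < (x)) {                          \
            (x) = (y);                            \
            (a) = (b);                            \
            (c) = (d);                            \
        }                                         \
    } while (0)

The typical use is tracking a running minimum (such as a best score) together with two associated values, updated every iteration without a mispredictable branch.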

#endif /* HAVE_I686 */

#define MASK_ABS(mask, level)                   \
    __asm__ ("cdq                    \n\t"      \
             "xorl %1, %0            \n\t"      \
             "subl %1, %0            \n\t"      \
             : "+a"(level), "=&d"(mask))
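
MASK_ABS is the classic branchless absolute value: cdq sign-extends level into mask (0 for non-negative input, -1 for negative), then the xor/subtract pair flips the sign when mask is -1. A portable sketch (hypothetical name MASK_ABS_C; assumes arithmetic right shift of signed ints and, like the asm, maps INT_MIN to itself):

    #define MASK_ABS_C(mask, level) do {                        \
        (mask)  = (level) >> 31;               /* 0 or -1    */ \
        (level) = ((level) ^ (mask)) - (mask); /* |level|    */ \
    } while (0)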

// avoid +32 for shift optimization (gcc should do that ...)
#define NEG_SSR32 NEG_SSR32
static inline int32_t NEG_SSR32(int32_t a, int8_t s) /* executed 387,960,547 times */
{
    __asm__ ("sarl %1, %0\n\t"
         : "+r" (a)
         : "ic" ((uint8_t)(-s))
    );
    return a;
}

#define NEG_USR32 NEG_USR32
static inline uint32_t NEG_USR32(uint32_t a, int8_t s) /* executed 1,830,337,422 times */
{
    __asm__ ("shrl %1, %0\n\t"
         : "+r" (a)
         : "ic" ((uint8_t)(-s))
    );
    return a;
}
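
Both helpers extract the s most-significant bits of a 32-bit value. Because x86 masks shift counts to their low 5 bits, shifting by (uint8_t)(-s) is the same as shifting by 32 - s without computing the subtraction; that is what the "avoid +32" comment refers to. Portable equivalents, valid for 1 <= s <= 32 (hypothetical names):

    #include <stdint.h>

    static inline int32_t  neg_ssr32_c(int32_t a, int8_t s)  { return a >> (32 - s); } /* sign-extended */
    static inline uint32_t neg_usr32_c(uint32_t a, int8_t s) { return a >> (32 - s); } /* zero-extended */

The very large execution counts recorded on these two functions are consistent with their role as tight inner-loop bit-extraction primitives.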

#endif /* HAVE_INLINE_ASM */
#endif /* AVCODEC_X86_MATHOPS_H */