LCOV - code coverage report
Current view: top level - src/libavcodec/x86 - ac3dsp_init.c (source / functions)
Test: coverage.info
Date: 2017-01-21 09:32:20
                 Hit    Total    Coverage
Lines:            35       62      56.5 %
Functions:         1        2      50.0 %

          Line data    Source code
       1             : /*
       2             :  * x86-optimized AC-3 DSP functions
       3             :  * Copyright (c) 2011 Justin Ruggles
       4             :  *
       5             :  * This file is part of FFmpeg.
       6             :  *
       7             :  * FFmpeg is free software; you can redistribute it and/or
       8             :  * modify it under the terms of the GNU Lesser General Public
       9             :  * License as published by the Free Software Foundation; either
      10             :  * version 2.1 of the License, or (at your option) any later version.
      11             :  *
      12             :  * FFmpeg is distributed in the hope that it will be useful,
      13             :  * but WITHOUT ANY WARRANTY; without even the implied warranty of
      14             :  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
      15             :  * Lesser General Public License for more details.
      16             :  *
      17             :  * You should have received a copy of the GNU Lesser General Public
      18             :  * License along with FFmpeg; if not, write to the Free Software
      19             :  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
      20             :  */
      21             : 
      22             : #include "libavutil/attributes.h"
      23             : #include "libavutil/mem.h"
      24             : #include "libavutil/x86/asm.h"
      25             : #include "libavutil/x86/cpu.h"
      26             : #include "libavcodec/ac3.h"
      27             : #include "libavcodec/ac3dsp.h"
      28             : 
      29             : void ff_ac3_exponent_min_mmx   (uint8_t *exp, int num_reuse_blocks, int nb_coefs);
      30             : void ff_ac3_exponent_min_mmxext(uint8_t *exp, int num_reuse_blocks, int nb_coefs);
      31             : void ff_ac3_exponent_min_sse2  (uint8_t *exp, int num_reuse_blocks, int nb_coefs);
      32             : 
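/*
 * Annotation (not part of the original file): the prototypes above are asm
 * versions of the encoder's exponent-minimum pass. A hedged scalar sketch of
 * what they compute, assuming consecutive blocks are spaced AC3_MAX_COEFS
 * (256) exponents apart as in the C fallback:
 */
static void ac3_exponent_min_sketch(uint8_t *exp, int num_reuse_blocks,
                                    int nb_coefs)
{
    int i, blk;
    for (i = 0; i < nb_coefs; i++) {
        uint8_t min_exp = exp[i];
        for (blk = 1; blk <= num_reuse_blocks; blk++)
            if (exp[i + blk * 256] < min_exp)
                min_exp = exp[i + blk * 256];
        exp[i] = min_exp;
    }
}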
      33             : int ff_ac3_max_msb_abs_int16_mmx  (const int16_t *src, int len);
      34             : int ff_ac3_max_msb_abs_int16_mmxext(const int16_t *src, int len);
      35             : int ff_ac3_max_msb_abs_int16_sse2 (const int16_t *src, int len);
      36             : int ff_ac3_max_msb_abs_int16_ssse3(const int16_t *src, int len);
      37             : 
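/*
 * Annotation (not part of the original file): a hedged scalar sketch of the
 * max-MSB search above -- OR together the absolute values of all samples so
 * the caller can read off the highest bit position used by any coefficient:
 */
static int ac3_max_msb_abs_int16_sketch(const int16_t *src, int len)
{
    int i, v = 0;
    for (i = 0; i < len; i++)
        v |= src[i] < 0 ? -src[i] : src[i];
    return v;
}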
      38             : void ff_ac3_lshift_int16_mmx (int16_t *src, unsigned int len, unsigned int shift);
      39             : void ff_ac3_lshift_int16_sse2(int16_t *src, unsigned int len, unsigned int shift);
      40             : 
      41             : void ff_ac3_rshift_int32_mmx (int32_t *src, unsigned int len, unsigned int shift);
      42             : void ff_ac3_rshift_int32_sse2(int32_t *src, unsigned int len, unsigned int shift);
      43             : 
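/*
 * Annotation (not part of the original file): the shift helpers apply one
 * common shift amount to every element, roughly
 *
 *     for (i = 0; i < len; i++) src[i] <<= shift;   (lshift_int16)
 *     for (i = 0; i < len; i++) src[i] >>= shift;   (rshift_int32)
 *
 * The asm presumably processes several elements per iteration, so callers
 * keep len suitably rounded.
 */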
      44             : void ff_float_to_fixed24_3dnow(int32_t *dst, const float *src, unsigned int len);
      45             : void ff_float_to_fixed24_sse  (int32_t *dst, const float *src, unsigned int len);
      46             : void ff_float_to_fixed24_sse2 (int32_t *dst, const float *src, unsigned int len);
      47             : 
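/*
 * Annotation (not part of the original file): float_to_fixed24 converts
 * float samples to 24-bit fixed point, roughly
 *
 *     dst[i] = lrintf(src[i] * (1 << 24));
 *
 * (lrintf() is only illustrative here; the exact rounding behaviour is up
 * to each asm variant).
 */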
      48             : int ff_ac3_compute_mantissa_size_sse2(uint16_t mant_cnt[6][16]);
      49             : 
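/*
 * Annotation (not part of the original file): compute_mantissa_size takes,
 * for each of the 6 audio blocks, a histogram of bit-allocation pointer
 * (bap) values and returns the total number of mantissa bits they imply;
 * baps 1, 2 and 4 are the grouped AC-3 mantissas that pack several values
 * into one code.
 */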
      50             : void ff_ac3_extract_exponents_sse2 (uint8_t *exp, int32_t *coef, int nb_coefs);
      51             : void ff_ac3_extract_exponents_ssse3(uint8_t *exp, int32_t *coef, int nb_coefs);
      52             : 
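/*
 * Annotation (not part of the original file): extract_exponents derives the
 * AC-3 exponent of each 24-bit fixed-point coefficient, roughly
 * exp[i] = 23 - log2(|coef[i]|), clamped to 24 for a zero coefficient.
 */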
      53             : void ff_apply_window_int16_round_mmxext(int16_t *output, const int16_t *input,
      54             :                                         const int16_t *window, unsigned int len);
      55             : void ff_apply_window_int16_round_sse2(int16_t *output, const int16_t *input,
      56             :                                       const int16_t *window, unsigned int len);
      57             : void ff_apply_window_int16_mmxext(int16_t *output, const int16_t *input,
      58             :                                   const int16_t *window, unsigned int len);
      59             : void ff_apply_window_int16_sse2(int16_t *output, const int16_t *input,
      60             :                                 const int16_t *window, unsigned int len);
      61             : void ff_apply_window_int16_ssse3(int16_t *output, const int16_t *input,
      62             :                                  const int16_t *window, unsigned int len);
      63             : void ff_apply_window_int16_ssse3_atom(int16_t *output, const int16_t *input,
      64             :                                       const int16_t *window, unsigned int len);
      65             : 
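/*
 * Annotation (not part of the original file): apply_window_int16 multiplies
 * 16-bit samples by Q15 window coefficients, roughly
 *
 *     output[i] = (input[i] * window[i] + (1 << 14)) >> 15;
 *
 * The "_round" variants are only installed when bit-exact output is not
 * required (see ff_ac3dsp_init_x86() below), so they may use faster
 * multiplies whose rounding differs slightly from the C reference.
 */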
      66             : #if ARCH_X86_32 && defined(__INTEL_COMPILER)
      67             : #       undef HAVE_7REGS
      68             : #       define HAVE_7REGS 0
      69             : #endif
      70             : 
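/*
 * Annotation (not part of the original file): the inline-asm downmix below
 * needs HAVE_7REGS, i.e. seven free general-purpose registers; the block
 * above turns that off for ICC on 32-bit x86, presumably because that
 * compiler does not leave enough GPRs available for this asm.
 */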
      71             : #if HAVE_SSE_INLINE && HAVE_7REGS
      72             : 
      73             : #define IF1(x) x
      74             : #define IF0(x)
      75             : 
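/*
 * Annotation (not part of the original file): MIX5 is the fast path for the
 * symmetric 5.0 -> mono/stereo downmix. xmm5/xmm6/xmm7 hold matrix[0][0],
 * matrix[1][0] and matrix[3][0] broadcast to all four lanes (assuming the
 * usual L, C, R, Ls, Rs channel order). The loop walks the five channel
 * buffers using the negative byte offset i against pointers pre-advanced by
 * len, and stores the mix over samples[0] (and samples[1] in the stereo
 * case). The mono()/stereo() parameters expand to either IF1 (keep the
 * statement) or IF0 (drop it).
 */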
      76             : #define MIX5(mono, stereo)                                      \
      77             :     __asm__ volatile (                                          \
      78             :         "movss           0(%1), %%xmm5          \n"             \
      79             :         "movss           8(%1), %%xmm6          \n"             \
      80             :         "movss          24(%1), %%xmm7          \n"             \
      81             :         "shufps     $0, %%xmm5, %%xmm5          \n"             \
      82             :         "shufps     $0, %%xmm6, %%xmm6          \n"             \
      83             :         "shufps     $0, %%xmm7, %%xmm7          \n"             \
      84             :         "1:                                     \n"             \
      85             :         "movaps       (%0, %2), %%xmm0          \n"             \
      86             :         "movaps       (%0, %3), %%xmm1          \n"             \
      87             :         "movaps       (%0, %4), %%xmm2          \n"             \
      88             :         "movaps       (%0, %5), %%xmm3          \n"             \
      89             :         "movaps       (%0, %6), %%xmm4          \n"             \
      90             :         "mulps          %%xmm5, %%xmm0          \n"             \
      91             :         "mulps          %%xmm6, %%xmm1          \n"             \
      92             :         "mulps          %%xmm5, %%xmm2          \n"             \
      93             :         "mulps          %%xmm7, %%xmm3          \n"             \
      94             :         "mulps          %%xmm7, %%xmm4          \n"             \
      95             :  stereo("addps          %%xmm1, %%xmm0          \n")            \
      96             :         "addps          %%xmm1, %%xmm2          \n"             \
      97             :         "addps          %%xmm3, %%xmm0          \n"             \
      98             :         "addps          %%xmm4, %%xmm2          \n"             \
      99             :    mono("addps          %%xmm2, %%xmm0          \n")            \
     100             :         "movaps         %%xmm0, (%0, %2)        \n"             \
     101             :  stereo("movaps         %%xmm2, (%0, %3)        \n")            \
     102             :         "add               $16, %0              \n"             \
     103             :         "jl                 1b                  \n"             \
     104             :         : "+&r"(i)                                              \
     105             :         : "r"(matrix),                                          \
     106             :           "r"(samples[0] + len),                                \
     107             :           "r"(samples[1] + len),                                \
     108             :           "r"(samples[2] + len),                                \
     109             :           "r"(samples[3] + len),                                \
     110             :           "r"(samples[4] + len)                                 \
     111             :         : XMM_CLOBBERS("%xmm0", "%xmm1", "%xmm2", "%xmm3",      \
     112             :                       "%xmm4", "%xmm5", "%xmm6", "%xmm7",)      \
     113             :          "memory"                                               \
     114             :     );
     115             : 
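/*
 * Annotation (not part of the original file): MIX_MISC is the generic
 * downmix for arbitrary input channel counts. The outer loop (label 1)
 * steps through the samples via the negative offset i (the pointers in
 * samp[] are pre-advanced by len); the inner loop (label 2) accumulates
 * samp[ch] * matrix_simd[ch] over channels 1..in_ch-1, while xmm4/xmm5 are
 * expected to still hold channel 0's broadcast coefficients from the setup
 * asm in ac3_downmix_sse(). The result overwrites the first (and, for
 * stereo, second) input buffer.
 */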
     116             : #define MIX_MISC(stereo)                                        \
     117             :     __asm__ volatile (                                          \
     118             :         "mov              %5, %2            \n"                 \
     119             :         "1:                                 \n"                 \
     120             :         "mov -%c7(%6, %2, %c8), %3          \n"                 \
     121             :         "movaps     (%3, %0), %%xmm0        \n"                 \
     122             :  stereo("movaps       %%xmm0, %%xmm1        \n")                \
     123             :         "mulps        %%xmm4, %%xmm0        \n"                 \
     124             :  stereo("mulps        %%xmm5, %%xmm1        \n")                \
     125             :         "2:                                 \n"                 \
     126             :         "mov   (%6, %2, %c8), %1            \n"                 \
     127             :         "movaps     (%1, %0), %%xmm2        \n"                 \
     128             :  stereo("movaps       %%xmm2, %%xmm3        \n")                \
     129             :         "mulps   (%4, %2, 8), %%xmm2        \n"                 \
     130             :  stereo("mulps 16(%4, %2, 8), %%xmm3        \n")                \
     131             :         "addps        %%xmm2, %%xmm0        \n"                 \
     132             :  stereo("addps        %%xmm3, %%xmm1        \n")                \
     133             :         "add              $4, %2            \n"                 \
     134             :         "jl               2b                \n"                 \
     135             :         "mov              %5, %2            \n"                 \
     136             :  stereo("mov   (%6, %2, %c8), %1            \n")                \
     137             :         "movaps       %%xmm0, (%3, %0)      \n"                 \
     138             :  stereo("movaps       %%xmm1, (%1, %0)      \n")                \
     139             :         "add             $16, %0            \n"                 \
     140             :         "jl               1b                \n"                 \
     141             :         : "+&r"(i), "=&r"(j), "=&r"(k), "=&r"(m)                \
     142             :         : "r"(matrix_simd + in_ch),                             \
     143             :           "g"((intptr_t) - 4 * (in_ch - 1)),                    \
     144             :           "r"(samp + in_ch),                                    \
     145             :           "i"(sizeof(float *)), "i"(sizeof(float *)/4)          \
     146             :         : "memory"                                              \
     147             :     );
     148             : 
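/*
 * Annotation (not part of the original file): ac3_downmix_sse() compares the
 * raw bit patterns of the matrix entries to detect the common symmetric
 * 5.0 -> stereo and 5.0 -> mono cases and dispatches them to MIX5; any other
 * combination broadcasts each coefficient into matrix_simd[] with the short
 * asm loop below and falls back to MIX_MISC.
 */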
     149           0 : static void ac3_downmix_sse(float **samples, float (*matrix)[2],
     150             :                             int out_ch, int in_ch, int len)
     151             : {
     152           0 :     int (*matrix_cmp)[2] = (int(*)[2])matrix;
     153             :     intptr_t i, j, k, m;
     154             : 
     155           0 :     i = -len * sizeof(float);
     156           0 :     if (in_ch == 5 && out_ch == 2 &&
     157           0 :         !(matrix_cmp[0][1] | matrix_cmp[2][0]   |
     158           0 :           matrix_cmp[3][1] | matrix_cmp[4][0]   |
     159           0 :           (matrix_cmp[1][0] ^ matrix_cmp[1][1]) |
     160           0 :           (matrix_cmp[0][0] ^ matrix_cmp[2][1]))) {
     161           0 :         MIX5(IF0, IF1);
     162           0 :     } else if (in_ch == 5 && out_ch == 1 &&
     163           0 :                matrix_cmp[0][0] == matrix_cmp[2][0] &&
     164           0 :                matrix_cmp[3][0] == matrix_cmp[4][0]) {
     165           0 :         MIX5(IF1, IF0);
     166             :     } else {
     167           0 :         LOCAL_ALIGNED(16, float, matrix_simd, [AC3_MAX_CHANNELS], [2][4]);
     168             :         float *samp[AC3_MAX_CHANNELS];
     169             : 
     170           0 :         for (j = 0; j < in_ch; j++)
     171           0 :             samp[j] = samples[j] + len;
     172             : 
     173           0 :         j = 2 * in_ch * sizeof(float);
     174           0 :         __asm__ volatile (
     175             :             "1:                                 \n"
     176             :             "sub             $8, %0             \n"
     177             :             "movss     (%2, %0), %%xmm4         \n"
     178             :             "movss    4(%2, %0), %%xmm5         \n"
     179             :             "shufps          $0, %%xmm4, %%xmm4 \n"
     180             :             "shufps          $0, %%xmm5, %%xmm5 \n"
     181             :             "movaps      %%xmm4,   (%1, %0, 4)  \n"
     182             :             "movaps      %%xmm5, 16(%1, %0, 4)  \n"
     183             :             "jg              1b                 \n"
     184             :             : "+&r"(j)
     185             :             : "r"(matrix_simd), "r"(matrix)
     186             :             : "memory"
     187             :         );
     188           0 :         if (out_ch == 2) {
     189           0 :             MIX_MISC(IF1);
     190             :         } else {
     191           0 :             MIX_MISC(IF0);
     192             :         }
     193             :     }
     194           0 : }
     195             : 
     196             : #endif /* HAVE_SSE_INLINE && HAVE_7REGS */
     197             : 
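/*
 * Annotation (not part of the original file): runtime dispatch. Each
 * EXTERNAL_xxx() check requires the corresponding CPU flag plus standalone
 * assembly support at build time, INLINE_SSE() requires inline-asm support,
 * and each block installs a faster implementation over the one chosen
 * before it, so stronger instruction sets override weaker ones. bit_exact
 * selects variants whose rounding matches the C reference, while
 * AV_CPU_FLAG_SSE2SLOW and AV_CPU_FLAG_ATOM steer around CPUs where a
 * nominally supported instruction set is slow.
 */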
     198          41 : av_cold void ff_ac3dsp_init_x86(AC3DSPContext *c, int bit_exact)
     199             : {
     200          41 :     int cpu_flags = av_get_cpu_flags();
     201             : 
     202          41 :     if (EXTERNAL_MMX(cpu_flags)) {
     203           3 :         c->ac3_exponent_min = ff_ac3_exponent_min_mmx;
     204           3 :         c->ac3_max_msb_abs_int16 = ff_ac3_max_msb_abs_int16_mmx;
     205           3 :         c->ac3_lshift_int16 = ff_ac3_lshift_int16_mmx;
     206           3 :         c->ac3_rshift_int32 = ff_ac3_rshift_int32_mmx;
     207             :     }
     208          41 :     if (EXTERNAL_AMD3DNOW(cpu_flags)) {
     209           0 :         if (!bit_exact) {
     210           0 :             c->float_to_fixed24 = ff_float_to_fixed24_3dnow;
     211             :         }
     212             :     }
     213          41 :     if (EXTERNAL_MMXEXT(cpu_flags)) {
     214           3 :         c->ac3_exponent_min = ff_ac3_exponent_min_mmxext;
     215           3 :         c->ac3_max_msb_abs_int16 = ff_ac3_max_msb_abs_int16_mmxext;
     216           3 :         if (bit_exact) {
     217           0 :             c->apply_window_int16 = ff_apply_window_int16_mmxext;
     218             :         } else {
     219           3 :             c->apply_window_int16 = ff_apply_window_int16_round_mmxext;
     220             :         }
     221             :     }
     222          41 :     if (EXTERNAL_SSE(cpu_flags)) {
     223           3 :         c->float_to_fixed24 = ff_float_to_fixed24_sse;
     224             :     }
     225          41 :     if (EXTERNAL_SSE2(cpu_flags)) {
     226           3 :         c->ac3_exponent_min = ff_ac3_exponent_min_sse2;
     227           3 :         c->ac3_max_msb_abs_int16 = ff_ac3_max_msb_abs_int16_sse2;
     228           3 :         c->float_to_fixed24 = ff_float_to_fixed24_sse2;
     229           3 :         c->compute_mantissa_size = ff_ac3_compute_mantissa_size_sse2;
     230           3 :         c->extract_exponents = ff_ac3_extract_exponents_sse2;
     231           3 :         if (!(cpu_flags & AV_CPU_FLAG_SSE2SLOW)) {
     232           3 :             c->ac3_lshift_int16 = ff_ac3_lshift_int16_sse2;
     233           3 :             c->ac3_rshift_int32 = ff_ac3_rshift_int32_sse2;
     234             :         }
     235           3 :         if (bit_exact) {
     236           0 :             c->apply_window_int16 = ff_apply_window_int16_sse2;
     237           3 :         } else if (!(cpu_flags & AV_CPU_FLAG_SSE2SLOW)) {
     238           3 :             c->apply_window_int16 = ff_apply_window_int16_round_sse2;
     239             :         }
     240             :     }
     241          41 :     if (EXTERNAL_SSSE3(cpu_flags)) {
     242           3 :         c->ac3_max_msb_abs_int16 = ff_ac3_max_msb_abs_int16_ssse3;
     243           3 :         if (cpu_flags & AV_CPU_FLAG_ATOM) {
     244           0 :             c->apply_window_int16 = ff_apply_window_int16_ssse3_atom;
     245             :         } else {
     246           3 :             c->extract_exponents = ff_ac3_extract_exponents_ssse3;
     247           3 :             c->apply_window_int16 = ff_apply_window_int16_ssse3;
     248             :         }
     249             :     }
     250             : 
     251             : #if HAVE_SSE_INLINE && HAVE_7REGS
     252          41 :     if (INLINE_SSE(cpu_flags)) {
     253           3 :         c->downmix = ac3_downmix_sse;
     254             :     }
     255             : #endif
     256          41 : }
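/*
 * Annotation (not part of the original file): a hedged sketch of how the
 * generic layer typically wires this in (the real code lives in
 * libavcodec/ac3dsp.c and may differ):
 */
#if 0
av_cold void ff_ac3dsp_init(AC3DSPContext *c, int bit_exact)
{
    c->ac3_exponent_min = ac3_exponent_min_c;   /* install the C fallbacks */
    /* ... remaining function pointers ... */
    if (ARCH_X86)
        ff_ac3dsp_init_x86(c, bit_exact);       /* then override per-CPU   */
}
#endif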

Generated by: LCOV version 1.12