libavcodec/x86/mathops.h
/*
 * simple math operations
 * Copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at> et al
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef AVCODEC_X86_MATHOPS_H
#define AVCODEC_X86_MATHOPS_H

#include "config.h"
#include "libavutil/common.h"

#if ARCH_X86_32

/* MULL: widening 32x32 -> 64 signed multiply, then right shift by
 * `shift`, truncated to 32 bits. imull leaves the 64-bit product in
 * edx:eax; shrdl shifts it down into eax. */
#define MULL MULL
static av_always_inline av_const int MULL(int a, int b, unsigned shift)
{
    int rt, dummy;
    __asm__ (
        "imull %3               \n\t"
        "shrdl %4, %%edx, %%eax \n\t"
        :"=a"(rt), "=d"(dummy)
        :"a"(a), "rm"(b), "ci"((uint8_t)shift)
    );
    return rt;
}
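
/* Illustrative only: a portable C sketch of what MULL computes. The
 * MULL_ref name is hypothetical, not part of this header; the asm
 * version avoids materializing a 64-bit temporary on 32-bit x86. */
static av_always_inline av_const int MULL_ref(int a, int b, unsigned shift)
{
    return (int)(((int64_t)a * b) >> shift);
}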

/* MULH: high 32 bits of the signed 64-bit product a*b (left in edx by
 * imull). */
#define MULH MULH
static av_always_inline av_const int MULH(int a, int b)
{
    int rt, dummy;
    __asm__ (
        "imull %3"
        :"=d"(rt), "=a"(dummy)
        :"a"(a), "rm"(b)
    );
    return rt;
}
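
/* Illustrative only: a portable C sketch of MULH (hypothetical name). */
static av_always_inline av_const int MULH_ref(int a, int b)
{
    return (int)(((int64_t)a * b) >> 32);
}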

/* MUL64: full signed 64-bit product of two 32-bit ints; the "=A"
 * constraint binds the edx:eax pair to the int64_t result. */
#define MUL64 MUL64
static av_always_inline av_const int64_t MUL64(int a, int b)
{
    int64_t rt;
    __asm__ (
        "imull %2"
        :"=A"(rt)
        :"a"(a), "rm"(b)
    );
    return rt;
}
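
/* Illustrative only: the equivalent portable expression, as a sketch
 * (hypothetical name): */
static av_always_inline av_const int64_t MUL64_ref(int a, int b)
{
    return (int64_t)a * b;
}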

#endif /* ARCH_X86_32 */

#if HAVE_CMOV
/* median of 3 */
#define mid_pred mid_pred
static inline av_const int mid_pred(int a, int b, int c)
{
    int i = b;
    __asm__ volatile(
        "cmp    %2, %1 \n\t" /* flags = a - b                  */
        "cmovg  %1, %0 \n\t" /* i = max(a, b)                  */
        "cmovg  %2, %1 \n\t" /* a = min(a, b)                  */
        "cmp    %3, %1 \n\t" /* flags = min(a, b) - c          */
        "cmovl  %3, %1 \n\t" /* a = max(min(a, b), c)          */
        "cmp    %1, %0 \n\t" /* flags = i - a                  */
        "cmovg  %1, %0 \n\t" /* i = min(i, a), i.e. the median */
        :"+&r"(i), "+&r"(a)
        :"r"(b), "r"(c)
    );
    return i;
}
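
/* Illustrative only: a branchy C sketch of the same median-of-3
 * (hypothetical name); the cmov version above avoids branches. */
static inline av_const int mid_pred_ref(int a, int b, int c)
{
    if (a > b) FFSWAP(int, a, b); /* now a <= b            */
    if (b > c) b = c;             /* b = min(b, c)         */
    return FFMAX(a, b);           /* max of the two = median */
}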
#endif

#if HAVE_CMOV
/* If y < x: x = y, a = b, c = d, done with one compare and three
 * conditional moves instead of a branch. */
#define COPY3_IF_LT(x, y, a, b, c, d)\
__asm__ volatile(\
    "cmpl  %0, %3       \n\t"\
    "cmovl %3, %0       \n\t"\
    "cmovl %4, %1       \n\t"\
    "cmovl %5, %2       \n\t"\
    : "+&r" (x), "+&r" (a), "+r" (c)\
    : "r" (y), "r" (b), "r" (d)\
);
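
/* Illustrative only: the plain C form of the same operation
 * (hypothetical macro name, mirroring the generic fallback style): */
#define COPY3_IF_LT_REF(x, y, a, b, c, d)\
    if ((y) < (x)) {\
        (x) = (y);\
        (a) = (b);\
        (c) = (d);\
    }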
#endif

/* Shift right by (32 - s) without computing "32 - s" at runtime: x86
 * masks 32-bit shift counts to their low 5 bits, so a count of -s is
 * equivalent (gcc should be able to do this itself ...). */
#define NEG_SSR32 NEG_SSR32
static inline int32_t NEG_SSR32(int32_t a, int8_t s){
    __asm__ ("sarl %1, %0\n\t"
         : "+r" (a)
         : "ic" ((uint8_t)(-s))
    );
    return a;
}
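
/* Illustrative only: NEG_SSR32(a, s) behaves like this portable sketch
 * (hypothetical name), assuming 0 < s <= 32 as at the call sites: */
static inline int32_t NEG_SSR32_ref(int32_t a, int8_t s)
{
    return a >> (32 - s);
}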

#define NEG_USR32 NEG_USR32
static inline uint32_t NEG_USR32(uint32_t a, int8_t s){
    __asm__ ("shrl %1, %0\n\t"
         : "+r" (a)
         : "ic" ((uint8_t)(-s))
    );
    return a;
}
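
/* Illustrative only: the unsigned counterpart, again as a hedged
 * portable sketch (hypothetical name): */
static inline uint32_t NEG_USR32_ref(uint32_t a, int8_t s)
{
    return a >> (32 - s);
}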

#endif /* AVCODEC_X86_MATHOPS_H */