Diffstat (limited to 'libs/libopus/celt/x86')
-rw-r--r--  libs/libopus/celt/x86/celt_lpc_sse.h                                                    |  10
-rw-r--r--  libs/libopus/celt/x86/celt_lpc_sse4_1.c (renamed from libs/libopus/celt/x86/celt_lpc_sse.c) |  57
-rw-r--r--  libs/libopus/celt/x86/pitch_sse.h                                                       |   2
-rw-r--r--  libs/libopus/celt/x86/pitch_sse4_1.c                                                    |  51
-rw-r--r--  libs/libopus/celt/x86/vq_sse.h                                                          |  50
-rw-r--r--  libs/libopus/celt/x86/vq_sse2.c                                                         | 217
-rw-r--r--  libs/libopus/celt/x86/x86_celt_map.c                                                    |  14
-rw-r--r--  libs/libopus/celt/x86/x86cpu.h                                                          |  32
8 files changed, 342 insertions, 91 deletions
diff --git a/libs/libopus/celt/x86/celt_lpc_sse.h b/libs/libopus/celt/x86/celt_lpc_sse.h
index c5ec796ed..7d1ecf753 100644
--- a/libs/libopus/celt/x86/celt_lpc_sse.h
+++ b/libs/libopus/celt/x86/celt_lpc_sse.h
@@ -41,12 +41,11 @@ void celt_fir_sse4_1(
     opus_val16 *y,
     int N,
     int ord,
-    opus_val16 *mem,
     int arch);

 #if defined(OPUS_X86_PRESUME_SSE4_1)
-#define celt_fir(x, num, y, N, ord, mem, arch) \
-    ((void)arch, celt_fir_sse4_1(x, num, y, N, ord, mem, arch))
+#define celt_fir(x, num, y, N, ord, arch) \
+    ((void)arch, celt_fir_sse4_1(x, num, y, N, ord, arch))

 #else

@@ -56,11 +55,10 @@ extern void (*const CELT_FIR_IMPL[OPUS_ARCHMASK + 1])(
     opus_val16 *y,
     int N,
     int ord,
-    opus_val16 *mem,
     int arch);

-# define celt_fir(x, num, y, N, ord, mem, arch) \
-    ((*CELT_FIR_IMPL[(arch) & OPUS_ARCHMASK])(x, num, y, N, ord, mem, arch))
+# define celt_fir(x, num, y, N, ord, arch) \
+    ((*CELT_FIR_IMPL[(arch) & OPUS_ARCHMASK])(x, num, y, N, ord, arch))

 #endif
 #endif
diff --git a/libs/libopus/celt/x86/celt_lpc_sse.c b/libs/libopus/celt/x86/celt_lpc_sse4_1.c
index 67e5592ac..547856884 100644
--- a/libs/libopus/celt/x86/celt_lpc_sse.c
+++ b/libs/libopus/celt/x86/celt_lpc_sse4_1.c
@@ -40,65 +40,23 @@

 #if defined(FIXED_POINT)

-void celt_fir_sse4_1(const opus_val16 *_x,
+void celt_fir_sse4_1(const opus_val16 *x,
          const opus_val16 *num,
-         opus_val16 *_y,
+         opus_val16 *y,
          int N,
          int ord,
-         opus_val16 *mem,
          int arch)
 {
     int i,j;
     VARDECL(opus_val16, rnum);
-    VARDECL(opus_val16, x);
     __m128i vecNoA;
     opus_int32 noA ;
     SAVE_STACK;

     ALLOC(rnum, ord, opus_val16);
-    ALLOC(x, N+ord, opus_val16);
     for(i=0;i<ord;i++)
        rnum[i] = num[ord-i-1];
-    for(i=0;i<ord;i++)
-       x[i] = mem[ord-i-1];
-
-    for (i=0;i<N-7;i+=8)
-    {
-        x[i+ord  ]=_x[i  ];
-        x[i+ord+1]=_x[i+1];
-        x[i+ord+2]=_x[i+2];
-        x[i+ord+3]=_x[i+3];
-        x[i+ord+4]=_x[i+4];
-        x[i+ord+5]=_x[i+5];
-        x[i+ord+6]=_x[i+6];
-        x[i+ord+7]=_x[i+7];
-    }
-
-    for (;i<N-3;i+=4)
-    {
-        x[i+ord  ]=_x[i  ];
-        x[i+ord+1]=_x[i+1];
-        x[i+ord+2]=_x[i+2];
-        x[i+ord+3]=_x[i+3];
-    }
-
-    for (;i<N;i++)
-        x[i+ord]=_x[i];
-
-    for(i=0;i<ord;i++)
-        mem[i] = _x[N-i-1];
-#ifdef SMALL_FOOTPRINT
-    for (i=0;i<N;i++)
-    {
-        opus_val32 sum = SHL32(EXTEND32(_x[i]), SIG_SHIFT);
-        for (j=0;j<ord;j++)
-        {
-            sum = MAC16_16(sum,rnum[j],x[i+j]);
-        }
-        _y[i] = SATURATE16(PSHR32(sum, SIG_SHIFT));
-    }
-#else
     noA = EXTEND32(1) << SIG_SHIFT >> 1;
     vecNoA = _mm_set_epi32(noA, noA, noA, noA);

@@ -107,25 +65,24 @@ void celt_fir_sse4_1(const opus_val16 *_x,
         opus_val32 sums[4] = {0};
         __m128i vecSum, vecX;

-        xcorr_kernel(rnum, x+i, sums, ord, arch);
+        xcorr_kernel(rnum, x+i-ord, sums, ord, arch);

         vecSum = _mm_loadu_si128((__m128i *)sums);
         vecSum = _mm_add_epi32(vecSum, vecNoA);
         vecSum = _mm_srai_epi32(vecSum, SIG_SHIFT);
-        vecX = OP_CVTEPI16_EPI32_M64(_x + i);
+        vecX = OP_CVTEPI16_EPI32_M64(x + i);
         vecSum = _mm_add_epi32(vecSum, vecX);
         vecSum = _mm_packs_epi32(vecSum, vecSum);
-        _mm_storel_epi64((__m128i *)(_y + i), vecSum);
+        _mm_storel_epi64((__m128i *)(y + i), vecSum);
     }
     for (;i<N;i++)
     {
         opus_val32 sum = 0;
         for (j=0;j<ord;j++)
-            sum = MAC16_16(sum, rnum[j], x[i + j]);
-        _y[i] = SATURATE16(ADD32(EXTEND32(_x[i]), PSHR32(sum, SIG_SHIFT)));
+            sum = MAC16_16(sum, rnum[j], x[i+j-ord]);
+        y[i] = SATURATE16(ADD32(EXTEND32(x[i]), PSHR32(sum, SIG_SHIFT)));
     }
-#endif
     RESTORE_STACK;
 }
diff --git a/libs/libopus/celt/x86/pitch_sse.h b/libs/libopus/celt/x86/pitch_sse.h
index e5f87ab51..f7a014b6e 100644
--- a/libs/libopus/celt/x86/pitch_sse.h
+++ b/libs/libopus/celt/x86/pitch_sse.h
@@ -91,7 +91,7 @@ opus_val32 celt_inner_prod_sse2(
     int N);
 #endif

-#if defined(OPUS_X86_MAY_HAVE_SSE2) && !defined(FIXED_POINT)
+#if defined(OPUS_X86_MAY_HAVE_SSE) && !defined(FIXED_POINT)
 opus_val32 celt_inner_prod_sse(
     const opus_val16 *x,
     const opus_val16 *y,
diff --git a/libs/libopus/celt/x86/pitch_sse4_1.c b/libs/libopus/celt/x86/pitch_sse4_1.c
index a092c68b2..2bc578304 100644
--- a/libs/libopus/celt/x86/pitch_sse4_1.c
+++ b/libs/libopus/celt/x86/pitch_sse4_1.c
@@ -117,6 +117,14 @@ void xcorr_kernel_sse4_1(const opus_val16 * x, const opus_val16 * y, opus_val32
     __m128i sum0, sum1, sum2, sum3, vecSum;
     __m128i initSum;

+#ifdef OPUS_CHECK_ASM
+    opus_val32 sum_c[4];
+    for (j=0;j<4;j++) {
+        sum_c[j] = sum[j];
+    }
+    xcorr_kernel_c(x, y, sum_c, len);
+#endif
+
     celt_assert(len >= 3);

     sum0 = _mm_setzero_si128();
@@ -177,19 +185,56 @@ void xcorr_kernel_sse4_1(const opus_val16 * x, const opus_val16 * y, opus_val32
         vecSum = _mm_add_epi32(vecSum, sum2);
     }

-    for (;j<len;j++)
+    vecX = OP_CVTEPI16_EPI32_M64(&x[len - 4]);
+    if (len - j == 3)
     {
-        vecX = OP_CVTEPI16_EPI32_M64(&x[j + 0]);
-        vecX0 = _mm_shuffle_epi32(vecX, 0x00);
+        vecX0 = _mm_shuffle_epi32(vecX, 0x55);
+        vecX1 = _mm_shuffle_epi32(vecX, 0xaa);
+        vecX2 = _mm_shuffle_epi32(vecX, 0xff);

         vecY0 = OP_CVTEPI16_EPI32_M64(&y[j + 0]);
+        vecY1 = OP_CVTEPI16_EPI32_M64(&y[j + 1]);
+        vecY2 = OP_CVTEPI16_EPI32_M64(&y[j + 2]);

         sum0 = _mm_mullo_epi32(vecX0, vecY0);
+        sum1 = _mm_mullo_epi32(vecX1, vecY1);
+        sum2 = _mm_mullo_epi32(vecX2, vecY2);
+
+        vecSum = _mm_add_epi32(vecSum, sum0);
+        vecSum = _mm_add_epi32(vecSum, sum1);
+        vecSum = _mm_add_epi32(vecSum, sum2);
+    }
+    else if (len - j == 2)
+    {
+        vecX0 = _mm_shuffle_epi32(vecX, 0xaa);
+        vecX1 = _mm_shuffle_epi32(vecX, 0xff);
+
+        vecY0 = OP_CVTEPI16_EPI32_M64(&y[j + 0]);
+        vecY1 = OP_CVTEPI16_EPI32_M64(&y[j + 1]);
+
+        sum0 = _mm_mullo_epi32(vecX0, vecY0);
+        sum1 = _mm_mullo_epi32(vecX1, vecY1);
+
+        vecSum = _mm_add_epi32(vecSum, sum0);
+        vecSum = _mm_add_epi32(vecSum, sum1);
+    }
+    else if (len - j == 1)
+    {
+        vecX0 = _mm_shuffle_epi32(vecX, 0xff);
+
+        vecY0 = OP_CVTEPI16_EPI32_M64(&y[j + 0]);
+
+        sum0 = _mm_mullo_epi32(vecX0, vecY0);

         vecSum = _mm_add_epi32(vecSum, sum0);
     }

     initSum = _mm_loadu_si128((__m128i *)(&sum[0]));
     initSum = _mm_add_epi32(initSum, vecSum);
     _mm_storeu_si128((__m128i *)sum, initSum);
+
+#ifdef OPUS_CHECK_ASM
+    celt_assert(!memcmp(sum_c, sum, sizeof(sum_c)));
+#endif
 }
 #endif
diff --git a/libs/libopus/celt/x86/vq_sse.h b/libs/libopus/celt/x86/vq_sse.h
new file mode 100644
index 000000000..b4efe8f24
--- /dev/null
+++ b/libs/libopus/celt/x86/vq_sse.h
@@ -0,0 +1,50 @@
+/* Copyright (c) 2016 Jean-Marc Valin */
+/*
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions
+   are met:
+
+   - Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+
+   - Redistributions in binary form must reproduce the above copyright
+   notice, this list of conditions and the following disclaimer in the
+   documentation and/or other materials provided with the distribution.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+   ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
+   OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+   EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+   PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+   PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+   LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+   NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+   SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#ifndef VQ_SSE_H
+#define VQ_SSE_H
+
+#if defined(OPUS_X86_MAY_HAVE_SSE2) && !defined(FIXED_POINT)
+#define OVERRIDE_OP_PVQ_SEARCH
+
+opus_val16 op_pvq_search_sse2(celt_norm *_X, int *iy, int K, int N, int arch);
+
+#if defined(OPUS_X86_PRESUME_SSE2)
+#define op_pvq_search(x, iy, K, N, arch) \
+    (op_pvq_search_sse2(x, iy, K, N, arch))
+
+#else
+
+extern opus_val16 (*const OP_PVQ_SEARCH_IMPL[OPUS_ARCHMASK + 1])(
+    celt_norm *_X, int *iy, int K, int N, int arch);
+
+# define op_pvq_search(X, iy, K, N, arch) \
+    ((*OP_PVQ_SEARCH_IMPL[(arch) & OPUS_ARCHMASK])(X, iy, K, N, arch))
+
+#endif
+#endif
+
+#endif
diff --git a/libs/libopus/celt/x86/vq_sse2.c b/libs/libopus/celt/x86/vq_sse2.c
new file mode 100644
index 000000000..775042860
--- /dev/null
+++ b/libs/libopus/celt/x86/vq_sse2.c
@@ -0,0 +1,217 @@
+/* Copyright (c) 2007-2008 CSIRO
+   Copyright (c) 2007-2009 Xiph.Org Foundation
+   Copyright (c) 2007-2016 Jean-Marc Valin */
+/*
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions
+   are met:
+
+   - Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+
+   - Redistributions in binary form must reproduce the above copyright
+   notice, this list of conditions and the following disclaimer in the
+   documentation and/or other materials provided with the distribution.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+   ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
+   OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+   EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+   PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+   PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+   LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+   NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+   SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <xmmintrin.h>
+#include <emmintrin.h>
+#include "celt_lpc.h"
+#include "stack_alloc.h"
+#include "mathops.h"
+#include "vq.h"
+#include "x86cpu.h"
+
+
+#ifndef FIXED_POINT
+
+opus_val16 op_pvq_search_sse2(celt_norm *_X, int *iy, int K, int N, int arch)
+{
+   int i, j;
+   int pulsesLeft;
+   float xy, yy;
+   VARDECL(celt_norm, y);
+   VARDECL(celt_norm, X);
+   VARDECL(float, signy);
+   __m128 signmask;
+   __m128 sums;
+   __m128i fours;
+   SAVE_STACK;
+
+   (void)arch;
+   /* All bits set to zero, except for the sign bit. */
+   signmask = _mm_set_ps1(-0.f);
+
+   fours = _mm_set_epi32(4, 4, 4, 4);
+   ALLOC(y, N+3, celt_norm);
+   ALLOC(X, N+3, celt_norm);
+   ALLOC(signy, N+3, float);
+
+   OPUS_COPY(X, _X, N);
+   X[N] = X[N+1] = X[N+2] = 0;
+   sums = _mm_setzero_ps();
+   for (j=0;j<N;j+=4)
+   {
+      __m128 x4, s4;
+      x4 = _mm_loadu_ps(&X[j]);
+      s4 = _mm_cmplt_ps(x4, _mm_setzero_ps());
+      /* Get rid of the sign */
+      x4 = _mm_andnot_ps(signmask, x4);
+      sums = _mm_add_ps(sums, x4);
+      /* Clear y and iy in case we don't do the projection. */
+      _mm_storeu_ps(&y[j], _mm_setzero_ps());
+      _mm_storeu_si128((__m128i*)&iy[j], _mm_setzero_si128());
+      _mm_storeu_ps(&X[j], x4);
+      _mm_storeu_ps(&signy[j], s4);
+   }
+   sums = _mm_add_ps(sums, _mm_shuffle_ps(sums, sums, _MM_SHUFFLE(1, 0, 3, 2)));
+   sums = _mm_add_ps(sums, _mm_shuffle_ps(sums, sums, _MM_SHUFFLE(2, 3, 0, 1)));
+
+   xy = yy = 0;
+
+   pulsesLeft = K;
+
+   /* Do a pre-search by projecting on the pyramid */
+   if (K > (N>>1))
+   {
+      __m128i pulses_sum;
+      __m128 yy4, xy4;
+      __m128 rcp4;
+      opus_val32 sum = _mm_cvtss_f32(sums);
+      /* If X is too small, just replace it with a pulse at 0 */
+      /* Prevents infinities and NaNs from causing too many pulses
+         to be allocated. 64 is an approximation of infinity here. */
+      if (!(sum > EPSILON && sum < 64))
+      {
+         X[0] = QCONST16(1.f,14);
+         j=1; do
+            X[j]=0;
+         while (++j<N);
+         sums = _mm_set_ps1(1.f);
+      }
+      /* Using K+e with e < 1 guarantees we cannot get more than K pulses. */
+      rcp4 = _mm_mul_ps(_mm_set_ps1((float)(K+.8)), _mm_rcp_ps(sums));
+      xy4 = yy4 = _mm_setzero_ps();
+      pulses_sum = _mm_setzero_si128();
+      for (j=0;j<N;j+=4)
+      {
+         __m128 rx4, x4, y4;
+         __m128i iy4;
+         x4 = _mm_loadu_ps(&X[j]);
+         rx4 = _mm_mul_ps(x4, rcp4);
+         iy4 = _mm_cvttps_epi32(rx4);
+         pulses_sum = _mm_add_epi32(pulses_sum, iy4);
+         _mm_storeu_si128((__m128i*)&iy[j], iy4);
+         y4 = _mm_cvtepi32_ps(iy4);
+         xy4 = _mm_add_ps(xy4, _mm_mul_ps(x4, y4));
+         yy4 = _mm_add_ps(yy4, _mm_mul_ps(y4, y4));
+         /* double the y[] vector so we don't have to do it in the search loop. */
+         _mm_storeu_ps(&y[j], _mm_add_ps(y4, y4));
+      }
+      pulses_sum = _mm_add_epi32(pulses_sum, _mm_shuffle_epi32(pulses_sum, _MM_SHUFFLE(1, 0, 3, 2)));
+      pulses_sum = _mm_add_epi32(pulses_sum, _mm_shuffle_epi32(pulses_sum, _MM_SHUFFLE(2, 3, 0, 1)));
+      pulsesLeft -= _mm_cvtsi128_si32(pulses_sum);
+      xy4 = _mm_add_ps(xy4, _mm_shuffle_ps(xy4, xy4, _MM_SHUFFLE(1, 0, 3, 2)));
+      xy4 = _mm_add_ps(xy4, _mm_shuffle_ps(xy4, xy4, _MM_SHUFFLE(2, 3, 0, 1)));
+      xy = _mm_cvtss_f32(xy4);
+      yy4 = _mm_add_ps(yy4, _mm_shuffle_ps(yy4, yy4, _MM_SHUFFLE(1, 0, 3, 2)));
+      yy4 = _mm_add_ps(yy4, _mm_shuffle_ps(yy4, yy4, _MM_SHUFFLE(2, 3, 0, 1)));
+      yy = _mm_cvtss_f32(yy4);
+   }
+   X[N] = X[N+1] = X[N+2] = -100;
+   y[N] = y[N+1] = y[N+2] = 100;
+   celt_sig_assert(pulsesLeft>=0);
+
+   /* This should never happen, but just in case it does (e.g. on silence)
+      we fill the first bin with pulses. */
+   if (pulsesLeft > N+3)
+   {
+      opus_val16 tmp = (opus_val16)pulsesLeft;
+      yy = MAC16_16(yy, tmp, tmp);
+      yy = MAC16_16(yy, tmp, y[0]);
+      iy[0] += pulsesLeft;
+      pulsesLeft=0;
+   }
+
+   for (i=0;i<pulsesLeft;i++)
+   {
+      int best_id;
+      __m128 xy4, yy4;
+      __m128 max, max2;
+      __m128i count;
+      __m128i pos;
+      /* The squared magnitude term gets added anyway, so we might as well
+         add it outside the loop */
+      yy = ADD16(yy, 1);
+      xy4 = _mm_load1_ps(&xy);
+      yy4 = _mm_load1_ps(&yy);
+      max = _mm_setzero_ps();
+      pos = _mm_setzero_si128();
+      count = _mm_set_epi32(3, 2, 1, 0);
+      for (j=0;j<N;j+=4)
+      {
+         __m128 x4, y4, r4;
+         x4 = _mm_loadu_ps(&X[j]);
+         y4 = _mm_loadu_ps(&y[j]);
+         x4 = _mm_add_ps(x4, xy4);
+         y4 = _mm_add_ps(y4, yy4);
+         y4 = _mm_rsqrt_ps(y4);
+         r4 = _mm_mul_ps(x4, y4);
+         /* Update the index of the max. */
+         pos = _mm_max_epi16(pos, _mm_and_si128(count, _mm_castps_si128(_mm_cmpgt_ps(r4, max))));
+         /* Update the max. */
+         max = _mm_max_ps(max, r4);
+         /* Update the indices (+4) */
+         count = _mm_add_epi32(count, fours);
+      }
+      /* Horizontal max */
+      max2 = _mm_max_ps(max, _mm_shuffle_ps(max, max, _MM_SHUFFLE(1, 0, 3, 2)));
+      max2 = _mm_max_ps(max2, _mm_shuffle_ps(max2, max2, _MM_SHUFFLE(2, 3, 0, 1)));
+      /* Now that max2 contains the max at all positions, look at which value(s) of the
+         partial max is equal to the global max. */
+      pos = _mm_and_si128(pos, _mm_castps_si128(_mm_cmpeq_ps(max, max2)));
+      pos = _mm_max_epi16(pos, _mm_unpackhi_epi64(pos, pos));
+      pos = _mm_max_epi16(pos, _mm_shufflelo_epi16(pos, _MM_SHUFFLE(1, 0, 3, 2)));
+      best_id = _mm_cvtsi128_si32(pos);
+
+      /* Updating the sums of the new pulse(s) */
+      xy = ADD32(xy, EXTEND32(X[best_id]));
+      /* We're multiplying y[j] by two so we don't have to do it here */
+      yy = ADD16(yy, y[best_id]);
+
+      /* Only now that we've made the final choice, update y/iy */
+      /* Multiplying y[j] by 2 so we don't have to do it everywhere else */
+      y[best_id] += 2;
+      iy[best_id]++;
+   }
+
+   /* Put the original sign back */
+   for (j=0;j<N;j+=4)
+   {
+      __m128i y4;
+      __m128i s4;
+      y4 = _mm_loadu_si128((__m128i*)&iy[j]);
+      s4 = _mm_castps_si128(_mm_loadu_ps(&signy[j]));
+      y4 = _mm_xor_si128(_mm_add_epi32(y4, s4), s4);
+      _mm_storeu_si128((__m128i*)&iy[j], y4);
+   }
+   RESTORE_STACK;
+   return yy;
+}
+
+#endif
diff --git a/libs/libopus/celt/x86/x86_celt_map.c b/libs/libopus/celt/x86/x86_celt_map.c
index 47ba41b9e..d39d88ede 100644
--- a/libs/libopus/celt/x86/x86_celt_map.c
+++ b/libs/libopus/celt/x86/x86_celt_map.c
@@ -33,6 +33,7 @@
 #include "celt_lpc.h"
 #include "pitch.h"
 #include "pitch_sse.h"
+#include "vq.h"

 #if defined(OPUS_HAVE_RTCD)

@@ -46,7 +47,6 @@ void (*const CELT_FIR_IMPL[OPUS_ARCHMASK + 1])(
     opus_val16 *y,
     int N,
     int ord,
-    opus_val16 *mem,
     int arch
 ) = {
     celt_fir_c,                /* non-sse */
@@ -151,5 +151,17 @@ void (*const COMB_FILTER_CONST_IMPL[OPUS_ARCHMASK + 1])(

 #endif

+#if defined(OPUS_X86_MAY_HAVE_SSE2) && !defined(OPUS_X86_PRESUME_SSE2)
+opus_val16 (*const OP_PVQ_SEARCH_IMPL[OPUS_ARCHMASK + 1])(
+    celt_norm *_X, int *iy, int K, int N, int arch
+) = {
+    op_pvq_search_c,                /* non-sse */
+    op_pvq_search_c,
+    MAY_HAVE_SSE2(op_pvq_search),
+    MAY_HAVE_SSE2(op_pvq_search),
+    MAY_HAVE_SSE2(op_pvq_search)
+};
+#endif
+
 #endif
 #endif
diff --git a/libs/libopus/celt/x86/x86cpu.h b/libs/libopus/celt/x86/x86cpu.h
index 04fd48aac..0de8df355 100644
--- a/libs/libopus/celt/x86/x86cpu.h
+++ b/libs/libopus/celt/x86/x86cpu.h
@@ -56,38 +56,10 @@ int opus_select_arch(void);

 # endif

-/*gcc appears to emit MOVDQA's to load the argument of an _mm_cvtepi8_epi32()
-  or _mm_cvtepi16_epi32() when optimizations are disabled, even though the
-  actual PMOVSXWD instruction takes an m32 or m64. Unlike a normal memory
-  reference, these require 16-byte alignment and load a full 16 bytes (instead
-  of 4 or 8), possibly reading out of bounds.
-
-  We can insert an explicit MOVD or MOVQ using _mm_cvtsi32_si128() or
-  _mm_loadl_epi64(), which should have the same semantics as an m32 or m64
-  reference in the PMOVSXWD instruction itself, but gcc is not smart enough to
-  optimize this out when optimizations ARE enabled.
-
-  Clang, in contrast, requires us to do this always for _mm_cvtepi8_epi32
-  (which is fair, since technically the compiler is always allowed to do the
-  dereference before invoking the function implementing the intrinsic).
-  However, it is smart enough to eliminate the extra MOVD instruction.
-  For _mm_cvtepi16_epi32, it does the right thing, though does *not* optimize out
-  the extra MOVQ if it's specified explicitly */
-
-# if defined(__clang__) || !defined(__OPTIMIZE__)
-#  define OP_CVTEPI8_EPI32_M32(x) \
+#define OP_CVTEPI8_EPI32_M32(x) \
 (_mm_cvtepi8_epi32(_mm_cvtsi32_si128(*(int *)(x))))
-# else
-#  define OP_CVTEPI8_EPI32_M32(x) \
- (_mm_cvtepi8_epi32(*(__m128i *)(x)))
-#endif

-# if !defined(__OPTIMIZE__)
-#  define OP_CVTEPI16_EPI32_M64(x) \
+#define OP_CVTEPI16_EPI32_M64(x) \
 (_mm_cvtepi16_epi32(_mm_loadl_epi64((__m128i *)(x))))
-# else
-#  define OP_CVTEPI16_EPI32_M64(x) \
- (_mm_cvtepi16_epi32(*(__m128i *)(x)))
-# endif

 #endif
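
A note on the celt_fir change at the top of this diff: the separate mem[] state and the internal copy into a padded scratch buffer are gone, and the caller is now expected to keep ord samples of history directly in front of x, which is why the kernel reads x+i-ord and the scalar tail indexes x[i+j-ord]. A scalar model of the new contract, ignoring the fixed-point rounding and saturation (the function name is mine, not the patch's):

    /* y[i] = x[i] + sum over j of num[j]*x[i-j-1], where x[-ord..-1] is
       valid history supplied by the caller (no separate mem[] state). */
    static void fir_with_history(const float *x, const float *num,
                                 float *y, int N, int ord)
    {
        int i, j;
        for (i = 0; i < N; i++)
        {
            float sum = x[i];
            for (j = 0; j < ord; j++)
                sum += num[j] * x[i - j - 1];  /* num[0] taps the newest past sample */
            y[i] = sum;
        }
    }

This is also why the header and the RTCD table in x86_celt_map.c drop the mem argument from every celt_fir signature: the filter no longer owns any state.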
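The search loop in op_pvq_search_sse2 places one pulse per iteration at the position maximizing the correlation-to-energy ratio. Adding a pulse at position j turns xy into xy+X[j] and yy into yy+2*y[j]+1, which is why y[] is stored pre-doubled and the +1 is folded into yy (yy = ADD16(yy, 1)) before the inner loop; _mm_rsqrt_ps then stands in for the division, since only the argmax matters, not the exact ratio. A scalar sketch of the selection rule under those definitions (exact sqrt instead of the rsqrt approximation; the function name is hypothetical):

    #include <math.h>

    /* Pick the position where one extra pulse gives the largest
       (xy + X[j]) / sqrt(yy + 2*y[j] + 1); X[] holds the magnitudes and
       y[] the current (not yet doubled) pulse vector. */
    static int best_pulse_pos(const float *X, const float *y,
                              float xy, float yy, int N)
    {
        int j, best = 0;
        float best_score = -1.f;
        for (j = 0; j < N; j++)
        {
            float score = (xy + X[j]) / sqrtf(yy + 2.f*y[j] + 1.f);
            if (score > best_score)
            {
                best_score = score;
                best = j;
            }
        }
        return best;
    }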
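Sign handling in the same file: the kernel searches on |X| and keeps each element's original sign as an all-ones/all-zeros mask from _mm_cmplt_ps, then restores it at the end with the add/xor pair _mm_xor_si128(_mm_add_epi32(y4, s4), s4). That works because, for two's complement integers, (v + s) ^ s is v when s is 0 and -v when s is -1. A minimal scalar check of the identity (names are mine):

    #include <assert.h>
    #include <stdint.h>

    /* Branchless conditional negate: s is 0 (keep) or -1 (negate).
       For s == -1: (v - 1) ^ -1 == ~(v - 1) == -v. */
    static int32_t cond_negate(int32_t v, int32_t s)
    {
        return (v + s) ^ s;
    }

    int main(void)
    {
        assert(cond_negate(5, 0) == 5);
        assert(cond_negate(5, -1) == -5);
        assert(cond_negate(0, -1) == 0);
        return 0;
    }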
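Finally, the x86cpu.h hunk drops the compiler-conditional definitions and always routes the conversion through an explicit narrow load. The rationale from the deleted comment still applies: _mm_cvtepi16_epi32(*(__m128i *)(x)) formally dereferences 16 aligned bytes even though PMOVSXWD only consumes 8, so near the end of a buffer the cast form can read out of bounds or fault, while the _mm_loadl_epi64() form loads exactly 8 bytes from an unaligned address. A small demonstration of the retained form (SSE4.1; compile with -msse4.1 or equivalent):

    #include <smmintrin.h>  /* SSE4.1: _mm_cvtepi16_epi32 */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        int16_t buf[12] = {1, -2, 3, -4, 5, -6, 7, -8, 9, 10, 11, 12};
        /* MOVQ loads exactly 8 bytes from a possibly unaligned address,
           then PMOVSXWD sign-extends the four int16 lanes to int32. */
        __m128i v = _mm_cvtepi16_epi32(_mm_loadl_epi64((__m128i *)(buf + 5)));
        int32_t out[4];
        _mm_storeu_si128((__m128i *)out, v);
        printf("%d %d %d %d\n", out[0], out[1], out[2], out[3]); /* -6 7 -8 9 */
        return 0;
    }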