From 5033dbca32609b69c512e0edd0b4fc40d220de67 Mon Sep 17 00:00:00 2001
From: "Matt A. Tobin"
Date: Thu, 5 May 2022 15:52:29 -0500
Subject: Bug 1762614 - Update libopus to 2654707e86cc94413998976d179b2ab4a2aa3114

---
 libs/libopus/Makefile.in | 4 +-
 libs/libopus/README_MOZILLA | 2 +-
 libs/libopus/celt/_kiss_fft_guts.h | 16 +-
 libs/libopus/celt/arch.h | 55 +-
 libs/libopus/celt/arm/arm2gnu.pl | 4 +-
 libs/libopus/celt/arm/arm_celt_map.c | 21 +-
 libs/libopus/celt/arm/armcpu.c | 2 +
 libs/libopus/celt/arm/celt_fft_ne10.c | 173 +++
 libs/libopus/celt/arm/celt_mdct_ne10.c | 258 ++++
 libs/libopus/celt/arm/celt_ne10_fft.c | 174 ---
 libs/libopus/celt/arm/celt_ne10_mdct.c | 258 ----
 libs/libopus/celt/arm/celt_neon_intr.c | 110 +-
 libs/libopus/celt/arm/celt_pitch_xcorr_arm.s | 6 +-
 libs/libopus/celt/arm/fft_arm.h | 1 -
 libs/libopus/celt/arm/fixed_armv4.h | 6 +-
 libs/libopus/celt/arm/fixed_armv5e.h | 4 +-
 libs/libopus/celt/arm/mdct_arm.h | 1 -
 libs/libopus/celt/arm/pitch_arm.h | 56 +-
 libs/libopus/celt/arm/pitch_neon_intr.c | 290 ++++
 libs/libopus/celt/bands.c | 330 +++--
 libs/libopus/celt/bands.h | 9 +-
 libs/libopus/celt/celt.c | 25 +-
 libs/libopus/celt/celt.h | 32 +-
 libs/libopus/celt/celt_decoder.c | 196 ++-
 libs/libopus/celt/celt_encoder.c | 399 ++++--
 libs/libopus/celt/celt_lpc.c | 124 +-
 libs/libopus/celt/celt_lpc.h | 5 +-
 libs/libopus/celt/cwrs.c | 2 +-
 libs/libopus/celt/ecintrin.h | 4 +
 libs/libopus/celt/entcode.h | 4 +-
 libs/libopus/celt/entdec.h | 2 +-
 libs/libopus/celt/entenc.h | 2 +-
 libs/libopus/celt/fixed_debug.h | 58 +-
 libs/libopus/celt/fixed_generic.h | 23 +-
 libs/libopus/celt/float_cast.h | 66 +-
 libs/libopus/celt/kiss_fft.c | 56 +-
 libs/libopus/celt/mathops.c | 5 +-
 libs/libopus/celt/mathops.h | 40 +-
 libs/libopus/celt/mdct.c | 16 +-
 libs/libopus/celt/mips/celt_mipsr1.h | 1 +
 libs/libopus/celt/mips/vq_mipsr1.h | 11 +-
 libs/libopus/celt/modes.c | 2 +-
 libs/libopus/celt/os_support.h | 1 -
 libs/libopus/celt/pitch.c | 46 +-
 libs/libopus/celt/pitch.h | 14 +-
 libs/libopus/celt/quant_bands.c | 11 +-
 libs/libopus/celt/rate.c | 14 +-
 libs/libopus/celt/rate.h | 2 +-
 libs/libopus/celt/stack_alloc.h | 4 +-
 libs/libopus/celt/static_modes_fixed_arm_ne10.h | 2 +-
 libs/libopus/celt/static_modes_float_arm_ne10.h | 2 +-
 libs/libopus/celt/vq.c | 118 +-
 libs/libopus/celt/vq.h | 20 +-
 libs/libopus/celt/x86/celt_lpc_sse.c | 132 --
 libs/libopus/celt/x86/celt_lpc_sse.h | 10 +-
 libs/libopus/celt/x86/celt_lpc_sse4_1.c | 89 ++
 libs/libopus/celt/x86/pitch_sse.h | 2 +-
 libs/libopus/celt/x86/pitch_sse4_1.c | 51 +-
 libs/libopus/celt/x86/vq_sse.h | 50 +
 libs/libopus/celt/x86/vq_sse2.c | 217 +++
 libs/libopus/celt/x86/x86_celt_map.c | 14 +-
 libs/libopus/celt/x86/x86cpu.h | 32 +-
 libs/libopus/include/opus.h | 2 +-
 libs/libopus/include/opus_custom.h | 4 +-
 libs/libopus/include/opus_defines.h | 54 +-
 libs/libopus/include/opus_multistream.h | 4 +-
 libs/libopus/include/opus_projection.h | 568 ++++++++
 libs/libopus/include/opus_types.h | 27 +-
 libs/libopus/moz.build | 63 +-
 libs/libopus/nonunified.patch | 4 +-
 libs/libopus/nonunified2.patch | 34 +
 libs/libopus/silk/A2NLSF.c | 8 +-
 libs/libopus/silk/API.h | 3 +-
 libs/libopus/silk/CNG.c | 16 +-
 libs/libopus/silk/LPC_analysis_filter.c | 25 +-
 libs/libopus/silk/LPC_fit.c | 82 ++
 libs/libopus/silk/LPC_inv_pred_gain.c | 97 +-
 libs/libopus/silk/LP_variable_cutoff.c | 2 +-
 libs/libopus/silk/MacroCount.h | 12 +-
 libs/libopus/silk/MacroDebug.h | 3 +-
 libs/libopus/silk/NLSF2A.c | 59 +-
 libs/libopus/silk/NLSF_VQ.c | 44 +-
 libs/libopus/silk/NLSF_VQ_weights_laroia.c | 4 +-
 libs/libopus/silk/NLSF_decode.c | 22 +-
 libs/libopus/silk/NLSF_del_dec_quant.c | 8 +-
 libs/libopus/silk/NLSF_encode.c | 37 +-
 libs/libopus/silk/NSQ.c | 104 +-
 libs/libopus/silk/NSQ_del_dec.c | 129 +-
 libs/libopus/silk/PLC.c | 6 +-
 libs/libopus/silk/SigProc_FIX.h | 62 +-
 libs/libopus/silk/VAD.c | 22 +-
 libs/libopus/silk/VQ_WMat_EC.c | 129 +-
 libs/libopus/silk/arm/LPC_inv_pred_gain_arm.h | 57 +
 .../libopus/silk/arm/LPC_inv_pred_gain_neon_intr.c | 288 ++++
 libs/libopus/silk/arm/NSQ_del_dec_arm.h | 100 ++
 libs/libopus/silk/arm/NSQ_del_dec_neon_intr.c | 1124 ++++++++++++++++
 libs/libopus/silk/arm/NSQ_neon.h | 33 +-
 libs/libopus/silk/arm/arm_silk_map.c | 68 +
 libs/libopus/silk/arm/biquad_alt_arm.h | 68 +
 libs/libopus/silk/arm/biquad_alt_neon_intr.c | 156 +++
 libs/libopus/silk/arm/macros_armv4.h | 13 +-
 libs/libopus/silk/arm/macros_armv5e.h | 9 +-
 libs/libopus/silk/biquad_alt.c | 53 +-
 libs/libopus/silk/bwexpander.c | 2 +-
 libs/libopus/silk/bwexpander_32.c | 3 +-
 libs/libopus/silk/check_control_input.c | 22 +-
 libs/libopus/silk/control.h | 8 +
 libs/libopus/silk/control_SNR.c | 103 +-
 libs/libopus/silk/control_audio_bandwidth.c | 16 +-
 libs/libopus/silk/control_codec.c | 105 +-
 libs/libopus/silk/debug.c | 10 +-
 libs/libopus/silk/debug.h | 38 +-
 libs/libopus/silk/dec_API.c | 6 +-
 libs/libopus/silk/decode_core.c | 6 +-
 libs/libopus/silk/decode_frame.c | 6 +-
 libs/libopus/silk/decode_indices.c | 2 +-
 libs/libopus/silk/decode_parameters.c | 4 +-
 libs/libopus/silk/decode_pitch.c | 4 +-
 libs/libopus/silk/decode_pulses.c | 2 +-
 libs/libopus/silk/decoder_set_fs.c | 8 +-
 libs/libopus/silk/define.h | 18 +-
 libs/libopus/silk/enc_API.c | 47 +-
 libs/libopus/silk/encode_indices.c | 6 +-
 libs/libopus/silk/encode_pulses.c | 2 +-
 libs/libopus/silk/fixed/apply_sine_window_FIX.c | 8 +-
 .../fixed/arm/warped_autocorrelation_FIX_arm.h | 68 +
 .../arm/warped_autocorrelation_FIX_neon_intr.c | 265 ++++
 libs/libopus/silk/fixed/burg_modified_FIX.c | 12 +-
 libs/libopus/silk/fixed/corrMatrix_FIX.c | 38 +-
 libs/libopus/silk/fixed/encode_frame_FIX.c | 123 +-
 libs/libopus/silk/fixed/find_LPC_FIX.c | 4 +-
 libs/libopus/silk/fixed/find_LTP_FIX.c | 238 +---
 libs/libopus/silk/fixed/find_pitch_lags_FIX.c | 20 +-
 libs/libopus/silk/fixed/find_pred_coefs_FIX.c | 27 +-
 libs/libopus/silk/fixed/k2a_FIX.c | 13 +-
 libs/libopus/silk/fixed/k2a_Q16_FIX.c | 15 +-
 libs/libopus/silk/fixed/main_FIX.h | 74 +-
 .../fixed/mips/noise_shape_analysis_FIX_mipsr1.h | 6 +-
 .../libopus/silk/fixed/mips/prefilter_FIX_mipsr1.h | 184 ---
 .../fixed/mips/warped_autocorrelation_FIX_mipsr1.h | 4 +-
 libs/libopus/silk/fixed/noise_shape_analysis_FIX.c | 160 +--
 libs/libopus/silk/fixed/pitch_analysis_core_FIX.c | 125 +-
 libs/libopus/silk/fixed/prefilter_FIX.c | 221 ---
 libs/libopus/silk/fixed/residual_energy16_FIX.c | 8 +-
 libs/libopus/silk/fixed/residual_energy_FIX.c | 2 +-
 libs/libopus/silk/fixed/schur64_FIX.c | 7 +-
 libs/libopus/silk/fixed/schur_FIX.c | 15 +-
 libs/libopus/silk/fixed/solve_LS_FIX.c | 249 ----
 libs/libopus/silk/fixed/structs_FIX.h | 22 +-
 libs/libopus/silk/fixed/vector_ops_FIX.c | 2 +-
 .../silk/fixed/warped_autocorrelation_FIX.c | 11 +-
 .../libopus/silk/fixed/x86/burg_modified_FIX_sse.c | 377 ------
 .../silk/fixed/x86/burg_modified_FIX_sse4_1.c | 400 ++++++
 libs/libopus/silk/fixed/x86/prefilter_FIX_sse.c | 160 ---
 libs/libopus/silk/fixed/x86/vector_ops_FIX_sse.c | 88 --
 .../libopus/silk/fixed/x86/vector_ops_FIX_sse4_1.c | 92 ++
 libs/libopus/silk/float/LPC_analysis_filter_FLP.c | 4 +-
 libs/libopus/silk/float/LPC_inv_pred_gain_FLP.c | 37 +-
 libs/libopus/silk/float/SigProc_FLP.h | 7 -
 libs/libopus/silk/float/apply_sine_window_FLP.c | 4 +-
 libs/libopus/silk/float/burg_modified_FLP.c | 2 +-
 libs/libopus/silk/float/encode_frame_FLP.c | 105 +-
 libs/libopus/silk/float/energy_FLP.c | 5 +-
 libs/libopus/silk/float/find_LPC_FLP.c | 4 +-
 libs/libopus/silk/float/find_LTP_FLP.c | 108 +-
 libs/libopus/silk/float/find_pitch_lags_FLP.c | 2 +-
 libs/libopus/silk/float/find_pred_coefs_FLP.c | 14 +-
 libs/libopus/silk/float/inner_product_FLP.c | 5 +-
 libs/libopus/silk/float/k2a_FLP.c | 15 +-
 libs/libopus/silk/float/levinsondurbin_FLP.c | 81 --
 libs/libopus/silk/float/main_FLP.h | 55 +-
 libs/libopus/silk/float/noise_shape_analysis_FLP.c | 149 +--
 libs/libopus/silk/float/pitch_analysis_core_FLP.c | 36 +-
 libs/libopus/silk/float/prefilter_FLP.c | 206 ---
 libs/libopus/silk/float/residual_energy_FLP.c | 2 +-
 libs/libopus/silk/float/schur_FLP.c | 16 +-
 libs/libopus/silk/float/solve_LS_FLP.c | 207 ---
 libs/libopus/silk/float/sort_FLP.c | 6 +-
 libs/libopus/silk/float/structs_FLP.h | 22 +-
 .../silk/float/warped_autocorrelation_FLP.c | 6 +-
 libs/libopus/silk/float/wrappers_FLP.c | 49 +-
 libs/libopus/silk/gain_quant.c | 1 +
 libs/libopus/silk/init_decoder.c | 1 +
 libs/libopus/silk/interpolate.c | 4 +-
 libs/libopus/silk/lin2log.c | 2 +-
 libs/libopus/silk/macros.h | 8 -
 libs/libopus/silk/main.h | 125 +-
 libs/libopus/silk/mips/NSQ_del_dec_mipsr1.h | 7 +-
 libs/libopus/silk/mips/sigproc_fix_mipsr1.h | 5 -
 libs/libopus/silk/process_NLSFs.c | 10 +-
 libs/libopus/silk/quant_LTP_gains.c | 67 +-
 libs/libopus/silk/resampler.c | 10 +-
 libs/libopus/silk/resampler_down2.c | 4 +-
 libs/libopus/silk/resampler_private_down_FIR.c | 2 +-
 libs/libopus/silk/sort.c | 14 +-
 libs/libopus/silk/stereo_LR_to_MS.c | 2 +-
 libs/libopus/silk/stereo_encode_pred.c | 6 +-
 libs/libopus/silk/structs.h | 6 +-
 libs/libopus/silk/sum_sqr_shift.c | 51 +-
 libs/libopus/silk/tables.h | 8 -
 libs/libopus/silk/tables_LTP.c | 2 -
 libs/libopus/silk/tables_NLSF_CB_NB_MB.c | 36 +
 libs/libopus/silk/tables_NLSF_CB_WB.c | 36 +
 libs/libopus/silk/tables_other.c | 14 -
 libs/libopus/silk/tuning_parameters.h | 28 +-
 libs/libopus/silk/typedef.h | 3 +
 libs/libopus/silk/x86/NSQ_del_dec_sse.c | 857 ------------
 libs/libopus/silk/x86/NSQ_del_dec_sse4_1.c | 914 +++++++++++++
 libs/libopus/silk/x86/NSQ_sse.c | 720 ----------
 libs/libopus/silk/x86/NSQ_sse4_1.c | 772 +++++++++++
 libs/libopus/silk/x86/SigProc_FIX_sse.h | 12 +-
 libs/libopus/silk/x86/VAD_sse.c | 277 ----
 libs/libopus/silk/x86/VAD_sse4_1.c | 289 ++++
 libs/libopus/silk/x86/VQ_WMat_EC_sse.c | 142 --
 libs/libopus/silk/x86/VQ_WMat_EC_sse4_1.c | 173 +++
 libs/libopus/silk/x86/main_sse.h | 199 ++-
 libs/libopus/silk/x86/x86_silk_map.c | 99 +-
 libs/libopus/sources.mozbuild | 44 +-
 libs/libopus/src/analysis.c | 779 +++++++----
 libs/libopus/src/analysis.h | 38 +-
 libs/libopus/src/mapping_matrix.c | 378 ++++++
 libs/libopus/src/mapping_matrix.h | 133 ++
 libs/libopus/src/mlp.c | 155 ++-
 libs/libopus/src/mlp.h | 35 +-
 libs/libopus/src/mlp_data.c | 755 +++++++++--
 libs/libopus/src/opus.c | 4 +-
 libs/libopus/src/opus_decoder.c | 117 +-
 libs/libopus/src/opus_encoder.c | 1401 ++++++++++++--------
 libs/libopus/src/opus_multistream_decoder.c | 97 +-
 libs/libopus/src/opus_multistream_encoder.c | 358 +++--
 libs/libopus/src/opus_private.h | 85 +-
 libs/libopus/src/opus_projection_decoder.c | 258 ++++
 libs/libopus/src/opus_projection_encoder.c | 468 +++++++
 libs/libopus/src/repacketizer.c | 3 +-
 libs/libopus/update.sh | 3 +-
 235 files changed, 13617 insertions(+), 8290 deletions(-)
 create
mode 100644 libs/libopus/celt/arm/celt_fft_ne10.c create mode 100644 libs/libopus/celt/arm/celt_mdct_ne10.c delete mode 100644 libs/libopus/celt/arm/celt_ne10_fft.c delete mode 100644 libs/libopus/celt/arm/celt_ne10_mdct.c create mode 100644 libs/libopus/celt/arm/pitch_neon_intr.c delete mode 100644 libs/libopus/celt/x86/celt_lpc_sse.c create mode 100644 libs/libopus/celt/x86/celt_lpc_sse4_1.c create mode 100644 libs/libopus/celt/x86/vq_sse.h create mode 100644 libs/libopus/celt/x86/vq_sse2.c create mode 100644 libs/libopus/include/opus_projection.h create mode 100644 libs/libopus/nonunified2.patch create mode 100644 libs/libopus/silk/LPC_fit.c create mode 100644 libs/libopus/silk/arm/LPC_inv_pred_gain_arm.h create mode 100644 libs/libopus/silk/arm/LPC_inv_pred_gain_neon_intr.c create mode 100644 libs/libopus/silk/arm/NSQ_del_dec_arm.h create mode 100644 libs/libopus/silk/arm/NSQ_del_dec_neon_intr.c create mode 100644 libs/libopus/silk/arm/biquad_alt_arm.h create mode 100644 libs/libopus/silk/arm/biquad_alt_neon_intr.c create mode 100644 libs/libopus/silk/fixed/arm/warped_autocorrelation_FIX_arm.h create mode 100644 libs/libopus/silk/fixed/arm/warped_autocorrelation_FIX_neon_intr.c delete mode 100644 libs/libopus/silk/fixed/mips/prefilter_FIX_mipsr1.h delete mode 100644 libs/libopus/silk/fixed/prefilter_FIX.c delete mode 100644 libs/libopus/silk/fixed/solve_LS_FIX.c delete mode 100644 libs/libopus/silk/fixed/x86/burg_modified_FIX_sse.c create mode 100644 libs/libopus/silk/fixed/x86/burg_modified_FIX_sse4_1.c delete mode 100644 libs/libopus/silk/fixed/x86/prefilter_FIX_sse.c delete mode 100644 libs/libopus/silk/fixed/x86/vector_ops_FIX_sse.c create mode 100644 libs/libopus/silk/fixed/x86/vector_ops_FIX_sse4_1.c delete mode 100644 libs/libopus/silk/float/levinsondurbin_FLP.c delete mode 100644 libs/libopus/silk/float/prefilter_FLP.c delete mode 100644 libs/libopus/silk/float/solve_LS_FLP.c delete mode 100644 libs/libopus/silk/x86/NSQ_del_dec_sse.c create mode 100644 libs/libopus/silk/x86/NSQ_del_dec_sse4_1.c delete mode 100644 libs/libopus/silk/x86/NSQ_sse.c create mode 100644 libs/libopus/silk/x86/NSQ_sse4_1.c delete mode 100644 libs/libopus/silk/x86/VAD_sse.c create mode 100644 libs/libopus/silk/x86/VAD_sse4_1.c delete mode 100644 libs/libopus/silk/x86/VQ_WMat_EC_sse.c create mode 100644 libs/libopus/silk/x86/VQ_WMat_EC_sse4_1.c create mode 100644 libs/libopus/src/mapping_matrix.c create mode 100644 libs/libopus/src/mapping_matrix.h create mode 100644 libs/libopus/src/opus_projection_decoder.c create mode 100644 libs/libopus/src/opus_projection_encoder.c diff --git a/libs/libopus/Makefile.in b/libs/libopus/Makefile.in index 34998eff4..4fa77570f 100644 --- a/libs/libopus/Makefile.in +++ b/libs/libopus/Makefile.in @@ -11,10 +11,10 @@ celt_pitch_xcorr_arm-gnu.s: celt/arm/armopts-gnu.S # armopts needs a specific rule, because arm2gnu.pl will always add the .S # suffix when translating the files that include it. 
-celt/arm/armopts-gnu.S: celt/arm/armopts.s $(call mkdir_deps,celt/arm) $(GLOBAL_DEPS) +celt/arm/armopts-gnu.S: $(srcdir)/celt/arm/armopts.s $(call mkdir_deps,celt/arm) $(GLOBAL_DEPS) $(PERL) $(srcdir)/celt/arm/arm2gnu.pl < $< > $@ # For all others, we can use an implicit rule -%-gnu.s: celt/arm/%.s $(GLOBAL_DEPS) +%-gnu.s: $(srcdir)/celt/arm/%.s $(GLOBAL_DEPS) $(PERL) $(srcdir)/celt/arm/arm2gnu.pl < $< > $@ endif diff --git a/libs/libopus/README_MOZILLA b/libs/libopus/README_MOZILLA index ee61ca17a..d2c5c4920 100644 --- a/libs/libopus/README_MOZILLA +++ b/libs/libopus/README_MOZILLA @@ -8,4 +8,4 @@ files after the copy step. The upstream repository is https://git.xiph.org/opus.git -The git tag/revision used was v1.1.3. +The git tag/revision used was 2654707e86cc94413998976d179b2ab4a2aa3114. diff --git a/libs/libopus/celt/_kiss_fft_guts.h b/libs/libopus/celt/_kiss_fft_guts.h index 5e3d58fd6..17392b3e9 100644 --- a/libs/libopus/celt/_kiss_fft_guts.h +++ b/libs/libopus/celt/_kiss_fft_guts.h @@ -58,12 +58,12 @@ # define S_MUL(a,b) MULT16_32_Q15(b, a) # define C_MUL(m,a,b) \ - do{ (m).r = SUB32(S_MUL((a).r,(b).r) , S_MUL((a).i,(b).i)); \ - (m).i = ADD32(S_MUL((a).r,(b).i) , S_MUL((a).i,(b).r)); }while(0) + do{ (m).r = SUB32_ovflw(S_MUL((a).r,(b).r) , S_MUL((a).i,(b).i)); \ + (m).i = ADD32_ovflw(S_MUL((a).r,(b).i) , S_MUL((a).i,(b).r)); }while(0) # define C_MULC(m,a,b) \ - do{ (m).r = ADD32(S_MUL((a).r,(b).r) , S_MUL((a).i,(b).i)); \ - (m).i = SUB32(S_MUL((a).i,(b).r) , S_MUL((a).r,(b).i)); }while(0) + do{ (m).r = ADD32_ovflw(S_MUL((a).r,(b).r) , S_MUL((a).i,(b).i)); \ + (m).i = SUB32_ovflw(S_MUL((a).i,(b).r) , S_MUL((a).r,(b).i)); }while(0) # define C_MULBYSCALAR( c, s ) \ do{ (c).r = S_MUL( (c).r , s ) ;\ @@ -77,17 +77,17 @@ DIVSCALAR( (c).i , div); }while (0) #define C_ADD( res, a,b)\ - do {(res).r=ADD32((a).r,(b).r); (res).i=ADD32((a).i,(b).i); \ + do {(res).r=ADD32_ovflw((a).r,(b).r); (res).i=ADD32_ovflw((a).i,(b).i); \ }while(0) #define C_SUB( res, a,b)\ - do {(res).r=SUB32((a).r,(b).r); (res).i=SUB32((a).i,(b).i); \ + do {(res).r=SUB32_ovflw((a).r,(b).r); (res).i=SUB32_ovflw((a).i,(b).i); \ }while(0) #define C_ADDTO( res , a)\ - do {(res).r = ADD32((res).r, (a).r); (res).i = ADD32((res).i,(a).i);\ + do {(res).r = ADD32_ovflw((res).r, (a).r); (res).i = ADD32_ovflw((res).i,(a).i);\ }while(0) #define C_SUBFROM( res , a)\ - do {(res).r = ADD32((res).r,(a).r); (res).i = SUB32((res).i,(a).i); \ + do {(res).r = ADD32_ovflw((res).r,(a).r); (res).i = SUB32_ovflw((res).i,(a).i); \ }while(0) #if defined(OPUS_ARM_INLINE_ASM) diff --git a/libs/libopus/celt/arch.h b/libs/libopus/celt/arch.h index 8ceab5fe1..3845c3a08 100644 --- a/libs/libopus/celt/arch.h +++ b/libs/libopus/celt/arch.h @@ -46,25 +46,53 @@ # endif # endif +#if OPUS_GNUC_PREREQ(3, 0) +#define opus_likely(x) (__builtin_expect(!!(x), 1)) +#define opus_unlikely(x) (__builtin_expect(!!(x), 0)) +#else +#define opus_likely(x) (!!(x)) +#define opus_unlikely(x) (!!(x)) +#endif + #define CELT_SIG_SCALE 32768.f -#define celt_fatal(str) _celt_fatal(str, __FILE__, __LINE__); -#ifdef ENABLE_ASSERTIONS +#define CELT_FATAL(str) celt_fatal(str, __FILE__, __LINE__); + +#if defined(ENABLE_ASSERTIONS) || defined(ENABLE_HARDENING) +#ifdef __GNUC__ +__attribute__((noreturn)) +#endif +void celt_fatal(const char *str, const char *file, int line); + +#if defined(CELT_C) && !defined(OVERRIDE_celt_fatal) #include #include #ifdef __GNUC__ __attribute__((noreturn)) #endif -static OPUS_INLINE void _celt_fatal(const char *str, const char *file, int line) +void 
celt_fatal(const char *str, const char *file, int line) { fprintf (stderr, "Fatal (internal) error in %s, line %d: %s\n", file, line, str); +#if defined(_MSC_VER) + _set_abort_behavior( 0, _WRITE_ABORT_MSG); +#endif abort(); } -#define celt_assert(cond) {if (!(cond)) {celt_fatal("assertion failed: " #cond);}} -#define celt_assert2(cond, message) {if (!(cond)) {celt_fatal("assertion failed: " #cond "\n" message);}} +#endif + +#define celt_assert(cond) {if (!(cond)) {CELT_FATAL("assertion failed: " #cond);}} +#define celt_assert2(cond, message) {if (!(cond)) {CELT_FATAL("assertion failed: " #cond "\n" message);}} +#define MUST_SUCCEED(call) celt_assert((call) == OPUS_OK) #else #define celt_assert(cond) #define celt_assert2(cond, message) +#define MUST_SUCCEED(call) do {if((call) != OPUS_OK) {RESTORE_STACK; return OPUS_INTERNAL_ERROR;} } while (0) +#endif + +#if defined(ENABLE_ASSERTIONS) +#define celt_sig_assert(cond) {if (!(cond)) {CELT_FATAL("signal assertion failed: " #cond);}} +#else +#define celt_sig_assert(cond) #endif #define IMUL32(a,b) ((a)*(b)) @@ -93,14 +121,20 @@ static OPUS_INLINE void _celt_fatal(const char *str, const char *file, int line) typedef opus_int16 opus_val16; typedef opus_int32 opus_val32; +typedef opus_int64 opus_val64; typedef opus_val32 celt_sig; typedef opus_val16 celt_norm; typedef opus_val32 celt_ener; +#define celt_isnan(x) 0 + #define Q15ONE 32767 #define SIG_SHIFT 12 +/* Safe saturation value for 32-bit signals. Should be less than + 2^31*(1-0.85) to avoid blowing up on DC at deemphasis.*/ +#define SIG_SAT (300000000) #define NORM_SCALING 16384 @@ -129,7 +163,7 @@ static OPUS_INLINE opus_int16 SAT16(opus_int32 x) { #ifdef OPUS_ARM_PRESUME_AARCH64_NEON_INTR #include "arm/fixed_arm64.h" -#elif OPUS_ARM_INLINE_EDSP +#elif defined (OPUS_ARM_INLINE_EDSP) #include "arm/fixed_armv5e.h" #elif defined (OPUS_ARM_INLINE_ASM) #include "arm/fixed_armv4.h" @@ -147,6 +181,7 @@ static OPUS_INLINE opus_int16 SAT16(opus_int32 x) { typedef float opus_val16; typedef float opus_val32; +typedef float opus_val64; typedef float celt_sig; typedef float celt_norm; @@ -186,6 +221,7 @@ static OPUS_INLINE int celt_isnan(float x) #define NEG16(x) (-(x)) #define NEG32(x) (-(x)) +#define NEG32_ovflw(x) (-(x)) #define EXTRACT16(x) (x) #define EXTEND32(x) (x) #define SHR16(a,shift) (a) @@ -202,6 +238,7 @@ static OPUS_INLINE int celt_isnan(float x) #define SATURATE16(x) (x) #define ROUND16(a,shift) (a) +#define SROUND16(a,shift) (a) #define HALF16(x) (.5f*(x)) #define HALF32(x) (.5f*(x)) @@ -209,6 +246,8 @@ static OPUS_INLINE int celt_isnan(float x) #define SUB16(a,b) ((a)-(b)) #define ADD32(a,b) ((a)+(b)) #define SUB32(a,b) ((a)-(b)) +#define ADD32_ovflw(a,b) ((a)+(b)) +#define SUB32_ovflw(a,b) ((a)-(b)) #define MULT16_16_16(a,b) ((a)*(b)) #define MULT16_16(a,b) ((opus_val32)(a)*(opus_val32)(b)) #define MAC16_16(c,a,b) ((c)+(opus_val32)(a)*(opus_val32)(b)) @@ -243,9 +282,9 @@ static OPUS_INLINE int celt_isnan(float x) #ifndef GLOBAL_STACK_SIZE #ifdef FIXED_POINT -#define GLOBAL_STACK_SIZE 100000 +#define GLOBAL_STACK_SIZE 120000 #else -#define GLOBAL_STACK_SIZE 100000 +#define GLOBAL_STACK_SIZE 120000 #endif #endif diff --git a/libs/libopus/celt/arm/arm2gnu.pl b/libs/libopus/celt/arm/arm2gnu.pl index 6c922ac81..a2895f744 100755 --- a/libs/libopus/celt/arm/arm2gnu.pl +++ b/libs/libopus/celt/arm/arm2gnu.pl @@ -164,11 +164,11 @@ while (<>) { $prefix = ""; if ($proc) { - $prefix = $prefix.sprintf("\t.type\t%s, %%function; ",$proc) unless ($apple); + $prefix = $prefix.sprintf("\t.type\t%s, 
%%function", $proc) unless ($apple); # Make sure we $prefix isn't empty here (for the $apple case). # We handle mangling the label here, make sure it doesn't match # the label handling below (if $prefix would be empty). - $prefix = "; "; + $prefix = $prefix."; "; push(@proc_stack, $proc); s/^[A-Za-z_\.]\w+/$symprefix$&:/; } diff --git a/libs/libopus/celt/arm/arm_celt_map.c b/libs/libopus/celt/arm/arm_celt_map.c index 4d4d069a8..ca988b66f 100644 --- a/libs/libopus/celt/arm/arm_celt_map.c +++ b/libs/libopus/celt/arm/arm_celt_map.c @@ -35,12 +35,29 @@ #if defined(OPUS_HAVE_RTCD) +# if defined(OPUS_ARM_MAY_HAVE_NEON_INTR) && !defined(OPUS_ARM_PRESUME_NEON_INTR) +opus_val32 (*const CELT_INNER_PROD_IMPL[OPUS_ARCHMASK+1])(const opus_val16 *x, const opus_val16 *y, int N) = { + celt_inner_prod_c, /* ARMv4 */ + celt_inner_prod_c, /* EDSP */ + celt_inner_prod_c, /* Media */ + celt_inner_prod_neon /* NEON */ +}; + +void (*const DUAL_INNER_PROD_IMPL[OPUS_ARCHMASK+1])(const opus_val16 *x, const opus_val16 *y01, const opus_val16 *y02, + int N, opus_val32 *xy1, opus_val32 *xy2) = { + dual_inner_prod_c, /* ARMv4 */ + dual_inner_prod_c, /* EDSP */ + dual_inner_prod_c, /* Media */ + dual_inner_prod_neon /* NEON */ +}; +# endif + # if defined(FIXED_POINT) # if ((defined(OPUS_ARM_MAY_HAVE_NEON) && !defined(OPUS_ARM_PRESUME_NEON)) || \ (defined(OPUS_ARM_MAY_HAVE_MEDIA) && !defined(OPUS_ARM_PRESUME_MEDIA)) || \ (defined(OPUS_ARM_MAY_HAVE_EDSP) && !defined(OPUS_ARM_PRESUME_EDSP))) opus_val32 (*const CELT_PITCH_XCORR_IMPL[OPUS_ARCHMASK+1])(const opus_val16 *, - const opus_val16 *, opus_val32 *, int , int) = { + const opus_val16 *, opus_val32 *, int, int, int) = { celt_pitch_xcorr_c, /* ARMv4 */ MAY_HAVE_EDSP(celt_pitch_xcorr), /* EDSP */ MAY_HAVE_MEDIA(celt_pitch_xcorr), /* Media */ @@ -51,7 +68,7 @@ opus_val32 (*const CELT_PITCH_XCORR_IMPL[OPUS_ARCHMASK+1])(const opus_val16 *, # else /* !FIXED_POINT */ # if defined(OPUS_ARM_MAY_HAVE_NEON_INTR) && !defined(OPUS_ARM_PRESUME_NEON_INTR) void (*const CELT_PITCH_XCORR_IMPL[OPUS_ARCHMASK+1])(const opus_val16 *, - const opus_val16 *, opus_val32 *, int, int) = { + const opus_val16 *, opus_val32 *, int, int, int) = { celt_pitch_xcorr_c, /* ARMv4 */ celt_pitch_xcorr_c, /* EDSP */ celt_pitch_xcorr_c, /* Media */ diff --git a/libs/libopus/celt/arm/armcpu.c b/libs/libopus/celt/arm/armcpu.c index 694a63b78..cce3ae3a9 100644 --- a/libs/libopus/celt/arm/armcpu.c +++ b/libs/libopus/celt/arm/armcpu.c @@ -93,6 +93,8 @@ static OPUS_INLINE opus_uint32 opus_cpu_capabilities(void){ #elif defined(__linux__) /* Linux based */ +#include + opus_uint32 opus_cpu_capabilities(void) { opus_uint32 flags = 0; diff --git a/libs/libopus/celt/arm/celt_fft_ne10.c b/libs/libopus/celt/arm/celt_fft_ne10.c new file mode 100644 index 000000000..ea5fd7808 --- /dev/null +++ b/libs/libopus/celt/arm/celt_fft_ne10.c @@ -0,0 +1,173 @@ +/* Copyright (c) 2015 Xiph.Org Foundation + Written by Viswanath Puttagunta */ +/** + @file celt_fft_ne10.c + @brief ARM Neon optimizations for fft using NE10 library + */ + +/* + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + - Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + - Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER + OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef SKIP_CONFIG_H +#ifdef HAVE_CONFIG_H +#include "config.h" +#endif +#endif + +#include +#include "os_support.h" +#include "kiss_fft.h" +#include "stack_alloc.h" + +#if !defined(FIXED_POINT) +# define NE10_FFT_ALLOC_C2C_TYPE_NEON ne10_fft_alloc_c2c_float32_neon +# define NE10_FFT_CFG_TYPE_T ne10_fft_cfg_float32_t +# define NE10_FFT_STATE_TYPE_T ne10_fft_state_float32_t +# define NE10_FFT_DESTROY_C2C_TYPE ne10_fft_destroy_c2c_float32 +# define NE10_FFT_CPX_TYPE_T ne10_fft_cpx_float32_t +# define NE10_FFT_C2C_1D_TYPE_NEON ne10_fft_c2c_1d_float32_neon +#else +# define NE10_FFT_ALLOC_C2C_TYPE_NEON(nfft) ne10_fft_alloc_c2c_int32_neon(nfft) +# define NE10_FFT_CFG_TYPE_T ne10_fft_cfg_int32_t +# define NE10_FFT_STATE_TYPE_T ne10_fft_state_int32_t +# define NE10_FFT_DESTROY_C2C_TYPE ne10_fft_destroy_c2c_int32 +# define NE10_FFT_DESTROY_C2C_TYPE ne10_fft_destroy_c2c_int32 +# define NE10_FFT_CPX_TYPE_T ne10_fft_cpx_int32_t +# define NE10_FFT_C2C_1D_TYPE_NEON ne10_fft_c2c_1d_int32_neon +#endif + +#if defined(CUSTOM_MODES) + +/* nfft lengths in NE10 that support scaled fft */ +# define NE10_FFTSCALED_SUPPORT_MAX 4 +static const int ne10_fft_scaled_support[NE10_FFTSCALED_SUPPORT_MAX] = { + 480, 240, 120, 60 +}; + +int opus_fft_alloc_arm_neon(kiss_fft_state *st) +{ + int i; + size_t memneeded = sizeof(struct arch_fft_state); + + st->arch_fft = (arch_fft_state *)opus_alloc(memneeded); + if (!st->arch_fft) + return -1; + + for (i = 0; i < NE10_FFTSCALED_SUPPORT_MAX; i++) { + if(st->nfft == ne10_fft_scaled_support[i]) + break; + } + if (i == NE10_FFTSCALED_SUPPORT_MAX) { + /* This nfft length (scaled fft) is not supported in NE10 */ + st->arch_fft->is_supported = 0; + st->arch_fft->priv = NULL; + } + else { + st->arch_fft->is_supported = 1; + st->arch_fft->priv = (void *)NE10_FFT_ALLOC_C2C_TYPE_NEON(st->nfft); + if (st->arch_fft->priv == NULL) { + return -1; + } + } + return 0; +} + +void opus_fft_free_arm_neon(kiss_fft_state *st) +{ + NE10_FFT_CFG_TYPE_T cfg; + + if (!st->arch_fft) + return; + + cfg = (NE10_FFT_CFG_TYPE_T)st->arch_fft->priv; + if (cfg) + NE10_FFT_DESTROY_C2C_TYPE(cfg); + opus_free(st->arch_fft); +} +#endif + +void opus_fft_neon(const kiss_fft_state *st, + const kiss_fft_cpx *fin, + kiss_fft_cpx *fout) +{ + NE10_FFT_STATE_TYPE_T state; + NE10_FFT_CFG_TYPE_T cfg = &state; + VARDECL(NE10_FFT_CPX_TYPE_T, buffer); + SAVE_STACK; + ALLOC(buffer, st->nfft, NE10_FFT_CPX_TYPE_T); + + if (!st->arch_fft->is_supported) { + /* This nfft length (scaled fft) not supported in NE10 */ + opus_fft_c(st, fin, fout); + } + else { + memcpy((void *)cfg, st->arch_fft->priv, sizeof(NE10_FFT_STATE_TYPE_T)); + state.buffer = (NE10_FFT_CPX_TYPE_T *)&buffer[0]; +#if !defined(FIXED_POINT) + state.is_forward_scaled = 1; + + 
NE10_FFT_C2C_1D_TYPE_NEON((NE10_FFT_CPX_TYPE_T *)fout, + (NE10_FFT_CPX_TYPE_T *)fin, + cfg, 0); +#else + NE10_FFT_C2C_1D_TYPE_NEON((NE10_FFT_CPX_TYPE_T *)fout, + (NE10_FFT_CPX_TYPE_T *)fin, + cfg, 0, 1); +#endif + } + RESTORE_STACK; +} + +void opus_ifft_neon(const kiss_fft_state *st, + const kiss_fft_cpx *fin, + kiss_fft_cpx *fout) +{ + NE10_FFT_STATE_TYPE_T state; + NE10_FFT_CFG_TYPE_T cfg = &state; + VARDECL(NE10_FFT_CPX_TYPE_T, buffer); + SAVE_STACK; + ALLOC(buffer, st->nfft, NE10_FFT_CPX_TYPE_T); + + if (!st->arch_fft->is_supported) { + /* This nfft length (scaled fft) not supported in NE10 */ + opus_ifft_c(st, fin, fout); + } + else { + memcpy((void *)cfg, st->arch_fft->priv, sizeof(NE10_FFT_STATE_TYPE_T)); + state.buffer = (NE10_FFT_CPX_TYPE_T *)&buffer[0]; +#if !defined(FIXED_POINT) + state.is_backward_scaled = 0; + + NE10_FFT_C2C_1D_TYPE_NEON((NE10_FFT_CPX_TYPE_T *)fout, + (NE10_FFT_CPX_TYPE_T *)fin, + cfg, 1); +#else + NE10_FFT_C2C_1D_TYPE_NEON((NE10_FFT_CPX_TYPE_T *)fout, + (NE10_FFT_CPX_TYPE_T *)fin, + cfg, 1, 0); +#endif + } + RESTORE_STACK; +} diff --git a/libs/libopus/celt/arm/celt_mdct_ne10.c b/libs/libopus/celt/arm/celt_mdct_ne10.c new file mode 100644 index 000000000..3531d02d1 --- /dev/null +++ b/libs/libopus/celt/arm/celt_mdct_ne10.c @@ -0,0 +1,258 @@ +/* Copyright (c) 2015 Xiph.Org Foundation + Written by Viswanath Puttagunta */ +/** + @file celt_mdct_ne10.c + @brief ARM Neon optimizations for mdct using NE10 library + */ + +/* + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + - Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + - Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER + OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#ifndef SKIP_CONFIG_H +#ifdef HAVE_CONFIG_H +#include "config.h" +#endif +#endif + +#include "kiss_fft.h" +#include "_kiss_fft_guts.h" +#include "mdct.h" +#include "stack_alloc.h" + +void clt_mdct_forward_neon(const mdct_lookup *l, + kiss_fft_scalar *in, + kiss_fft_scalar * OPUS_RESTRICT out, + const opus_val16 *window, + int overlap, int shift, int stride, int arch) +{ + int i; + int N, N2, N4; + VARDECL(kiss_fft_scalar, f); + VARDECL(kiss_fft_cpx, f2); + const kiss_fft_state *st = l->kfft[shift]; + const kiss_twiddle_scalar *trig; + + SAVE_STACK; + + N = l->n; + trig = l->trig; + for (i=0;i>= 1; + trig += N; + } + N2 = N>>1; + N4 = N>>2; + + ALLOC(f, N2, kiss_fft_scalar); + ALLOC(f2, N4, kiss_fft_cpx); + + /* Consider the input to be composed of four blocks: [a, b, c, d] */ + /* Window, shuffle, fold */ + { + /* Temp pointers to make it really clear to the compiler what we're doing */ + const kiss_fft_scalar * OPUS_RESTRICT xp1 = in+(overlap>>1); + const kiss_fft_scalar * OPUS_RESTRICT xp2 = in+N2-1+(overlap>>1); + kiss_fft_scalar * OPUS_RESTRICT yp = f; + const opus_val16 * OPUS_RESTRICT wp1 = window+(overlap>>1); + const opus_val16 * OPUS_RESTRICT wp2 = window+(overlap>>1)-1; + for(i=0;i<((overlap+3)>>2);i++) + { + /* Real part arranged as -d-cR, Imag part arranged as -b+aR*/ + *yp++ = MULT16_32_Q15(*wp2, xp1[N2]) + MULT16_32_Q15(*wp1,*xp2); + *yp++ = MULT16_32_Q15(*wp1, *xp1) - MULT16_32_Q15(*wp2, xp2[-N2]); + xp1+=2; + xp2-=2; + wp1+=2; + wp2-=2; + } + wp1 = window; + wp2 = window+overlap-1; + for(;i>2);i++) + { + /* Real part arranged as a-bR, Imag part arranged as -c-dR */ + *yp++ = *xp2; + *yp++ = *xp1; + xp1+=2; + xp2-=2; + } + for(;ii,t[N4+i]) - S_MUL(fp->r,t[i]); + yi = S_MUL(fp->r,t[N4+i]) + S_MUL(fp->i,t[i]); + *yp1 = yr; + *yp2 = yi; + fp++; + yp1 += 2*stride; + yp2 -= 2*stride; + } + } + RESTORE_STACK; +} + +void clt_mdct_backward_neon(const mdct_lookup *l, + kiss_fft_scalar *in, + kiss_fft_scalar * OPUS_RESTRICT out, + const opus_val16 * OPUS_RESTRICT window, + int overlap, int shift, int stride, int arch) +{ + int i; + int N, N2, N4; + VARDECL(kiss_fft_scalar, f); + const kiss_twiddle_scalar *trig; + const kiss_fft_state *st = l->kfft[shift]; + + N = l->n; + trig = l->trig; + for (i=0;i>= 1; + trig += N; + } + N2 = N>>1; + N4 = N>>2; + + ALLOC(f, N2, kiss_fft_scalar); + + /* Pre-rotate */ + { + /* Temp pointers to make it really clear to the compiler what we're doing */ + const kiss_fft_scalar * OPUS_RESTRICT xp1 = in; + const kiss_fft_scalar * OPUS_RESTRICT xp2 = in+stride*(N2-1); + kiss_fft_scalar * OPUS_RESTRICT yp = f; + const kiss_twiddle_scalar * OPUS_RESTRICT t = &trig[0]; + for(i=0;i>1)), arch); + + /* Post-rotate and de-shuffle from both ends of the buffer at once to make + it in-place. */ + { + kiss_fft_scalar * yp0 = out+(overlap>>1); + kiss_fft_scalar * yp1 = out+(overlap>>1)+N2-2; + const kiss_twiddle_scalar *t = &trig[0]; + /* Loop to (N4+1)>>1 to handle odd N4. When N4 is odd, the + middle pair will be computed twice. 
*/ + for(i=0;i<(N4+1)>>1;i++) + { + kiss_fft_scalar re, im, yr, yi; + kiss_twiddle_scalar t0, t1; + re = yp0[0]; + im = yp0[1]; + t0 = t[i]; + t1 = t[N4+i]; + /* We'd scale up by 2 here, but instead it's done when mixing the windows */ + yr = S_MUL(re,t0) + S_MUL(im,t1); + yi = S_MUL(re,t1) - S_MUL(im,t0); + re = yp1[0]; + im = yp1[1]; + yp0[0] = yr; + yp1[1] = yi; + + t0 = t[(N4-i-1)]; + t1 = t[(N2-i-1)]; + /* We'd scale up by 2 here, but instead it's done when mixing the windows */ + yr = S_MUL(re,t0) + S_MUL(im,t1); + yi = S_MUL(re,t1) - S_MUL(im,t0); + yp1[0] = yr; + yp0[1] = yi; + yp0 += 2; + yp1 -= 2; + } + } + + /* Mirror on both sides for TDAC */ + { + kiss_fft_scalar * OPUS_RESTRICT xp1 = out+overlap-1; + kiss_fft_scalar * OPUS_RESTRICT yp1 = out; + const opus_val16 * OPUS_RESTRICT wp1 = window; + const opus_val16 * OPUS_RESTRICT wp2 = window+overlap-1; + + for(i = 0; i < overlap/2; i++) + { + kiss_fft_scalar x1, x2; + x1 = *xp1; + x2 = *yp1; + *yp1++ = MULT16_32_Q15(*wp2, x2) - MULT16_32_Q15(*wp1, x1); + *xp1-- = MULT16_32_Q15(*wp1, x2) + MULT16_32_Q15(*wp2, x1); + wp1++; + wp2--; + } + } + RESTORE_STACK; +} diff --git a/libs/libopus/celt/arm/celt_ne10_fft.c b/libs/libopus/celt/arm/celt_ne10_fft.c deleted file mode 100644 index 42d96a711..000000000 --- a/libs/libopus/celt/arm/celt_ne10_fft.c +++ /dev/null @@ -1,174 +0,0 @@ -/* Copyright (c) 2015 Xiph.Org Foundation - Written by Viswanath Puttagunta */ -/** - @file celt_ne10_fft.c - @brief ARM Neon optimizations for fft using NE10 library - */ - -/* - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - - Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - - Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER - OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, - EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR - PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF - LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING - NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-*/ - -#ifndef SKIP_CONFIG_H -#ifdef HAVE_CONFIG_H -#include "config.h" -#endif -#endif - -#include -#include -#include "os_support.h" -#include "kiss_fft.h" -#include "stack_alloc.h" - -#if !defined(FIXED_POINT) -# define NE10_FFT_ALLOC_C2C_TYPE_NEON ne10_fft_alloc_c2c_float32_neon -# define NE10_FFT_CFG_TYPE_T ne10_fft_cfg_float32_t -# define NE10_FFT_STATE_TYPE_T ne10_fft_state_float32_t -# define NE10_FFT_DESTROY_C2C_TYPE ne10_fft_destroy_c2c_float32 -# define NE10_FFT_CPX_TYPE_T ne10_fft_cpx_float32_t -# define NE10_FFT_C2C_1D_TYPE_NEON ne10_fft_c2c_1d_float32_neon -#else -# define NE10_FFT_ALLOC_C2C_TYPE_NEON(nfft) ne10_fft_alloc_c2c_int32_neon(nfft) -# define NE10_FFT_CFG_TYPE_T ne10_fft_cfg_int32_t -# define NE10_FFT_STATE_TYPE_T ne10_fft_state_int32_t -# define NE10_FFT_DESTROY_C2C_TYPE ne10_fft_destroy_c2c_int32 -# define NE10_FFT_DESTROY_C2C_TYPE ne10_fft_destroy_c2c_int32 -# define NE10_FFT_CPX_TYPE_T ne10_fft_cpx_int32_t -# define NE10_FFT_C2C_1D_TYPE_NEON ne10_fft_c2c_1d_int32_neon -#endif - -#if defined(CUSTOM_MODES) - -/* nfft lengths in NE10 that support scaled fft */ -# define NE10_FFTSCALED_SUPPORT_MAX 4 -static const int ne10_fft_scaled_support[NE10_FFTSCALED_SUPPORT_MAX] = { - 480, 240, 120, 60 -}; - -int opus_fft_alloc_arm_neon(kiss_fft_state *st) -{ - int i; - size_t memneeded = sizeof(struct arch_fft_state); - - st->arch_fft = (arch_fft_state *)opus_alloc(memneeded); - if (!st->arch_fft) - return -1; - - for (i = 0; i < NE10_FFTSCALED_SUPPORT_MAX; i++) { - if(st->nfft == ne10_fft_scaled_support[i]) - break; - } - if (i == NE10_FFTSCALED_SUPPORT_MAX) { - /* This nfft length (scaled fft) is not supported in NE10 */ - st->arch_fft->is_supported = 0; - st->arch_fft->priv = NULL; - } - else { - st->arch_fft->is_supported = 1; - st->arch_fft->priv = (void *)NE10_FFT_ALLOC_C2C_TYPE_NEON(st->nfft); - if (st->arch_fft->priv == NULL) { - return -1; - } - } - return 0; -} - -void opus_fft_free_arm_neon(kiss_fft_state *st) -{ - NE10_FFT_CFG_TYPE_T cfg; - - if (!st->arch_fft) - return; - - cfg = (NE10_FFT_CFG_TYPE_T)st->arch_fft->priv; - if (cfg) - NE10_FFT_DESTROY_C2C_TYPE(cfg); - opus_free(st->arch_fft); -} -#endif - -void opus_fft_neon(const kiss_fft_state *st, - const kiss_fft_cpx *fin, - kiss_fft_cpx *fout) -{ - NE10_FFT_STATE_TYPE_T state; - NE10_FFT_CFG_TYPE_T cfg = &state; - VARDECL(NE10_FFT_CPX_TYPE_T, buffer); - SAVE_STACK; - ALLOC(buffer, st->nfft, NE10_FFT_CPX_TYPE_T); - - if (!st->arch_fft->is_supported) { - /* This nfft length (scaled fft) not supported in NE10 */ - opus_fft_c(st, fin, fout); - } - else { - memcpy((void *)cfg, st->arch_fft->priv, sizeof(NE10_FFT_STATE_TYPE_T)); - state.buffer = (NE10_FFT_CPX_TYPE_T *)&buffer[0]; -#if !defined(FIXED_POINT) - state.is_forward_scaled = 1; - - NE10_FFT_C2C_1D_TYPE_NEON((NE10_FFT_CPX_TYPE_T *)fout, - (NE10_FFT_CPX_TYPE_T *)fin, - cfg, 0); -#else - NE10_FFT_C2C_1D_TYPE_NEON((NE10_FFT_CPX_TYPE_T *)fout, - (NE10_FFT_CPX_TYPE_T *)fin, - cfg, 0, 1); -#endif - } - RESTORE_STACK; -} - -void opus_ifft_neon(const kiss_fft_state *st, - const kiss_fft_cpx *fin, - kiss_fft_cpx *fout) -{ - NE10_FFT_STATE_TYPE_T state; - NE10_FFT_CFG_TYPE_T cfg = &state; - VARDECL(NE10_FFT_CPX_TYPE_T, buffer); - SAVE_STACK; - ALLOC(buffer, st->nfft, NE10_FFT_CPX_TYPE_T); - - if (!st->arch_fft->is_supported) { - /* This nfft length (scaled fft) not supported in NE10 */ - opus_ifft_c(st, fin, fout); - } - else { - memcpy((void *)cfg, st->arch_fft->priv, sizeof(NE10_FFT_STATE_TYPE_T)); - state.buffer = (NE10_FFT_CPX_TYPE_T *)&buffer[0]; -#if 
!defined(FIXED_POINT) - state.is_backward_scaled = 0; - - NE10_FFT_C2C_1D_TYPE_NEON((NE10_FFT_CPX_TYPE_T *)fout, - (NE10_FFT_CPX_TYPE_T *)fin, - cfg, 1); -#else - NE10_FFT_C2C_1D_TYPE_NEON((NE10_FFT_CPX_TYPE_T *)fout, - (NE10_FFT_CPX_TYPE_T *)fin, - cfg, 1, 0); -#endif - } - RESTORE_STACK; -} diff --git a/libs/libopus/celt/arm/celt_ne10_mdct.c b/libs/libopus/celt/arm/celt_ne10_mdct.c deleted file mode 100644 index 293c3efd7..000000000 --- a/libs/libopus/celt/arm/celt_ne10_mdct.c +++ /dev/null @@ -1,258 +0,0 @@ -/* Copyright (c) 2015 Xiph.Org Foundation - Written by Viswanath Puttagunta */ -/** - @file celt_ne10_mdct.c - @brief ARM Neon optimizations for mdct using NE10 library - */ - -/* - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - - Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - - Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER - OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, - EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR - PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF - LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING - NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-*/ - -#ifndef SKIP_CONFIG_H -#ifdef HAVE_CONFIG_H -#include "config.h" -#endif -#endif - -#include "kiss_fft.h" -#include "_kiss_fft_guts.h" -#include "mdct.h" -#include "stack_alloc.h" - -void clt_mdct_forward_neon(const mdct_lookup *l, - kiss_fft_scalar *in, - kiss_fft_scalar * OPUS_RESTRICT out, - const opus_val16 *window, - int overlap, int shift, int stride, int arch) -{ - int i; - int N, N2, N4; - VARDECL(kiss_fft_scalar, f); - VARDECL(kiss_fft_cpx, f2); - const kiss_fft_state *st = l->kfft[shift]; - const kiss_twiddle_scalar *trig; - - SAVE_STACK; - - N = l->n; - trig = l->trig; - for (i=0;i>= 1; - trig += N; - } - N2 = N>>1; - N4 = N>>2; - - ALLOC(f, N2, kiss_fft_scalar); - ALLOC(f2, N4, kiss_fft_cpx); - - /* Consider the input to be composed of four blocks: [a, b, c, d] */ - /* Window, shuffle, fold */ - { - /* Temp pointers to make it really clear to the compiler what we're doing */ - const kiss_fft_scalar * OPUS_RESTRICT xp1 = in+(overlap>>1); - const kiss_fft_scalar * OPUS_RESTRICT xp2 = in+N2-1+(overlap>>1); - kiss_fft_scalar * OPUS_RESTRICT yp = f; - const opus_val16 * OPUS_RESTRICT wp1 = window+(overlap>>1); - const opus_val16 * OPUS_RESTRICT wp2 = window+(overlap>>1)-1; - for(i=0;i<((overlap+3)>>2);i++) - { - /* Real part arranged as -d-cR, Imag part arranged as -b+aR*/ - *yp++ = MULT16_32_Q15(*wp2, xp1[N2]) + MULT16_32_Q15(*wp1,*xp2); - *yp++ = MULT16_32_Q15(*wp1, *xp1) - MULT16_32_Q15(*wp2, xp2[-N2]); - xp1+=2; - xp2-=2; - wp1+=2; - wp2-=2; - } - wp1 = window; - wp2 = window+overlap-1; - for(;i>2);i++) - { - /* Real part arranged as a-bR, Imag part arranged as -c-dR */ - *yp++ = *xp2; - *yp++ = *xp1; - xp1+=2; - xp2-=2; - } - for(;ii,t[N4+i]) - S_MUL(fp->r,t[i]); - yi = S_MUL(fp->r,t[N4+i]) + S_MUL(fp->i,t[i]); - *yp1 = yr; - *yp2 = yi; - fp++; - yp1 += 2*stride; - yp2 -= 2*stride; - } - } - RESTORE_STACK; -} - -void clt_mdct_backward_neon(const mdct_lookup *l, - kiss_fft_scalar *in, - kiss_fft_scalar * OPUS_RESTRICT out, - const opus_val16 * OPUS_RESTRICT window, - int overlap, int shift, int stride, int arch) -{ - int i; - int N, N2, N4; - VARDECL(kiss_fft_scalar, f); - const kiss_twiddle_scalar *trig; - const kiss_fft_state *st = l->kfft[shift]; - - N = l->n; - trig = l->trig; - for (i=0;i>= 1; - trig += N; - } - N2 = N>>1; - N4 = N>>2; - - ALLOC(f, N2, kiss_fft_scalar); - - /* Pre-rotate */ - { - /* Temp pointers to make it really clear to the compiler what we're doing */ - const kiss_fft_scalar * OPUS_RESTRICT xp1 = in; - const kiss_fft_scalar * OPUS_RESTRICT xp2 = in+stride*(N2-1); - kiss_fft_scalar * OPUS_RESTRICT yp = f; - const kiss_twiddle_scalar * OPUS_RESTRICT t = &trig[0]; - for(i=0;i>1)), arch); - - /* Post-rotate and de-shuffle from both ends of the buffer at once to make - it in-place. */ - { - kiss_fft_scalar * yp0 = out+(overlap>>1); - kiss_fft_scalar * yp1 = out+(overlap>>1)+N2-2; - const kiss_twiddle_scalar *t = &trig[0]; - /* Loop to (N4+1)>>1 to handle odd N4. When N4 is odd, the - middle pair will be computed twice. 
*/ - for(i=0;i<(N4+1)>>1;i++) - { - kiss_fft_scalar re, im, yr, yi; - kiss_twiddle_scalar t0, t1; - re = yp0[0]; - im = yp0[1]; - t0 = t[i]; - t1 = t[N4+i]; - /* We'd scale up by 2 here, but instead it's done when mixing the windows */ - yr = S_MUL(re,t0) + S_MUL(im,t1); - yi = S_MUL(re,t1) - S_MUL(im,t0); - re = yp1[0]; - im = yp1[1]; - yp0[0] = yr; - yp1[1] = yi; - - t0 = t[(N4-i-1)]; - t1 = t[(N2-i-1)]; - /* We'd scale up by 2 here, but instead it's done when mixing the windows */ - yr = S_MUL(re,t0) + S_MUL(im,t1); - yi = S_MUL(re,t1) - S_MUL(im,t0); - yp1[0] = yr; - yp0[1] = yi; - yp0 += 2; - yp1 -= 2; - } - } - - /* Mirror on both sides for TDAC */ - { - kiss_fft_scalar * OPUS_RESTRICT xp1 = out+overlap-1; - kiss_fft_scalar * OPUS_RESTRICT yp1 = out; - const opus_val16 * OPUS_RESTRICT wp1 = window; - const opus_val16 * OPUS_RESTRICT wp2 = window+overlap-1; - - for(i = 0; i < overlap/2; i++) - { - kiss_fft_scalar x1, x2; - x1 = *xp1; - x2 = *yp1; - *yp1++ = MULT16_32_Q15(*wp2, x2) - MULT16_32_Q15(*wp1, x1); - *xp1-- = MULT16_32_Q15(*wp1, x2) + MULT16_32_Q15(*wp2, x1); - wp1++; - wp2--; - } - } - RESTORE_STACK; -} diff --git a/libs/libopus/celt/arm/celt_neon_intr.c b/libs/libopus/celt/arm/celt_neon_intr.c index 47bbe3dc2..effda769d 100644 --- a/libs/libopus/celt/arm/celt_neon_intr.c +++ b/libs/libopus/celt/arm/celt_neon_intr.c @@ -191,121 +191,21 @@ static void xcorr_kernel_neon_float(const float32_t *x, const float32_t *y, vst1q_f32(sum, SUMM); } -/* - * Function: xcorr_kernel_neon_float_process1 - * --------------------------------- - * Computes single correlation values and stores in *sum - */ -static void xcorr_kernel_neon_float_process1(const float32_t *x, - const float32_t *y, float32_t *sum, int len) { - float32x4_t XX[4]; - float32x4_t YY[4]; - float32x2_t XX_2; - float32x2_t YY_2; - float32x4_t SUMM; - float32x2_t SUMM_2[2]; - const float32_t *xi = x; - const float32_t *yi = y; - - SUMM = vdupq_n_f32(0); - - /* Work on 16 values per iteration */ - while (len >= 16) { - XX[0] = vld1q_f32(xi); - xi += 4; - XX[1] = vld1q_f32(xi); - xi += 4; - XX[2] = vld1q_f32(xi); - xi += 4; - XX[3] = vld1q_f32(xi); - xi += 4; - - YY[0] = vld1q_f32(yi); - yi += 4; - YY[1] = vld1q_f32(yi); - yi += 4; - YY[2] = vld1q_f32(yi); - yi += 4; - YY[3] = vld1q_f32(yi); - yi += 4; - - SUMM = vmlaq_f32(SUMM, YY[0], XX[0]); - SUMM = vmlaq_f32(SUMM, YY[1], XX[1]); - SUMM = vmlaq_f32(SUMM, YY[2], XX[2]); - SUMM = vmlaq_f32(SUMM, YY[3], XX[3]); - len -= 16; - } - - /* Work on 8 values */ - if (len >= 8) { - XX[0] = vld1q_f32(xi); - xi += 4; - XX[1] = vld1q_f32(xi); - xi += 4; - - YY[0] = vld1q_f32(yi); - yi += 4; - YY[1] = vld1q_f32(yi); - yi += 4; - - SUMM = vmlaq_f32(SUMM, YY[0], XX[0]); - SUMM = vmlaq_f32(SUMM, YY[1], XX[1]); - len -= 8; - } - - /* Work on 4 values */ - if (len >= 4) { - XX[0] = vld1q_f32(xi); - xi += 4; - YY[0] = vld1q_f32(yi); - yi += 4; - SUMM = vmlaq_f32(SUMM, YY[0], XX[0]); - len -= 4; - } - - /* Start accumulating results */ - SUMM_2[0] = vget_low_f32(SUMM); - if (len >= 2) { - /* While at it, consume 2 more values if available */ - XX_2 = vld1_f32(xi); - xi += 2; - YY_2 = vld1_f32(yi); - yi += 2; - SUMM_2[0] = vmla_f32(SUMM_2[0], YY_2, XX_2); - len -= 2; - } - SUMM_2[1] = vget_high_f32(SUMM); - SUMM_2[0] = vadd_f32(SUMM_2[0], SUMM_2[1]); - SUMM_2[0] = vpadd_f32(SUMM_2[0], SUMM_2[0]); - /* Ok, now we have result accumulated in SUMM_2[0].0 */ - - if (len > 0) { - /* Case when you have one value left */ - XX_2 = vld1_dup_f32(xi); - YY_2 = vld1_dup_f32(yi); - SUMM_2[0] = 
vmla_f32(SUMM_2[0], XX_2, YY_2); - } - - vst1_lane_f32(sum, SUMM_2[0], 0); -} - void celt_pitch_xcorr_float_neon(const opus_val16 *_x, const opus_val16 *_y, - opus_val32 *xcorr, int len, int max_pitch) { + opus_val32 *xcorr, int len, int max_pitch, int arch) { int i; + (void)arch; celt_assert(max_pitch > 0); - celt_assert((((unsigned char *)_x-(unsigned char *)NULL)&3)==0); + celt_sig_assert((((unsigned char *)_x-(unsigned char *)NULL)&3)==0); for (i = 0; i < (max_pitch-3); i += 4) { xcorr_kernel_neon_float((const float32_t *)_x, (const float32_t *)_y+i, (float32_t *)xcorr+i, len); } - /* In case max_pitch isn't multiple of 4 - * compute single correlation value per iteration - */ + /* In case max_pitch isn't a multiple of 4, do non-unrolled version. */ for (; i < max_pitch; i++) { - xcorr_kernel_neon_float_process1((const float32_t *)_x, - (const float32_t *)_y+i, (float32_t *)xcorr+i, len); + xcorr[i] = celt_inner_prod_neon(_x, _y+i, len); } } #endif diff --git a/libs/libopus/celt/arm/celt_pitch_xcorr_arm.s b/libs/libopus/celt/arm/celt_pitch_xcorr_arm.s index f96e0a88b..6e873afc3 100644 --- a/libs/libopus/celt/arm/celt_pitch_xcorr_arm.s +++ b/libs/libopus/celt/arm/celt_pitch_xcorr_arm.s @@ -153,7 +153,7 @@ xcorr_kernel_neon_process1 ENDP ; opus_val32 celt_pitch_xcorr_neon(opus_val16 *_x, opus_val16 *_y, -; opus_val32 *xcorr, int len, int max_pitch) +; opus_val32 *xcorr, int len, int max_pitch, int arch) celt_pitch_xcorr_neon PROC ; input: ; r0 = opus_val16 *_x @@ -168,6 +168,8 @@ celt_pitch_xcorr_neon PROC ; r6 = int max_pitch ; r12 = int j ; q15 = int maxcorr[4] (q15 is not used by xcorr_kernel_neon()) + ; ignored: + ; int arch STMFD sp!, {r4-r6, lr} LDR r6, [sp, #16] VMOV.S32 q15, #1 @@ -358,6 +360,8 @@ celt_pitch_xcorr_edsp PROC ; r9 = opus_val32 sum3 ; r1 = int max_pitch ; r12 = int j + ; ignored: + ; int arch STMFD sp!, {r4-r11, lr} MOV r5, r1 LDR r1, [sp, #36] diff --git a/libs/libopus/celt/arm/fft_arm.h b/libs/libopus/celt/arm/fft_arm.h index 0cb55d8e2..0b78175f3 100644 --- a/libs/libopus/celt/arm/fft_arm.h +++ b/libs/libopus/celt/arm/fft_arm.h @@ -34,7 +34,6 @@ #if !defined(FFT_ARM_H) #define FFT_ARM_H -#include "config.h" #include "kiss_fft.h" #if defined(HAVE_ARM_NE10) diff --git a/libs/libopus/celt/arm/fixed_armv4.h b/libs/libopus/celt/arm/fixed_armv4.h index efb3b1896..d84888a77 100644 --- a/libs/libopus/celt/arm/fixed_armv4.h +++ b/libs/libopus/celt/arm/fixed_armv4.h @@ -37,7 +37,7 @@ static OPUS_INLINE opus_val32 MULT16_32_Q16_armv4(opus_val16 a, opus_val32 b) "#MULT16_32_Q16\n\t" "smull %0, %1, %2, %3\n\t" : "=&r"(rd_lo), "=&r"(rd_hi) - : "%r"(b),"r"(a<<16) + : "%r"(b),"r"(SHL32(a,16)) ); return rd_hi; } @@ -54,10 +54,10 @@ static OPUS_INLINE opus_val32 MULT16_32_Q15_armv4(opus_val16 a, opus_val32 b) "#MULT16_32_Q15\n\t" "smull %0, %1, %2, %3\n\t" : "=&r"(rd_lo), "=&r"(rd_hi) - : "%r"(b), "r"(a<<16) + : "%r"(b), "r"(SHL32(a,16)) ); /*We intentionally don't OR in the high bit of rd_lo for speed.*/ - return rd_hi<<1; + return SHL32(rd_hi,1); } #define MULT16_32_Q15(a, b) (MULT16_32_Q15_armv4(a, b)) diff --git a/libs/libopus/celt/arm/fixed_armv5e.h b/libs/libopus/celt/arm/fixed_armv5e.h index 36a632110..6bf73cbac 100644 --- a/libs/libopus/celt/arm/fixed_armv5e.h +++ b/libs/libopus/celt/arm/fixed_armv5e.h @@ -59,7 +59,7 @@ static OPUS_INLINE opus_val32 MULT16_32_Q15_armv5e(opus_val16 a, opus_val32 b) : "=r"(res) : "r"(b), "r"(a) ); - return res<<1; + return SHL32(res,1); } #define MULT16_32_Q15(a, b) (MULT16_32_Q15_armv5e(a, b)) @@ -76,7 +76,7 @@ static OPUS_INLINE opus_val32 
MAC16_32_Q15_armv5e(opus_val32 c, opus_val16 a, "#MAC16_32_Q15\n\t" "smlawb %0, %1, %2, %3;\n" : "=r"(res) - : "r"(b<<1), "r"(a), "r"(c) + : "r"(SHL32(b,1)), "r"(a), "r"(c) ); return res; } diff --git a/libs/libopus/celt/arm/mdct_arm.h b/libs/libopus/celt/arm/mdct_arm.h index 49cbb4457..14200bac4 100644 --- a/libs/libopus/celt/arm/mdct_arm.h +++ b/libs/libopus/celt/arm/mdct_arm.h @@ -33,7 +33,6 @@ #if !defined(MDCT_ARM_H) #define MDCT_ARM_H -#include "config.h" #include "mdct.h" #if defined(HAVE_ARM_NE10) diff --git a/libs/libopus/celt/arm/pitch_arm.h b/libs/libopus/celt/arm/pitch_arm.h index 14331169e..bed8b04ea 100644 --- a/libs/libopus/celt/arm/pitch_arm.h +++ b/libs/libopus/celt/arm/pitch_arm.h @@ -30,11 +30,47 @@ # include "armcpu.h" +# if defined(OPUS_ARM_MAY_HAVE_NEON_INTR) +opus_val32 celt_inner_prod_neon(const opus_val16 *x, const opus_val16 *y, int N); +void dual_inner_prod_neon(const opus_val16 *x, const opus_val16 *y01, + const opus_val16 *y02, int N, opus_val32 *xy1, opus_val32 *xy2); + +# if !defined(OPUS_HAVE_RTCD) && defined(OPUS_ARM_PRESUME_NEON) +# define OVERRIDE_CELT_INNER_PROD (1) +# define OVERRIDE_DUAL_INNER_PROD (1) +# define celt_inner_prod(x, y, N, arch) ((void)(arch), PRESUME_NEON(celt_inner_prod)(x, y, N)) +# define dual_inner_prod(x, y01, y02, N, xy1, xy2, arch) ((void)(arch), PRESUME_NEON(dual_inner_prod)(x, y01, y02, N, xy1, xy2)) +# endif +# endif + +# if !defined(OVERRIDE_CELT_INNER_PROD) +# if defined(OPUS_HAVE_RTCD) && (defined(OPUS_ARM_MAY_HAVE_NEON_INTR) && !defined(OPUS_ARM_PRESUME_NEON_INTR)) +extern opus_val32 (*const CELT_INNER_PROD_IMPL[OPUS_ARCHMASK+1])(const opus_val16 *x, const opus_val16 *y, int N); +# define OVERRIDE_CELT_INNER_PROD (1) +# define celt_inner_prod(x, y, N, arch) ((*CELT_INNER_PROD_IMPL[(arch)&OPUS_ARCHMASK])(x, y, N)) +# elif defined(OPUS_ARM_PRESUME_NEON_INTR) +# define OVERRIDE_CELT_INNER_PROD (1) +# define celt_inner_prod(x, y, N, arch) ((void)(arch), celt_inner_prod_neon(x, y, N)) +# endif +# endif + +# if !defined(OVERRIDE_DUAL_INNER_PROD) +# if defined(OPUS_HAVE_RTCD) && (defined(OPUS_ARM_MAY_HAVE_NEON_INTR) && !defined(OPUS_ARM_PRESUME_NEON_INTR)) +extern void (*const DUAL_INNER_PROD_IMPL[OPUS_ARCHMASK+1])(const opus_val16 *x, + const opus_val16 *y01, const opus_val16 *y02, int N, opus_val32 *xy1, opus_val32 *xy2); +# define OVERRIDE_DUAL_INNER_PROD (1) +# define dual_inner_prod(x, y01, y02, N, xy1, xy2, arch) ((*DUAL_INNER_PROD_IMPL[(arch)&OPUS_ARCHMASK])(x, y01, y02, N, xy1, xy2)) +# elif defined(OPUS_ARM_PRESUME_NEON_INTR) +# define OVERRIDE_DUAL_INNER_PROD (1) +# define dual_inner_prod(x, y01, y02, N, xy1, xy2, arch) ((void)(arch), dual_inner_prod_neon(x, y01, y02, N, xy1, xy2)) +# endif +# endif + # if defined(FIXED_POINT) # if defined(OPUS_ARM_MAY_HAVE_NEON) opus_val32 celt_pitch_xcorr_neon(const opus_val16 *_x, const opus_val16 *_y, - opus_val32 *xcorr, int len, int max_pitch); + opus_val32 *xcorr, int len, int max_pitch, int arch); # endif # if defined(OPUS_ARM_MAY_HAVE_MEDIA) @@ -43,7 +79,7 @@ opus_val32 celt_pitch_xcorr_neon(const opus_val16 *_x, const opus_val16 *_y, # if defined(OPUS_ARM_MAY_HAVE_EDSP) opus_val32 celt_pitch_xcorr_edsp(const opus_val16 *_x, const opus_val16 *_y, - opus_val32 *xcorr, int len, int max_pitch); + opus_val32 *xcorr, int len, int max_pitch, int arch); # endif # if defined(OPUS_HAVE_RTCD) && \ @@ -52,18 +88,17 @@ opus_val32 celt_pitch_xcorr_edsp(const opus_val16 *_x, const opus_val16 *_y, (defined(OPUS_ARM_MAY_HAVE_EDSP) && !defined(OPUS_ARM_PRESUME_EDSP))) extern opus_val32 (*const 
CELT_PITCH_XCORR_IMPL[OPUS_ARCHMASK+1])(const opus_val16 *, - const opus_val16 *, opus_val32 *, int, int); + const opus_val16 *, opus_val32 *, int, int, int); # define OVERRIDE_PITCH_XCORR (1) # define celt_pitch_xcorr(_x, _y, xcorr, len, max_pitch, arch) \ ((*CELT_PITCH_XCORR_IMPL[(arch)&OPUS_ARCHMASK])(_x, _y, \ - xcorr, len, max_pitch)) + xcorr, len, max_pitch, arch)) # elif defined(OPUS_ARM_PRESUME_EDSP) || \ defined(OPUS_ARM_PRESUME_MEDIA) || \ defined(OPUS_ARM_PRESUME_NEON) # define OVERRIDE_PITCH_XCORR (1) -# define celt_pitch_xcorr(_x, _y, xcorr, len, max_pitch, arch) \ - ((void)(arch),PRESUME_NEON(celt_pitch_xcorr)(_x, _y, xcorr, len, max_pitch)) +# define celt_pitch_xcorr (PRESUME_NEON(celt_pitch_xcorr)) # endif @@ -99,25 +134,24 @@ extern void (*const XCORR_KERNEL_IMPL[OPUS_ARCHMASK + 1])( /* Float case */ #if defined(OPUS_ARM_MAY_HAVE_NEON_INTR) void celt_pitch_xcorr_float_neon(const opus_val16 *_x, const opus_val16 *_y, - opus_val32 *xcorr, int len, int max_pitch); + opus_val32 *xcorr, int len, int max_pitch, int arch); #endif # if defined(OPUS_HAVE_RTCD) && \ (defined(OPUS_ARM_MAY_HAVE_NEON_INTR) && !defined(OPUS_ARM_PRESUME_NEON_INTR)) extern void (*const CELT_PITCH_XCORR_IMPL[OPUS_ARCHMASK+1])(const opus_val16 *, - const opus_val16 *, opus_val32 *, int, int); + const opus_val16 *, opus_val32 *, int, int, int); # define OVERRIDE_PITCH_XCORR (1) # define celt_pitch_xcorr(_x, _y, xcorr, len, max_pitch, arch) \ ((*CELT_PITCH_XCORR_IMPL[(arch)&OPUS_ARCHMASK])(_x, _y, \ - xcorr, len, max_pitch)) + xcorr, len, max_pitch, arch)) # elif defined(OPUS_ARM_PRESUME_NEON_INTR) # define OVERRIDE_PITCH_XCORR (1) -# define celt_pitch_xcorr(_x, _y, xcorr, len, max_pitch, arch) \ - ((void)(arch),celt_pitch_xcorr_float_neon(_x, _y, xcorr, len, max_pitch)) +# define celt_pitch_xcorr celt_pitch_xcorr_float_neon # endif diff --git a/libs/libopus/celt/arm/pitch_neon_intr.c b/libs/libopus/celt/arm/pitch_neon_intr.c new file mode 100644 index 000000000..1ac38c433 --- /dev/null +++ b/libs/libopus/celt/arm/pitch_neon_intr.c @@ -0,0 +1,290 @@ +/*********************************************************************** +Copyright (c) 2017 Google Inc. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: +- Redistributions of source code must retain the above copyright notice, +this list of conditions and the following disclaimer. +- Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. +- Neither the name of Internet Society, IETF or IETF Trust, nor the +names of specific contributors, may be used to endorse or promote +products derived from this software without specific prior written +permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. +***********************************************************************/ + +#ifdef HAVE_CONFIG_H +#include "config.h" +#endif + +#include +#include "pitch.h" + +#ifdef FIXED_POINT + +opus_val32 celt_inner_prod_neon(const opus_val16 *x, const opus_val16 *y, int N) +{ + int i; + opus_val32 xy; + int16x8_t x_s16x8, y_s16x8; + int32x4_t xy_s32x4 = vdupq_n_s32(0); + int64x2_t xy_s64x2; + int64x1_t xy_s64x1; + + for (i = 0; i < N - 7; i += 8) { + x_s16x8 = vld1q_s16(&x[i]); + y_s16x8 = vld1q_s16(&y[i]); + xy_s32x4 = vmlal_s16(xy_s32x4, vget_low_s16 (x_s16x8), vget_low_s16 (y_s16x8)); + xy_s32x4 = vmlal_s16(xy_s32x4, vget_high_s16(x_s16x8), vget_high_s16(y_s16x8)); + } + + if (N - i >= 4) { + const int16x4_t x_s16x4 = vld1_s16(&x[i]); + const int16x4_t y_s16x4 = vld1_s16(&y[i]); + xy_s32x4 = vmlal_s16(xy_s32x4, x_s16x4, y_s16x4); + i += 4; + } + + xy_s64x2 = vpaddlq_s32(xy_s32x4); + xy_s64x1 = vadd_s64(vget_low_s64(xy_s64x2), vget_high_s64(xy_s64x2)); + xy = vget_lane_s32(vreinterpret_s32_s64(xy_s64x1), 0); + + for (; i < N; i++) { + xy = MAC16_16(xy, x[i], y[i]); + } + +#ifdef OPUS_CHECK_ASM + celt_assert(celt_inner_prod_c(x, y, N) == xy); +#endif + + return xy; +} + +void dual_inner_prod_neon(const opus_val16 *x, const opus_val16 *y01, const opus_val16 *y02, + int N, opus_val32 *xy1, opus_val32 *xy2) +{ + int i; + opus_val32 xy01, xy02; + int16x8_t x_s16x8, y01_s16x8, y02_s16x8; + int32x4_t xy01_s32x4 = vdupq_n_s32(0); + int32x4_t xy02_s32x4 = vdupq_n_s32(0); + int64x2_t xy01_s64x2, xy02_s64x2; + int64x1_t xy01_s64x1, xy02_s64x1; + + for (i = 0; i < N - 7; i += 8) { + x_s16x8 = vld1q_s16(&x[i]); + y01_s16x8 = vld1q_s16(&y01[i]); + y02_s16x8 = vld1q_s16(&y02[i]); + xy01_s32x4 = vmlal_s16(xy01_s32x4, vget_low_s16 (x_s16x8), vget_low_s16 (y01_s16x8)); + xy02_s32x4 = vmlal_s16(xy02_s32x4, vget_low_s16 (x_s16x8), vget_low_s16 (y02_s16x8)); + xy01_s32x4 = vmlal_s16(xy01_s32x4, vget_high_s16(x_s16x8), vget_high_s16(y01_s16x8)); + xy02_s32x4 = vmlal_s16(xy02_s32x4, vget_high_s16(x_s16x8), vget_high_s16(y02_s16x8)); + } + + if (N - i >= 4) { + const int16x4_t x_s16x4 = vld1_s16(&x[i]); + const int16x4_t y01_s16x4 = vld1_s16(&y01[i]); + const int16x4_t y02_s16x4 = vld1_s16(&y02[i]); + xy01_s32x4 = vmlal_s16(xy01_s32x4, x_s16x4, y01_s16x4); + xy02_s32x4 = vmlal_s16(xy02_s32x4, x_s16x4, y02_s16x4); + i += 4; + } + + xy01_s64x2 = vpaddlq_s32(xy01_s32x4); + xy02_s64x2 = vpaddlq_s32(xy02_s32x4); + xy01_s64x1 = vadd_s64(vget_low_s64(xy01_s64x2), vget_high_s64(xy01_s64x2)); + xy02_s64x1 = vadd_s64(vget_low_s64(xy02_s64x2), vget_high_s64(xy02_s64x2)); + xy01 = vget_lane_s32(vreinterpret_s32_s64(xy01_s64x1), 0); + xy02 = vget_lane_s32(vreinterpret_s32_s64(xy02_s64x1), 0); + + for (; i < N; i++) { + xy01 = MAC16_16(xy01, x[i], y01[i]); + xy02 = MAC16_16(xy02, x[i], y02[i]); + } + *xy1 = xy01; + *xy2 = xy02; + +#ifdef OPUS_CHECK_ASM + { + opus_val32 xy1_c, xy2_c; + dual_inner_prod_c(x, y01, y02, N, &xy1_c, &xy2_c); + celt_assert(xy1_c == *xy1); + celt_assert(xy2_c == *xy2); + } +#endif +} + +#else /* 
!FIXED_POINT */ + +/* ========================================================================== */ + +#ifdef OPUS_CHECK_ASM + +/* This part of code simulates floating-point NEON operations. */ + +/* celt_inner_prod_neon_float_c_simulation() simulates the floating-point */ +/* operations of celt_inner_prod_neon(), and both functions should have bit */ +/* exact output. */ +static opus_val32 celt_inner_prod_neon_float_c_simulation(const opus_val16 *x, const opus_val16 *y, int N) +{ + int i; + opus_val32 xy, xy0 = 0, xy1 = 0, xy2 = 0, xy3 = 0; + for (i = 0; i < N - 3; i += 4) { + xy0 = MAC16_16(xy0, x[i + 0], y[i + 0]); + xy1 = MAC16_16(xy1, x[i + 1], y[i + 1]); + xy2 = MAC16_16(xy2, x[i + 2], y[i + 2]); + xy3 = MAC16_16(xy3, x[i + 3], y[i + 3]); + } + xy0 += xy2; + xy1 += xy3; + xy = xy0 + xy1; + for (; i < N; i++) { + xy = MAC16_16(xy, x[i], y[i]); + } + return xy; +} + +/* dual_inner_prod_neon_float_c_simulation() simulates the floating-point */ +/* operations of dual_inner_prod_neon(), and both functions should have bit */ +/* exact output. */ +static void dual_inner_prod_neon_float_c_simulation(const opus_val16 *x, const opus_val16 *y01, const opus_val16 *y02, + int N, opus_val32 *xy1, opus_val32 *xy2) +{ + int i; + opus_val32 xy01, xy02, xy01_0 = 0, xy01_1 = 0, xy01_2 = 0, xy01_3 = 0, xy02_0 = 0, xy02_1 = 0, xy02_2 = 0, xy02_3 = 0; + for (i = 0; i < N - 3; i += 4) { + xy01_0 = MAC16_16(xy01_0, x[i + 0], y01[i + 0]); + xy01_1 = MAC16_16(xy01_1, x[i + 1], y01[i + 1]); + xy01_2 = MAC16_16(xy01_2, x[i + 2], y01[i + 2]); + xy01_3 = MAC16_16(xy01_3, x[i + 3], y01[i + 3]); + xy02_0 = MAC16_16(xy02_0, x[i + 0], y02[i + 0]); + xy02_1 = MAC16_16(xy02_1, x[i + 1], y02[i + 1]); + xy02_2 = MAC16_16(xy02_2, x[i + 2], y02[i + 2]); + xy02_3 = MAC16_16(xy02_3, x[i + 3], y02[i + 3]); + } + xy01_0 += xy01_2; + xy02_0 += xy02_2; + xy01_1 += xy01_3; + xy02_1 += xy02_3; + xy01 = xy01_0 + xy01_1; + xy02 = xy02_0 + xy02_1; + for (; i < N; i++) { + xy01 = MAC16_16(xy01, x[i], y01[i]); + xy02 = MAC16_16(xy02, x[i], y02[i]); + } + *xy1 = xy01; + *xy2 = xy02; +} + +#endif /* OPUS_CHECK_ASM */ + +/* ========================================================================== */ + +opus_val32 celt_inner_prod_neon(const opus_val16 *x, const opus_val16 *y, int N) +{ + int i; + opus_val32 xy; + float32x4_t xy_f32x4 = vdupq_n_f32(0); + float32x2_t xy_f32x2; + + for (i = 0; i < N - 7; i += 8) { + float32x4_t x_f32x4, y_f32x4; + x_f32x4 = vld1q_f32(&x[i]); + y_f32x4 = vld1q_f32(&y[i]); + xy_f32x4 = vmlaq_f32(xy_f32x4, x_f32x4, y_f32x4); + x_f32x4 = vld1q_f32(&x[i + 4]); + y_f32x4 = vld1q_f32(&y[i + 4]); + xy_f32x4 = vmlaq_f32(xy_f32x4, x_f32x4, y_f32x4); + } + + if (N - i >= 4) { + const float32x4_t x_f32x4 = vld1q_f32(&x[i]); + const float32x4_t y_f32x4 = vld1q_f32(&y[i]); + xy_f32x4 = vmlaq_f32(xy_f32x4, x_f32x4, y_f32x4); + i += 4; + } + + xy_f32x2 = vadd_f32(vget_low_f32(xy_f32x4), vget_high_f32(xy_f32x4)); + xy_f32x2 = vpadd_f32(xy_f32x2, xy_f32x2); + xy = vget_lane_f32(xy_f32x2, 0); + + for (; i < N; i++) { + xy = MAC16_16(xy, x[i], y[i]); + } + +#ifdef OPUS_CHECK_ASM + celt_assert(ABS32(celt_inner_prod_neon_float_c_simulation(x, y, N) - xy) <= VERY_SMALL); +#endif + + return xy; +} + +void dual_inner_prod_neon(const opus_val16 *x, const opus_val16 *y01, const opus_val16 *y02, + int N, opus_val32 *xy1, opus_val32 *xy2) +{ + int i; + opus_val32 xy01, xy02; + float32x4_t xy01_f32x4 = vdupq_n_f32(0); + float32x4_t xy02_f32x4 = vdupq_n_f32(0); + float32x2_t xy01_f32x2, xy02_f32x2; + + for (i = 0; i < N - 7; i += 8) { + 
float32x4_t x_f32x4, y01_f32x4, y02_f32x4; + x_f32x4 = vld1q_f32(&x[i]); + y01_f32x4 = vld1q_f32(&y01[i]); + y02_f32x4 = vld1q_f32(&y02[i]); + xy01_f32x4 = vmlaq_f32(xy01_f32x4, x_f32x4, y01_f32x4); + xy02_f32x4 = vmlaq_f32(xy02_f32x4, x_f32x4, y02_f32x4); + x_f32x4 = vld1q_f32(&x[i + 4]); + y01_f32x4 = vld1q_f32(&y01[i + 4]); + y02_f32x4 = vld1q_f32(&y02[i + 4]); + xy01_f32x4 = vmlaq_f32(xy01_f32x4, x_f32x4, y01_f32x4); + xy02_f32x4 = vmlaq_f32(xy02_f32x4, x_f32x4, y02_f32x4); + } + + if (N - i >= 4) { + const float32x4_t x_f32x4 = vld1q_f32(&x[i]); + const float32x4_t y01_f32x4 = vld1q_f32(&y01[i]); + const float32x4_t y02_f32x4 = vld1q_f32(&y02[i]); + xy01_f32x4 = vmlaq_f32(xy01_f32x4, x_f32x4, y01_f32x4); + xy02_f32x4 = vmlaq_f32(xy02_f32x4, x_f32x4, y02_f32x4); + i += 4; + } + + xy01_f32x2 = vadd_f32(vget_low_f32(xy01_f32x4), vget_high_f32(xy01_f32x4)); + xy02_f32x2 = vadd_f32(vget_low_f32(xy02_f32x4), vget_high_f32(xy02_f32x4)); + xy01_f32x2 = vpadd_f32(xy01_f32x2, xy01_f32x2); + xy02_f32x2 = vpadd_f32(xy02_f32x2, xy02_f32x2); + xy01 = vget_lane_f32(xy01_f32x2, 0); + xy02 = vget_lane_f32(xy02_f32x2, 0); + + for (; i < N; i++) { + xy01 = MAC16_16(xy01, x[i], y01[i]); + xy02 = MAC16_16(xy02, x[i], y02[i]); + } + *xy1 = xy01; + *xy2 = xy02; + +#ifdef OPUS_CHECK_ASM + { + opus_val32 xy1_c, xy2_c; + dual_inner_prod_neon_float_c_simulation(x, y01, y02, N, &xy1_c, &xy2_c); + celt_assert(ABS32(xy1_c - *xy1) <= VERY_SMALL); + celt_assert(ABS32(xy2_c - *xy2) <= VERY_SMALL); + } +#endif +} + +#endif /* FIXED_POINT */ diff --git a/libs/libopus/celt/bands.c b/libs/libopus/celt/bands.c index 87eaa6c03..bd54036af 100644 --- a/libs/libopus/celt/bands.c +++ b/libs/libopus/celt/bands.c @@ -65,19 +65,19 @@ opus_uint32 celt_lcg_rand(opus_uint32 seed) /* This is a cos() approximation designed to be bit-exact on any platform. 
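Both the fixed-point and float kernels in pitch_neon_intr.c above share one shape: an 8-wide multiply-accumulate loop into a 128-bit accumulator, an optional 4-wide cleanup step, a horizontal reduction, a scalar tail, and an OPUS_CHECK_ASM cross-check against the C path. The standalone restatement below shows the fixed-point inner product with plain int16_t/int32_t standing in for opus_val16/opus_val32; it is a sketch of the pattern for a NEON-capable ARM build, not the library routine itself.

#include <arm_neon.h>
#include <stdint.h>

/* Fixed-point inner product: 8 lanes of 16x16->32 MAC, then reduce. */
static int32_t inner_prod_s16_neon_sketch(const int16_t *x, const int16_t *y, int n)
{
   int i;
   int32x4_t acc = vdupq_n_s32(0);
   for (i = 0; i + 8 <= n; i += 8) {
      int16x8_t vx = vld1q_s16(&x[i]);
      int16x8_t vy = vld1q_s16(&y[i]);
      acc = vmlal_s16(acc, vget_low_s16(vx),  vget_low_s16(vy));
      acc = vmlal_s16(acc, vget_high_s16(vx), vget_high_s16(vy));
   }
   /* Horizontal reduction: widen to 64-bit lanes, then fold the two halves. */
   int64x2_t acc64 = vpaddlq_s32(acc);
   int64x1_t sum64 = vadd_s64(vget_low_s64(acc64), vget_high_s64(acc64));
   int32_t xy = vget_lane_s32(vreinterpret_s32_s64(sum64), 0);
   /* Scalar tail for the last 0..7 samples. */
   for (; i < n; i++)
      xy += (int32_t)x[i] * y[i];
   return xy;
}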
Bit exactness with this approximation is important because it has an impact on the bit allocation */ -static opus_int16 bitexact_cos(opus_int16 x) +opus_int16 bitexact_cos(opus_int16 x) { opus_int32 tmp; opus_int16 x2; tmp = (4096+((opus_int32)(x)*(x)))>>13; - celt_assert(tmp<=32767); + celt_sig_assert(tmp<=32767); x2 = tmp; x2 = (32767-x2) + FRAC_MUL16(x2, (-7651 + FRAC_MUL16(x2, (8277 + FRAC_MUL16(-626, x2))))); - celt_assert(x2<=32766); + celt_sig_assert(x2<=32766); return 1+x2; } -static int bitexact_log2tan(int isin,int icos) +int bitexact_log2tan(int isin,int icos) { int lc; int ls; @@ -92,10 +92,11 @@ static int bitexact_log2tan(int isin,int icos) #ifdef FIXED_POINT /* Compute the amplitude (sqrt energy) in each of the bands */ -void compute_band_energies(const CELTMode *m, const celt_sig *X, celt_ener *bandE, int end, int C, int LM) +void compute_band_energies(const CELTMode *m, const celt_sig *X, celt_ener *bandE, int end, int C, int LM, int arch) { int i, c, N; const opus_int16 *eBands = m->eBands; + (void)arch; N = m->shortMdctSize<eBands; @@ -164,7 +165,7 @@ void compute_band_energies(const CELTMode *m, const celt_sig *X, celt_ener *band for (i=0;inbEBands] = celt_sqrt(sum); /*printf ("%f ", bandE[i+c*m->nbEBands]);*/ } @@ -224,9 +225,9 @@ void denormalise_bands(const CELTMode *m, const celt_norm * OPUS_RESTRICT X, #endif j=M*eBands[i]; band_end = M*eBands[i+1]; - lg = ADD16(bandLogE[i], SHL16((opus_val16)eMeans[i],6)); + lg = SATURATE16(ADD32(bandLogE[i], SHL32((opus_val32)eMeans[i],6))); #ifndef FIXED_POINT - g = celt_exp2(lg); + g = celt_exp2(MIN32(32.f, lg)); #else /* Handle the integer part of the log energy */ shift = 16-(lg>>DB_SHIFT); @@ -241,12 +242,12 @@ void denormalise_bands(const CELTMode *m, const celt_norm * OPUS_RESTRICT X, /* Handle extreme gains with negative shift. */ if (shift<0) { - /* For shift < -2 we'd be likely to overflow, so we're capping - the gain here. This shouldn't happen unless the bitstream is - already corrupted. */ - if (shift < -2) + /* For shift <= -2 and g > 16384 we'd be likely to overflow, so we're + capping the gain here, which is equivalent to a cap of 18 on lg. + This shouldn't trigger unless the bitstream is already corrupted. */ + if (shift <= -2) { - g = 32767; + g = 16384; shift = -2; } do { @@ -281,7 +282,7 @@ void anti_collapse(const CELTMode *m, celt_norm *X_, unsigned char *collapse_mas N0 = m->eBands[i+1]-m->eBands[i]; /* depth in 1/8 bits */ - celt_assert(pulses[i]>=0); + celt_sig_assert(pulses[i]>=0); depth = celt_udiv(1+pulses[i], (m->eBands[i+1]-m->eBands[i]))>>LM; #ifdef FIXED_POINT @@ -360,6 +361,30 @@ void anti_collapse(const CELTMode *m, celt_norm *X_, unsigned char *collapse_mas } } +/* Compute the weights to use for optimizing normalized distortion across + channels. We use the amplitude to weight square distortion, which means + that we use the square root of the value we would have been using if we + wanted to minimize the MSE in the non-normalized domain. This roughly + corresponds to some quick-and-dirty perceptual experiments I ran to + measure inter-aural masking (there doesn't seem to be any published data + on the topic). */ +static void compute_channel_weights(celt_ener Ex, celt_ener Ey, opus_val16 w[2]) +{ + celt_ener minE; +#ifdef FIXED_POINT + int shift; +#endif + minE = MIN32(Ex, Ey); + /* Adjustment to make the weights a bit more conservative. 
*/ + Ex = ADD32(Ex, minE/3); + Ey = ADD32(Ey, minE/3); +#ifdef FIXED_POINT + shift = celt_ilog2(EPSILON+MAX32(Ex, Ey))-14; +#endif + w[0] = VSHR32(Ex, shift); + w[1] = VSHR32(Ey, shift); +} + static void intensity_stereo(const CELTMode *m, celt_norm * OPUS_RESTRICT X, const celt_norm * OPUS_RESTRICT Y, const celt_ener *bandE, int bandID, int N) { int i = bandID; @@ -453,7 +478,7 @@ static void stereo_merge(celt_norm * OPUS_RESTRICT X, celt_norm * OPUS_RESTRICT /* Decide whether we should spread the pulses in the current frame */ int spreading_decision(const CELTMode *m, const celt_norm *X, int *average, int last_decision, int *hf_average, int *tapset_decision, int update_hf, - int end, int C, int M) + int end, int C, int M, const int *spread_weight) { int i, c, N0; int sum = 0, nbBands=0; @@ -494,8 +519,8 @@ int spreading_decision(const CELTMode *m, const celt_norm *X, int *average, if (i>m->nbEBands-4) hf_sum += celt_udiv(32*(tcount[1]+tcount[0]), N); tmp = (2*tcount[2] >= N) + (2*tcount[1] >= N) + (2*tcount[0] >= N); - sum += tmp*256; - nbBands++; + sum += tmp*spread_weight[i]; + nbBands+=spread_weight[i]; } } while (++c0); /* end has to be non-zero */ celt_assert(sum>=0); - sum = celt_udiv(sum, nbBands); + sum = celt_udiv((opus_int32)sum<<8, nbBands); /* Recursive averaging */ sum = (sum+*average)>>1; *average = sum; @@ -647,6 +672,7 @@ static int compute_qn(int N, int b, int offset, int pulse_cap, int stereo) struct band_ctx { int encode; + int resynth; const CELTMode *m; int i; int intensity; @@ -657,6 +683,9 @@ struct band_ctx { const celt_ener *bandE; opus_uint32 seed; int arch; + int theta_round; + int disable_inv; + int avoid_split_noise; }; struct split_ctx { @@ -714,8 +743,35 @@ static void compute_theta(struct band_ctx *ctx, struct split_ctx *sctx, if (qn!=1) { if (encode) - itheta = (itheta*(opus_int32)qn+8192)>>14; - + { + if (!stereo || ctx->theta_round == 0) + { + itheta = (itheta*(opus_int32)qn+8192)>>14; + if (!stereo && ctx->avoid_split_noise && itheta > 0 && itheta < qn) + { + /* Check if the selected value of theta will cause the bit allocation + to inject noise on one side. If so, make sure the energy of that side + is zero. */ + int unquantized = celt_udiv((opus_int32)itheta*16384, qn); + imid = bitexact_cos((opus_int16)unquantized); + iside = bitexact_cos((opus_int16)(16384-unquantized)); + delta = FRAC_MUL16((N-1)<<7,bitexact_log2tan(iside,imid)); + if (delta > *b) + itheta = qn; + else if (delta < -*b) + itheta = 0; + } + } else { + int down; + /* Bias quantization towards itheta=0 and itheta=16384. */ + int bias = itheta > 8192 ? 32767/qn : -32767/qn; + down = IMIN(qn-1, IMAX(0, (itheta*(opus_int32)qn + bias)>>14)); + if (ctx->theta_round < 0) + itheta = down; + else + itheta = down+1; + } + } /* Entropy coding of the angle. We use a uniform pdf for the time split, a step for stereo, and a triangular one for the rest. */ if (stereo && N>2) @@ -793,7 +849,7 @@ static void compute_theta(struct band_ctx *ctx, struct split_ctx *sctx, } else if (stereo) { if (encode) { - inv = itheta > 8192; + inv = itheta > 8192 && !ctx->disable_inv; if (inv) { int j; @@ -810,6 +866,9 @@ static void compute_theta(struct band_ctx *ctx, struct split_ctx *sctx, inv = ec_dec_bit_logp(ec, 2); } else inv = 0; + /* inv flag override to avoid problems with downmixing. 
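compute_channel_weights() above produces the per-channel weights later used by the stereo theta RDO search: both energies are pulled toward the quieter channel by adding a third of the smaller one (the "conservative" adjustment in the comment), and the fixed-point build then shifts both by a common power of two so they fit a 16-bit weight. A float-only sketch of the same computation, with that normalization step omitted:

/* Float sketch of compute_channel_weights(); the fixed-point
   celt_ilog2()/VSHR32 normalization is left out. */
static void channel_weights_sketch(float Ex, float Ey, float w[2])
{
   float minE = Ex < Ey ? Ex : Ey;
   /* Pull both weights toward the quieter channel, as in the hunk above. */
   w[0] = Ex + minE/3;
   w[1] = Ey + minE/3;
}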
*/ + if (ctx->disable_inv) + inv = 0; itheta = 0; } qalloc = ec_tell_frac(ec) - tell; @@ -842,14 +901,9 @@ static void compute_theta(struct band_ctx *ctx, struct split_ctx *sctx, sctx->itheta = itheta; sctx->qalloc = qalloc; } -static unsigned quant_band_n1(struct band_ctx *ctx, celt_norm *X, celt_norm *Y, int b, +static unsigned quant_band_n1(struct band_ctx *ctx, celt_norm *X, celt_norm *Y, celt_norm *lowband_out) { -#ifdef RESYNTH - int resynth = 1; -#else - int resynth = !ctx->encode; -#endif int c; int stereo; celt_norm *x = X; @@ -872,9 +926,8 @@ static unsigned quant_band_n1(struct band_ctx *ctx, celt_norm *X, celt_norm *Y, sign = ec_dec_bits(ec, 1); } ctx->remaining_bits -= 1<resynth) x[0] = sign ? -NORM_SCALING : NORM_SCALING; x = Y; } while (++c<1+stereo); @@ -899,11 +952,6 @@ static unsigned quant_partition(struct band_ctx *ctx, celt_norm *X, int B0=B; opus_val16 mid=0, side=0; unsigned cm=0; -#ifdef RESYNTH - int resynth = 1; -#else - int resynth = !ctx->encode; -#endif celt_norm *Y=NULL; int encode; const CELTMode *m; @@ -935,8 +983,7 @@ static unsigned quant_partition(struct band_ctx *ctx, celt_norm *X, fill = (fill&1)|(fill<<1); B = (B+1)>>1; - compute_theta(ctx, &sctx, X, Y, N, &b, B, B0, - LM, 0, &fill); + compute_theta(ctx, &sctx, X, Y, N, &b, B, B0, LM, 0, &fill); imid = sctx.imid; iside = sctx.iside; delta = sctx.delta; @@ -970,24 +1017,20 @@ static unsigned quant_partition(struct band_ctx *ctx, celt_norm *X, rebalance = ctx->remaining_bits; if (mbits >= sbits) { - cm = quant_partition(ctx, X, N, mbits, B, - lowband, LM, + cm = quant_partition(ctx, X, N, mbits, B, lowband, LM, MULT16_16_P15(gain,mid), fill); rebalance = mbits - (rebalance-ctx->remaining_bits); if (rebalance > 3<>B)<<(B0>>1); } else { - cm = quant_partition(ctx, Y, N, sbits, B, - next_lowband2, LM, + cm = quant_partition(ctx, Y, N, sbits, B, next_lowband2, LM, MULT16_16_P15(gain,side), fill>>B)<<(B0>>1); rebalance = sbits - (rebalance-ctx->remaining_bits); if (rebalance > 3<resynth, ctx->arch); } else { cm = alg_unquant(X, N, K, spread, B, ec, gain); } } else { /* If there's no pulse, fill the band anyway */ int j; - if (resynth) + if (ctx->resynth) { unsigned cm_mask; /* B can be as large as 16, so this shift might overflow an int on a @@ -1080,11 +1119,6 @@ static unsigned quant_band(struct band_ctx *ctx, celt_norm *X, int recombine=0; int longBlocks; unsigned cm=0; -#ifdef RESYNTH - int resynth = 1; -#else - int resynth = !ctx->encode; -#endif int k; int encode; int tf_change; @@ -1099,7 +1133,7 @@ static unsigned quant_band(struct band_ctx *ctx, celt_norm *X, /* Special case for one sample */ if (N==1) { - return quant_band_n1(ctx, X, NULL, b, lowband_out); + return quant_band_n1(ctx, X, NULL, lowband_out); } if (tf_change>0) @@ -1151,11 +1185,10 @@ static unsigned quant_band(struct band_ctx *ctx, celt_norm *X, deinterleave_hadamard(lowband, N_B>>recombine, B0<resynth) { /* Undo the sample reorganization going from time order to frequency order */ if (B0>1) @@ -1208,11 +1241,6 @@ static unsigned quant_band_stereo(struct band_ctx *ctx, celt_norm *X, celt_norm int inv = 0; opus_val16 mid=0, side=0; unsigned cm=0; -#ifdef RESYNTH - int resynth = 1; -#else - int resynth = !ctx->encode; -#endif int mbits, sbits, delta; int itheta; int qalloc; @@ -1227,13 +1255,12 @@ static unsigned quant_band_stereo(struct band_ctx *ctx, celt_norm *X, celt_norm /* Special case for one sample */ if (N==1) { - return quant_band_n1(ctx, X, Y, b, lowband_out); + return quant_band_n1(ctx, X, Y, lowband_out); } orig_fill = 
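For the stereo RDO passes, compute_theta() above no longer rounds itheta to the nearest of the qn steps: it biases the index toward 0 or 16384 and then emits either the rounded-down (theta_round < 0) or rounded-up (theta_round > 0) candidate, keeping plain nearest rounding for the mono and non-RDO cases. The rounding step in isolation, assuming the same Q14 angle and step count as in the hunk; quant_all_bands() later encodes the band once with each candidate and keeps the better one, as shown in the RDO hunk further down.

#include <stdint.h>

/* Biased theta rounding from compute_theta(): round_dir < 0 returns the
   rounded-down candidate, round_dir > 0 the rounded-up one. */
static int quantize_theta_biased(int itheta, int qn, int round_dir)
{
   /* Bias toward itheta == 0 or itheta == 16384 so the two candidates
      straddle the unbiased value. */
   int bias = itheta > 8192 ? 32767/qn : -32767/qn;
   int down = (int)(((int32_t)itheta*qn + bias) >> 14);
   if (down < 0)    down = 0;
   if (down > qn-1) down = qn-1;
   return round_dir < 0 ? down : down + 1;
}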
fill; - compute_theta(ctx, &sctx, X, Y, N, &b, B, B, - LM, 1, &fill); + compute_theta(ctx, &sctx, X, Y, N, &b, B, B, LM, 1, &fill); inv = sctx.inv; imid = sctx.imid; iside = sctx.iside; @@ -1281,13 +1308,13 @@ static unsigned quant_band_stereo(struct band_ctx *ctx, celt_norm *X, celt_norm sign = 1-2*sign; /* We use orig_fill here because we want to fold the side, but if itheta==16384, we'll have cleared the low bits of fill. */ - cm = quant_band(ctx, x2, N, mbits, B, lowband, - LM, lowband_out, Q15ONE, lowband_scratch, orig_fill); + cm = quant_band(ctx, x2, N, mbits, B, lowband, LM, lowband_out, Q15ONE, + lowband_scratch, orig_fill); /* We don't split N=2 bands, so cm is either 1 or 0 (for a fold-collapse), and there's no need to worry about mixing with the other channel. */ y2[0] = -sign*x2[1]; y2[1] = sign*x2[0]; - if (resynth) + if (ctx->resynth) { celt_norm tmp; X[0] = MULT16_16_Q15(mid, X[0]); @@ -1314,38 +1341,32 @@ static unsigned quant_band_stereo(struct band_ctx *ctx, celt_norm *X, celt_norm { /* In stereo mode, we do not apply a scaling to the mid because we need the normalized mid for folding later. */ - cm = quant_band(ctx, X, N, mbits, B, - lowband, LM, lowband_out, - Q15ONE, lowband_scratch, fill); + cm = quant_band(ctx, X, N, mbits, B, lowband, LM, lowband_out, Q15ONE, + lowband_scratch, fill); rebalance = mbits - (rebalance-ctx->remaining_bits); if (rebalance > 3<>B); + cm |= quant_band(ctx, Y, N, sbits, B, NULL, LM, NULL, side, NULL, fill>>B); } else { /* For a stereo split, the high bits of fill are always zero, so no folding will be done to the side. */ - cm = quant_band(ctx, Y, N, sbits, B, - NULL, LM, NULL, - side, NULL, fill>>B); + cm = quant_band(ctx, Y, N, sbits, B, NULL, LM, NULL, side, NULL, fill>>B); rebalance = sbits - (rebalance-ctx->remaining_bits); if (rebalance > 3<resynth) { if (N!=2) stereo_merge(X, Y, mid, N, ctx->arch); @@ -1359,19 +1380,38 @@ static unsigned quant_band_stereo(struct band_ctx *ctx, celt_norm *X, celt_norm return cm; } +static void special_hybrid_folding(const CELTMode *m, celt_norm *norm, celt_norm *norm2, int start, int M, int dual_stereo) +{ + int n1, n2; + const opus_int16 * OPUS_RESTRICT eBands = m->eBands; + n1 = M*(eBands[start+1]-eBands[start]); + n2 = M*(eBands[start+2]-eBands[start+1]); + /* Duplicate enough of the first band folding data to be able to fold the second band. + Copies no data for CELT-only mode. */ + OPUS_COPY(&norm[n1], &norm[2*n1 - n2], n2-n1); + if (dual_stereo) + OPUS_COPY(&norm2[n1], &norm2[2*n1 - n2], n2-n1); +} void quant_all_bands(int encode, const CELTMode *m, int start, int end, celt_norm *X_, celt_norm *Y_, unsigned char *collapse_masks, const celt_ener *bandE, int *pulses, int shortBlocks, int spread, int dual_stereo, int intensity, int *tf_res, opus_int32 total_bits, opus_int32 balance, ec_ctx *ec, int LM, int codedBands, - opus_uint32 *seed, int arch) + opus_uint32 *seed, int complexity, int arch, int disable_inv) { int i; opus_int32 remaining_bits; const opus_int16 * OPUS_RESTRICT eBands = m->eBands; celt_norm * OPUS_RESTRICT norm, * OPUS_RESTRICT norm2; VARDECL(celt_norm, _norm); + VARDECL(celt_norm, _lowband_scratch); + VARDECL(celt_norm, X_save); + VARDECL(celt_norm, Y_save); + VARDECL(celt_norm, X_save2); + VARDECL(celt_norm, Y_save2); + VARDECL(celt_norm, norm_save2); + int resynth_alloc; celt_norm *lowband_scratch; int B; int M; @@ -1379,10 +1419,11 @@ void quant_all_bands(int encode, const CELTMode *m, int start, int end, int update_lowband = 1; int C = Y_ != NULL ? 
2 : 1; int norm_offset; + int theta_rdo = encode && Y_!=NULL && !dual_stereo && complexity>=8; #ifdef RESYNTH int resynth = 1; #else - int resynth = !encode; + int resynth = !encode || theta_rdo; #endif struct band_ctx ctx; SAVE_STACK; @@ -1395,9 +1436,24 @@ void quant_all_bands(int encode, const CELTMode *m, int start, int end, ALLOC(_norm, C*(M*eBands[m->nbEBands-1]-norm_offset), celt_norm); norm = _norm; norm2 = norm + M*eBands[m->nbEBands-1]-norm_offset; - /* We can use the last band as scratch space because we don't need that - scratch space for the last band. */ - lowband_scratch = X_+M*eBands[m->nbEBands-1]; + + /* For decoding, we can use the last band as scratch space because we don't need that + scratch space for the last band and we don't care about the data there until we're + decoding the last band. */ + if (encode && resynth) + resynth_alloc = M*(eBands[m->nbEBands]-eBands[m->nbEBands-1]); + else + resynth_alloc = ALLOC_NONE; + ALLOC(_lowband_scratch, resynth_alloc, celt_norm); + if (encode && resynth) + lowband_scratch = _lowband_scratch; + else + lowband_scratch = X_+M*eBands[m->nbEBands-1]; + ALLOC(X_save, resynth_alloc, celt_norm); + ALLOC(Y_save, resynth_alloc, celt_norm); + ALLOC(X_save2, resynth_alloc, celt_norm); + ALLOC(Y_save2, resynth_alloc, celt_norm); + ALLOC(norm_save2, resynth_alloc, celt_norm); lowband_offset = 0; ctx.bandE = bandE; @@ -1408,6 +1464,11 @@ void quant_all_bands(int encode, const CELTMode *m, int start, int end, ctx.seed = *seed; ctx.spread = spread; ctx.arch = arch; + ctx.disable_inv = disable_inv; + ctx.resynth = resynth; + ctx.theta_round = 0; + /* Avoid injecting noise in the first band on transients. */ + ctx.avoid_split_noise = B > 1; for (i=start;i 0); tell = ec_tell_frac(ec); /* Compute how many bits we want to allocate to this band */ @@ -1445,8 +1507,15 @@ void quant_all_bands(int encode, const CELTMode *m, int start, int end, b = 0; } +#ifndef DISABLE_UPDATE_DRAFT + if (resynth && (M*eBands[i]-N >= M*eBands[start] || i==start+1) && (update_lowband || lowband_offset==0)) + lowband_offset = i; + if (i == start+1) + special_hybrid_folding(m, norm, norm2, start, M, dual_stereo); +#else if (resynth && M*eBands[i]-N >= M*eBands[start] && (update_lowband || lowband_offset==0)) lowband_offset = i; +#endif tf_change = tf_res[i]; ctx.tf_change = tf_change; @@ -1457,7 +1526,7 @@ void quant_all_bands(int encode, const CELTMode *m, int start, int end, Y = norm; lowband_scratch = NULL; } - if (i==end-1) + if (last && !theta_rdo) lowband_scratch = NULL; /* Get a conservative estimate of the collapse_mask's for the bands we're @@ -1472,7 +1541,11 @@ void quant_all_bands(int encode, const CELTMode *m, int start, int end, fold_start = lowband_offset; while(M*eBands[--fold_start] > effective_lowband+norm_offset); fold_end = lowband_offset-1; +#ifndef DISABLE_UPDATE_DRAFT + while(++fold_end < i && M*eBands[fold_end] < effective_lowband+norm_offset+N); +#else while(M*eBands[++fold_end] < effective_lowband+norm_offset+N); +#endif x_cm = y_cm = 0; fold_i = fold_start; do { x_cm |= collapse_masks[fold_i*C+0]; @@ -1505,13 +1578,79 @@ void quant_all_bands(int encode, const CELTMode *m, int start, int end, } else { if (Y!=NULL) { - x_cm = quant_band_stereo(&ctx, X, Y, N, b, B, - effective_lowband != -1 ? 
norm+effective_lowband : NULL, LM, - last?NULL:norm+M*eBands[i]-norm_offset, lowband_scratch, x_cm|y_cm); + if (theta_rdo && i < intensity) + { + ec_ctx ec_save, ec_save2; + struct band_ctx ctx_save, ctx_save2; + opus_val32 dist0, dist1; + unsigned cm, cm2; + int nstart_bytes, nend_bytes, save_bytes; + unsigned char *bytes_buf; + unsigned char bytes_save[1275]; + opus_val16 w[2]; + compute_channel_weights(bandE[i], bandE[i+m->nbEBands], w); + /* Make a copy. */ + cm = x_cm|y_cm; + ec_save = *ec; + ctx_save = ctx; + OPUS_COPY(X_save, X, N); + OPUS_COPY(Y_save, Y, N); + /* Encode and round down. */ + ctx.theta_round = -1; + x_cm = quant_band_stereo(&ctx, X, Y, N, b, B, + effective_lowband != -1 ? norm+effective_lowband : NULL, LM, + last?NULL:norm+M*eBands[i]-norm_offset, lowband_scratch, cm); + dist0 = MULT16_32_Q15(w[0], celt_inner_prod(X_save, X, N, arch)) + MULT16_32_Q15(w[1], celt_inner_prod(Y_save, Y, N, arch)); + + /* Save first result. */ + cm2 = x_cm; + ec_save2 = *ec; + ctx_save2 = ctx; + OPUS_COPY(X_save2, X, N); + OPUS_COPY(Y_save2, Y, N); + if (!last) + OPUS_COPY(norm_save2, norm+M*eBands[i]-norm_offset, N); + nstart_bytes = ec_save.offs; + nend_bytes = ec_save.storage; + bytes_buf = ec_save.buf+nstart_bytes; + save_bytes = nend_bytes-nstart_bytes; + OPUS_COPY(bytes_save, bytes_buf, save_bytes); + + /* Restore */ + *ec = ec_save; + ctx = ctx_save; + OPUS_COPY(X, X_save, N); + OPUS_COPY(Y, Y_save, N); +#ifndef DISABLE_UPDATE_DRAFT + if (i == start+1) + special_hybrid_folding(m, norm, norm2, start, M, dual_stereo); +#endif + /* Encode and round up. */ + ctx.theta_round = 1; + x_cm = quant_band_stereo(&ctx, X, Y, N, b, B, + effective_lowband != -1 ? norm+effective_lowband : NULL, LM, + last?NULL:norm+M*eBands[i]-norm_offset, lowband_scratch, cm); + dist1 = MULT16_32_Q15(w[0], celt_inner_prod(X_save, X, N, arch)) + MULT16_32_Q15(w[1], celt_inner_prod(Y_save, Y, N, arch)); + if (dist0 >= dist1) { + x_cm = cm2; + *ec = ec_save2; + ctx = ctx_save2; + OPUS_COPY(X, X_save2, N); + OPUS_COPY(Y, Y_save2, N); + if (!last) + OPUS_COPY(norm+M*eBands[i]-norm_offset, norm_save2, N); + OPUS_COPY(bytes_buf, bytes_save, save_bytes); + } + } else { + ctx.theta_round = 0; + x_cm = quant_band_stereo(&ctx, X, Y, N, b, B, + effective_lowband != -1 ? norm+effective_lowband : NULL, LM, + last?NULL:norm+M*eBands[i]-norm_offset, lowband_scratch, x_cm|y_cm); + } } else { x_cm = quant_band(&ctx, X, N, b, B, effective_lowband != -1 ? norm+effective_lowband : NULL, LM, - last?NULL:norm+M*eBands[i]-norm_offset, Q15ONE, lowband_scratch, x_cm|y_cm); + last?NULL:norm+M*eBands[i]-norm_offset, Q15ONE, lowband_scratch, x_cm|y_cm); } y_cm = x_cm; } @@ -1521,6 +1660,9 @@ void quant_all_bands(int encode, const CELTMode *m, int start, int end, /* Update the folding position only as long as we have 1 bit/sample depth. */ update_lowband = b>(N<nbEBands */ }; +#if defined(ENABLE_HARDENING) || defined(ENABLE_ASSERTIONS) +/* Make basic checks on the CELT state to ensure we don't end + up writing all over memory. 
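The theta RDO block above is essentially "encode twice, keep the better attempt": the encoder snapshots the range coder, band context and input vectors, codes the band with theta rounded down, scores the result as w[0]*<X_save,X> + w[1]*<Y_save,Y>, rewinds, codes it again with theta rounded up, and restores the first attempt when its score was at least as high. The skeleton below shows only that control flow; quant_once() is a hypothetical stand-in for quant_band_stereo(), and the real code also saves and restores the emitted range-coder bytes.

#include <string.h>

#define RDO_N 96   /* assumed maximum band size for the local copies */

typedef struct { int theta_round; /* rest of band_ctx omitted */ } BandCtxSketch;

static float inner(const float *a, const float *b, int n)
{
   float s = 0; int i;
   for (i = 0; i < n; i++) s += a[i]*b[i];
   return s;
}

/* Requires n <= RDO_N. */
static void rdo_two_pass(BandCtxSketch *ctx, float *X, float *Y, int n,
                         const float w[2],
                         void (*quant_once)(BandCtxSketch*, float*, float*, int))
{
   float Xs[RDO_N], Ys[RDO_N], X1[RDO_N], Y1[RDO_N];
   BandCtxSketch ctx_save = *ctx, ctx1;
   float dist0, dist1;
   memcpy(Xs, X, n*sizeof(*X)); memcpy(Ys, Y, n*sizeof(*Y));

   ctx->theta_round = -1;                 /* attempt 1: round theta down */
   quant_once(ctx, X, Y, n);
   dist0 = w[0]*inner(Xs, X, n) + w[1]*inner(Ys, Y, n);
   ctx1 = *ctx;                           /* remember attempt 1 */
   memcpy(X1, X, n*sizeof(*X)); memcpy(Y1, Y, n*sizeof(*Y));

   *ctx = ctx_save;                       /* rewind state and signal */
   memcpy(X, Xs, n*sizeof(*X)); memcpy(Y, Ys, n*sizeof(*Y));
   ctx->theta_round = 1;                  /* attempt 2: round theta up */
   quant_once(ctx, X, Y, n);
   dist1 = w[0]*inner(Xs, X, n) + w[1]*inner(Ys, Y, n);

   if (dist0 >= dist1) {                  /* higher weighted correlation wins */
      *ctx = ctx1;
      memcpy(X, X1, n*sizeof(*X)); memcpy(Y, Y1, n*sizeof(*Y));
   }
}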
*/ +void validate_celt_decoder(CELTDecoder *st) +{ +#ifndef CUSTOM_MODES + celt_assert(st->mode == opus_custom_mode_create(48000, 960, NULL)); + celt_assert(st->overlap == 120); + celt_assert(st->end <= 21); +#else +/* From Section 4.3 in the spec: "The normal CELT layer uses 21 of those bands, + though Opus Custom (see Section 6.2) may use a different number of bands" + + Check if it's within the maximum number of Bark frequency bands instead */ + celt_assert(st->end <= 25); +#endif + celt_assert(st->channels == 1 || st->channels == 2); + celt_assert(st->stream_channels == 1 || st->stream_channels == 2); + celt_assert(st->downsample > 0); + celt_assert(st->start == 0 || st->start == 17); + celt_assert(st->start < st->end); +#ifdef OPUS_ARCHMASK + celt_assert(st->arch >= 0); + celt_assert(st->arch <= OPUS_ARCHMASK); +#endif + celt_assert(st->last_pitch_index <= PLC_PITCH_LAG_MAX); + celt_assert(st->last_pitch_index >= PLC_PITCH_LAG_MIN || st->last_pitch_index == 0); + celt_assert(st->postfilter_period < MAX_PERIOD); + celt_assert(st->postfilter_period >= COMBFILTER_MINPERIOD || st->postfilter_period == 0); + celt_assert(st->postfilter_period_old < MAX_PERIOD); + celt_assert(st->postfilter_period_old >= COMBFILTER_MINPERIOD || st->postfilter_period_old == 0); + celt_assert(st->postfilter_tapset <= 2); + celt_assert(st->postfilter_tapset >= 0); + celt_assert(st->postfilter_tapset_old <= 2); + celt_assert(st->postfilter_tapset_old >= 0); +} +#endif + int celt_decoder_get_size(int channels) { const CELTMode *mode = opus_custom_mode_create(48000, 960, NULL); @@ -163,6 +210,11 @@ OPUS_CUSTOM_NOSTATIC int opus_custom_decoder_init(CELTDecoder *st, const CELTMod st->start = 0; st->end = st->mode->effEBands; st->signalling = 1; +#ifndef DISABLE_UPDATE_DRAFT + st->disable_inv = channels == 1; +#else + st->disable_inv = 0; +#endif st->arch = opus_select_arch(); opus_custom_decoder_ctl(st, OPUS_RESET_STATE); @@ -177,6 +229,36 @@ void opus_custom_decoder_destroy(CELTDecoder *st) } #endif /* CUSTOM_MODES */ +#ifndef CUSTOM_MODES +/* Special case for stereo with no downsampling and no accumulation. This is + quite common and we can make it faster by processing both channels in the + same loop, reducing overhead due to the dependency loop in the IIR filter. */ +static void deemphasis_stereo_simple(celt_sig *in[], opus_val16 *pcm, int N, const opus_val16 coef0, + celt_sig *mem) +{ + celt_sig * OPUS_RESTRICT x0; + celt_sig * OPUS_RESTRICT x1; + celt_sig m0, m1; + int j; + x0=in[0]; + x1=in[1]; + m0 = mem[0]; + m1 = mem[1]; + for (j=0;jmdct, &freq[b], out_syn[0]+NB*b, mode->window, overlap, shift, B, arch); } else { @@ -345,6 +435,12 @@ void celt_synthesis(const CELTMode *mode, celt_norm *X, celt_sig * out_syn[], clt_mdct_backward(&mode->mdct, &freq[b], out_syn[c]+NB*b, mode->window, overlap, shift, B, arch); } while (++cdownsample, 0, st->arch); } else { + int exc_length; /* Pitch-based PLC */ const opus_val16 *window; + opus_val16 *exc; opus_val16 fade = Q15ONE; int pitch_index; VARDECL(opus_val32, etmp); - VARDECL(opus_val16, exc); + VARDECL(opus_val16, _exc); + VARDECL(opus_val16, fir_tmp); if (loss_count == 0) { @@ -519,8 +610,14 @@ static void celt_decode_lost(CELTDecoder * OPUS_RESTRICT st, int N, int LM) fade = QCONST16(.8f,15); } + /* We want the excitation for 2 pitch periods in order to look for a + decaying signal, but we can't get more than MAX_PERIOD. 
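deemphasis_stereo_simple() above is a fast path for the common case of stereo output with no resampling and no accumulation: both channels run through the first-order deemphasis IIR in the same loop so their two dependency chains can overlap. A float sketch of that filter shape; the real code also adds the VERY_SMALL anti-denormal term and, in fixed point, applies SIG_SHIFT scaling and saturation before writing the PCM samples.

/* First-order deemphasis over two channels, interleaved output,
   mirroring the deemphasis_stereo_simple() fast path above (sketch only). */
static void deemphasis_stereo_sketch(const float *x0, const float *x1,
                                     float *pcm, int N, float coef0, float mem[2])
{
   float m0 = mem[0], m1 = mem[1];
   int j;
   for (j = 0; j < N; j++) {
      /* Both channels advance in the same iteration, so the two IIR
         recursions can be interleaved by the compiler/CPU. */
      float tmp0 = x0[j] + m0;
      float tmp1 = x1[j] + m1;
      m0 = coef0 * tmp0;
      m1 = coef0 * tmp1;
      pcm[2*j]   = tmp0;
      pcm[2*j+1] = tmp1;
   }
   mem[0] = m0;
   mem[1] = m1;
}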
*/ + exc_length = IMIN(2*pitch_index, MAX_PERIOD); + ALLOC(etmp, overlap, opus_val32); - ALLOC(exc, MAX_PERIOD, opus_val16); + ALLOC(_exc, MAX_PERIOD+LPC_ORDER, opus_val16); + ALLOC(fir_tmp, exc_length, opus_val16); + exc = _exc+LPC_ORDER; window = mode->window; c=0; do { opus_val16 decay; @@ -529,13 +626,11 @@ static void celt_decode_lost(CELTDecoder * OPUS_RESTRICT st, int N, int LM) celt_sig *buf; int extrapolation_offset; int extrapolation_len; - int exc_length; int j; buf = decode_mem[c]; - for (i=0;iarch); + fir_tmp, exc_length, LPC_ORDER, st->arch); + OPUS_COPY(exc+MAX_PERIOD-exc_length, fir_tmp, exc_length); } /* Check if the waveform is decaying, and if so how fast. @@ -630,9 +735,8 @@ static void celt_decode_lost(CELTDecoder * OPUS_RESTRICT st, int N, int LM) tmp = ROUND16( buf[DECODE_BUFFER_SIZE-MAX_PERIOD-N+extrapolation_offset+j], SIG_SHIFT); - S1 += SHR32(MULT16_16(tmp, tmp), 8); + S1 += SHR32(MULT16_16(tmp, tmp), 10); } - { opus_val16 lpc_mem[LPC_ORDER]; /* Copy the last decoded samples (prior to the overlap region) to @@ -644,6 +748,10 @@ static void celt_decode_lost(CELTDecoder * OPUS_RESTRICT st, int N, int LM) celt_iir(buf+DECODE_BUFFER_SIZE-N, lpc+c*LPC_ORDER, buf+DECODE_BUFFER_SIZE-N, extrapolation_len, LPC_ORDER, lpc_mem, st->arch); +#ifdef FIXED_POINT + for (i=0; i < extrapolation_len; i++) + buf[DECODE_BUFFER_SIZE-N+i] = SATURATE(buf[DECODE_BUFFER_SIZE-N+i], SIG_SAT); +#endif } /* Check if the synthesis energy is higher than expected, which can @@ -654,7 +762,7 @@ static void celt_decode_lost(CELTDecoder * OPUS_RESTRICT st, int N, int LM) for (i=0;imode; nbEBands = mode->nbEBands; overlap = mode->overlap; @@ -956,7 +1065,7 @@ int celt_decode_with_ec(CELTDecoder * OPUS_RESTRICT st, const unsigned char *dat ALLOC(pulses, nbEBands, int); ALLOC(fine_priority, nbEBands, int); - codedBands = compute_allocation(mode, start, end, offsets, cap, + codedBands = clt_compute_allocation(mode, start, end, offsets, cap, alloc_trim, &intensity, &dual_stereo, bits, &balance, pulses, fine_quant, fine_priority, C, LM, dec, 0, 0, 0); @@ -979,7 +1088,8 @@ int celt_decode_with_ec(CELTDecoder * OPUS_RESTRICT st, const unsigned char *dat quant_all_bands(0, mode, start, end, X, C==2 ? X+N : NULL, collapse_masks, NULL, pulses, shortBlocks, spread_decision, dual_stereo, intensity, tf_res, - len*(8<rng, st->arch); + len*(8<rng, 0, + st->arch, st->disable_inv); if (anti_collapse_rsv > 0) { @@ -1234,6 +1344,26 @@ int opus_custom_decoder_ctl(CELTDecoder * OPUS_RESTRICT st, int request, ...) 
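celt_fir() has lost its separate memory argument and now requires distinct input and output buffers (the celt_assert(x != y) in the celt_lpc.c hunk further down), so the PLC above filters the excitation into fir_tmp and copies it back, keeping LPC_ORDER samples of history in front of exc. A plain float sketch of that non-in-place pattern, assuming celt_fir()'s convention of a unit leading tap with num[] applied to past samples:

#include <string.h>

/* FIR into a scratch buffer, then copy back, as the PLC does above.
   x must have 'ord' valid history samples before x[0] (like exc = _exc+LPC_ORDER). */
static void fir_then_copy_back(float *x, const float *num, int N, int ord,
                               float *scratch)
{
   int i, j;
   for (i = 0; i < N; i++) {
      float sum = x[i];                    /* unit gain on the current sample */
      for (j = 0; j < ord; j++)
         sum += num[j] * x[i - j - 1];     /* taps read the pre-history of x */
      scratch[i] = sum;
   }
   memcpy(x, scratch, N * sizeof(*x));     /* mirrors OPUS_COPY(exc+..., fir_tmp, ...) */
}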
*value=st->rng; } break; + case OPUS_SET_PHASE_INVERSION_DISABLED_REQUEST: + { + opus_int32 value = va_arg(ap, opus_int32); + if(value<0 || value>1) + { + goto bad_arg; + } + st->disable_inv = value; + } + break; + case OPUS_GET_PHASE_INVERSION_DISABLED_REQUEST: + { + opus_int32 *value = va_arg(ap, opus_int32*); + if (!value) + { + goto bad_arg; + } + *value = st->disable_inv; + } + break; default: goto bad_request; } diff --git a/libs/libopus/celt/celt_encoder.c b/libs/libopus/celt/celt_encoder.c index 3ee7a4d3f..d6f8afc20 100644 --- a/libs/libopus/celt/celt_encoder.c +++ b/libs/libopus/celt/celt_encoder.c @@ -73,8 +73,8 @@ struct OpusCustomEncoder { int constrained_vbr; /* If zero, VBR can do whatever it likes with the rate */ int loss_rate; int lsb_depth; - int variable_duration; int lfe; + int disable_inv; int arch; /* Everything beyond this point gets cleared on a reset */ @@ -98,6 +98,7 @@ struct OpusCustomEncoder { #endif int consec_transient; AnalysisInfo analysis; + SILKInfo silk_info; opus_val32 preemph_memE[2]; opus_val32 preemph_memD[2]; @@ -123,6 +124,7 @@ struct OpusCustomEncoder { /* opus_val16 oldBandE[], Size = channels*mode->nbEBands */ /* opus_val16 oldLogE[], Size = channels*mode->nbEBands */ /* opus_val16 oldLogE2[], Size = channels*mode->nbEBands */ + /* opus_val16 energyError[], Size = channels*mode->nbEBands */ }; int celt_encoder_get_size(int channels) @@ -136,9 +138,10 @@ OPUS_CUSTOM_NOSTATIC int opus_custom_encoder_get_size(const CELTMode *mode, int int size = sizeof(struct CELTEncoder) + (channels*mode->overlap-1)*sizeof(celt_sig) /* celt_sig in_mem[channels*mode->overlap]; */ + channels*COMBFILTER_MAXPERIOD*sizeof(celt_sig) /* celt_sig prefilter_mem[channels*COMBFILTER_MAXPERIOD]; */ - + 3*channels*mode->nbEBands*sizeof(opus_val16); /* opus_val16 oldBandE[channels*mode->nbEBands]; */ + + 4*channels*mode->nbEBands*sizeof(opus_val16); /* opus_val16 oldBandE[channels*mode->nbEBands]; */ /* opus_val16 oldLogE[channels*mode->nbEBands]; */ /* opus_val16 oldLogE2[channels*mode->nbEBands]; */ + /* opus_val16 energyError[channels*mode->nbEBands]; */ return size; } @@ -178,7 +181,6 @@ static int opus_custom_encoder_init_arch(CELTEncoder *st, const CELTMode *mode, st->start = 0; st->end = st->mode->effEBands; st->signalling = 1; - st->arch = arch; st->constrained_vbr = 1; @@ -223,7 +225,8 @@ void opus_custom_encoder_destroy(CELTEncoder *st) static int transient_analysis(const opus_val32 * OPUS_RESTRICT in, int len, int C, - opus_val16 *tf_estimate, int *tf_chan) + opus_val16 *tf_estimate, int *tf_chan, int allow_weak_transients, + int *weak_transient) { int i; VARDECL(opus_val16, tmp); @@ -233,6 +236,12 @@ static int transient_analysis(const opus_val32 * OPUS_RESTRICT in, int len, int int c; opus_val16 tf_max; int len2; + /* Forward masking: 6.7 dB/ms. */ +#ifdef FIXED_POINT + int forward_shift = 4; +#else + opus_val16 forward_decay = QCONST16(.0625f,15); +#endif /* Table of 6*64/x, trained on real data to minimize the average error */ static const unsigned char inv_table[128] = { 255,255,156,110, 86, 70, 59, 51, 45, 40, 37, 33, 31, 28, 26, 25, @@ -247,6 +256,19 @@ static int transient_analysis(const opus_val32 * OPUS_RESTRICT in, int len, int SAVE_STACK; ALLOC(tmp, len, opus_val16); + *weak_transient = 0; + /* For lower bitrates, let's be more conservative and have a forward masking + decay of 3.3 dB/ms. This avoids having to code transients at very low + bitrate (mostly for hybrid), which can result in unstable energy and/or + partial collapse. 
*/ + if (allow_weak_transients) + { +#ifdef FIXED_POINT + forward_shift = 5; +#else + forward_decay = QCONST16(.03125f,15); +#endif + } len2=len/2; for (c=0;c=0;i--) { + /* Backward masking: 13.9 dB/ms. */ #ifdef FIXED_POINT /* FIXME: Use PSHR16() instead */ tmp[i] = mem0 + PSHR32(tmp[i]-mem0,3); @@ -339,6 +362,12 @@ static int transient_analysis(const opus_val32 * OPUS_RESTRICT in, int len, int /* Compute harmonic mean discarding the unreliable boundaries The data is smooth, so we only take 1/4th of the samples */ unmask=0; + /* We should never see NaNs here. If we find any, then something really bad happened and we better abort + before it does any damage later on. If these asserts are disabled (no hardening), then the table + lookup a few lines below (id = ...) is likely to crash dur to an out-of-bounds read. DO NOT FIX + that crash on NaN since it could result in a worse issue later on. */ + celt_assert(!celt_isnan(tmp[0])); + celt_assert(!celt_isnan(norm)); for (i=12;i200; - + /* For low bitrates, define "weak transients" that need to be + handled differently to avoid partial collapse. */ + if (allow_weak_transients && is_transient && mask_metric<600) { + is_transient = 0; + *weak_transient = 1; + } /* Arbitrary metric for VBR boost */ tf_max = MAX16(0,celt_sqrt(27*mask_metric)-42); /* *tf_estimate = 1 + MIN16(1, sqrt(MAX16(0, tf_max-30))/20); */ @@ -549,7 +583,7 @@ static opus_val32 l1_metric(const celt_norm *tmp, int N, int LM, opus_val16 bias static int tf_analysis(const CELTMode *m, int len, int isTransient, int *tf_res, int lambda, celt_norm *X, int N0, int LM, - int *tf_sum, opus_val16 tf_estimate, int tf_chan) + opus_val16 tf_estimate, int tf_chan, int *importance) { int i; VARDECL(int, metric); @@ -574,7 +608,6 @@ static int tf_analysis(const CELTMode *m, int len, int isTransient, ALLOC(path0, len, int); ALLOC(path1, len, int); - *tf_sum = 0; for (i=0;i> 10; + trim = QCONST16(4.f, 8) + QCONST16(1.f/16.f, 8)*frac; + } if (C==2) { opus_val16 sum = 0; /* Q10 */ @@ -809,7 +849,7 @@ static int alloc_trim_analysis(const CELTMode *m, const celt_norm *X, } while (++c=0;i--) + mask[i] = MAX16(mask[i], mask[i+1] - QCONST16(3.f, DB_SHIFT)); + for (i=0;i> shift; + } + /*for (i=0;i 50 && LM>=1 && !lfe) { @@ -1012,6 +1089,14 @@ static opus_val16 dynalloc_analysis(const opus_val16 *bandLogE, const opus_val16 } for (i=start;i=12) follower[i] = HALF16(follower[i]); + } +#ifdef DISABLE_FLOAT_API + (void)analysis; +#else + if (analysis->valid) + { + for (i=start;ileak_boost[i]; + } +#endif + for (i=start;i>BITRES>>3 > effectiveBytes/4) + && (tot_boost+boost_bits)>>BITRES>>3 > 2*effectiveBytes/3) { - opus_int32 cap = ((effectiveBytes/4)<valid) + gain1 = (opus_val16)(gain1 * analysis->max_pitch_ratio); +#else + (void)analysis; +#endif /* Gain threshold for enabling the prefilter/postfilter */ pf_threshold = QCONST16(.2f,15); @@ -1193,7 +1298,7 @@ static int compute_vbr(const CELTMode *mode, AnalysisInfo *analysis, opus_int32 int LM, opus_int32 bitrate, int lastCodedBands, int C, int intensity, int constrained_vbr, opus_val16 stereo_saving, int tot_boost, opus_val16 tf_estimate, int pitch_change, opus_val16 maxDepth, - int variable_duration, int lfe, int has_surround_mask, opus_val16 surround_masking, + int lfe, int has_surround_mask, opus_val16 surround_masking, opus_val16 temporal_vbr) { /* The target rate in 8th bits per frame */ @@ -1235,10 +1340,9 @@ static int compute_vbr(const CELTMode *mode, AnalysisInfo *analysis, opus_int32 
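The weak-transient path above only changes a time constant: transient_analysis() smooths the half-rate energy with a first-order follower (the backward pass shown above uses m += (x - m) >> 3, quoted as 13.9 dB/ms), and the forward pass normally uses a shift of 4 (decay 1/16, about 6.7 dB/ms) but drops to a shift of 5 (1/32, about 3.3 dB/ms) when weak transients are allowed, making quiet low-bitrate hybrid frames less likely to be flagged as full transients. A float sketch of that follower shape:

/* First-order masking follower as used by transient_analysis(): decay is
   0.0625f (shift 4) normally, 0.03125f (shift 5) in the weak-transient case;
   the backward pass runs the same shape in reverse with 1/8. Sketch only. */
static void masking_follower_sketch(float *e, int n, float decay)
{
   float m = 0.f;
   int i;
   for (i = 0; i < n; i++) {
      m += decay * (e[i] - m);   /* exponential smoothing of band energy */
      e[i] = m;                  /* smoothed value becomes the masking floor */
   }
}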
SHR32(MULT16_16(stereo_saving-QCONST16(0.1f,8),(coded_stereo_dof<tonality-.15f)-0.09f; + tonal = MAX16(0.f,analysis->tonality-.15f)-0.12f; tonal_target = target + (opus_int32)((coded_bins<channels; @@ -1343,7 +1439,6 @@ int celt_encode_with_ec(CELTEncoder * OPUS_RESTRICT st, const opus_val16 * pcm, int end; int effEnd; int codedBands; - int tf_sum; int alloc_trim; int pitch_index=COMBFILTER_MINPERIOD; opus_val16 gain1 = 0; @@ -1355,6 +1450,7 @@ int celt_encode_with_ec(CELTEncoder * OPUS_RESTRICT st, const opus_val16 * pcm, opus_int32 total_boost; opus_int32 balance; opus_int32 tell; + opus_int32 tell0_frac; int prefilter_tapset=0; int pf_on; int anti_collapse_rsv; @@ -1376,7 +1472,10 @@ int celt_encode_with_ec(CELTEncoder * OPUS_RESTRICT st, const opus_val16 * pcm, opus_val16 surround_masking=0; opus_val16 temporal_vbr=0; opus_val16 surround_trim = 0; - opus_int32 equiv_rate = 510000; + opus_int32 equiv_rate; + int hybrid; + int weak_transient = 0; + int enable_tf_analysis; VARDECL(opus_val16, surround_dynalloc); ALLOC_STACK; @@ -1386,6 +1485,7 @@ int celt_encode_with_ec(CELTEncoder * OPUS_RESTRICT st, const opus_val16 * pcm, eBands = mode->eBands; start = st->start; end = st->end; + hybrid = start != 0; tf_estimate = 0; if (nbCompressedBytes<2 || pcm==NULL) { @@ -1409,12 +1509,14 @@ int celt_encode_with_ec(CELTEncoder * OPUS_RESTRICT st, const opus_val16 * pcm, oldBandE = (opus_val16*)(st->in_mem+CC*(overlap+COMBFILTER_MAXPERIOD)); oldLogE = oldBandE + CC*nbEBands; oldLogE2 = oldLogE + CC*nbEBands; + energyError = oldLogE2 + CC*nbEBands; if (enc==NULL) { - tell=1; + tell0_frac=tell=1; nbFilledBytes=0; } else { + tell0_frac=ec_tell_frac(enc); tell=ec_tell(enc); nbFilledBytes=(tell+4)>>3; } @@ -1467,10 +1569,11 @@ int celt_encode_with_ec(CELTEncoder * OPUS_RESTRICT st, const opus_val16 * pcm, if (st->bitrate!=OPUS_BITRATE_MAX) nbCompressedBytes = IMAX(2, IMIN(nbCompressedBytes, (tmp+4*mode->Fs)/(8*mode->Fs)-!!st->signalling)); - effectiveBytes = nbCompressedBytes; + effectiveBytes = nbCompressedBytes - nbFilledBytes; } + equiv_rate = ((opus_int32)nbCompressedBytes*8*50 << (3-LM)) - (40*C+20)*((400>>LM) - 50); if (st->bitrate != OPUS_BITRATE_MAX) - equiv_rate = st->bitrate - (40*C+20)*((400>>LM) - 50); + equiv_rate = IMIN(equiv_rate, st->bitrate - (40*C+20)*((400>>LM) - 50)); if (enc==NULL) { @@ -1558,17 +1661,17 @@ int celt_encode_with_ec(CELTEncoder * OPUS_RESTRICT st, const opus_val16 * pcm, { int enabled; int qg; - enabled = ((st->lfe&&nbAvailableBytes>3) || nbAvailableBytes>12*C) && start==0 && !silence && !st->disable_pf - && st->complexity >= 5 && !(st->consec_transient && LM!=3 && st->variable_duration==OPUS_FRAMESIZE_VARIABLE); + enabled = ((st->lfe&&nbAvailableBytes>3) || nbAvailableBytes>12*C) && !hybrid && !silence && !st->disable_pf + && st->complexity >= 5; prefilter_tapset = st->tapset_decision; - pf_on = run_prefilter(st, in, prefilter_mem, CC, N, prefilter_tapset, &pitch_index, &gain1, &qg, enabled, nbAvailableBytes); + pf_on = run_prefilter(st, in, prefilter_mem, CC, N, prefilter_tapset, &pitch_index, &gain1, &qg, enabled, nbAvailableBytes, &st->analysis); if ((gain1 > QCONST16(.4f,15) || st->prefilter_gain > QCONST16(.4f,15)) && (!st->analysis.valid || st->analysis.tonality > .3) && (pitch_index > 1.26*st->prefilter_period || pitch_index < .79*st->prefilter_period)) pitch_change = 1; if (pf_on==0) { - if(start==0 && tell+16<=total_bits) + if(!hybrid && tell+16<=total_bits) ec_enc_bit_logp(enc, 0, 1); } else { /*This block is not gated by a total bits check only because @@ 
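equiv_rate above folds the actual per-frame budget into a 20 ms-equivalent bitrate: nbCompressedBytes*8*50 << (3-LM) is the raw bitrate implied by the current frame size, and (40*C+20)*((400>>LM) - 50) charges a fixed per-frame overhead for every frame beyond the 50 per second that a 20 ms stream would use; the result is then capped by the requested bitrate minus the same overhead. A quick numeric check of the formula, assuming mono and arbitrary example byte budgets:

#include <stdio.h>

/* Evaluate the equiv_rate expression from the hunk above. */
static int equiv_rate_sketch(int nbCompressedBytes, int LM, int C)
{
   return (nbCompressedBytes*8*50 << (3-LM)) - (40*C+20)*((400>>LM) - 50);
}

int main(void)
{
   /* 20 ms (LM=3) at 160 bytes and 10 ms (LM=2) at 80 bytes both carry a raw
      64 kb/s, but the shorter frames pay overhead for the extra 50 frames/s. */
   printf("LM=3: %d b/s\n", equiv_rate_sketch(160, 3, 1)); /* 64000           */
   printf("LM=2: %d b/s\n", equiv_rate_sketch(80, 2, 1));  /* 64000-3000=61000 */
   return 0;
}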
-1589,8 +1692,12 @@ int celt_encode_with_ec(CELTEncoder * OPUS_RESTRICT st, const opus_val16 * pcm, shortBlocks = 0; if (st->complexity >= 1 && !st->lfe) { + /* Reduces the likelihood of energy instability on fricatives at low bitrate + in hybrid mode. It seems like we still want to have real transients on vowels + though (small SILK quantization offset value). */ + int allow_weak_transients = hybrid && effectiveBytes<15 && st->silk_info.signalType != 2; isTransient = transient_analysis(in, N+overlap, CC, - &tf_estimate, &tf_chan); + &tf_estimate, &tf_chan, allow_weak_transients, &weak_transient); } if (LM>0 && ec_tell(enc)+3<=total_bits) { @@ -1610,16 +1717,19 @@ int celt_encode_with_ec(CELTEncoder * OPUS_RESTRICT st, const opus_val16 * pcm, if (secondMdct) { compute_mdcts(mode, 0, in, freq, C, CC, LM, st->upsample, st->arch); - compute_band_energies(mode, freq, bandE, effEnd, C, LM); + compute_band_energies(mode, freq, bandE, effEnd, C, LM, st->arch); amp2Log2(mode, effEnd, end, bandE, bandLogE2, C); for (i=0;iupsample, st->arch); + /* This should catch any NaN in the CELT input. Since we're not supposed to see any (they're filtered + at the Opus layer), just abort. */ + celt_assert(!celt_isnan(freq[0]) && (C==1 || !celt_isnan(freq[N]))); if (CC==2&&C==1) tf_chan = 0; - compute_band_energies(mode, freq, bandE, effEnd, C, LM); + compute_band_energies(mode, freq, bandE, effEnd, C, LM, st->arch); if (st->lfe) { @@ -1634,7 +1744,7 @@ int celt_encode_with_ec(CELTEncoder * OPUS_RESTRICT st, const opus_val16 * pcm, ALLOC(surround_dynalloc, C*nbEBands, opus_val16); OPUS_CLEAR(surround_dynalloc, end); /* This computes how much masking takes place between surround channels */ - if (start==0&&st->energy_mask&&!st->lfe) + if (!hybrid&&st->energy_mask&&!st->lfe) { int mask_end; int midband; @@ -1736,14 +1846,14 @@ int celt_encode_with_ec(CELTEncoder * OPUS_RESTRICT st, const opus_val16 * pcm, /* Last chance to catch any transient we might have missed in the time-domain analysis */ - if (LM>0 && ec_tell(enc)+3<=total_bits && !isTransient && st->complexity>=5 && !st->lfe) + if (LM>0 && ec_tell(enc)+3<=total_bits && !isTransient && st->complexity>=5 && !st->lfe && !hybrid) { if (patch_transient_decision(bandLogE, oldBandE, nbEBands, start, end, C)) { isTransient = 1; shortBlocks = M; compute_mdcts(mode, shortBlocks, in, freq, C, CC, LM, st->upsample, st->arch); - compute_band_energies(mode, freq, bandE, effEnd, C, LM); + compute_band_energies(mode, freq, bandE, effEnd, C, LM, st->arch); amp2Log2(mode, effEnd, end, bandE, bandLogE, C); /* Compensate for the scaling of short vs long mdcts */ for (i=0;i=15*C && !hybrid && st->complexity>=2 && !st->lfe; + + ALLOC(offsets, nbEBands, int); + ALLOC(importance, nbEBands, int); + ALLOC(spread_weight, nbEBands, int); + + maxDepth = dynalloc_analysis(bandLogE, bandLogE2, nbEBands, start, end, C, offsets, + st->lsb_depth, mode->logN, isTransient, st->vbr, st->constrained_vbr, + eBands, LM, effectiveBytes, &tot_boost, st->lfe, surround_dynalloc, &st->analysis, importance, spread_weight); + ALLOC(tf_res, nbEBands, int); /* Disable variable tf resolution for hybrid and at very low bitrate */ - if (effectiveBytes>=15*C && start==0 && st->complexity>=2 && !st->lfe) + if (enable_tf_analysis) { int lambda; - if (effectiveBytes<40) - lambda = 12; - else if (effectiveBytes<60) - lambda = 6; - else if (effectiveBytes<100) - lambda = 4; - else - lambda = 3; - lambda*=2; - tf_select = tf_analysis(mode, effEnd, isTransient, tf_res, lambda, X, N, LM, &tf_sum, tf_estimate, 
tf_chan); + lambda = IMAX(80, 20480/effectiveBytes + 2); + tf_select = tf_analysis(mode, effEnd, isTransient, tf_res, lambda, X, N, LM, tf_estimate, tf_chan, importance); for (i=effEnd;isilk_info.signalType != 2) + { + /* For low bitrate hybrid, we force temporal resolution to 5 ms rather than 2.5 ms. */ + for (i=0;iforce_intra, @@ -1798,7 +1936,15 @@ int celt_encode_with_ec(CELTEncoder * OPUS_RESTRICT st, const opus_val16 * pcm, { st->tapset_decision = 0; st->spread_decision = SPREAD_NORMAL; - } else if (shortBlocks || st->complexity < 3 || nbAvailableBytes < 10*C || start != 0) + } else if (hybrid) + { + if (st->complexity == 0) + st->spread_decision = SPREAD_NONE; + else if (isTransient) + st->spread_decision = SPREAD_NORMAL; + else + st->spread_decision = SPREAD_AGGRESSIVE; + } else if (shortBlocks || st->complexity < 3 || nbAvailableBytes < 10*C) { if (st->complexity == 0) st->spread_decision = SPREAD_NONE; @@ -1822,7 +1968,7 @@ int celt_encode_with_ec(CELTEncoder * OPUS_RESTRICT st, const opus_val16 * pcm, { st->spread_decision = spreading_decision(mode, X, &st->tonal_average, st->spread_decision, &st->hf_average, - &st->tapset_decision, pf_on&&!shortBlocks, effEnd, C, M); + &st->tapset_decision, pf_on&&!shortBlocks, effEnd, C, M, spread_weight); } /*printf("%d %d\n", st->tapset_decision, st->spread_decision);*/ /*printf("%f %d %f %d\n\n", st->analysis.tonality, st->spread_decision, st->analysis.tonality_slope, st->tapset_decision);*/ @@ -1830,11 +1976,6 @@ int celt_encode_with_ec(CELTEncoder * OPUS_RESTRICT st, const opus_val16 * pcm, ec_enc_icdf(enc, st->spread_decision, spread_icdf, 5); } - ALLOC(offsets, nbEBands, int); - - maxDepth = dynalloc_analysis(bandLogE, bandLogE2, nbEBands, start, end, C, offsets, - st->lsb_depth, mode->logN, isTransient, st->vbr, st->constrained_vbr, - eBands, LM, effectiveBytes, &tot_boost, st->lfe, surround_dynalloc); /* For LFE, everything interesting is in the first band */ if (st->lfe) offsets[0] = IMIN(8, effectiveBytes/3); @@ -1896,12 +2037,15 @@ int celt_encode_with_ec(CELTEncoder * OPUS_RESTRICT st, const opus_val16 * pcm, alloc_trim = 5; if (tell+(6<lfe) + if (start > 0 || st->lfe) + { + st->stereo_saving = 0; alloc_trim = 5; - else + } else { alloc_trim = alloc_trim_analysis(mode, X, bandLogE, end, LM, C, N, &st->analysis, &st->stereo_saving, tf_estimate, - st->intensity, surround_trim, st->arch); + st->intensity, surround_trim, equiv_rate, st->arch); + } ec_enc_icdf(enc, alloc_trim, trim_icdf, 7); tell = ec_tell_frac(enc); } @@ -1919,17 +2063,36 @@ int celt_encode_with_ec(CELTEncoder * OPUS_RESTRICT st, const opus_val16 * pcm, /* Don't attempt to use more than 510 kb/s, even for frames smaller than 20 ms. The CELT allocator will just not be able to use more than that anyway. */ nbCompressedBytes = IMIN(nbCompressedBytes,1275>>(3-LM)); - base_target = vbr_rate - ((40*C+20)<constrained_vbr) base_target += (st->vbr_offset>>lm_diff); - target = compute_vbr(mode, &st->analysis, base_target, LM, equiv_rate, + if (!hybrid) + { + target = compute_vbr(mode, &st->analysis, base_target, LM, equiv_rate, st->lastCodedBands, C, st->intensity, st->constrained_vbr, st->stereo_saving, tot_boost, tf_estimate, pitch_change, maxDepth, - st->variable_duration, st->lfe, st->energy_mask!=NULL, surround_masking, + st->lfe, st->energy_mask!=NULL, surround_masking, temporal_vbr); - + } else { + target = base_target; + /* Tonal frames (offset<100) need more bits than noisy (offset>100) ones. 
*/ + if (st->silk_info.offset < 100) target += 12 << BITRES >> (3-LM); + if (st->silk_info.offset > 100) target -= 18 << BITRES >> (3-LM); + /* Boosting bitrate on transients and vowels with significant temporal + spikes. */ + target += (opus_int32)MULT16_16_Q14(tf_estimate-QCONST16(.25f,14), (50< QCONST16(.7f,14)) + target = IMAX(target, 50<>(BITRES+3)) + 2 - nbFilledBytes; + min_allowed = ((tell+total_boost+(1<<(BITRES+3))-1)>>(BITRES+3)) + 2; + /* Take into account the 37 bits we need to have left in the packet to + signal a redundant frame in hybrid mode. Creating a shorter packet would + create an entropy coder desync. */ + if (hybrid) + min_allowed = IMAX(min_allowed, (tell0_frac+(37<>(BITRES+3)); nbAvailableBytes = (target+(1<<(BITRES+2)))>>(BITRES+3); nbAvailableBytes = IMAX(min_allowed,nbAvailableBytes); - nbAvailableBytes = IMIN(nbCompressedBytes,nbAvailableBytes+nbFilledBytes) - nbFilledBytes; + nbAvailableBytes = IMIN(nbCompressedBytes,nbAvailableBytes); /* By how much did we "miss" the target on that frame */ delta = target - vbr_rate; @@ -1988,7 +2156,7 @@ int celt_encode_with_ec(CELTEncoder * OPUS_RESTRICT st, const opus_val16 * pcm, st->vbr_reservoir = 0; /*printf ("+%d\n", adjust);*/ } - nbCompressedBytes = IMIN(nbCompressedBytes,nbAvailableBytes+nbFilledBytes); + nbCompressedBytes = IMIN(nbCompressedBytes,nbAvailableBytes); /*printf("%d\n", nbCompressedBytes*50*8);*/ /* This moves the raw bits to take into account the new compressed size */ ec_enc_shrink(enc, nbCompressedBytes); @@ -2023,7 +2191,7 @@ int celt_encode_with_ec(CELTEncoder * OPUS_RESTRICT st, const opus_val16 * pcm, #endif if (st->lfe) signalBandwidth = 1; - codedBands = compute_allocation(mode, start, end, offsets, cap, + codedBands = clt_compute_allocation(mode, start, end, offsets, cap, alloc_trim, &st->intensity, &dual_stereo, bits, &balance, pulses, fine_quant, fine_priority, C, LM, enc, 1, st->lastCodedBands, signalBandwidth); if (st->lastCodedBands) @@ -2038,7 +2206,7 @@ int celt_encode_with_ec(CELTEncoder * OPUS_RESTRICT st, const opus_val16 * pcm, quant_all_bands(1, mode, start, end, X, C==2 ? X+N : NULL, collapse_masks, bandE, pulses, shortBlocks, st->spread_decision, dual_stereo, st->intensity, tf_res, nbCompressedBytes*(8<rng, st->arch); + balance, enc, LM, codedBands, &st->rng, st->complexity, st->arch, st->disable_inv); if (anti_collapse_rsv > 0) { @@ -2049,6 +2217,14 @@ int celt_encode_with_ec(CELTEncoder * OPUS_RESTRICT st, const opus_val16 * pcm, ec_enc_bits(enc, anti_collapse_on, 1); } quant_energy_finalise(mode, start, end, oldBandE, error, fine_quant, fine_priority, nbCompressedBytes*8-ec_tell(enc), enc, C); + OPUS_CLEAR(energyError, nbEBands*CC); + c=0; + do { + for (i=start;ilsb_depth; } break; - case OPUS_SET_EXPERT_FRAME_DURATION_REQUEST: + case OPUS_SET_PHASE_INVERSION_DISABLED_REQUEST: { opus_int32 value = va_arg(ap, opus_int32); - st->variable_duration = value; + if(value<0 || value>1) + { + goto bad_arg; + } + st->disable_inv = value; + } + break; + case OPUS_GET_PHASE_INVERSION_DISABLED_REQUEST: + { + opus_int32 *value = va_arg(ap, opus_int32*); + if (!value) + { + goto bad_arg; + } + *value = st->disable_inv; } break; case OPUS_RESET_STATE: @@ -2368,6 +2558,13 @@ int opus_custom_encoder_ctl(CELTEncoder * OPUS_RESTRICT st, int request, ...) 
OPUS_COPY(&st->analysis, info, 1); } break; + case CELT_SET_SILK_INFO_REQUEST: + { + SILKInfo *info = va_arg(ap, SILKInfo *); + if (info) + OPUS_COPY(&st->silk_info, info, 1); + } + break; case CELT_GET_MODE_REQUEST: { const CELTMode ** value = va_arg(ap, const CELTMode**); diff --git a/libs/libopus/celt/celt_lpc.c b/libs/libopus/celt/celt_lpc.c index b410a21c5..242e6df55 100644 --- a/libs/libopus/celt/celt_lpc.c +++ b/libs/libopus/celt/celt_lpc.c @@ -50,17 +50,21 @@ int p #endif OPUS_CLEAR(lpc, p); +#ifdef FIXED_POINT if (ac[0] != 0) +#else + if (ac[0] > 1e-10f) +#endif { for (i = 0; i < p; i++) { /* Sum up this iteration's reflection coefficient */ opus_val32 rr = 0; for (j = 0; j < i; j++) rr += MULT32_32_Q31(lpc[j],ac[i - j]); - rr += SHR32(ac[i + 1],3); - r = -frac_div32(SHL32(rr,3), error); + rr += SHR32(ac[i + 1],6); + r = -frac_div32(SHL32(rr,6), error); /* Update LPC coefficients and total error */ - lpc[i] = SHR32(r,3); + lpc[i] = SHR32(r,6); for (j = 0; j < (i+1)>>1; j++) { opus_val32 tmp1, tmp2; @@ -73,74 +77,100 @@ int p error = error - MULT32_32_Q31(MULT32_32_Q31(r,r),error); /* Bail out once we get 30 dB gain */ #ifdef FIXED_POINT - if (error maxabs) { + maxabs = absval; + idx = i; + } + } + maxabs = PSHR32(maxabs, 13); /* Q25->Q12 */ + + if (maxabs > 32767) { + maxabs = MIN32(maxabs, 163838); + chirp_Q16 = QCONST32(0.999, 16) - DIV32(SHL32(maxabs - 32767, 14), + SHR32(MULT32_32_32(maxabs, idx + 1), 2)); + chirp_minus_one_Q16 = chirp_Q16 - 65536; + + /* Apply bandwidth expansion. */ + for (i = 0; i < p - 1; i++) { + lpc[i] = MULT32_32_Q16(chirp_Q16, lpc[i]); + chirp_Q16 += PSHR32(MULT32_32_32(chirp_Q16, chirp_minus_one_Q16), 16); + } + lpc[p - 1] = MULT32_32_Q16(chirp_Q16, lpc[p - 1]); + } else { + break; + } + } + + if (iter == 10) { + /* If the coeffs still do not fit into the 16 bit range after 10 iterations, + fall back to the A(z)=1 filter. */ + OPUS_CLEAR(lpc, p); + _lpc[0] = 4096; /* Q12 */ + } else { + for (i = 0; i < p; i++) { + _lpc[i] = EXTRACT16(PSHR32(lpc[i], 13)); /* Q25->Q12 */ + } + } + } #endif } void celt_fir_c( - const opus_val16 *_x, + const opus_val16 *x, const opus_val16 *num, - opus_val16 *_y, + opus_val16 *y, int N, int ord, - opus_val16 *mem, int arch) { int i,j; VARDECL(opus_val16, rnum); - VARDECL(opus_val16, x); SAVE_STACK; - + celt_assert(x != y); ALLOC(rnum, ord, opus_val16); - ALLOC(x, N+ord, opus_val16); for(i=0;i_i){ - celt_assert(p>q); + celt_sig_assert(p>q); _k=_n; do p=CELT_PVQ_U_ROW[--_k][_n]; while(p>_i); diff --git a/libs/libopus/celt/ecintrin.h b/libs/libopus/celt/ecintrin.h index 2263cff6b..66a4c36ea 100644 --- a/libs/libopus/celt/ecintrin.h +++ b/libs/libopus/celt/ecintrin.h @@ -49,7 +49,11 @@ This macro should only be used for implementing ec_ilog(), if it is defined. All other code should use EC_ILOG() instead.*/ #if defined(_MSC_VER) && (_MSC_VER >= 1400) +#if defined(_MSC_VER) && (_MSC_VER >= 1910) +# include /* Improve compiler throughput. 
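The fixed-point _celt_lpc() above now keeps its coefficients representable in Q12/16-bit range by repeated bandwidth expansion: when the largest coefficient overflows, every a_k is scaled by chirp^(k+1) with chirp slightly below one (derived from the overflow amount), which pulls the prediction-filter poles toward the origin and shrinks the coefficients, and after ten failed passes it falls back to the trivial A(z)=1 filter. A float sketch of one expansion pass:

/* Bandwidth expansion of LPC coefficients: a[k] *= chirp^(k+1).
   Float sketch of the fixed-point chirp loop above. */
static void lpc_bandwidth_expand(float *a, int order, float chirp)
{
   float c = chirp;
   int k;
   for (k = 0; k < order; k++) {
      a[k] *= c;      /* scale the k-th coefficient */
      c *= chirp;     /* running power of chirp */
   }
}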
*/ +#else # include +#endif /*In _DEBUG mode this is not an intrinsic by default.*/ # pragma intrinsic(_BitScanReverse) diff --git a/libs/libopus/celt/entcode.h b/libs/libopus/celt/entcode.h index 13d6c84ef..3763e3f28 100644 --- a/libs/libopus/celt/entcode.h +++ b/libs/libopus/celt/entcode.h @@ -122,7 +122,7 @@ opus_uint32 ec_tell_frac(ec_ctx *_this); /* Tested exhaustively for all n and for 1<=d<=256 */ static OPUS_INLINE opus_uint32 celt_udiv(opus_uint32 n, opus_uint32 d) { - celt_assert(d>0); + celt_sig_assert(d>0); #ifdef USE_SMALL_DIV_TABLE if (d>256) return n/d; @@ -138,7 +138,7 @@ static OPUS_INLINE opus_uint32 celt_udiv(opus_uint32 n, opus_uint32 d) { } static OPUS_INLINE opus_int32 celt_sudiv(opus_int32 n, opus_int32 d) { - celt_assert(d>0); + celt_sig_assert(d>0); #ifdef USE_SMALL_DIV_TABLE if (n<0) return -(opus_int32)celt_udiv(-n, d); diff --git a/libs/libopus/celt/entdec.h b/libs/libopus/celt/entdec.h index d8ab31873..025fc1870 100644 --- a/libs/libopus/celt/entdec.h +++ b/libs/libopus/celt/entdec.h @@ -85,7 +85,7 @@ int ec_dec_icdf(ec_dec *_this,const unsigned char *_icdf,unsigned _ftb); The bits must have been encoded with ec_enc_uint(). No call to ec_dec_update() is necessary after this call. _ft: The number of integers that can be decoded (one more than the max). - This must be at least one, and no more than 2**32-1. + This must be at least 2, and no more than 2**32-1. Return: The decoded bits.*/ opus_uint32 ec_dec_uint(ec_dec *_this,opus_uint32 _ft); diff --git a/libs/libopus/celt/entenc.h b/libs/libopus/celt/entenc.h index 796bc4d57..f502eaf66 100644 --- a/libs/libopus/celt/entenc.h +++ b/libs/libopus/celt/entenc.h @@ -67,7 +67,7 @@ void ec_enc_icdf(ec_enc *_this,int _s,const unsigned char *_icdf,unsigned _ftb); /*Encodes a raw unsigned integer in the stream. _fl: The integer to encode. _ft: The number of integers that can be encoded (one more than the max). - This must be at least one, and no more than 2**32-1.*/ + This must be at least 2, and no more than 2**32-1.*/ void ec_enc_uint(ec_enc *_this,opus_uint32 _fl,opus_uint32 _ft); /*Encodes a sequence of raw bits in the stream. diff --git a/libs/libopus/celt/fixed_debug.h b/libs/libopus/celt/fixed_debug.h index d28227f5d..3765baa60 100644 --- a/libs/libopus/celt/fixed_debug.h +++ b/libs/libopus/celt/fixed_debug.h @@ -59,6 +59,14 @@ extern opus_int64 celt_mips; #define SHR(a,b) SHR32(a,b) #define PSHR(a,b) PSHR32(a,b) +/** Add two 32-bit values, ignore any overflows */ +#define ADD32_ovflw(a,b) (celt_mips+=2,(opus_val32)((opus_uint32)(a)+(opus_uint32)(b))) +/** Subtract two 32-bit values, ignore any overflows */ +#define SUB32_ovflw(a,b) (celt_mips+=2,(opus_val32)((opus_uint32)(a)-(opus_uint32)(b))) +/* Avoid MSVC warning C4146: unary minus operator applied to unsigned type */ +/** Negate 32-bit value, ignore any overflows */ +#define NEG32_ovflw(a) (celt_mips+=2,(opus_val32)(0-(opus_uint32)(a))) + static OPUS_INLINE short NEG16(int x) { int res; @@ -227,12 +235,11 @@ static OPUS_INLINE int SHL32_(opus_int64 a, int shift, char *file, int line) #define VSHR32(a, shift) (((shift)>0) ? 
SHR32(a, shift) : SHL32(a, -(shift))) #define ROUND16(x,a) (celt_mips--,EXTRACT16(PSHR32((x),(a)))) +#define SROUND16(x,a) (celt_mips--,EXTRACT16(SATURATE(PSHR32(x,a), 32767))); + #define HALF16(x) (SHR16(x,1)) #define HALF32(x) (SHR32(x,1)) -//#define SHR(a,shift) ((a) >> (shift)) -//#define SHL(a,shift) ((a) << (shift)) - #define ADD16(a, b) ADD16_(a, b, __FILE__, __LINE__) static OPUS_INLINE short ADD16_(int a, int b, char *file, int line) { @@ -403,6 +410,51 @@ static OPUS_INLINE short MULT16_16_16(int a, int b) return res; } +/* result fits in 32 bits */ +static OPUS_INLINE int MULT32_32_32(opus_int64 a, opus_int64 b) +{ + opus_int64 res; + if (!VERIFY_INT(a) || !VERIFY_INT(b)) + { + fprintf (stderr, "MULT32_32_32: inputs are not int: %d %d\n", a, b); +#ifdef FIXED_DEBUG_ASSERT + celt_assert(0); +#endif + } + res = a*b; + if (!VERIFY_INT(res)) + { + fprintf (stderr, "MULT32_32_32: output is not int: %d\n", res); +#ifdef FIXED_DEBUG_ASSERT + celt_assert(0); +#endif + } + celt_mips+=5; + return res; +} + +static OPUS_INLINE int MULT32_32_Q16(opus_int64 a, opus_int64 b) +{ + opus_int64 res; + if (!VERIFY_INT(a) || !VERIFY_INT(b)) + { + fprintf (stderr, "MULT32_32_Q16: inputs are not int: %d %d\n", a, b); +#ifdef FIXED_DEBUG_ASSERT + celt_assert(0); +#endif + } + res = ((opus_int64)(a)*(opus_int64)(b)) >> 16; + if (!VERIFY_INT(res)) + { + fprintf (stderr, "MULT32_32_Q16: output is not int: %d*%d=%d\n", a, b, (int)res); +#ifdef FIXED_DEBUG_ASSERT + celt_assert(0); +#endif + } + celt_mips+=5; + return res; +} + #define MULT16_16(a, b) MULT16_16_(a, b, __FILE__, __LINE__) static OPUS_INLINE int MULT16_16_(int a, int b, char *file, int line) { diff --git a/libs/libopus/celt/fixed_generic.h b/libs/libopus/celt/fixed_generic.h index 1cfd6d698..8f29d46bb 100644 --- a/libs/libopus/celt/fixed_generic.h +++ b/libs/libopus/celt/fixed_generic.h @@ -57,6 +57,13 @@ #define MULT16_32_Q15(a,b) ADD32(SHL(MULT16_16((a),SHR((b),16)),1), SHR(MULT16_16SU((a),((b)&0x0000ffff)),15)) #endif +/** 32x32 multiplication, followed by a 16-bit shift right. Results fits in 32 bits */ +#if OPUS_FAST_INT64 +#define MULT32_32_Q16(a,b) ((opus_val32)SHR((opus_int64)(a)*(opus_int64)(b),16)) +#else +#define MULT32_32_Q16(a,b) (ADD32(ADD32(ADD32((opus_val32)(SHR32(((opus_uint32)((a)&0x0000ffff)*(opus_uint32)((b)&0x0000ffff)),16)), MULT16_16SU(SHR32(a,16),((b)&0x0000ffff))), MULT16_16SU(SHR32(b,16),((a)&0x0000ffff))), SHL32(MULT16_16(SHR32(a,16),SHR32(b,16)),16))) +#endif + /** 32x32 multiplication, followed by a 31-bit shift right. Results fits in 32 bits */ #if OPUS_FAST_INT64 #define MULT32_32_Q31(a,b) ((opus_val32)SHR((opus_int64)(a)*(opus_int64)(b),31)) @@ -102,8 +109,11 @@ #define SATURATE16(x) (EXTRACT16((x)>32767 ? 32767 : (x)<-32768 ? -32768 : (x))) -/** Shift by a and round-to-neareast 32-bit value. Result is a 16-bit value */ +/** Shift by a and round-to-nearest 32-bit value. Result is a 16-bit value */ #define ROUND16(x,a) (EXTRACT16(PSHR32((x),(a)))) +/** Shift by a and round-to-nearest 32-bit value. 
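
MULT32_32_Q16 is new in this update: a 32x32-bit multiply followed by a 16-bit right shift, with the result assumed to fit in 32 bits. The fragment below is a hypothetical self-check, not part of the patch, of the reference semantics that both the OPUS_FAST_INT64 variant and the decomposed 16-bit-halves variant are meant to implement.

    #include <stdint.h>
    #include <assert.h>

    /* Reference semantics of MULT32_32_Q16: (a*b) >> 16 via a 64-bit
       intermediate. The portable macro gets the same result without int64
       by splitting each operand into 16-bit halves. */
    static int32_t mult32_32_q16_ref(int32_t a, int32_t b)
    {
        return (int32_t)(((int64_t)a * b) >> 16);
    }

    int main(void)
    {
        int32_t a = 3 << 15;   /* 1.5  in Q16 */
        int32_t b = 9 << 14;   /* 2.25 in Q16 */
        assert(mult32_32_q16_ref(a, b) == (27 << 13));   /* 3.375 in Q16 */
        return 0;
    }
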
Result is a saturated 16-bit value */ +#define SROUND16(x,a) EXTRACT16(SATURATE(PSHR32(x,a), 32767)); + /** Divide by two */ #define HALF16(x) (SHR16(x,1)) #define HALF32(x) (SHR32(x,1)) @@ -117,9 +127,20 @@ /** Subtract two 32-bit values */ #define SUB32(a,b) ((opus_val32)(a)-(opus_val32)(b)) +/** Add two 32-bit values, ignore any overflows */ +#define ADD32_ovflw(a,b) ((opus_val32)((opus_uint32)(a)+(opus_uint32)(b))) +/** Subtract two 32-bit values, ignore any overflows */ +#define SUB32_ovflw(a,b) ((opus_val32)((opus_uint32)(a)-(opus_uint32)(b))) +/* Avoid MSVC warning C4146: unary minus operator applied to unsigned type */ +/** Negate 32-bit value, ignore any overflows */ +#define NEG32_ovflw(a) ((opus_val32)(0-(opus_uint32)(a))) + /** 16x16 multiplication where the result fits in 16 bits */ #define MULT16_16_16(a,b) ((((opus_val16)(a))*((opus_val16)(b)))) +/** 32x32 multiplication where the result fits in 32 bits */ +#define MULT32_32_32(a,b) ((((opus_val32)(a))*((opus_val32)(b)))) + /* (opus_val32)(opus_val16) gives TI compiler a hint that it's 16x16->32 multiply */ /** 16x16 multiplication where the result fits in 32 bits */ #define MULT16_16(a,b) (((opus_val32)(opus_val16)(a))*((opus_val32)(opus_val16)(b))) diff --git a/libs/libopus/celt/float_cast.h b/libs/libopus/celt/float_cast.h index ed5a39b54..9d34976ee 100644 --- a/libs/libopus/celt/float_cast.h +++ b/libs/libopus/celt/float_cast.h @@ -61,7 +61,45 @@ ** the config.h file. */ -#if (HAVE_LRINTF) +/* With GCC, when SSE is available, the fastest conversion is cvtss2si. */ +#if defined(__GNUC__) && defined(__SSE__) + +#include +static OPUS_INLINE opus_int32 float2int(float x) {return _mm_cvt_ss2si(_mm_set_ss(x));} + +#elif (defined(_MSC_VER) && _MSC_VER >= 1400) && (defined(_M_X64) || (defined(_M_IX86_FP) && _M_IX86_FP >= 1)) + + #include + static OPUS_INLINE opus_int32 float2int(float value) + { + /* _mm_load_ss will generate same code as _mm_set_ss + ** in _MSC_VER >= 1914 /02 so keep __mm_load__ss + ** for backward compatibility. + */ + return _mm_cvtss_si32(_mm_load_ss(&value)); + } + +#elif (defined(_MSC_VER) && _MSC_VER >= 1400) && defined (_M_IX86) + + #include + + /* Win32 doesn't seem to have these functions. + ** Therefore implement OPUS_INLINE versions of these functions here. + */ + + static OPUS_INLINE opus_int32 + float2int (float flt) + { int intgr; + + _asm + { fld flt + fistp intgr + } ; + + return intgr ; + } + +#elif defined(HAVE_LRINTF) /* These defines enable functionality introduced with the 1999 ISO C ** standard. They must be defined before the inclusion of math.h to @@ -90,32 +128,6 @@ #include #define float2int(x) lrint(x) -#elif (defined(_MSC_VER) && _MSC_VER >= 1400) && defined (_M_X64) - #include - - __inline long int float2int(float value) - { - return _mm_cvtss_si32(_mm_load_ss(&value)); - } -#elif (defined(_MSC_VER) && _MSC_VER >= 1400) && defined (_M_IX86) - #include - - /* Win32 doesn't seem to have these functions. - ** Therefore implement OPUS_INLINE versions of these functions here. 
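
The *_ovflw macros above exist because the FFT/MDCT paths now wrap around on purpose: doing the arithmetic on opus_uint32 keeps overflow well defined, whereas signed overflow is undefined behaviour in C. A stand-alone restatement of the pattern, with a hypothetical helper name:

    #include <stdint.h>
    #include <stdio.h>

    /* Wrap-around 32-bit add: the unsigned detour avoids signed-overflow UB,
       exactly like ADD32_ovflw() above. */
    static int32_t add32_ovflw(int32_t a, int32_t b)
    {
        return (int32_t)((uint32_t)a + (uint32_t)b);
    }

    int main(void)
    {
        /* INT32_MAX + 1 wraps to INT32_MIN instead of being undefined. */
        printf("%d\n", add32_ovflw(INT32_MAX, 1));
        return 0;
    }
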
- */ - - __inline long int - float2int (float flt) - { int intgr; - - _asm - { fld flt - fistp intgr - } ; - - return intgr ; - } - #else #if (defined(__GNUC__) && defined(__STDC__) && __STDC__ && __STDC_VERSION__ >= 199901L) diff --git a/libs/libopus/celt/kiss_fft.c b/libs/libopus/celt/kiss_fft.c index 1f8fd0532..83775165d 100644 --- a/libs/libopus/celt/kiss_fft.c +++ b/libs/libopus/celt/kiss_fft.c @@ -82,8 +82,8 @@ static void kf_bfly2( C_SUB( Fout2[0] , Fout[0] , t ); C_ADDTO( Fout[0] , t ); - t.r = S_MUL(Fout2[1].r+Fout2[1].i, tw); - t.i = S_MUL(Fout2[1].i-Fout2[1].r, tw); + t.r = S_MUL(ADD32_ovflw(Fout2[1].r, Fout2[1].i), tw); + t.i = S_MUL(SUB32_ovflw(Fout2[1].i, Fout2[1].r), tw); C_SUB( Fout2[1] , Fout[1] , t ); C_ADDTO( Fout[1] , t ); @@ -92,8 +92,8 @@ static void kf_bfly2( C_SUB( Fout2[2] , Fout[2] , t ); C_ADDTO( Fout[2] , t ); - t.r = S_MUL(Fout2[3].i-Fout2[3].r, tw); - t.i = S_MUL(-Fout2[3].i-Fout2[3].r, tw); + t.r = S_MUL(SUB32_ovflw(Fout2[3].i, Fout2[3].r), tw); + t.i = S_MUL(NEG32_ovflw(ADD32_ovflw(Fout2[3].i, Fout2[3].r)), tw); C_SUB( Fout2[3] , Fout[3] , t ); C_ADDTO( Fout[3] , t ); Fout += 8; @@ -126,10 +126,10 @@ static void kf_bfly4( C_ADDTO( *Fout , scratch1 ); C_SUB( scratch1 , Fout[1] , Fout[3] ); - Fout[1].r = scratch0.r + scratch1.i; - Fout[1].i = scratch0.i - scratch1.r; - Fout[3].r = scratch0.r - scratch1.i; - Fout[3].i = scratch0.i + scratch1.r; + Fout[1].r = ADD32_ovflw(scratch0.r, scratch1.i); + Fout[1].i = SUB32_ovflw(scratch0.i, scratch1.r); + Fout[3].r = SUB32_ovflw(scratch0.r, scratch1.i); + Fout[3].i = ADD32_ovflw(scratch0.i, scratch1.r); Fout+=4; } } else { @@ -160,10 +160,10 @@ static void kf_bfly4( tw3 += fstride*3; C_ADDTO( *Fout , scratch[3] ); - Fout[m].r = scratch[5].r + scratch[4].i; - Fout[m].i = scratch[5].i - scratch[4].r; - Fout[m3].r = scratch[5].r - scratch[4].i; - Fout[m3].i = scratch[5].i + scratch[4].r; + Fout[m].r = ADD32_ovflw(scratch[5].r, scratch[4].i); + Fout[m].i = SUB32_ovflw(scratch[5].i, scratch[4].r); + Fout[m3].r = SUB32_ovflw(scratch[5].r, scratch[4].i); + Fout[m3].i = ADD32_ovflw(scratch[5].i, scratch[4].r); ++Fout; } } @@ -212,18 +212,18 @@ static void kf_bfly3( tw1 += fstride; tw2 += fstride*2; - Fout[m].r = Fout->r - HALF_OF(scratch[3].r); - Fout[m].i = Fout->i - HALF_OF(scratch[3].i); + Fout[m].r = SUB32_ovflw(Fout->r, HALF_OF(scratch[3].r)); + Fout[m].i = SUB32_ovflw(Fout->i, HALF_OF(scratch[3].i)); C_MULBYSCALAR( scratch[0] , epi3.i ); C_ADDTO(*Fout,scratch[3]); - Fout[m2].r = Fout[m].r + scratch[0].i; - Fout[m2].i = Fout[m].i - scratch[0].r; + Fout[m2].r = ADD32_ovflw(Fout[m].r, scratch[0].i); + Fout[m2].i = SUB32_ovflw(Fout[m].i, scratch[0].r); - Fout[m].r -= scratch[0].i; - Fout[m].i += scratch[0].r; + Fout[m].r = SUB32_ovflw(Fout[m].r, scratch[0].i); + Fout[m].i = ADD32_ovflw(Fout[m].i, scratch[0].r); ++Fout; } while(--k); @@ -282,22 +282,22 @@ static void kf_bfly5( C_ADD( scratch[8],scratch[2],scratch[3]); C_SUB( scratch[9],scratch[2],scratch[3]); - Fout0->r += scratch[7].r + scratch[8].r; - Fout0->i += scratch[7].i + scratch[8].i; + Fout0->r = ADD32_ovflw(Fout0->r, ADD32_ovflw(scratch[7].r, scratch[8].r)); + Fout0->i = ADD32_ovflw(Fout0->i, ADD32_ovflw(scratch[7].i, scratch[8].i)); - scratch[5].r = scratch[0].r + S_MUL(scratch[7].r,ya.r) + S_MUL(scratch[8].r,yb.r); - scratch[5].i = scratch[0].i + S_MUL(scratch[7].i,ya.r) + S_MUL(scratch[8].i,yb.r); + scratch[5].r = ADD32_ovflw(scratch[0].r, ADD32_ovflw(S_MUL(scratch[7].r,ya.r), S_MUL(scratch[8].r,yb.r))); + scratch[5].i = ADD32_ovflw(scratch[0].i, 
ADD32_ovflw(S_MUL(scratch[7].i,ya.r), S_MUL(scratch[8].i,yb.r))); - scratch[6].r = S_MUL(scratch[10].i,ya.i) + S_MUL(scratch[9].i,yb.i); - scratch[6].i = -S_MUL(scratch[10].r,ya.i) - S_MUL(scratch[9].r,yb.i); + scratch[6].r = ADD32_ovflw(S_MUL(scratch[10].i,ya.i), S_MUL(scratch[9].i,yb.i)); + scratch[6].i = NEG32_ovflw(ADD32_ovflw(S_MUL(scratch[10].r,ya.i), S_MUL(scratch[9].r,yb.i))); C_SUB(*Fout1,scratch[5],scratch[6]); C_ADD(*Fout4,scratch[5],scratch[6]); - scratch[11].r = scratch[0].r + S_MUL(scratch[7].r,yb.r) + S_MUL(scratch[8].r,ya.r); - scratch[11].i = scratch[0].i + S_MUL(scratch[7].i,yb.r) + S_MUL(scratch[8].i,ya.r); - scratch[12].r = - S_MUL(scratch[10].i,yb.i) + S_MUL(scratch[9].i,ya.i); - scratch[12].i = S_MUL(scratch[10].r,yb.i) - S_MUL(scratch[9].r,ya.i); + scratch[11].r = ADD32_ovflw(scratch[0].r, ADD32_ovflw(S_MUL(scratch[7].r,yb.r), S_MUL(scratch[8].r,ya.r))); + scratch[11].i = ADD32_ovflw(scratch[0].i, ADD32_ovflw(S_MUL(scratch[7].i,yb.r), S_MUL(scratch[8].i,ya.r))); + scratch[12].r = SUB32_ovflw(S_MUL(scratch[9].i,ya.i), S_MUL(scratch[10].i,yb.i)); + scratch[12].i = SUB32_ovflw(S_MUL(scratch[10].r,yb.i), S_MUL(scratch[9].r,ya.i)); C_ADD(*Fout2,scratch[11],scratch[12]); C_SUB(*Fout3,scratch[11],scratch[12]); diff --git a/libs/libopus/celt/mathops.c b/libs/libopus/celt/mathops.c index 21a01f52e..6ee9b9e10 100644 --- a/libs/libopus/celt/mathops.c +++ b/libs/libopus/celt/mathops.c @@ -38,7 +38,8 @@ #include "mathops.h" /*Compute floor(sqrt(_val)) with exact arithmetic. - This has been tested on all possible 32-bit inputs.*/ + _val must be greater than 0. + This has been tested on all possible 32-bit inputs greater than 0.*/ unsigned isqrt32(opus_uint32 _val){ unsigned b; unsigned g; @@ -182,7 +183,7 @@ opus_val32 celt_rcp(opus_val32 x) int i; opus_val16 n; opus_val16 r; - celt_assert2(x>0, "celt_rcp() only defined for positive values"); + celt_sig_assert(x>0); i = celt_ilog2(x); /* n is Q15 with range [0,1). */ n = VSHR32(x,i-15)-32768; diff --git a/libs/libopus/celt/mathops.h b/libs/libopus/celt/mathops.h index a0525a961..fe29dac1c 100644 --- a/libs/libopus/celt/mathops.h +++ b/libs/libopus/celt/mathops.h @@ -38,11 +38,44 @@ #include "entcode.h" #include "os_support.h" +#define PI 3.141592653f + /* Multiplies two 16-bit fractional values. Bit-exactness of this macro is important */ #define FRAC_MUL16(a,b) ((16384+((opus_int32)(opus_int16)(a)*(opus_int16)(b)))>>15) unsigned isqrt32(opus_uint32 _val); +/* CELT doesn't need it for fixed-point, by analysis.c does. */ +#if !defined(FIXED_POINT) || defined(ANALYSIS_C) +#define cA 0.43157974f +#define cB 0.67848403f +#define cC 0.08595542f +#define cE ((float)PI/2) +static OPUS_INLINE float fast_atan2f(float y, float x) { + float x2, y2; + x2 = x*x; + y2 = y*y; + /* For very small values, we don't care about the answer, so + we can just return 0. */ + if (x2 + y2 < 1e-18f) + { + return 0; + } + if(x2>23)-127; - in.i -= integer<<23; + in.i -= (opus_uint32)integer<<23; frac = in.f - 1.5f; frac = -0.41445418f + frac*(0.95909232f + frac*(-0.33951290f + frac*0.16541097f)); @@ -128,7 +160,7 @@ static OPUS_INLINE float celt_exp2(float x) /* K0 = 1, K1 = log(2), K2 = 3-4*log(2), K3 = 3*log(2) - 2 */ res.f = 0.99992522f + frac * (0.69583354f + frac * (0.22606716f + 0.078024523f*frac)); - res.i = (res.i + (integer<<23)) & 0x7fffffff; + res.i = (res.i + ((opus_uint32)integer<<23)) & 0x7fffffff; return res.f; } @@ -147,7 +179,7 @@ static OPUS_INLINE float celt_exp2(float x) /** Integer log in base2. 
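
The (opus_uint32) casts added above matter because celt_log2()/celt_exp2() manipulate the IEEE-754 bit pattern directly: the biased exponent supplies the integer part of log2 and a cubic in (mantissa - 1.5) supplies the fraction. Below is a self-contained restatement using memcpy instead of the union; the final `return 1+integer+frac;` comes from the surrounding function rather than this hunk.

    #include <stdint.h>
    #include <string.h>

    /* Stand-alone sketch of the celt_log2() float trick. Requires x > 0. */
    static float celt_log2_sketch(float x)
    {
        uint32_t bits;
        float f, frac;
        int integer;
        memcpy(&bits, &x, sizeof(bits));
        integer = (int)(bits >> 23) - 127;
        bits -= (uint32_t)integer << 23;   /* rescale the value into [1, 2) */
        memcpy(&f, &bits, sizeof(f));
        frac = f - 1.5f;
        frac = -0.41445418f + frac*(0.95909232f
             + frac*(-0.33951290f + frac*0.16541097f));
        return 1 + integer + frac;
    }
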
Undefined for zero and negative numbers */ static OPUS_INLINE opus_int16 celt_ilog2(opus_int32 x) { - celt_assert2(x>0, "celt_ilog2() only defined for strictly positive numbers"); + celt_sig_assert(x>0); return EC_ILOG(x)-1; } #endif diff --git a/libs/libopus/celt/mdct.c b/libs/libopus/celt/mdct.c index 5315ad11a..5c6dab5b7 100644 --- a/libs/libopus/celt/mdct.c +++ b/libs/libopus/celt/mdct.c @@ -270,8 +270,8 @@ void clt_mdct_backward_c(const mdct_lookup *l, kiss_fft_scalar *in, kiss_fft_sca int rev; kiss_fft_scalar yr, yi; rev = *bitrev++; - yr = S_MUL(*xp2, t[i]) + S_MUL(*xp1, t[N4+i]); - yi = S_MUL(*xp1, t[i]) - S_MUL(*xp2, t[N4+i]); + yr = ADD32_ovflw(S_MUL(*xp2, t[i]), S_MUL(*xp1, t[N4+i])); + yi = SUB32_ovflw(S_MUL(*xp1, t[i]), S_MUL(*xp2, t[N4+i])); /* We swap real and imag because we use an FFT instead of an IFFT. */ yp[2*rev+1] = yr; yp[2*rev] = yi; @@ -301,8 +301,8 @@ void clt_mdct_backward_c(const mdct_lookup *l, kiss_fft_scalar *in, kiss_fft_sca t0 = t[i]; t1 = t[N4+i]; /* We'd scale up by 2 here, but instead it's done when mixing the windows */ - yr = S_MUL(re,t0) + S_MUL(im,t1); - yi = S_MUL(re,t1) - S_MUL(im,t0); + yr = ADD32_ovflw(S_MUL(re,t0), S_MUL(im,t1)); + yi = SUB32_ovflw(S_MUL(re,t1), S_MUL(im,t0)); /* We swap real and imag because we're using an FFT instead of an IFFT. */ re = yp1[1]; im = yp1[0]; @@ -312,8 +312,8 @@ void clt_mdct_backward_c(const mdct_lookup *l, kiss_fft_scalar *in, kiss_fft_sca t0 = t[(N4-i-1)]; t1 = t[(N2-i-1)]; /* We'd scale up by 2 here, but instead it's done when mixing the windows */ - yr = S_MUL(re,t0) + S_MUL(im,t1); - yi = S_MUL(re,t1) - S_MUL(im,t0); + yr = ADD32_ovflw(S_MUL(re,t0), S_MUL(im,t1)); + yi = SUB32_ovflw(S_MUL(re,t1), S_MUL(im,t0)); yp1[0] = yr; yp0[1] = yi; yp0 += 2; @@ -333,8 +333,8 @@ void clt_mdct_backward_c(const mdct_lookup *l, kiss_fft_scalar *in, kiss_fft_sca kiss_fft_scalar x1, x2; x1 = *xp1; x2 = *yp1; - *yp1++ = MULT16_32_Q15(*wp2, x2) - MULT16_32_Q15(*wp1, x1); - *xp1-- = MULT16_32_Q15(*wp1, x2) + MULT16_32_Q15(*wp2, x1); + *yp1++ = SUB32_ovflw(MULT16_32_Q15(*wp2, x2), MULT16_32_Q15(*wp1, x1)); + *xp1-- = ADD32_ovflw(MULT16_32_Q15(*wp1, x2), MULT16_32_Q15(*wp2, x1)); wp1++; wp2--; } diff --git a/libs/libopus/celt/mips/celt_mipsr1.h b/libs/libopus/celt/mips/celt_mipsr1.h index e85661a66..c332fe047 100644 --- a/libs/libopus/celt/mips/celt_mipsr1.h +++ b/libs/libopus/celt/mips/celt_mipsr1.h @@ -53,6 +53,7 @@ #include "celt_lpc.h" #include "vq.h" +#define OVERRIDE_COMB_FILTER_CONST #define OVERRIDE_comb_filter void comb_filter(opus_val32 *y, opus_val32 *x, int T0, int T1, int N, opus_val16 g0, opus_val16 g1, int tapset0, int tapset1, diff --git a/libs/libopus/celt/mips/vq_mipsr1.h b/libs/libopus/celt/mips/vq_mipsr1.h index 54cef8613..f26a33e75 100644 --- a/libs/libopus/celt/mips/vq_mipsr1.h +++ b/libs/libopus/celt/mips/vq_mipsr1.h @@ -36,11 +36,6 @@ #include "mathops.h" #include "arch.h" -static unsigned extract_collapse_mask(int *iy, int N, int B); -static void normalise_residual(int * OPUS_RESTRICT iy, celt_norm * OPUS_RESTRICT X, int N, opus_val32 Ryy, opus_val16 gain); -static void exp_rotation(celt_norm *X, int len, int dir, int stride, int K, int spread); -static void renormalise_vector_mips(celt_norm *X, int N, opus_val16 gain, int arch); - #define OVERRIDE_vq_exp_rotation1 static void exp_rotation1(celt_norm *X, int len, int stride, opus_val16 c, opus_val16 s) { @@ -69,11 +64,7 @@ static void exp_rotation1(celt_norm *X, int len, int stride, opus_val16 c, opus_ } #define OVERRIDE_renormalise_vector - -#define 
renormalise_vector(X, N, gain, arch) \ - (renormalise_vector_mips(X, N, gain, arch)) - -void renormalise_vector_mips(celt_norm *X, int N, opus_val16 gain, int arch) +void renormalise_vector(celt_norm *X, int N, opus_val16 gain, int arch) { int i; #ifdef FIXED_POINT diff --git a/libs/libopus/celt/modes.c b/libs/libopus/celt/modes.c index 911686e90..390c5e8ae 100644 --- a/libs/libopus/celt/modes.c +++ b/libs/libopus/celt/modes.c @@ -427,7 +427,7 @@ void opus_custom_mode_destroy(CELTMode *mode) } #endif /* CUSTOM_MODES_ONLY */ opus_free((opus_int16*)mode->eBands); - opus_free((opus_int16*)mode->allocVectors); + opus_free((unsigned char*)mode->allocVectors); opus_free((opus_val16*)mode->window); opus_free((opus_int16*)mode->logN); diff --git a/libs/libopus/celt/os_support.h b/libs/libopus/celt/os_support.h index a2171971e..009bf861d 100644 --- a/libs/libopus/celt/os_support.h +++ b/libs/libopus/celt/os_support.h @@ -39,7 +39,6 @@ #include "opus_defines.h" #include -#include #include /** Opus wrapper for malloc(). To do your own dynamic allocation, all you need to do is replace this function and opus_free */ diff --git a/libs/libopus/celt/pitch.c b/libs/libopus/celt/pitch.c index bf46e7d56..872582a48 100644 --- a/libs/libopus/celt/pitch.c +++ b/libs/libopus/celt/pitch.c @@ -102,11 +102,9 @@ static void find_best_pitch(opus_val32 *xcorr, opus_val16 *y, int len, } } -static void celt_fir5(const opus_val16 *x, +static void celt_fir5(opus_val16 *x, const opus_val16 *num, - opus_val16 *y, - int N, - opus_val16 *mem) + int N) { int i; opus_val16 num0, num1, num2, num3, num4; @@ -116,11 +114,11 @@ static void celt_fir5(const opus_val16 *x, num2=num[2]; num3=num[3]; num4=num[4]; - mem0=mem[0]; - mem1=mem[1]; - mem2=mem[2]; - mem3=mem[3]; - mem4=mem[4]; + mem0=0; + mem1=0; + mem2=0; + mem3=0; + mem4=0; for (i=0;i>1, mem); + celt_fir5(x_lp, lpc2, len>>1); } /* Pure C implementation. 
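
celt_fir5() above loses its mem argument: the state now starts at zero and the routine runs in place, keeping the last five original inputs as the delay line. A plain-float sketch of that contract; the name is hypothetical and the fixed-point SIG_SHIFT scaling is omitted.

    #define FIR5_ORD 5
    /* In-place direct-form FIR with zero initial state, as in the new
       celt_fir5(). The original inputs are saved before being overwritten. */
    static void fir5_sketch(float *x, const float num[FIR5_ORD], int N)
    {
        float mem[FIR5_ORD] = {0};
        int i, j;
        for (i = 0; i < N; i++) {
            float sum = x[i];
            for (j = 0; j < FIR5_ORD; j++)
                sum += num[j] * mem[j];
            for (j = FIR5_ORD - 1; j > 0; j--)   /* shift the delay line */
                mem[j] = mem[j - 1];
            mem[0] = x[i];                       /* store the input sample */
            x[i] = sum;
        }
    }
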
*/ @@ -220,13 +213,8 @@ opus_val32 #else void #endif -#if defined(OVERRIDE_PITCH_XCORR) celt_pitch_xcorr_c(const opus_val16 *_x, const opus_val16 *_y, - opus_val32 *xcorr, int len, int max_pitch) -#else -celt_pitch_xcorr(const opus_val16 *_x, const opus_val16 *_y, opus_val32 *xcorr, int len, int max_pitch, int arch) -#endif { #if 0 /* This is a simple version of the pitch correlation that should work @@ -261,15 +249,11 @@ celt_pitch_xcorr(const opus_val16 *_x, const opus_val16 *_y, opus_val32 maxcorr=1; #endif celt_assert(max_pitch>0); - celt_assert((((unsigned char *)_x-(unsigned char *)NULL)&3)==0); + celt_sig_assert((((unsigned char *)_x-(unsigned char *)NULL)&3)==0); for (i=0;i>1;j++) sum += SHR32(MULT16_16(x_lp[j],y[i+j]), shift); #else - sum = celt_inner_prod_c(x_lp, y+i, len>>1); + sum = celt_inner_prod(x_lp, y+i, len>>1, arch); #endif xcorr[i] = MAX32(-1, sum); #ifdef FIXED_POINT @@ -424,7 +404,7 @@ static opus_val16 compute_pitch_gain(opus_val32 xy, opus_val32 xx, opus_val32 yy sx = celt_ilog2(xx)-14; sy = celt_ilog2(yy)-14; shift = sx + sy; - x2y2 = MULT16_16_Q14(VSHR32(xx, sx), VSHR32(yy, sy)); + x2y2 = SHR32(MULT16_16(VSHR32(xx, sx), VSHR32(yy, sy)), 14); if (shift & 1) { if (x2y2 < 32768) { diff --git a/libs/libopus/celt/pitch.h b/libs/libopus/celt/pitch.h index d3503532a..e425f56ae 100644 --- a/libs/libopus/celt/pitch.h +++ b/libs/libopus/celt/pitch.h @@ -46,8 +46,7 @@ #include "mips/pitch_mipsr1.h" #endif -#if ((defined(OPUS_ARM_ASM) && defined(FIXED_POINT)) \ - || defined(OPUS_ARM_MAY_HAVE_NEON_INTR)) +#if (defined(OPUS_ARM_ASM) || defined(OPUS_ARM_MAY_HAVE_NEON_INTR)) # include "arm/pitch_arm.h" #endif @@ -184,17 +183,10 @@ opus_val32 void #endif celt_pitch_xcorr_c(const opus_val16 *_x, const opus_val16 *_y, - opus_val32 *xcorr, int len, int max_pitch); - -#if !defined(OVERRIDE_PITCH_XCORR) -#ifdef FIXED_POINT -opus_val32 -#else -void -#endif -celt_pitch_xcorr(const opus_val16 *_x, const opus_val16 *_y, opus_val32 *xcorr, int len, int max_pitch, int arch); +#ifndef OVERRIDE_PITCH_XCORR +# define celt_pitch_xcorr celt_pitch_xcorr_c #endif #endif diff --git a/libs/libopus/celt/quant_bands.c b/libs/libopus/celt/quant_bands.c index 95076e0af..39a221eda 100644 --- a/libs/libopus/celt/quant_bands.c +++ b/libs/libopus/celt/quant_bands.c @@ -418,6 +418,7 @@ void quant_energy_finalise(const CELTMode *m, int start, int end, opus_val16 *ol offset = (q2-.5f)*(1<<(14-fine_quant[i]-1))*(1.f/16384); #endif oldEBands[i+c*m->nbEBands] += offset; + error[i+c*m->nbEBands] -= offset; bits_left--; } while (++c < C); } @@ -456,7 +457,7 @@ void unquant_coarse_energy(const CELTMode *m, int start, int end, opus_val16 *ol /* It would be better to express this invariant as a test on C at function entry, but that isn't enough to make the static analyzer happy. */ - celt_assert(c<2); + celt_sig_assert(c<2); tell = ec_tell(dec); if(budget-tell>=15) { @@ -547,9 +548,15 @@ void amp2Log2(const CELTMode *m, int effEnd, int end, c=0; do { for (i=0;inbEBands] = - celt_log2(SHL32(bandE[i+c*m->nbEBands],2)) + celt_log2(bandE[i+c*m->nbEBands]) - SHL16((opus_val16)eMeans[i],6); +#ifdef FIXED_POINT + /* Compensate for bandE[] being Q12 but celt_log2() taking a Q14 input. 
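
celt_pitch_xcorr_c() above is the generic kernel behind the pitch search; stripped of the 4-way unrolling and the SSE/NEON dispatch, it computes one inner product per candidate lag, as in this reference sketch (equivalent to the `#if 0` version kept in pitch.c):

    /* Reference pitch cross-correlation: xcorr[i] = <x, y+i> for each lag i. */
    static void pitch_xcorr_sketch(const float *x, const float *y,
                                   float *xcorr, int len, int max_pitch)
    {
        int i, j;
        for (i = 0; i < max_pitch; i++) {
            float sum = 0;
            for (j = 0; j < len; j++)
                sum += x[j] * y[i + j];
            xcorr[i] = sum;
        }
    }
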
*/ + bandLogE[i+c*m->nbEBands] += QCONST16(2.f, DB_SHIFT); +#endif + } for (i=effEnd;inbEBands+i] = -QCONST16(14.f,DB_SHIFT); } while (++c < C); diff --git a/libs/libopus/celt/rate.c b/libs/libopus/celt/rate.c index 9062ff750..465e1ba26 100644 --- a/libs/libopus/celt/rate.c +++ b/libs/libopus/celt/rate.c @@ -348,12 +348,17 @@ static OPUS_INLINE int interp_bits2pulses(const CELTMode *m, int start, int end, /*This if() block is the only part of the allocation function that is not a mandatory part of the bitstream: any bands we choose to skip here must be explicitly signaled.*/ - /*Choose a threshold with some hysteresis to keep bands from - fluctuating in and out.*/ + int depth_threshold; + /*We choose a threshold with some hysteresis to keep bands from + fluctuating in and out, but we try not to fold below a certain point. */ + if (codedBands > 17) + depth_threshold = j ((j>4 && j<=signalBandwidth)) + if (codedBands<=start+2 || (band_bits > (depth_threshold*band_width<>4 && j<=signalBandwidth)) #endif { ec_enc_bit_logp(ec, 1, 1); @@ -524,7 +529,7 @@ static OPUS_INLINE int interp_bits2pulses(const CELTMode *m, int start, int end, return codedBands; } -int compute_allocation(const CELTMode *m, int start, int end, const int *offsets, const int *cap, int alloc_trim, int *intensity, int *dual_stereo, +int clt_compute_allocation(const CELTMode *m, int start, int end, const int *offsets, const int *cap, int alloc_trim, int *intensity, int *dual_stereo, opus_int32 total, opus_int32 *balance, int *pulses, int *ebits, int *fine_priority, int C, int LM, ec_ctx *ec, int encode, int prev, int signalBandwidth) { int lo, hi, len, j; @@ -636,3 +641,4 @@ int compute_allocation(const CELTMode *m, int start, int end, const int *offsets RESTORE_STACK; return codedBands; } + diff --git a/libs/libopus/celt/rate.h b/libs/libopus/celt/rate.h index 515f7687c..fad5e412d 100644 --- a/libs/libopus/celt/rate.h +++ b/libs/libopus/celt/rate.h @@ -95,7 +95,7 @@ static OPUS_INLINE int pulses2bits(const CELTMode *m, int band, int LM, int puls @param pulses Number of pulses per band (returned) @return Total number of bits allocated */ -int compute_allocation(const CELTMode *m, int start, int end, const int *offsets, const int *cap, int alloc_trim, int *intensity, int *dual_stero, +int clt_compute_allocation(const CELTMode *m, int start, int end, const int *offsets, const int *cap, int alloc_trim, int *intensity, int *dual_stereo, opus_int32 total, opus_int32 *balance, int *pulses, int *ebits, int *fine_priority, int C, int LM, ec_ctx *ec, int encode, int prev, int signalBandwidth); #endif diff --git a/libs/libopus/celt/stack_alloc.h b/libs/libopus/celt/stack_alloc.h index 2b51c8d80..ae40e2a16 100644 --- a/libs/libopus/celt/stack_alloc.h +++ b/libs/libopus/celt/stack_alloc.h @@ -40,7 +40,7 @@ #endif #ifdef USE_ALLOCA -# ifdef WIN32 +# ifdef _WIN32 # include # else # ifdef HAVE_ALLOCA_H @@ -102,7 +102,7 @@ #define VARDECL(type, var) type *var -# ifdef WIN32 +# ifdef _WIN32 # define ALLOC(var, size, type) var = ((type*)_alloca(sizeof(type)*(size))) # else # define ALLOC(var, size, type) var = ((type*)alloca(sizeof(type)*(size))) diff --git a/libs/libopus/celt/static_modes_fixed_arm_ne10.h b/libs/libopus/celt/static_modes_fixed_arm_ne10.h index b8ef0cee9..762309219 100644 --- a/libs/libopus/celt/static_modes_fixed_arm_ne10.h +++ b/libs/libopus/celt/static_modes_fixed_arm_ne10.h @@ -1,7 +1,7 @@ /* The contents of this file was automatically generated by * dump_mode_arm_ne10.c with arguments: 48000 960 * It contains static 
definitions for some pre-defined modes. */ -#include +#include #ifndef NE10_FFT_PARAMS48000_960 #define NE10_FFT_PARAMS48000_960 diff --git a/libs/libopus/celt/static_modes_float_arm_ne10.h b/libs/libopus/celt/static_modes_float_arm_ne10.h index 934a82a42..66e1abb10 100644 --- a/libs/libopus/celt/static_modes_float_arm_ne10.h +++ b/libs/libopus/celt/static_modes_float_arm_ne10.h @@ -1,7 +1,7 @@ /* The contents of this file was automatically generated by * dump_mode_arm_ne10.c with arguments: 48000 960 * It contains static definitions for some pre-defined modes. */ -#include +#include #ifndef NE10_FFT_PARAMS48000_960 #define NE10_FFT_PARAMS48000_960 diff --git a/libs/libopus/celt/vq.c b/libs/libopus/celt/vq.c index d29f38fd8..8011e2254 100644 --- a/libs/libopus/celt/vq.c +++ b/libs/libopus/celt/vq.c @@ -39,6 +39,10 @@ #include "rate.h" #include "pitch.h" +#if defined(MIPSr1_ASM) +#include "mips/vq_mipsr1.h" +#endif + #ifndef OVERRIDE_vq_exp_rotation1 static void exp_rotation1(celt_norm *X, int len, int stride, opus_val16 c, opus_val16 s) { @@ -67,7 +71,7 @@ static void exp_rotation1(celt_norm *X, int len, int stride, opus_val16 c, opus_ } #endif /* OVERRIDE_vq_exp_rotation1 */ -static void exp_rotation(celt_norm *X, int len, int dir, int stride, int K, int spread) +void exp_rotation(celt_norm *X, int len, int dir, int stride, int K, int spread) { static const int SPREAD_FACTOR[3]={15,10,5}; int i; @@ -158,42 +162,27 @@ static unsigned extract_collapse_mask(int *iy, int N, int B) return collapse_mask; } -unsigned alg_quant(celt_norm *X, int N, int K, int spread, int B, ec_enc *enc -#ifdef RESYNTH - , opus_val16 gain -#endif - ) +opus_val16 op_pvq_search_c(celt_norm *X, int *iy, int K, int N, int arch) { VARDECL(celt_norm, y); - VARDECL(int, iy); - VARDECL(opus_val16, signx); + VARDECL(int, signx); int i, j; - opus_val16 s; int pulsesLeft; opus_val32 sum; opus_val32 xy; opus_val16 yy; - unsigned collapse_mask; SAVE_STACK; - celt_assert2(K>0, "alg_quant() needs at least one pulse"); - celt_assert2(N>1, "alg_quant() needs at least two dimensions"); - + (void)arch; ALLOC(y, N, celt_norm); - ALLOC(iy, N, int); - ALLOC(signx, N, opus_val16); - - exp_rotation(X, N, 1, B, K, spread); + ALLOC(signx, N, int); /* Get rid of the sign */ sum = 0; j=0; do { - if (X[j]>0) - signx[j]=1; - else { - signx[j]=-1; - X[j]=-X[j]; - } + signx[j] = X[j]<0; + /* OPT: Make sure the compiler doesn't use a branch on ABS16(). */ + X[j] = ABS16(X[j]); iy[j] = 0; y[j] = 0; } while (++j=1, "Allocated too many pulses in the quick pass"); + celt_sig_assert(pulsesLeft>=0); /* This should never happen, but just in case it does (e.g. on silence) we fill the first bin with pulses. */ #ifdef FIXED_POINT_DEBUG - celt_assert2(pulsesLeft<=N+3, "Not enough pulses in the quick pass"); + celt_sig_assert(pulsesLeft<=N+3); #endif if (pulsesLeft > N+3) { @@ -256,12 +250,12 @@ unsigned alg_quant(celt_norm *X, int N, int K, int spread, int B, ec_enc *enc pulsesLeft=0; } - s = 1; for (i=0;i= best_num/best_den, but that way we can do it without any division */ - /* OPT: Make sure to use conditional moves here */ - if (MULT16_16(best_den, Rxy) > MULT16_16(Ryy, best_num)) + /* OPT: It's not clear whether a cmov is faster than a branch here + since the condition is more often false than true and using + a cmov introduces data dependencies across iterations. The optimal + choice may be architecture-dependent. 
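
For context, the loop above places the remaining PVQ pulses greedily: each pulse goes to the position that maximises (xy + X[j])^2 / (yy + y[j]), and the best_den/best_num cross-multiplication is that comparison with the division removed. A scalar float restatement follows; the helper name is hypothetical, y2[] stands for the doubled pulse vector and yy is assumed to already include the +1 energy of the pulse being added, as in the code.

    /* Choose the position of the next pulse without dividing. */
    static int next_pulse_position(const float *X, const float *y2,
                                   float xy, float yy, int N)
    {
        int j, best = 0;
        float best_num = -1e-15f, best_den = 0;
        for (j = 0; j < N; j++) {
            float Rxy = xy + X[j];
            float Ryy = yy + y2[j];
            Rxy *= Rxy;
            /* Same ordering as Rxy/Ryy > best_num/best_den. */
            if (best_den * Rxy > Ryy * best_num) {
                best_den = Ryy;
                best_num = Rxy;
                best = j;
            }
        }
        return best;
    }
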
*/ + if (opus_unlikely(MULT16_16(best_den, Rxy) > MULT16_16(Ryy, best_num))) { best_den = Ryy; best_num = Rxy; @@ -301,23 +311,47 @@ unsigned alg_quant(celt_norm *X, int N, int K, int spread, int B, ec_enc *enc /* Only now that we've made the final choice, update y/iy */ /* Multiplying y[j] by 2 so we don't have to do it everywhere else */ - y[best_id] += 2*s; + y[best_id] += 2; iy[best_id]++; } /* Put the original sign back */ j=0; do { - X[j] = MULT16_16(signx[j],X[j]); - if (signx[j] < 0) - iy[j] = -iy[j]; + /*iy[j] = signx[j] ? -iy[j] : iy[j];*/ + /* OPT: The is more likely to be compiled without a branch than the code above + but has the same performance otherwise. */ + iy[j] = (iy[j]^-signx[j]) + signx[j]; } while (++j0, "alg_quant() needs at least one pulse"); + celt_assert2(N>1, "alg_quant() needs at least two dimensions"); + + /* Covers vectorization by up to 4. */ + ALLOC(iy, N+3, int); + + exp_rotation(X, N, 1, B, K, spread); + + yy = op_pvq_search(X, iy, K, N, arch); + encode_pulses(iy, N, K, enc); -#ifdef RESYNTH - normalise_residual(iy, X, N, yy, gain); - exp_rotation(X, N, -1, B, K, spread); -#endif + if (resynth) + { + normalise_residual(iy, X, N, yy, gain); + exp_rotation(X, N, -1, B, K, spread); + } collapse_mask = extract_collapse_mask(iy, N, B); RESTORE_STACK; @@ -401,7 +435,7 @@ int stereo_itheta(const celt_norm *X, const celt_norm *Y, int stereo, int N, int /* 0.63662 = 2/pi */ itheta = MULT16_16_Q15(QCONST16(0.63662f,15),celt_atan2p(side, mid)); #else - itheta = (int)floor(.5f+16384*0.63662f*atan2(side,mid)); + itheta = (int)floor(.5f+16384*0.63662f*fast_atan2f(side,mid)); #endif return itheta; diff --git a/libs/libopus/celt/vq.h b/libs/libopus/celt/vq.h index 5cfcbe50e..45ec55918 100644 --- a/libs/libopus/celt/vq.h +++ b/libs/libopus/celt/vq.h @@ -37,10 +37,18 @@ #include "entdec.h" #include "modes.h" -#if defined(MIPSr1_ASM) -#include "mips/vq_mipsr1.h" +#if (defined(OPUS_X86_MAY_HAVE_SSE2) && !defined(FIXED_POINT)) +#include "x86/vq_sse.h" #endif +void exp_rotation(celt_norm *X, int len, int dir, int stride, int K, int spread); + +opus_val16 op_pvq_search_c(celt_norm *X, int *iy, int K, int N, int arch); + +#if !defined(OVERRIDE_OP_PVQ_SEARCH) +#define op_pvq_search(x, iy, K, N, arch) \ + (op_pvq_search_c(x, iy, K, N, arch)) +#endif /** Algebraic pulse-vector quantiser. The signal x is replaced by the sum of * the pitch and a combination of pulses such that its norm is still equal @@ -51,12 +59,8 @@ * @param enc Entropy encoder state * @ret A mask indicating which blocks in the band received pulses */ -unsigned alg_quant(celt_norm *X, int N, int K, int spread, int B, - ec_enc *enc -#ifdef RESYNTH - , opus_val16 gain -#endif - ); +unsigned alg_quant(celt_norm *X, int N, int K, int spread, int B, ec_enc *enc, + opus_val16 gain, int resynth, int arch); /** Algebraic pulse decoder * @param X Decoded normalised spectrum (returned) diff --git a/libs/libopus/celt/x86/celt_lpc_sse.c b/libs/libopus/celt/x86/celt_lpc_sse.c deleted file mode 100644 index 67e5592ac..000000000 --- a/libs/libopus/celt/x86/celt_lpc_sse.c +++ /dev/null @@ -1,132 +0,0 @@ -/* Copyright (c) 2014, Cisco Systems, INC - Written by XiangMingZhu WeiZhou MinPeng YanWang - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - - Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. 
- - - Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER - OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, - EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR - PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF - LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING - NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ - -#ifdef HAVE_CONFIG_H -#include "config.h" -#endif - -#include -#include -#include -#include "celt_lpc.h" -#include "stack_alloc.h" -#include "mathops.h" -#include "pitch.h" -#include "x86cpu.h" - -#if defined(FIXED_POINT) - -void celt_fir_sse4_1(const opus_val16 *_x, - const opus_val16 *num, - opus_val16 *_y, - int N, - int ord, - opus_val16 *mem, - int arch) -{ - int i,j; - VARDECL(opus_val16, rnum); - VARDECL(opus_val16, x); - - __m128i vecNoA; - opus_int32 noA ; - SAVE_STACK; - - ALLOC(rnum, ord, opus_val16); - ALLOC(x, N+ord, opus_val16); - for(i=0;i> 1; - vecNoA = _mm_set_epi32(noA, noA, noA, noA); - - for (i=0;i +#include +#include +#include "celt_lpc.h" +#include "stack_alloc.h" +#include "mathops.h" +#include "pitch.h" +#include "x86cpu.h" + +#if defined(FIXED_POINT) + +void celt_fir_sse4_1(const opus_val16 *x, + const opus_val16 *num, + opus_val16 *y, + int N, + int ord, + int arch) +{ + int i,j; + VARDECL(opus_val16, rnum); + + __m128i vecNoA; + opus_int32 noA ; + SAVE_STACK; + + ALLOC(rnum, ord, opus_val16); + for(i=0;i> 1; + vecNoA = _mm_set_epi32(noA, noA, noA, noA); + + for (i=0;i= 3); sum0 = _mm_setzero_si128(); @@ -177,19 +185,56 @@ void xcorr_kernel_sse4_1(const opus_val16 * x, const opus_val16 * y, opus_val32 vecSum = _mm_add_epi32(vecSum, sum2); } - for (;j +#include +#include "celt_lpc.h" +#include "stack_alloc.h" +#include "mathops.h" +#include "vq.h" +#include "x86cpu.h" + + +#ifndef FIXED_POINT + +opus_val16 op_pvq_search_sse2(celt_norm *_X, int *iy, int K, int N, int arch) +{ + int i, j; + int pulsesLeft; + float xy, yy; + VARDECL(celt_norm, y); + VARDECL(celt_norm, X); + VARDECL(float, signy); + __m128 signmask; + __m128 sums; + __m128i fours; + SAVE_STACK; + + (void)arch; + /* All bits set to zero, except for the sign bit. */ + signmask = _mm_set_ps1(-0.f); + fours = _mm_set_epi32(4, 4, 4, 4); + ALLOC(y, N+3, celt_norm); + ALLOC(X, N+3, celt_norm); + ALLOC(signy, N+3, float); + + OPUS_COPY(X, _X, N); + X[N] = X[N+1] = X[N+2] = 0; + sums = _mm_setzero_ps(); + for (j=0;j (N>>1)) + { + __m128i pulses_sum; + __m128 yy4, xy4; + __m128 rcp4; + opus_val32 sum = _mm_cvtss_f32(sums); + /* If X is too small, just replace it with a pulse at 0 */ + /* Prevents infinities and NaNs from causing too many pulses + to be allocated. 64 is an approximation of infinity here. */ + if (!(sum > EPSILON && sum < 64)) + { + X[0] = QCONST16(1.f,14); + j=1; do + X[j]=0; + while (++j=0); + + /* This should never happen, but just in case it does (e.g. on silence) + we fill the first bin with pulses. 
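
Both the C and the SSE2 searches start from the same cheap projection before the greedy loop: scale |X| so its L1 norm is roughly K, take the floor, and only search for the pulses that remain. A scalar sketch under those assumptions; the helper name is hypothetical and the exact reciprocal bias used by the vectorised code is not reproduced.

    /* Pre-search projection: spend most of the K pulses in one rounding pass.
       X[] must already hold absolute values. Returns the pulses used. */
    static int pvq_project_sketch(const float *X, int *iy, int N, int K)
    {
        int j, used = 0;
        float sum = 0, rcp;
        for (j = 0; j < N; j++) {
            iy[j] = 0;
            sum += X[j];
        }
        if (!(sum > 1e-15f))
            return 0;           /* degenerate input: let the search place all K */
        rcp = (float)K / sum;   /* the SIMD code uses an approximate reciprocal */
        for (j = 0; j < N; j++) {
            iy[j] = (int)(rcp * X[j]);   /* floor, since everything is >= 0 */
            used += iy[j];
        }
        return used;            /* pulsesLeft = K - used, never negative here */
    }
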
*/ + if (pulsesLeft > N+3) + { + opus_val16 tmp = (opus_val16)pulsesLeft; + yy = MAC16_16(yy, tmp, tmp); + yy = MAC16_16(yy, tmp, y[0]); + iy[0] += pulsesLeft; + pulsesLeft=0; + } + + for (i=0;ichar*: Opus packet diff --git a/libs/libopus/include/opus_custom.h b/libs/libopus/include/opus_custom.h index 41f36bf2f..2227be011 100644 --- a/libs/libopus/include/opus_custom.h +++ b/libs/libopus/include/opus_custom.h @@ -178,7 +178,7 @@ OPUS_CUSTOM_EXPORT OPUS_WARN_UNUSED_RESULT OpusCustomEncoder *opus_custom_encode ) OPUS_ARG_NONNULL(1); -/** Destroys a an encoder state. +/** Destroys an encoder state. * @param[in] st OpusCustomEncoder*: State to be freed. */ OPUS_CUSTOM_EXPORT void opus_custom_encoder_destroy(OpusCustomEncoder *st); @@ -286,7 +286,7 @@ OPUS_CUSTOM_EXPORT OPUS_WARN_UNUSED_RESULT OpusCustomDecoder *opus_custom_decode int *error ) OPUS_ARG_NONNULL(1); -/** Destroys a an decoder state. +/** Destroys a decoder state. * @param[in] st OpusCustomDecoder*: State to be freed. */ OPUS_CUSTOM_EXPORT void opus_custom_decoder_destroy(OpusCustomDecoder *st); diff --git a/libs/libopus/include/opus_defines.h b/libs/libopus/include/opus_defines.h index 315412dd1..ceee5b840 100644 --- a/libs/libopus/include/opus_defines.h +++ b/libs/libopus/include/opus_defines.h @@ -64,7 +64,7 @@ extern "C" { /**Export control for opus functions */ #ifndef OPUS_EXPORT -# if defined(WIN32) +# if defined(_WIN32) # if defined(OPUS_BUILD) && defined(DLL_EXPORT) # define OPUS_EXPORT __declspec(dllexport) # else @@ -165,8 +165,13 @@ extern "C" { #define OPUS_GET_EXPERT_FRAME_DURATION_REQUEST 4041 #define OPUS_SET_PREDICTION_DISABLED_REQUEST 4042 #define OPUS_GET_PREDICTION_DISABLED_REQUEST 4043 - /* Don't use 4045, it's already taken by OPUS_GET_GAIN_REQUEST */ +#define OPUS_SET_PHASE_INVERSION_DISABLED_REQUEST 4046 +#define OPUS_GET_PHASE_INVERSION_DISABLED_REQUEST 4047 +#define OPUS_GET_IN_DTX_REQUEST 4049 + +/** Defines for the presence of extended APIs. */ +#define OPUS_HAVE_OPUS_PROJECTION_H /* Macros to trigger compilation errors when the wrong types are provided to a CTL */ #define __opus_check_int(x) (((void)((x) == (opus_int32)0)), (opus_int32)(x)) @@ -208,6 +213,9 @@ extern "C" { #define OPUS_FRAMESIZE_20_MS 5004 /**< Use 20 ms frames */ #define OPUS_FRAMESIZE_40_MS 5005 /**< Use 40 ms frames */ #define OPUS_FRAMESIZE_60_MS 5006 /**< Use 60 ms frames */ +#define OPUS_FRAMESIZE_80_MS 5007 /**< Use 80 ms frames */ +#define OPUS_FRAMESIZE_100_MS 5008 /**< Use 100 ms frames */ +#define OPUS_FRAMESIZE_120_MS 5009 /**< Use 120 ms frames */ /**@}*/ @@ -566,7 +574,9 @@ extern "C" { *
<dt>OPUS_FRAMESIZE_20_MS</dt><dd>Use 20 ms frames.</dd>
 * <dt>OPUS_FRAMESIZE_40_MS</dt><dd>Use 40 ms frames.</dd>
 * <dt>OPUS_FRAMESIZE_60_MS</dt><dd>Use 60 ms frames.</dd>
- * <dt>OPUS_FRAMESIZE_VARIABLE</dt><dd>Optimize the frame size dynamically.</dd>
+ * <dt>OPUS_FRAMESIZE_80_MS</dt><dd>Use 80 ms frames.</dd>
+ * <dt>OPUS_FRAMESIZE_100_MS</dt><dd>Use 100 ms frames.</dd>
+ * <dt>OPUS_FRAMESIZE_120_MS</dt><dd>Use 120 ms frames.</dd>
* * @hideinitializer */ #define OPUS_SET_EXPERT_FRAME_DURATION(x) OPUS_SET_EXPERT_FRAME_DURATION_REQUEST, __opus_check_int(x) @@ -581,7 +591,9 @@ extern "C" { *
<dt>OPUS_FRAMESIZE_20_MS</dt><dd>Use 20 ms frames.</dd>
 * <dt>OPUS_FRAMESIZE_40_MS</dt><dd>Use 40 ms frames.</dd>
 * <dt>OPUS_FRAMESIZE_60_MS</dt><dd>Use 60 ms frames.</dd>
- * <dt>OPUS_FRAMESIZE_VARIABLE</dt><dd>Optimize the frame size dynamically.</dd>
+ * <dt>OPUS_FRAMESIZE_80_MS</dt><dd>Use 80 ms frames.</dd>
+ * <dt>OPUS_FRAMESIZE_100_MS</dt><dd>Use 100 ms frames.</dd>
+ * <dt>OPUS_FRAMESIZE_120_MS</dt><dd>Use 120 ms frames.</dd>
* * @hideinitializer */ #define OPUS_GET_EXPERT_FRAME_DURATION(x) OPUS_GET_EXPERT_FRAME_DURATION_REQUEST, __opus_check_int_ptr(x) @@ -681,6 +693,40 @@ extern "C" { */ #define OPUS_GET_SAMPLE_RATE(x) OPUS_GET_SAMPLE_RATE_REQUEST, __opus_check_int_ptr(x) +/** If set to 1, disables the use of phase inversion for intensity stereo, + * improving the quality of mono downmixes, but slightly reducing normal + * stereo quality. Disabling phase inversion in the decoder does not comply + * with RFC 6716, although it does not cause any interoperability issue and + * is expected to become part of the Opus standard once RFC 6716 is updated + * by draft-ietf-codec-opus-update. + * @see OPUS_GET_PHASE_INVERSION_DISABLED + * @param[in] x opus_int32: Allowed values: + *
<dl>
+ * <dt>0</dt><dd>Enable phase inversion (default).</dd>
+ * <dt>1</dt><dd>Disable phase inversion.</dd>
+ * </dl>
+ * @hideinitializer */ +#define OPUS_SET_PHASE_INVERSION_DISABLED(x) OPUS_SET_PHASE_INVERSION_DISABLED_REQUEST, __opus_check_int(x) +/** Gets the encoder's configured phase inversion status. + * @see OPUS_SET_PHASE_INVERSION_DISABLED + * @param[out] x opus_int32 *: Returns one of the following values: + *
<dl>
+ * <dt>0</dt><dd>Stereo phase inversion enabled (default).</dd>
+ * <dt>1</dt><dd>Stereo phase inversion disabled.</dd>
+ * </dl>
+ * @hideinitializer */ +#define OPUS_GET_PHASE_INVERSION_DISABLED(x) OPUS_GET_PHASE_INVERSION_DISABLED_REQUEST, __opus_check_int_ptr(x) +/** Gets the DTX state of the encoder. + * Returns whether the last encoded frame was either a comfort noise update + * during DTX or not encoded because of DTX. + * @param[out] x opus_int32 *: Returns one of the following values: + *
<dl>
+ * <dt>0</dt><dd>The encoder is not in DTX.</dd>
+ * <dt>1</dt><dd>The encoder is in DTX.</dd>
+ * </dl>
+ * They should not be used directly by applications.c + * In general, SETs should be even and GETs should be odd.*/ +/**@{*/ +#define OPUS_PROJECTION_GET_DEMIXING_MATRIX_GAIN_REQUEST 6001 +#define OPUS_PROJECTION_GET_DEMIXING_MATRIX_SIZE_REQUEST 6003 +#define OPUS_PROJECTION_GET_DEMIXING_MATRIX_REQUEST 6005 +/**@}*/ + + +/** @endcond */ + +/** @defgroup opus_projection_ctls Projection specific encoder and decoder CTLs + * + * These are convenience macros that are specific to the + * opus_projection_encoder_ctl() and opus_projection_decoder_ctl() + * interface. + * The CTLs from @ref opus_genericctls, @ref opus_encoderctls, + * @ref opus_decoderctls, and @ref opus_multistream_ctls may be applied to a + * projection encoder or decoder as well. + */ +/**@{*/ + +/** Gets the gain (in dB. S7.8-format) of the demixing matrix from the encoder. + * @param[out] x opus_int32 *: Returns the gain (in dB. S7.8-format) + * of the demixing matrix. + * @hideinitializer + */ +#define OPUS_PROJECTION_GET_DEMIXING_MATRIX_GAIN(x) OPUS_PROJECTION_GET_DEMIXING_MATRIX_GAIN_REQUEST, __opus_check_int_ptr(x) + + +/** Gets the size in bytes of the demixing matrix from the encoder. + * @param[out] x opus_int32 *: Returns the size in bytes of the + * demixing matrix. + * @hideinitializer + */ +#define OPUS_PROJECTION_GET_DEMIXING_MATRIX_SIZE(x) OPUS_PROJECTION_GET_DEMIXING_MATRIX_SIZE_REQUEST, __opus_check_int_ptr(x) + + +/** Copies the demixing matrix to the supplied pointer location. + * @param[out] x unsigned char *: Returns the demixing matrix to the + * supplied pointer location. + * @param y opus_int32: The size in bytes of the reserved memory at the + * pointer location. + * @hideinitializer + */ +#define OPUS_PROJECTION_GET_DEMIXING_MATRIX(x,y) OPUS_PROJECTION_GET_DEMIXING_MATRIX_REQUEST, x, __opus_check_int(y) + + +/**@}*/ + +/** Opus projection encoder state. + * This contains the complete state of a projection Opus encoder. + * It is position independent and can be freely copied. + * @see opus_projection_ambisonics_encoder_create + */ +typedef struct OpusProjectionEncoder OpusProjectionEncoder; + + +/** Opus projection decoder state. + * This contains the complete state of a projection Opus decoder. + * It is position independent and can be freely copied. + * @see opus_projection_decoder_create + * @see opus_projection_decoder_init + */ +typedef struct OpusProjectionDecoder OpusProjectionDecoder; + + +/**\name Projection encoder functions */ +/**@{*/ + +/** Gets the size of an OpusProjectionEncoder structure. + * @param channels int: The total number of input channels to encode. + * This must be no more than 255. + * @param mapping_family int: The mapping family to use for selecting + * the appropriate projection. + * @returns The size in bytes on success, or a negative error code + * (see @ref opus_errorcodes) on error. + */ +OPUS_EXPORT OPUS_WARN_UNUSED_RESULT opus_int32 opus_projection_ambisonics_encoder_get_size( + int channels, + int mapping_family +); + + +/** Allocates and initializes a projection encoder state. + * Call opus_projection_encoder_destroy() to release + * this object when finished. + * @param Fs opus_int32: Sampling rate of the input signal (in Hz). + * This must be one of 8000, 12000, 16000, + * 24000, or 48000. + * @param channels int: Number of channels in the input signal. + * This must be at most 255. + * It may be greater than the number of + * coded channels (streams + + * coupled_streams). 
+ * @param mapping_family int: The mapping family to use for selecting + * the appropriate projection. + * @param[out] streams int *: The total number of streams that will + * be encoded from the input. + * @param[out] coupled_streams int *: Number of coupled (2 channel) + * streams that will be encoded from the input. + * @param application int: The target encoder application. + * This must be one of the following: + *
<dl>
+ * <dt>#OPUS_APPLICATION_VOIP</dt>
+ * <dd>Process signal for improved speech intelligibility.</dd>
+ * <dt>#OPUS_APPLICATION_AUDIO</dt>
+ * <dd>Favor faithfulness to the original input.</dd>
+ * <dt>#OPUS_APPLICATION_RESTRICTED_LOWDELAY</dt>
+ * <dd>Configure the minimum possible coding delay by disabling certain modes
+ * of operation.</dd>
+ * </dl>
+ * @param[out] error int *: Returns #OPUS_OK on success, or an error + * code (see @ref opus_errorcodes) on + * failure. + */ +OPUS_EXPORT OPUS_WARN_UNUSED_RESULT OpusProjectionEncoder *opus_projection_ambisonics_encoder_create( + opus_int32 Fs, + int channels, + int mapping_family, + int *streams, + int *coupled_streams, + int application, + int *error +) OPUS_ARG_NONNULL(4) OPUS_ARG_NONNULL(5); + + +/** Initialize a previously allocated projection encoder state. + * The memory pointed to by \a st must be at least the size returned by + * opus_projection_ambisonics_encoder_get_size(). + * This is intended for applications which use their own allocator instead of + * malloc. + * To reset a previously initialized state, use the #OPUS_RESET_STATE CTL. + * @see opus_projection_ambisonics_encoder_create + * @see opus_projection_ambisonics_encoder_get_size + * @param st OpusProjectionEncoder*: Projection encoder state to initialize. + * @param Fs opus_int32: Sampling rate of the input signal (in Hz). + * This must be one of 8000, 12000, 16000, + * 24000, or 48000. + * @param channels int: Number of channels in the input signal. + * This must be at most 255. + * It may be greater than the number of + * coded channels (streams + + * coupled_streams). + * @param streams int: The total number of streams to encode from the + * input. + * This must be no more than the number of channels. + * @param coupled_streams int: Number of coupled (2 channel) streams + * to encode. + * This must be no larger than the total + * number of streams. + * Additionally, The total number of + * encoded channels (streams + + * coupled_streams) must be no + * more than the number of input channels. + * @param application int: The target encoder application. + * This must be one of the following: + *
<dl>
+ * <dt>#OPUS_APPLICATION_VOIP</dt>
+ * <dd>Process signal for improved speech intelligibility.</dd>
+ * <dt>#OPUS_APPLICATION_AUDIO</dt>
+ * <dd>Favor faithfulness to the original input.</dd>
+ * <dt>#OPUS_APPLICATION_RESTRICTED_LOWDELAY</dt>
+ * <dd>Configure the minimum possible coding delay by disabling certain modes
+ * of operation.</dd>
+ * </dl>
+ * @returns #OPUS_OK on success, or an error code (see @ref opus_errorcodes) + * on failure. + */ +OPUS_EXPORT int opus_projection_ambisonics_encoder_init( + OpusProjectionEncoder *st, + opus_int32 Fs, + int channels, + int mapping_family, + int *streams, + int *coupled_streams, + int application +) OPUS_ARG_NONNULL(1) OPUS_ARG_NONNULL(5) OPUS_ARG_NONNULL(6); + + +/** Encodes a projection Opus frame. + * @param st OpusProjectionEncoder*: Projection encoder state. + * @param[in] pcm const opus_int16*: The input signal as interleaved + * samples. + * This must contain + * frame_size*channels + * samples. + * @param frame_size int: Number of samples per channel in the input + * signal. + * This must be an Opus frame size for the + * encoder's sampling rate. + * For example, at 48 kHz the permitted values + * are 120, 240, 480, 960, 1920, and 2880. + * Passing in a duration of less than 10 ms + * (480 samples at 48 kHz) will prevent the + * encoder from using the LPC or hybrid modes. + * @param[out] data unsigned char*: Output payload. + * This must contain storage for at + * least \a max_data_bytes. + * @param [in] max_data_bytes opus_int32: Size of the allocated + * memory for the output + * payload. This may be + * used to impose an upper limit on + * the instant bitrate, but should + * not be used as the only bitrate + * control. Use #OPUS_SET_BITRATE to + * control the bitrate. + * @returns The length of the encoded packet (in bytes) on success or a + * negative error code (see @ref opus_errorcodes) on failure. + */ +OPUS_EXPORT OPUS_WARN_UNUSED_RESULT int opus_projection_encode( + OpusProjectionEncoder *st, + const opus_int16 *pcm, + int frame_size, + unsigned char *data, + opus_int32 max_data_bytes +) OPUS_ARG_NONNULL(1) OPUS_ARG_NONNULL(2) OPUS_ARG_NONNULL(4); + + +/** Encodes a projection Opus frame from floating point input. + * @param st OpusProjectionEncoder*: Projection encoder state. + * @param[in] pcm const float*: The input signal as interleaved + * samples with a normal range of + * +/-1.0. + * Samples with a range beyond +/-1.0 + * are supported but will be clipped by + * decoders using the integer API and + * should only be used if it is known + * that the far end supports extended + * dynamic range. + * This must contain + * frame_size*channels + * samples. + * @param frame_size int: Number of samples per channel in the input + * signal. + * This must be an Opus frame size for the + * encoder's sampling rate. + * For example, at 48 kHz the permitted values + * are 120, 240, 480, 960, 1920, and 2880. + * Passing in a duration of less than 10 ms + * (480 samples at 48 kHz) will prevent the + * encoder from using the LPC or hybrid modes. + * @param[out] data unsigned char*: Output payload. + * This must contain storage for at + * least \a max_data_bytes. + * @param [in] max_data_bytes opus_int32: Size of the allocated + * memory for the output + * payload. This may be + * used to impose an upper limit on + * the instant bitrate, but should + * not be used as the only bitrate + * control. Use #OPUS_SET_BITRATE to + * control the bitrate. + * @returns The length of the encoded packet (in bytes) on success or a + * negative error code (see @ref opus_errorcodes) on failure. 
+ */ +OPUS_EXPORT OPUS_WARN_UNUSED_RESULT int opus_projection_encode_float( + OpusProjectionEncoder *st, + const float *pcm, + int frame_size, + unsigned char *data, + opus_int32 max_data_bytes +) OPUS_ARG_NONNULL(1) OPUS_ARG_NONNULL(2) OPUS_ARG_NONNULL(4); + + +/** Frees an OpusProjectionEncoder allocated by + * opus_projection_ambisonics_encoder_create(). + * @param st OpusProjectionEncoder*: Projection encoder state to be freed. + */ +OPUS_EXPORT void opus_projection_encoder_destroy(OpusProjectionEncoder *st); + + +/** Perform a CTL function on a projection Opus encoder. + * + * Generally the request and subsequent arguments are generated by a + * convenience macro. + * @param st OpusProjectionEncoder*: Projection encoder state. + * @param request This and all remaining parameters should be replaced by one + * of the convenience macros in @ref opus_genericctls, + * @ref opus_encoderctls, @ref opus_multistream_ctls, or + * @ref opus_projection_ctls + * @see opus_genericctls + * @see opus_encoderctls + * @see opus_multistream_ctls + * @see opus_projection_ctls + */ +OPUS_EXPORT int opus_projection_encoder_ctl(OpusProjectionEncoder *st, int request, ...) OPUS_ARG_NONNULL(1); + + +/**@}*/ + +/**\name Projection decoder functions */ +/**@{*/ + +/** Gets the size of an OpusProjectionDecoder structure. + * @param channels int: The total number of output channels. + * This must be no more than 255. + * @param streams int: The total number of streams coded in the + * input. + * This must be no more than 255. + * @param coupled_streams int: Number streams to decode as coupled + * (2 channel) streams. + * This must be no larger than the total + * number of streams. + * Additionally, The total number of + * coded channels (streams + + * coupled_streams) must be no + * more than 255. + * @returns The size in bytes on success, or a negative error code + * (see @ref opus_errorcodes) on error. + */ +OPUS_EXPORT OPUS_WARN_UNUSED_RESULT opus_int32 opus_projection_decoder_get_size( + int channels, + int streams, + int coupled_streams +); + + +/** Allocates and initializes a projection decoder state. + * Call opus_projection_decoder_destroy() to release + * this object when finished. + * @param Fs opus_int32: Sampling rate to decode at (in Hz). + * This must be one of 8000, 12000, 16000, + * 24000, or 48000. + * @param channels int: Number of channels to output. + * This must be at most 255. + * It may be different from the number of coded + * channels (streams + + * coupled_streams). + * @param streams int: The total number of streams coded in the + * input. + * This must be no more than 255. + * @param coupled_streams int: Number of streams to decode as coupled + * (2 channel) streams. + * This must be no larger than the total + * number of streams. + * Additionally, The total number of + * coded channels (streams + + * coupled_streams) must be no + * more than 255. + * @param[in] demixing_matrix const unsigned char[demixing_matrix_size]: Demixing matrix + * that mapping from coded channels to output channels, + * as described in @ref opus_projection and + * @ref opus_projection_ctls. + * @param demixing_matrix_size opus_int32: The size in bytes of the + * demixing matrix, as + * described in @ref + * opus_projection_ctls. + * @param[out] error int *: Returns #OPUS_OK on success, or an error + * code (see @ref opus_errorcodes) on + * failure. 
+ */ +OPUS_EXPORT OPUS_WARN_UNUSED_RESULT OpusProjectionDecoder *opus_projection_decoder_create( + opus_int32 Fs, + int channels, + int streams, + int coupled_streams, + unsigned char *demixing_matrix, + opus_int32 demixing_matrix_size, + int *error +) OPUS_ARG_NONNULL(5); + + +/** Intialize a previously allocated projection decoder state object. + * The memory pointed to by \a st must be at least the size returned by + * opus_projection_decoder_get_size(). + * This is intended for applications which use their own allocator instead of + * malloc. + * To reset a previously initialized state, use the #OPUS_RESET_STATE CTL. + * @see opus_projection_decoder_create + * @see opus_projection_deocder_get_size + * @param st OpusProjectionDecoder*: Projection encoder state to initialize. + * @param Fs opus_int32: Sampling rate to decode at (in Hz). + * This must be one of 8000, 12000, 16000, + * 24000, or 48000. + * @param channels int: Number of channels to output. + * This must be at most 255. + * It may be different from the number of coded + * channels (streams + + * coupled_streams). + * @param streams int: The total number of streams coded in the + * input. + * This must be no more than 255. + * @param coupled_streams int: Number of streams to decode as coupled + * (2 channel) streams. + * This must be no larger than the total + * number of streams. + * Additionally, The total number of + * coded channels (streams + + * coupled_streams) must be no + * more than 255. + * @param[in] demixing_matrix const unsigned char[demixing_matrix_size]: Demixing matrix + * that mapping from coded channels to output channels, + * as described in @ref opus_projection and + * @ref opus_projection_ctls. + * @param demixing_matrix_size opus_int32: The size in bytes of the + * demixing matrix, as + * described in @ref + * opus_projection_ctls. + * @returns #OPUS_OK on success, or an error code (see @ref opus_errorcodes) + * on failure. + */ +OPUS_EXPORT int opus_projection_decoder_init( + OpusProjectionDecoder *st, + opus_int32 Fs, + int channels, + int streams, + int coupled_streams, + unsigned char *demixing_matrix, + opus_int32 demixing_matrix_size +) OPUS_ARG_NONNULL(1) OPUS_ARG_NONNULL(6); + + +/** Decode a projection Opus packet. + * @param st OpusProjectionDecoder*: Projection decoder state. + * @param[in] data const unsigned char*: Input payload. + * Use a NULL + * pointer to indicate packet + * loss. + * @param len opus_int32: Number of bytes in payload. + * @param[out] pcm opus_int16*: Output signal, with interleaved + * samples. + * This must contain room for + * frame_size*channels + * samples. + * @param frame_size int: The number of samples per channel of + * available space in \a pcm. + * If this is less than the maximum packet duration + * (120 ms; 5760 for 48kHz), this function will not be capable + * of decoding some packets. In the case of PLC (data==NULL) + * or FEC (decode_fec=1), then frame_size needs to be exactly + * the duration of audio that is missing, otherwise the + * decoder will not be in the optimal state to decode the + * next incoming packet. For the PLC and FEC cases, frame_size + * must be a multiple of 2.5 ms. + * @param decode_fec int: Flag (0 or 1) to request that any in-band + * forward error correction data be decoded. + * If no such data is available, the frame is + * decoded as if it were lost. + * @returns Number of samples decoded on success or a negative error code + * (see @ref opus_errorcodes) on failure. 
+ */ +OPUS_EXPORT OPUS_WARN_UNUSED_RESULT int opus_projection_decode( + OpusProjectionDecoder *st, + const unsigned char *data, + opus_int32 len, + opus_int16 *pcm, + int frame_size, + int decode_fec +) OPUS_ARG_NONNULL(1) OPUS_ARG_NONNULL(4); + + +/** Decode a projection Opus packet with floating point output. + * @param st OpusProjectionDecoder*: Projection decoder state. + * @param[in] data const unsigned char*: Input payload. + * Use a NULL + * pointer to indicate packet + * loss. + * @param len opus_int32: Number of bytes in payload. + * @param[out] pcm opus_int16*: Output signal, with interleaved + * samples. + * This must contain room for + * frame_size*channels + * samples. + * @param frame_size int: The number of samples per channel of + * available space in \a pcm. + * If this is less than the maximum packet duration + * (120 ms; 5760 for 48kHz), this function will not be capable + * of decoding some packets. In the case of PLC (data==NULL) + * or FEC (decode_fec=1), then frame_size needs to be exactly + * the duration of audio that is missing, otherwise the + * decoder will not be in the optimal state to decode the + * next incoming packet. For the PLC and FEC cases, frame_size + * must be a multiple of 2.5 ms. + * @param decode_fec int: Flag (0 or 1) to request that any in-band + * forward error correction data be decoded. + * If no such data is available, the frame is + * decoded as if it were lost. + * @returns Number of samples decoded on success or a negative error code + * (see @ref opus_errorcodes) on failure. + */ +OPUS_EXPORT OPUS_WARN_UNUSED_RESULT int opus_projection_decode_float( + OpusProjectionDecoder *st, + const unsigned char *data, + opus_int32 len, + float *pcm, + int frame_size, + int decode_fec +) OPUS_ARG_NONNULL(1) OPUS_ARG_NONNULL(4); + + +/** Perform a CTL function on a projection Opus decoder. + * + * Generally the request and subsequent arguments are generated by a + * convenience macro. + * @param st OpusProjectionDecoder*: Projection decoder state. + * @param request This and all remaining parameters should be replaced by one + * of the convenience macros in @ref opus_genericctls, + * @ref opus_decoderctls, @ref opus_multistream_ctls, or + * @ref opus_projection_ctls. + * @see opus_genericctls + * @see opus_decoderctls + * @see opus_multistream_ctls + * @see opus_projection_ctls + */ +OPUS_EXPORT int opus_projection_decoder_ctl(OpusProjectionDecoder *st, int request, ...) OPUS_ARG_NONNULL(1); + + +/** Frees an OpusProjectionDecoder allocated by + * opus_projection_decoder_create(). + * @param st OpusProjectionDecoder: Projection decoder state to be freed. 
+ */ +OPUS_EXPORT void opus_projection_decoder_destroy(OpusProjectionDecoder *st); + + +/**@}*/ + +/**@}*/ + +#ifdef __cplusplus +} +#endif + +#endif /* OPUS_PROJECTION_H */ diff --git a/libs/libopus/include/opus_types.h b/libs/libopus/include/opus_types.h index b28e03aea..7cf675580 100644 --- a/libs/libopus/include/opus_types.h +++ b/libs/libopus/include/opus_types.h @@ -33,14 +33,29 @@ #ifndef OPUS_TYPES_H #define OPUS_TYPES_H +#define opus_int int /* used for counters etc; at least 16 bits */ +#define opus_int64 long long +#define opus_int8 signed char + +#define opus_uint unsigned int /* used for counters etc; at least 16 bits */ +#define opus_uint64 unsigned long long +#define opus_uint8 unsigned char + /* Use the real stdint.h if it's there (taken from Paul Hsieh's pstdint.h) */ -#if (defined(__STDC__) && __STDC__ && __STDC_VERSION__ >= 199901L) || (defined(__GNUC__) && (defined(_STDINT_H) || defined(_STDINT_H_)) || defined (HAVE_STDINT_H)) +#if (defined(__STDC__) && __STDC__ && defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L) || (defined(__GNUC__) && (defined(_STDINT_H) || defined(_STDINT_H_)) || defined (HAVE_STDINT_H)) #include - +# undef opus_int64 +# undef opus_int8 +# undef opus_uint64 +# undef opus_uint8 + typedef int8_t opus_int8; + typedef uint8_t opus_uint8; typedef int16_t opus_int16; typedef uint16_t opus_uint16; typedef int32_t opus_int32; typedef uint32_t opus_uint32; + typedef int64_t opus_int64; + typedef uint64_t opus_uint64; #elif defined(_WIN32) # if defined(__CYGWIN__) @@ -148,12 +163,4 @@ #endif -#define opus_int int /* used for counters etc; at least 16 bits */ -#define opus_int64 long long -#define opus_int8 signed char - -#define opus_uint unsigned int /* used for counters etc; at least 16 bits */ -#define opus_uint64 unsigned long long -#define opus_uint8 unsigned char - #endif /* OPUS_TYPES_H */ diff --git a/libs/libopus/moz.build b/libs/libopus/moz.build index 6bd177cc9..a612bc664 100644 --- a/libs/libopus/moz.build +++ b/libs/libopus/moz.build @@ -1,10 +1,11 @@ # -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*- +# vim: set filetype=python: # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. with Files('*'): - BUG_COMPONENT = ('Core', 'Video/Audio') + BUG_COMPONENT = ('Core', 'Audio/Video') EXPORTS.opus += [ 'include/opus.h', @@ -19,13 +20,12 @@ ALLOW_COMPILER_WARNINGS = True FINAL_LIBRARY = 'multimedia' DEFINES['OPUS_BUILD'] = True -DEFINES['OPUS_VERSION'] = '"v1.1.3-mozilla"' +DEFINES['OPUS_VERSION'] = '2654707e86cc94413998976d179b2ab4a2aa3114' DEFINES['USE_ALLOCA'] = True +DEFINES['ENABLE_HARDENING'] = True -# We only need to export symbols if we're built into libmultimedia -# instead of libxul. 
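For orientation, here is a minimal round trip through the projection (ambisonics) API declared in the new opus_projection.h above. This is an editorial illustration, not part of the patch: it assumes first-order ambisonics (4 channels), mapping family 3, 48 kHz, OPUS_APPLICATION_AUDIO, and the demixing-matrix CTLs from @ref opus_projection_ctls (assumed here to be named OPUS_PROJECTION_GET_DEMIXING_MATRIX_SIZE and OPUS_PROJECTION_GET_DEMIXING_MATRIX); error handling is abbreviated.

#include "opus_projection.h"   /* or <opus/opus_projection.h> in an installed tree */
#include <stdlib.h>

static int projection_roundtrip_sketch(const opus_int16 *pcm, int frame_size /* e.g. 960 */)
{
    int err, streams, coupled;
    OpusProjectionEncoder *enc = opus_projection_ambisonics_encoder_create(
        48000, 4, 3, &streams, &coupled, OPUS_APPLICATION_AUDIO, &err);
    if (err != OPUS_OK) return err;

    /* The decoder is created from the encoder's demixing matrix. */
    opus_int32 matrix_size = 0;
    opus_projection_encoder_ctl(enc, OPUS_PROJECTION_GET_DEMIXING_MATRIX_SIZE(&matrix_size));
    unsigned char *matrix = (unsigned char *)malloc(matrix_size);
    opus_projection_encoder_ctl(enc, OPUS_PROJECTION_GET_DEMIXING_MATRIX(matrix, matrix_size));

    OpusProjectionDecoder *dec = opus_projection_decoder_create(
        48000, 4, streams, coupled, matrix, matrix_size, &err);

    unsigned char packet[1500];
    opus_int16 out[960 * 4];
    int len = opus_projection_encode(enc, pcm, frame_size, packet, (opus_int32)sizeof(packet));
    int ret = (len > 0) ? opus_projection_decode(dec, packet, len, out, 960, 0) : len;

    free(matrix);
    opus_projection_decoder_destroy(dec);
    opus_projection_encoder_destroy(enc);
    return ret;   /* samples decoded per channel, or a negative error code */
}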
-if not CONFIG['GKMEDIAS_SHARED_LIBRARY']: - DEFINES['OPUS_EXPORT'] = '' +# Don't export symbols +DEFINES['OPUS_EXPORT'] = '' if CONFIG['CPU_ARCH'] == 'arm' and CONFIG['GNU_AS']: DEFINES['OPUS_ARM_ASM'] = True @@ -46,7 +46,7 @@ if CONFIG['OS_ARCH'] in ('Linux', 'Darwin', 'DragonFly', 'FreeBSD', if CONFIG['OS_ARCH'] == 'WINNT': DEFINES['inline'] = '__inline' - if CONFIG['GNU_CC']: + if CONFIG['CC_TYPE'] in ('clang', 'gcc'): DEFINES['HAVE_LRINTF'] = True if CONFIG['OS_ARCH'] == 'AIX': @@ -63,6 +63,8 @@ LOCAL_INCLUDES += [ 'celt', 'include', 'silk', + 'silk/fixed', + 'silk/float', 'src', ] @@ -76,18 +78,32 @@ UNIFIED_SOURCES += opus_sources SOURCES += opus_nonunified_sources if CONFIG['MOZ_SAMPLE_TYPE_FLOAT32']: - LOCAL_INCLUDES += [ - 'silk/float', - ] UNIFIED_SOURCES += silk_sources_float UNIFIED_SOURCES += opus_sources_float else: - LOCAL_INCLUDES += [ - 'silk/fixed', - ] UNIFIED_SOURCES += silk_sources_fixed -# for webrtc - UNIFIED_SOURCES += opus_sources_float + +if CONFIG['CPU_ARCH'] in ('x86', 'x86_64'): + DEFINES['OPUS_HAVE_RTCD'] = True + DEFINES['OPUS_X86_MAY_HAVE_SSE'] = True + DEFINES['OPUS_X86_MAY_HAVE_SSE2'] = True + DEFINES['OPUS_X86_MAY_HAVE_SSE4_1'] = True + DEFINES['OPUS_X86_MAY_HAVE_AVX'] = True + SOURCES += celt_sources_sse + SOURCES += celt_sources_sse2 + SOURCES += celt_sources_sse4_1 + SOURCES += silk_sources_sse4_1 + if not CONFIG['MOZ_SAMPLE_TYPE_FLOAT32']: + SOURCES += silk_sources_fixed_sse4_1 + for f in SOURCES: + if f in celt_sources_sse: + SOURCES[f].flags += CONFIG['SSE_FLAGS'] + if f in celt_sources_sse2: + SOURCES[f].flags += CONFIG['SSE2_FLAGS'] + if f in celt_sources_sse4_1 or \ + f in silk_sources_sse4_1 or \ + f in silk_sources_fixed_sse4_1: + SOURCES[f].flags += ['-msse4.1'] if CONFIG['CPU_ARCH'] == 'arm' and CONFIG['GNU_AS']: SOURCES += celt_sources_arm @@ -109,7 +125,20 @@ if CONFIG['CPU_ARCH'] == 'arm' and CONFIG['GNU_AS']: ] ASFLAGS += CONFIG['NEON_FLAGS'] +if CONFIG['CPU_ARCH'] == 'aarch64' and CONFIG['CC_TYPE'] in ('clang', 'gcc'): + DEFINES['OPUS_ARM_PRESUME_AARCH64_NEON_INTR'] = True + DEFINES['OPUS_ARM_PRESUME_NEON'] = True + DEFINES['OPUS_ARM_PRESUME_NEON_INTR'] = True + SOURCES += celt_sources_arm_neon_intr + SOURCES += silk_sources_arm_neon_intr + if CONFIG['MOZ_SAMPLE_TYPE_FLOAT32']: + DEFINES['OPUS_ARM_MAY_HAVE_NEON'] = True + DEFINES['OPUS_ARM_MAY_HAVE_NEON_INTR'] = True + else: + SOURCES += silk_sources_fixed_arm_neon_intr + # Suppress warnings in third-party code. 
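A brief aside on why the build adds both the OPUS_X86_MAY_HAVE_* defines and per-file SSE/-msse4.1 flags above: with OPUS_HAVE_RTCD, each SIMD variant lives in its own translation unit compiled with the matching ISA flag, and the generic code selects an implementation at run time from a small table indexed by the detected CPU level. The sketch below shows that pattern only; the names and table are invented for illustration and are not libopus's actual dispatch tables.

/* Hypothetical run-time dispatch sketch -- names invented for illustration. */
#include <stddef.h>

typedef int (*sum16_fn)(const short *x, size_t n);

static int sum16_c(const short *x, size_t n)          /* portable fallback */
{
    int s = 0;
    for (size_t i = 0; i < n; i++) s += x[i];
    return s;
}

static int sum16_sse4_1(const short *x, size_t n)     /* would live in a file built with -msse4.1 */
{
    return sum16_c(x, n);                              /* stand-in body for the sketch */
}

/* Index 0 is the C path; higher indices require the corresponding CPU feature,
   as reported by a one-time CPU probe (cf. OPUS_HAVE_RTCD). */
static const sum16_fn SUM16_IMPL[2] = { sum16_c, sum16_sse4_1 };

#define sum16(x, n, arch) (SUM16_IMPL[(arch) & 1]((x), (n)))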
-if CONFIG['GNU_CC']: - if CONFIG['CLANG_CXX']: +if CONFIG['CC_TYPE'] in ('clang', 'gcc'): + if CONFIG['CC_TYPE'] == 'clang': CFLAGS += ['-Wno-#pragma-messages'] + diff --git a/libs/libopus/nonunified.patch b/libs/libopus/nonunified.patch index cf9ff554f..e3568ca6e 100644 --- a/libs/libopus/nonunified.patch +++ b/libs/libopus/nonunified.patch @@ -27,10 +27,10 @@ index 8a39b9f..dfc2c62 100644 celt_sources_sse = [ 'celt/x86/pitch_sse.c', 'celt/x86/x86_celt_map.c', -@@ -105,8 +112,6 @@ silk_sources = [ - 'silk/log2lin.c', +@@ -114,8 +114,6 @@ silk_sources = [ 'silk/LP_variable_cutoff.c', 'silk/LPC_analysis_filter.c', + 'silk/LPC_fit.c', - 'silk/LPC_inv_pred_gain.c', - 'silk/NLSF2A.c', 'silk/NLSF_decode.c', diff --git a/libs/libopus/nonunified2.patch b/libs/libopus/nonunified2.patch new file mode 100644 index 000000000..02241de52 --- /dev/null +++ b/libs/libopus/nonunified2.patch @@ -0,0 +1,34 @@ +diff --git a/media/libopus/sources.mozbuild b/media/libopus/sources.mozbuild +--- a/media/libopus/sources.mozbuild ++++ b/media/libopus/sources.mozbuild +@@ -1,12 +1,11 @@ + # THIS FILE WAS AUTOMATICALLY GENERATED BY gen-sources.py. DO NOT EDIT. + celt_sources = [ + 'celt/bands.c', +- 'celt/celt.c', + 'celt/celt_lpc.c', + 'celt/cwrs.c', + 'celt/entcode.c', + 'celt/entdec.c', + 'celt/entenc.c', + 'celt/kiss_fft.c', + 'celt/laplace.c', + 'celt/mathops.c', +@@ -14,16 +13,18 @@ celt_sources = [ + 'celt/modes.c', + 'celt/pitch.c', + 'celt/quant_bands.c', + 'celt/rate.c', + 'celt/vq.c', + ] + + opus_nonunified_sources = [ ++ # Disabled because of undefined reference to celt_fatal at link time ++ 'celt/celt.c', + # Disabled because of name clash of opus_custom_encoder_get_size. + 'celt/celt_decoder.c', + 'celt/celt_encoder.c', + # Disabled for (safe) warning about QA redefinition. + 'silk/LPC_inv_pred_gain.c', + 'silk/NLSF2A.c', + ] diff --git a/libs/libopus/silk/A2NLSF.c b/libs/libopus/silk/A2NLSF.c index b6e9e5ffc..b487686ff 100644 --- a/libs/libopus/silk/A2NLSF.c +++ b/libs/libopus/silk/A2NLSF.c @@ -40,7 +40,7 @@ POSSIBILITY OF SUCH DAMAGE. /* Number of binary divisions, when not in low complexity mode */ #define BIN_DIV_STEPS_A2NLSF_FIX 3 /* must be no higher than 16 - log2( LSF_COS_TAB_SZ_FIX ) */ -#define MAX_ITERATIONS_A2NLSF_FIX 30 +#define MAX_ITERATIONS_A2NLSF_FIX 16 /* Helper function for A2NLSF(..) 
*/ /* Transforms polynomials from cos(n*f) to cos(f)^n */ @@ -130,7 +130,7 @@ void silk_A2NLSF( const opus_int d /* I Filter order (must be even) */ ) { - opus_int i, k, m, dd, root_ix, ffrac; + opus_int i, k, m, dd, root_ix, ffrac; opus_int32 xlo, xhi, xmid; opus_int32 ylo, yhi, ymid, thr; opus_int32 nom, den; @@ -239,13 +239,13 @@ void silk_A2NLSF( /* Set NLSFs to white spectrum and exit */ NLSF[ 0 ] = (opus_int16)silk_DIV32_16( 1 << 15, d + 1 ); for( k = 1; k < d; k++ ) { - NLSF[ k ] = (opus_int16)silk_SMULBB( k + 1, NLSF[ 0 ] ); + NLSF[ k ] = (opus_int16)silk_ADD16( NLSF[ k-1 ], NLSF[ 0 ] ); } return; } /* Error: Apply progressively more bandwidth expansion and run again */ - silk_bwexpander_32( a_Q16, d, 65536 - silk_SMULBB( 10 + i, i ) ); /* 10_Q16 = 0.00015*/ + silk_bwexpander_32( a_Q16, d, 65536 - silk_LSHIFT( 1, i ) ); silk_A2NLSF_init( a_Q16, P, Q, dd ); p = P; /* Pointer to polynomial */ diff --git a/libs/libopus/silk/API.h b/libs/libopus/silk/API.h index 0131acbb0..4d90ff9aa 100644 --- a/libs/libopus/silk/API.h +++ b/libs/libopus/silk/API.h @@ -80,7 +80,8 @@ opus_int silk_Encode( /* O Returns error co opus_int nSamplesIn, /* I Number of samples in input vector */ ec_enc *psRangeEnc, /* I/O Compressor data structure */ opus_int32 *nBytesOut, /* I/O Number of bytes in payload (input: Max bytes) */ - const opus_int prefillFlag /* I Flag to indicate prefilling buffers no coding */ + const opus_int prefillFlag, /* I Flag to indicate prefilling buffers no coding */ + int activity /* I Decision of Opus voice activity detector */ ); /****************************************/ diff --git a/libs/libopus/silk/CNG.c b/libs/libopus/silk/CNG.c index 8443ad63b..2a910099e 100644 --- a/libs/libopus/silk/CNG.c +++ b/libs/libopus/silk/CNG.c @@ -118,6 +118,10 @@ void silk_CNG( /* Smooth gains */ for( i = 0; i < psDec->nb_subfr; i++ ) { psCNG->CNG_smth_Gain_Q16 += silk_SMULWB( psDecCtrl->Gains_Q16[ i ] - psCNG->CNG_smth_Gain_Q16, CNG_GAIN_SMTH_Q16 ); + /* If the smoothed gain is 3 dB greater than this subframe's gain, use this subframe's gain to adapt faster. 
*/ + if( silk_SMULWW( psCNG->CNG_smth_Gain_Q16, CNG_GAIN_SMTH_THRESHOLD_Q16 ) > psDecCtrl->Gains_Q16[ i ] ) { + psCNG->CNG_smth_Gain_Q16 = psDecCtrl->Gains_Q16[ i ]; + } } } @@ -138,16 +142,16 @@ void silk_CNG( gain_Q16 = silk_LSHIFT32( silk_SQRT_APPROX( gain_Q16 ), 8 ); } gain_Q10 = silk_RSHIFT( gain_Q16, 6 ); - + silk_CNG_exc( CNG_sig_Q14 + MAX_LPC_ORDER, psCNG->CNG_exc_buf_Q14, length, &psCNG->rand_seed ); /* Convert CNG NLSF to filter representation */ - silk_NLSF2A( A_Q12, psCNG->CNG_smth_NLSF_Q15, psDec->LPC_order ); + silk_NLSF2A( A_Q12, psCNG->CNG_smth_NLSF_Q15, psDec->LPC_order, psDec->arch ); /* Generate CNG signal, by synthesis filtering */ silk_memcpy( CNG_sig_Q14, psCNG->CNG_synth_state, MAX_LPC_ORDER * sizeof( opus_int32 ) ); + celt_assert( psDec->LPC_order == 10 || psDec->LPC_order == 16 ); for( i = 0; i < length; i++ ) { - silk_assert( psDec->LPC_order == 10 || psDec->LPC_order == 16 ); /* Avoids introducing a bias because silk_SMLAWB() always rounds to -inf */ LPC_pred_Q10 = silk_RSHIFT( psDec->LPC_order, 1 ); LPC_pred_Q10 = silk_SMLAWB( LPC_pred_Q10, CNG_sig_Q14[ MAX_LPC_ORDER + i - 1 ], A_Q12[ 0 ] ); @@ -170,11 +174,11 @@ void silk_CNG( } /* Update states */ - CNG_sig_Q14[ MAX_LPC_ORDER + i ] = silk_ADD_LSHIFT( CNG_sig_Q14[ MAX_LPC_ORDER + i ], LPC_pred_Q10, 4 ); - + CNG_sig_Q14[ MAX_LPC_ORDER + i ] = silk_ADD_SAT32( CNG_sig_Q14[ MAX_LPC_ORDER + i ], silk_LSHIFT_SAT32( LPC_pred_Q10, 4 ) ); + /* Scale with Gain and add to input signal */ frame[ i ] = (opus_int16)silk_ADD_SAT16( frame[ i ], silk_SAT16( silk_RSHIFT_ROUND( silk_SMULWW( CNG_sig_Q14[ MAX_LPC_ORDER + i ], gain_Q10 ), 8 ) ) ); - + } silk_memcpy( psCNG->CNG_synth_state, &CNG_sig_Q14[ length ], MAX_LPC_ORDER * sizeof( opus_int32 ) ); } else { diff --git a/libs/libopus/silk/LPC_analysis_filter.c b/libs/libopus/silk/LPC_analysis_filter.c index 20906673f..d34b5eb70 100644 --- a/libs/libopus/silk/LPC_analysis_filter.c +++ b/libs/libopus/silk/LPC_analysis_filter.c @@ -39,6 +39,13 @@ POSSIBILITY OF SUCH DAMAGE. /* first d output samples are set to zero */ /*******************************************/ +/* OPT: Using celt_fir() for this function should be faster, but it may cause + integer overflows in intermediate values (not final results), which the + current implementation silences by casting to unsigned. Enabling + this should be safe in pretty much all cases, even though it is not technically + C89-compliant. 
*/ +#define USE_CELT_FIR 0 + void silk_LPC_analysis_filter( opus_int16 *out, /* O Output signal */ const opus_int16 *in, /* I Input signal */ @@ -49,8 +56,7 @@ void silk_LPC_analysis_filter( ) { opus_int j; -#ifdef FIXED_POINT - opus_int16 mem[SILK_MAX_ORDER_LPC]; +#if defined(FIXED_POINT) && USE_CELT_FIR opus_int16 num[SILK_MAX_ORDER_LPC]; #else int ix; @@ -58,19 +64,16 @@ void silk_LPC_analysis_filter( const opus_int16 *in_ptr; #endif - silk_assert( d >= 6 ); - silk_assert( (d & 1) == 0 ); - silk_assert( d <= len ); + celt_assert( d >= 6 ); + celt_assert( (d & 1) == 0 ); + celt_assert( d <= len ); -#ifdef FIXED_POINT - silk_assert( d <= SILK_MAX_ORDER_LPC ); +#if defined(FIXED_POINT) && USE_CELT_FIR + celt_assert( d <= SILK_MAX_ORDER_LPC ); for ( j = 0; j < d; j++ ) { num[ j ] = -B[ j ]; } - for (j=0;j maxabs ) { + maxabs = absval; + idx = k; + } + } + maxabs = silk_RSHIFT_ROUND( maxabs, QIN - QOUT ); + + if( maxabs > silk_int16_MAX ) { + /* Reduce magnitude of prediction coefficients */ + maxabs = silk_min( maxabs, 163838 ); /* ( silk_int32_MAX >> 14 ) + silk_int16_MAX = 163838 */ + chirp_Q16 = SILK_FIX_CONST( 0.999, 16 ) - silk_DIV32( silk_LSHIFT( maxabs - silk_int16_MAX, 14 ), + silk_RSHIFT32( silk_MUL( maxabs, idx + 1), 2 ) ); + silk_bwexpander_32( a_QIN, d, chirp_Q16 ); + } else { + break; + } + } + + if( i == 10 ) { + /* Reached the last iteration, clip the coefficients */ + for( k = 0; k < d; k++ ) { + a_QOUT[ k ] = (opus_int16)silk_SAT16( silk_RSHIFT_ROUND( a_QIN[ k ], QIN - QOUT ) ); + a_QIN[ k ] = silk_LSHIFT( (opus_int32)a_QOUT[ k ], QIN - QOUT ); + } + } else { + for( k = 0; k < d; k++ ) { + a_QOUT[ k ] = (opus_int16)silk_RSHIFT_ROUND( a_QIN[ k ], QIN - QOUT ); + } + } +} diff --git a/libs/libopus/silk/LPC_inv_pred_gain.c b/libs/libopus/silk/LPC_inv_pred_gain.c index 4af89aa5f..a3746a6ef 100644 --- a/libs/libopus/silk/LPC_inv_pred_gain.c +++ b/libs/libopus/silk/LPC_inv_pred_gain.c @@ -30,6 +30,7 @@ POSSIBILITY OF SUCH DAMAGE. #endif #include "SigProc_FIX.h" +#include "define.h" #define QA 24 #define A_LIMIT SILK_FIX_CONST( 0.99975, QA ) @@ -38,117 +39,103 @@ POSSIBILITY OF SUCH DAMAGE. 
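The loop added as silk/LPC_fit.c above limits 32-bit prediction coefficients so that, rounded to the output Q domain, they fit in opus_int16. A floating-point sketch of the idea follows, for illustration only; the upstream file works in fixed point with QIN/QOUT and clamps maxabs before computing the chirp.

/* Floating-point sketch of the silk_LPC_fit idea (illustration only). */
#include <math.h>
#include <stdint.h>

static void lpc_fit_sketch(int16_t *a_out, double *a, int order)
{
    for (int iter = 0; iter < 10; iter++) {
        /* Find the largest magnitude and where it sits. */
        double maxabs = 0.0;
        int idx = 0;
        for (int k = 0; k < order; k++) {
            if (fabs(a[k]) > maxabs) { maxabs = fabs(a[k]); idx = k; }
        }
        if (maxabs <= 32767.0) break;                       /* already fits int16 */
        /* Chirp just below 1.0; cf. 0.999 - (maxabs - 32767)/(maxabs*(idx+1)) above. */
        double chirp = 0.999 - (maxabs - 32767.0) / (maxabs * (idx + 1));
        for (int k = 0; k < order; k++) {
            a[k] *= pow(chirp, k + 1);                      /* bandwidth expansion */
        }
    }
    for (int k = 0; k < order; k++) {                       /* final rounding, with clipping */
        double v = nearbyint(a[k]);
        a_out[k] = (int16_t)(v > 32767.0 ? 32767.0 : (v < -32768.0 ? -32768.0 : v));
    }
}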
/* Compute inverse of LPC prediction gain, and */ /* test if LPC coefficients are stable (all poles within unit circle) */ -static opus_int32 LPC_inverse_pred_gain_QA( /* O Returns inverse prediction gain in energy domain, Q30 */ - opus_int32 A_QA[ 2 ][ SILK_MAX_ORDER_LPC ], /* I Prediction coefficients */ +static opus_int32 LPC_inverse_pred_gain_QA_c( /* O Returns inverse prediction gain in energy domain, Q30 */ + opus_int32 A_QA[ SILK_MAX_ORDER_LPC ], /* I Prediction coefficients */ const opus_int order /* I Prediction order */ ) { opus_int k, n, mult2Q; - opus_int32 invGain_Q30, rc_Q31, rc_mult1_Q30, rc_mult2, tmp_QA; - opus_int32 *Aold_QA, *Anew_QA; + opus_int32 invGain_Q30, rc_Q31, rc_mult1_Q30, rc_mult2, tmp1, tmp2; - Anew_QA = A_QA[ order & 1 ]; - - invGain_Q30 = (opus_int32)1 << 30; + invGain_Q30 = SILK_FIX_CONST( 1, 30 ); for( k = order - 1; k > 0; k-- ) { /* Check for stability */ - if( ( Anew_QA[ k ] > A_LIMIT ) || ( Anew_QA[ k ] < -A_LIMIT ) ) { + if( ( A_QA[ k ] > A_LIMIT ) || ( A_QA[ k ] < -A_LIMIT ) ) { return 0; } /* Set RC equal to negated AR coef */ - rc_Q31 = -silk_LSHIFT( Anew_QA[ k ], 31 - QA ); + rc_Q31 = -silk_LSHIFT( A_QA[ k ], 31 - QA ); /* rc_mult1_Q30 range: [ 1 : 2^30 ] */ - rc_mult1_Q30 = ( (opus_int32)1 << 30 ) - silk_SMMUL( rc_Q31, rc_Q31 ); + rc_mult1_Q30 = silk_SUB32( SILK_FIX_CONST( 1, 30 ), silk_SMMUL( rc_Q31, rc_Q31 ) ); silk_assert( rc_mult1_Q30 > ( 1 << 15 ) ); /* reduce A_LIMIT if fails */ silk_assert( rc_mult1_Q30 <= ( 1 << 30 ) ); - /* rc_mult2 range: [ 2^30 : silk_int32_MAX ] */ - mult2Q = 32 - silk_CLZ32( silk_abs( rc_mult1_Q30 ) ); - rc_mult2 = silk_INVERSE32_varQ( rc_mult1_Q30, mult2Q + 30 ); - /* Update inverse gain */ /* invGain_Q30 range: [ 0 : 2^30 ] */ invGain_Q30 = silk_LSHIFT( silk_SMMUL( invGain_Q30, rc_mult1_Q30 ), 2 ); silk_assert( invGain_Q30 >= 0 ); silk_assert( invGain_Q30 <= ( 1 << 30 ) ); + if( invGain_Q30 < SILK_FIX_CONST( 1.0f / MAX_PREDICTION_POWER_GAIN, 30 ) ) { + return 0; + } - /* Swap pointers */ - Aold_QA = Anew_QA; - Anew_QA = A_QA[ k & 1 ]; + /* rc_mult2 range: [ 2^30 : silk_int32_MAX ] */ + mult2Q = 32 - silk_CLZ32( silk_abs( rc_mult1_Q30 ) ); + rc_mult2 = silk_INVERSE32_varQ( rc_mult1_Q30, mult2Q + 30 ); /* Update AR coefficient */ - for( n = 0; n < k; n++ ) { - tmp_QA = Aold_QA[ n ] - MUL32_FRAC_Q( Aold_QA[ k - n - 1 ], rc_Q31, 31 ); - Anew_QA[ n ] = MUL32_FRAC_Q( tmp_QA, rc_mult2 , mult2Q ); + for( n = 0; n < (k + 1) >> 1; n++ ) { + opus_int64 tmp64; + tmp1 = A_QA[ n ]; + tmp2 = A_QA[ k - n - 1 ]; + tmp64 = silk_RSHIFT_ROUND64( silk_SMULL( silk_SUB_SAT32(tmp1, + MUL32_FRAC_Q( tmp2, rc_Q31, 31 ) ), rc_mult2 ), mult2Q); + if( tmp64 > silk_int32_MAX || tmp64 < silk_int32_MIN ) { + return 0; + } + A_QA[ n ] = ( opus_int32 )tmp64; + tmp64 = silk_RSHIFT_ROUND64( silk_SMULL( silk_SUB_SAT32(tmp2, + MUL32_FRAC_Q( tmp1, rc_Q31, 31 ) ), rc_mult2), mult2Q); + if( tmp64 > silk_int32_MAX || tmp64 < silk_int32_MIN ) { + return 0; + } + A_QA[ k - n - 1 ] = ( opus_int32 )tmp64; } } /* Check for stability */ - if( ( Anew_QA[ 0 ] > A_LIMIT ) || ( Anew_QA[ 0 ] < -A_LIMIT ) ) { + if( ( A_QA[ k ] > A_LIMIT ) || ( A_QA[ k ] < -A_LIMIT ) ) { return 0; } /* Set RC equal to negated AR coef */ - rc_Q31 = -silk_LSHIFT( Anew_QA[ 0 ], 31 - QA ); + rc_Q31 = -silk_LSHIFT( A_QA[ 0 ], 31 - QA ); /* Range: [ 1 : 2^30 ] */ - rc_mult1_Q30 = ( (opus_int32)1 << 30 ) - silk_SMMUL( rc_Q31, rc_Q31 ); + rc_mult1_Q30 = silk_SUB32( SILK_FIX_CONST( 1, 30 ), silk_SMMUL( rc_Q31, rc_Q31 ) ); /* Update inverse gain */ /* Range: [ 0 : 2^30 ] */ invGain_Q30 = 
silk_LSHIFT( silk_SMMUL( invGain_Q30, rc_mult1_Q30 ), 2 ); - silk_assert( invGain_Q30 >= 0 ); - silk_assert( invGain_Q30 <= 1<<30 ); + silk_assert( invGain_Q30 >= 0 ); + silk_assert( invGain_Q30 <= ( 1 << 30 ) ); + if( invGain_Q30 < SILK_FIX_CONST( 1.0f / MAX_PREDICTION_POWER_GAIN, 30 ) ) { + return 0; + } return invGain_Q30; } /* For input in Q12 domain */ -opus_int32 silk_LPC_inverse_pred_gain( /* O Returns inverse prediction gain in energy domain, Q30 */ +opus_int32 silk_LPC_inverse_pred_gain_c( /* O Returns inverse prediction gain in energy domain, Q30 */ const opus_int16 *A_Q12, /* I Prediction coefficients, Q12 [order] */ const opus_int order /* I Prediction order */ ) { opus_int k; - opus_int32 Atmp_QA[ 2 ][ SILK_MAX_ORDER_LPC ]; - opus_int32 *Anew_QA; + opus_int32 Atmp_QA[ SILK_MAX_ORDER_LPC ]; opus_int32 DC_resp = 0; - Anew_QA = Atmp_QA[ order & 1 ]; - /* Increase Q domain of the AR coefficients */ for( k = 0; k < order; k++ ) { DC_resp += (opus_int32)A_Q12[ k ]; - Anew_QA[ k ] = silk_LSHIFT32( (opus_int32)A_Q12[ k ], QA - 12 ); + Atmp_QA[ k ] = silk_LSHIFT32( (opus_int32)A_Q12[ k ], QA - 12 ); } /* If the DC is unstable, we don't even need to do the full calculations */ if( DC_resp >= 4096 ) { return 0; } - return LPC_inverse_pred_gain_QA( Atmp_QA, order ); + return LPC_inverse_pred_gain_QA_c( Atmp_QA, order ); } - -#ifdef FIXED_POINT - -/* For input in Q24 domain */ -opus_int32 silk_LPC_inverse_pred_gain_Q24( /* O Returns inverse prediction gain in energy domain, Q30 */ - const opus_int32 *A_Q24, /* I Prediction coefficients [order] */ - const opus_int order /* I Prediction order */ -) -{ - opus_int k; - opus_int32 Atmp_QA[ 2 ][ SILK_MAX_ORDER_LPC ]; - opus_int32 *Anew_QA; - - Anew_QA = Atmp_QA[ order & 1 ]; - - /* Increase Q domain of the AR coefficients */ - for( k = 0; k < order; k++ ) { - Anew_QA[ k ] = silk_RSHIFT32( A_Q24[ k ], 24 - QA ); - } - - return LPC_inverse_pred_gain_QA( Atmp_QA, order ); -} -#endif diff --git a/libs/libopus/silk/LP_variable_cutoff.c b/libs/libopus/silk/LP_variable_cutoff.c index f639e1f89..79112ad35 100644 --- a/libs/libopus/silk/LP_variable_cutoff.c +++ b/libs/libopus/silk/LP_variable_cutoff.c @@ -130,6 +130,6 @@ void silk_LP_variable_cutoff( /* ARMA low-pass filtering */ silk_assert( TRANSITION_NB == 3 && TRANSITION_NA == 2 ); - silk_biquad_alt( frame, B_Q28, A_Q28, psLP->In_LP_State, frame, frame_length, 1); + silk_biquad_alt_stride1( frame, B_Q28, A_Q28, psLP->In_LP_State, frame, frame_length); } } diff --git a/libs/libopus/silk/MacroCount.h b/libs/libopus/silk/MacroCount.h index 834817d05..dab2f57a6 100644 --- a/libs/libopus/silk/MacroCount.h +++ b/libs/libopus/silk/MacroCount.h @@ -27,9 +27,9 @@ POSSIBILITY OF SUCH DAMAGE. #ifndef SIGPROCFIX_API_MACROCOUNT_H #define SIGPROCFIX_API_MACROCOUNT_H -#include #ifdef silk_MACRO_COUNT +#include #define varDefine opus_int64 ops_count = 0; extern opus_int64 ops_count; @@ -319,14 +319,6 @@ static OPUS_INLINE opus_int32 silk_ADD_POS_SAT32(opus_int64 a, opus_int64 b){ return(tmp); } -#undef silk_ADD_POS_SAT64 -static OPUS_INLINE opus_int64 silk_ADD_POS_SAT64(opus_int64 a, opus_int64 b){ - opus_int64 tmp; - ops_count += 1; - tmp = ((((a)+(b)) & 0x8000000000000000LL) ? 
silk_int64_MAX : ((a)+(b))); - return(tmp); -} - #undef silk_LSHIFT8 static OPUS_INLINE opus_int8 silk_LSHIFT8(opus_int8 a, opus_int32 shift){ opus_int8 ret; @@ -699,7 +691,7 @@ return(ret); #undef silk_LIMIT_32 -static OPUS_INLINE opus_int silk_LIMIT_32(opus_int32 a, opus_int32 limit1, opus_int32 limit2) +static OPUS_INLINE opus_int32 silk_LIMIT_32(opus_int32 a, opus_int32 limit1, opus_int32 limit2) { opus_int32 ret; ops_count += 6; diff --git a/libs/libopus/silk/MacroDebug.h b/libs/libopus/silk/MacroDebug.h index 35aedc5c5..8dd4ce2ee 100644 --- a/libs/libopus/silk/MacroDebug.h +++ b/libs/libopus/silk/MacroDebug.h @@ -539,8 +539,7 @@ static OPUS_INLINE opus_int32 silk_DIV32_16_(opus_int32 a32, opus_int32 b32, cha no checking needed for silk_POS_SAT32 no checking needed for silk_ADD_POS_SAT8 no checking needed for silk_ADD_POS_SAT16 - no checking needed for silk_ADD_POS_SAT32 - no checking needed for silk_ADD_POS_SAT64 */ + no checking needed for silk_ADD_POS_SAT32 */ #undef silk_LSHIFT8 #define silk_LSHIFT8(a,b) silk_LSHIFT8_((a), (b), __FILE__, __LINE__) diff --git a/libs/libopus/silk/NLSF2A.c b/libs/libopus/silk/NLSF2A.c index b1c559ea6..d5b773063 100644 --- a/libs/libopus/silk/NLSF2A.c +++ b/libs/libopus/silk/NLSF2A.c @@ -66,7 +66,8 @@ static OPUS_INLINE void silk_NLSF2A_find_poly( void silk_NLSF2A( opus_int16 *a_Q12, /* O monic whitening filter coefficients in Q12, [ d ] */ const opus_int16 *NLSF, /* I normalized line spectral frequencies in Q15, [ d ] */ - const opus_int d /* I filter order (should be even) */ + const opus_int d, /* I filter order (should be even) */ + int arch /* I Run-time architecture */ ) { /* This ordering was found to maximize quality. It improves numerical accuracy of @@ -83,15 +84,14 @@ void silk_NLSF2A( opus_int32 P[ SILK_MAX_ORDER_LPC / 2 + 1 ], Q[ SILK_MAX_ORDER_LPC / 2 + 1 ]; opus_int32 Ptmp, Qtmp, f_int, f_frac, cos_val, delta; opus_int32 a32_QA1[ SILK_MAX_ORDER_LPC ]; - opus_int32 maxabs, absval, idx=0, sc_Q16; silk_assert( LSF_COS_TAB_SZ_FIX == 128 ); - silk_assert( d==10||d==16 ); + celt_assert( d==10 || d==16 ); /* convert LSFs to 2*cos(LSF), using piecewise linear curve from table */ ordering = d == 16 ? 
ordering16 : ordering10; for( k = 0; k < d; k++ ) { - silk_assert(NLSF[k] >= 0 ); + silk_assert( NLSF[k] >= 0 ); /* f_int on a scale 0-127 (rounded down) */ f_int = silk_RSHIFT( NLSF[k], 15 - 7 ); @@ -126,52 +126,15 @@ void silk_NLSF2A( a32_QA1[ d-k-1 ] = Qtmp - Ptmp; /* QA+1 */ } - /* Limit the maximum absolute value of the prediction coefficients, so that they'll fit in int16 */ - for( i = 0; i < 10; i++ ) { - /* Find maximum absolute value and its index */ - maxabs = 0; - for( k = 0; k < d; k++ ) { - absval = silk_abs( a32_QA1[k] ); - if( absval > maxabs ) { - maxabs = absval; - idx = k; - } - } - maxabs = silk_RSHIFT_ROUND( maxabs, QA + 1 - 12 ); /* QA+1 -> Q12 */ - - if( maxabs > silk_int16_MAX ) { - /* Reduce magnitude of prediction coefficients */ - maxabs = silk_min( maxabs, 163838 ); /* ( silk_int32_MAX >> 14 ) + silk_int16_MAX = 163838 */ - sc_Q16 = SILK_FIX_CONST( 0.999, 16 ) - silk_DIV32( silk_LSHIFT( maxabs - silk_int16_MAX, 14 ), - silk_RSHIFT32( silk_MUL( maxabs, idx + 1), 2 ) ); - silk_bwexpander_32( a32_QA1, d, sc_Q16 ); - } else { - break; - } - } + /* Convert int32 coefficients to Q12 int16 coefs */ + silk_LPC_fit( a_Q12, a32_QA1, 12, QA + 1, d ); - if( i == 10 ) { - /* Reached the last iteration, clip the coefficients */ + for( i = 0; silk_LPC_inverse_pred_gain( a_Q12, d, arch ) == 0 && i < MAX_LPC_STABILIZE_ITERATIONS; i++ ) { + /* Prediction coefficients are (too close to) unstable; apply bandwidth expansion */ + /* on the unscaled coefficients, convert to Q12 and measure again */ + silk_bwexpander_32( a32_QA1, d, 65536 - silk_LSHIFT( 2, i ) ); for( k = 0; k < d; k++ ) { - a_Q12[ k ] = (opus_int16)silk_SAT16( silk_RSHIFT_ROUND( a32_QA1[ k ], QA + 1 - 12 ) ); /* QA+1 -> Q12 */ - a32_QA1[ k ] = silk_LSHIFT( (opus_int32)a_Q12[ k ], QA + 1 - 12 ); - } - } else { - for( k = 0; k < d; k++ ) { - a_Q12[ k ] = (opus_int16)silk_RSHIFT_ROUND( a32_QA1[ k ], QA + 1 - 12 ); /* QA+1 -> Q12 */ - } - } - - for( i = 0; i < MAX_LPC_STABILIZE_ITERATIONS; i++ ) { - if( silk_LPC_inverse_pred_gain( a_Q12, d ) < SILK_FIX_CONST( 1.0 / MAX_PREDICTION_POWER_GAIN, 30 ) ) { - /* Prediction coefficients are (too close to) unstable; apply bandwidth expansion */ - /* on the unscaled coefficients, convert to Q12 and measure again */ - silk_bwexpander_32( a32_QA1, d, 65536 - silk_LSHIFT( 2, i ) ); - for( k = 0; k < d; k++ ) { - a_Q12[ k ] = (opus_int16)silk_RSHIFT_ROUND( a32_QA1[ k ], QA + 1 - 12 ); /* QA+1 -> Q12 */ - } - } else { - break; + a_Q12[ k ] = (opus_int16)silk_RSHIFT_ROUND( a32_QA1[ k ], QA + 1 - 12 ); /* QA+1 -> Q12 */ } } } diff --git a/libs/libopus/silk/NLSF_VQ.c b/libs/libopus/silk/NLSF_VQ.c index 69b6e22e1..b83182a79 100644 --- a/libs/libopus/silk/NLSF_VQ.c +++ b/libs/libopus/silk/NLSF_VQ.c @@ -33,36 +33,44 @@ POSSIBILITY OF SUCH DAMAGE. 
/* Compute quantization errors for an LPC_order element input vector for a VQ codebook */ void silk_NLSF_VQ( - opus_int32 err_Q26[], /* O Quantization errors [K] */ + opus_int32 err_Q24[], /* O Quantization errors [K] */ const opus_int16 in_Q15[], /* I Input vectors to be quantized [LPC_order] */ const opus_uint8 pCB_Q8[], /* I Codebook vectors [K*LPC_order] */ + const opus_int16 pWght_Q9[], /* I Codebook weights [K*LPC_order] */ const opus_int K, /* I Number of codebook vectors */ const opus_int LPC_order /* I Number of LPCs */ ) { - opus_int i, m; - opus_int32 diff_Q15, sum_error_Q30, sum_error_Q26; + opus_int i, m; + opus_int32 diff_Q15, diffw_Q24, sum_error_Q24, pred_Q24; + const opus_int16 *w_Q9_ptr; + const opus_uint8 *cb_Q8_ptr; - silk_assert( LPC_order <= 16 ); - silk_assert( ( LPC_order & 1 ) == 0 ); + celt_assert( ( LPC_order & 1 ) == 0 ); /* Loop over codebook */ + cb_Q8_ptr = pCB_Q8; + w_Q9_ptr = pWght_Q9; for( i = 0; i < K; i++ ) { - sum_error_Q26 = 0; - for( m = 0; m < LPC_order; m += 2 ) { - /* Compute weighted squared quantization error for index m */ - diff_Q15 = silk_SUB_LSHIFT32( in_Q15[ m ], (opus_int32)*pCB_Q8++, 7 ); /* range: [ -32767 : 32767 ]*/ - sum_error_Q30 = silk_SMULBB( diff_Q15, diff_Q15 ); + sum_error_Q24 = 0; + pred_Q24 = 0; + for( m = LPC_order-2; m >= 0; m -= 2 ) { + /* Compute weighted absolute predictive quantization error for index m + 1 */ + diff_Q15 = silk_SUB_LSHIFT32( in_Q15[ m + 1 ], (opus_int32)cb_Q8_ptr[ m + 1 ], 7 ); /* range: [ -32767 : 32767 ]*/ + diffw_Q24 = silk_SMULBB( diff_Q15, w_Q9_ptr[ m + 1 ] ); + sum_error_Q24 = silk_ADD32( sum_error_Q24, silk_abs( silk_SUB_RSHIFT32( diffw_Q24, pred_Q24, 1 ) ) ); + pred_Q24 = diffw_Q24; - /* Compute weighted squared quantization error for index m + 1 */ - diff_Q15 = silk_SUB_LSHIFT32( in_Q15[m + 1], (opus_int32)*pCB_Q8++, 7 ); /* range: [ -32767 : 32767 ]*/ - sum_error_Q30 = silk_SMLABB( sum_error_Q30, diff_Q15, diff_Q15 ); + /* Compute weighted absolute predictive quantization error for index m */ + diff_Q15 = silk_SUB_LSHIFT32( in_Q15[ m ], (opus_int32)cb_Q8_ptr[ m ], 7 ); /* range: [ -32767 : 32767 ]*/ + diffw_Q24 = silk_SMULBB( diff_Q15, w_Q9_ptr[ m ] ); + sum_error_Q24 = silk_ADD32( sum_error_Q24, silk_abs( silk_SUB_RSHIFT32( diffw_Q24, pred_Q24, 1 ) ) ); + pred_Q24 = diffw_Q24; - sum_error_Q26 = silk_ADD_RSHIFT32( sum_error_Q26, sum_error_Q30, 4 ); - - silk_assert( sum_error_Q26 >= 0 ); - silk_assert( sum_error_Q30 >= 0 ); + silk_assert( sum_error_Q24 >= 0 ); } - err_Q26[ i ] = sum_error_Q26; + err_Q24[ i ] = sum_error_Q24; + cb_Q8_ptr += LPC_order; + w_Q9_ptr += LPC_order; } } diff --git a/libs/libopus/silk/NLSF_VQ_weights_laroia.c b/libs/libopus/silk/NLSF_VQ_weights_laroia.c index 04894c59a..9873bcde1 100644 --- a/libs/libopus/silk/NLSF_VQ_weights_laroia.c +++ b/libs/libopus/silk/NLSF_VQ_weights_laroia.c @@ -48,8 +48,8 @@ void silk_NLSF_VQ_weights_laroia( opus_int k; opus_int32 tmp1_int, tmp2_int; - silk_assert( D > 0 ); - silk_assert( ( D & 1 ) == 0 ); + celt_assert( D > 0 ); + celt_assert( ( D & 1 ) == 0 ); /* First value */ tmp1_int = silk_max_int( pNLSF_Q15[ 0 ], 1 ); diff --git a/libs/libopus/silk/NLSF_decode.c b/libs/libopus/silk/NLSF_decode.c index 9f715060b..eeb0ba8c9 100644 --- a/libs/libopus/silk/NLSF_decode.c +++ b/libs/libopus/silk/NLSF_decode.c @@ -32,7 +32,7 @@ POSSIBILITY OF SUCH DAMAGE. 
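The silk_NLSF_VQ rewrite above replaces the weighted squared error (Q26) with a weighted absolute error in Q24 that is predicted across coefficients: each weighted residual is compared against half of the previously processed one, and diff_Q15 * w_Q9 lands in Q24 directly. A floating-point sketch of the same measure, for illustration only:

/* Floating-point sketch of the new NLSF VQ error measure (illustration only). */
#include <math.h>

static float nlsf_vq_err_sketch(const float *in, const float *cb, const float *w, int order)
{
    float err = 0.0f, pred = 0.0f;
    /* Coefficients are visited from the highest index down, as in the fixed-point loop. */
    for (int m = order - 1; m >= 0; m--) {
        float dw = (in[m] - cb[m]) * w[m];        /* weighted residual */
        err += fabsf(dw - 0.5f * pred);           /* compare with half the previous residual */
        pred = dw;
    }
    return err;
}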
#include "main.h" /* Predictive dequantizer for NLSF residuals */ -static OPUS_INLINE void silk_NLSF_residual_dequant( /* O Returns RD value in Q30 */ +static OPUS_INLINE void silk_NLSF_residual_dequant( /* O Returns RD value in Q30 */ opus_int16 x_Q10[], /* O Output [ order ] */ const opus_int8 indices[], /* I Quantization indices [ order ] */ const opus_uint8 pred_coef_Q8[], /* I Backward predictor coefs [ order ] */ @@ -70,15 +70,9 @@ void silk_NLSF_decode( opus_uint8 pred_Q8[ MAX_LPC_ORDER ]; opus_int16 ec_ix[ MAX_LPC_ORDER ]; opus_int16 res_Q10[ MAX_LPC_ORDER ]; - opus_int16 W_tmp_QW[ MAX_LPC_ORDER ]; - opus_int32 W_tmp_Q9, NLSF_Q15_tmp; + opus_int32 NLSF_Q15_tmp; const opus_uint8 *pCB_element; - - /* Decode first stage */ - pCB_element = &psNLSF_CB->CB1_NLSF_Q8[ NLSFIndices[ 0 ] * psNLSF_CB->order ]; - for( i = 0; i < psNLSF_CB->order; i++ ) { - pNLSF_Q15[ i ] = silk_LSHIFT( (opus_int16)pCB_element[ i ], 7 ); - } + const opus_int16 *pCB_Wght_Q9; /* Unpack entropy table indices and predictor for current CB1 index */ silk_NLSF_unpack( ec_ix, pred_Q8, psNLSF_CB, NLSFIndices[ 0 ] ); @@ -86,13 +80,11 @@ void silk_NLSF_decode( /* Predictive residual dequantizer */ silk_NLSF_residual_dequant( res_Q10, &NLSFIndices[ 1 ], pred_Q8, psNLSF_CB->quantStepSize_Q16, psNLSF_CB->order ); - /* Weights from codebook vector */ - silk_NLSF_VQ_weights_laroia( W_tmp_QW, pNLSF_Q15, psNLSF_CB->order ); - - /* Apply inverse square-rooted weights and add to output */ + /* Apply inverse square-rooted weights to first stage and add to output */ + pCB_element = &psNLSF_CB->CB1_NLSF_Q8[ NLSFIndices[ 0 ] * psNLSF_CB->order ]; + pCB_Wght_Q9 = &psNLSF_CB->CB1_Wght_Q9[ NLSFIndices[ 0 ] * psNLSF_CB->order ]; for( i = 0; i < psNLSF_CB->order; i++ ) { - W_tmp_Q9 = silk_SQRT_APPROX( silk_LSHIFT( (opus_int32)W_tmp_QW[ i ], 18 - NLSF_W_Q ) ); - NLSF_Q15_tmp = silk_ADD32( pNLSF_Q15[ i ], silk_DIV32_16( silk_LSHIFT( (opus_int32)res_Q10[ i ], 14 ), W_tmp_Q9 ) ); + NLSF_Q15_tmp = silk_ADD_LSHIFT32( silk_DIV32_16( silk_LSHIFT( (opus_int32)res_Q10[ i ], 14 ), pCB_Wght_Q9[ i ] ), (opus_int16)pCB_element[ i ], 7 ); pNLSF_Q15[ i ] = (opus_int16)silk_LIMIT( NLSF_Q15_tmp, 0, 32767 ); } diff --git a/libs/libopus/silk/NLSF_del_dec_quant.c b/libs/libopus/silk/NLSF_del_dec_quant.c index de88fee06..44a16acd0 100644 --- a/libs/libopus/silk/NLSF_del_dec_quant.c +++ b/libs/libopus/silk/NLSF_del_dec_quant.c @@ -84,7 +84,7 @@ opus_int32 silk_NLSF_del_dec_quant( /* O Returns nStates = 1; RD_Q25[ 0 ] = 0; prev_out_Q10[ 0 ] = 0; - for( i = order - 1; ; i-- ) { + for( i = order - 1; i >= 0; i-- ) { rates_Q5 = &ec_rates_Q5[ ec_ix[ i ] ]; in_Q10 = x_Q10[ i ]; for( j = 0; j < nStates; j++ ) { @@ -131,7 +131,7 @@ opus_int32 silk_NLSF_del_dec_quant( /* O Returns RD_Q25[ j + nStates ] = silk_SMLABB( silk_MLA( RD_tmp_Q25, silk_SMULBB( diff_Q10, diff_Q10 ), w_Q5[ i ] ), mu_Q20, rate1_Q5 ); } - if( nStates <= ( NLSF_QUANT_DEL_DEC_STATES >> 1 ) ) { + if( nStates <= NLSF_QUANT_DEL_DEC_STATES/2 ) { /* double number of states and copy */ for( j = 0; j < nStates; j++ ) { ind[ j + nStates ][ i ] = ind[ j ][ i ] + 1; @@ -140,7 +140,7 @@ opus_int32 silk_NLSF_del_dec_quant( /* O Returns for( j = nStates; j < NLSF_QUANT_DEL_DEC_STATES; j++ ) { ind[ j ][ i ] = ind[ j - nStates ][ i ]; } - } else if( i > 0 ) { + } else { /* sort lower and upper half of RD_Q25, pairwise */ for( j = 0; j < NLSF_QUANT_DEL_DEC_STATES; j++ ) { if( RD_Q25[ j ] > RD_Q25[ j + NLSF_QUANT_DEL_DEC_STATES ] ) { @@ -191,8 +191,6 @@ opus_int32 silk_NLSF_del_dec_quant( /* O Returns for( j = 0; j < 
NLSF_QUANT_DEL_DEC_STATES; j++ ) { ind[ j ][ i ] += silk_RSHIFT( ind_sort[ j ], NLSF_QUANT_DEL_DEC_STATES_LOG2 ); } - } else { /* i == 0 */ - break; } } diff --git a/libs/libopus/silk/NLSF_encode.c b/libs/libopus/silk/NLSF_encode.c index f03c3f1c3..01ac7db78 100644 --- a/libs/libopus/silk/NLSF_encode.c +++ b/libs/libopus/silk/NLSF_encode.c @@ -37,9 +37,9 @@ POSSIBILITY OF SUCH DAMAGE. /***********************/ opus_int32 silk_NLSF_encode( /* O Returns RD value in Q25 */ opus_int8 *NLSFIndices, /* I Codebook path vector [ LPC_ORDER + 1 ] */ - opus_int16 *pNLSF_Q15, /* I/O Quantized NLSF vector [ LPC_ORDER ] */ + opus_int16 *pNLSF_Q15, /* I/O (Un)quantized NLSF vector [ LPC_ORDER ] */ const silk_NLSF_CB_struct *psNLSF_CB, /* I Codebook object */ - const opus_int16 *pW_QW, /* I NLSF weight vector [ LPC_ORDER ] */ + const opus_int16 *pW_Q2, /* I NLSF weight vector [ LPC_ORDER ] */ const opus_int NLSF_mu_Q20, /* I Rate weight for the RD optimization */ const opus_int nSurvivors, /* I Max survivors after first stage */ const opus_int signalType /* I Signal type: 0/1/2 */ @@ -47,34 +47,32 @@ opus_int32 silk_NLSF_encode( /* O Returns { opus_int i, s, ind1, bestIndex, prob_Q8, bits_q7; opus_int32 W_tmp_Q9, ret; - VARDECL( opus_int32, err_Q26 ); + VARDECL( opus_int32, err_Q24 ); VARDECL( opus_int32, RD_Q25 ); VARDECL( opus_int, tempIndices1 ); VARDECL( opus_int8, tempIndices2 ); - opus_int16 res_Q15[ MAX_LPC_ORDER ]; opus_int16 res_Q10[ MAX_LPC_ORDER ]; opus_int16 NLSF_tmp_Q15[ MAX_LPC_ORDER ]; - opus_int16 W_tmp_QW[ MAX_LPC_ORDER ]; opus_int16 W_adj_Q5[ MAX_LPC_ORDER ]; opus_uint8 pred_Q8[ MAX_LPC_ORDER ]; opus_int16 ec_ix[ MAX_LPC_ORDER ]; const opus_uint8 *pCB_element, *iCDF_ptr; + const opus_int16 *pCB_Wght_Q9; SAVE_STACK; - silk_assert( nSurvivors <= NLSF_VQ_MAX_SURVIVORS ); - silk_assert( signalType >= 0 && signalType <= 2 ); + celt_assert( signalType >= 0 && signalType <= 2 ); silk_assert( NLSF_mu_Q20 <= 32767 && NLSF_mu_Q20 >= 0 ); /* NLSF stabilization */ silk_NLSF_stabilize( pNLSF_Q15, psNLSF_CB->deltaMin_Q15, psNLSF_CB->order ); /* First stage: VQ */ - ALLOC( err_Q26, psNLSF_CB->nVectors, opus_int32 ); - silk_NLSF_VQ( err_Q26, pNLSF_Q15, psNLSF_CB->CB1_NLSF_Q8, psNLSF_CB->nVectors, psNLSF_CB->order ); + ALLOC( err_Q24, psNLSF_CB->nVectors, opus_int32 ); + silk_NLSF_VQ( err_Q24, pNLSF_Q15, psNLSF_CB->CB1_NLSF_Q8, psNLSF_CB->CB1_Wght_Q9, psNLSF_CB->nVectors, psNLSF_CB->order ); /* Sort the quantization errors */ ALLOC( tempIndices1, nSurvivors, opus_int ); - silk_insertion_sort_increasing( err_Q26, tempIndices1, psNLSF_CB->nVectors, nSurvivors ); + silk_insertion_sort_increasing( err_Q24, tempIndices1, psNLSF_CB->nVectors, nSurvivors ); ALLOC( RD_Q25, nSurvivors, opus_int32 ); ALLOC( tempIndices2, nSurvivors * MAX_LPC_ORDER, opus_int8 ); @@ -85,23 +83,12 @@ opus_int32 silk_NLSF_encode( /* O Returns /* Residual after first stage */ pCB_element = &psNLSF_CB->CB1_NLSF_Q8[ ind1 * psNLSF_CB->order ]; + pCB_Wght_Q9 = &psNLSF_CB->CB1_Wght_Q9[ ind1 * psNLSF_CB->order ]; for( i = 0; i < psNLSF_CB->order; i++ ) { NLSF_tmp_Q15[ i ] = silk_LSHIFT16( (opus_int16)pCB_element[ i ], 7 ); - res_Q15[ i ] = pNLSF_Q15[ i ] - NLSF_tmp_Q15[ i ]; - } - - /* Weights from codebook vector */ - silk_NLSF_VQ_weights_laroia( W_tmp_QW, NLSF_tmp_Q15, psNLSF_CB->order ); - - /* Apply square-rooted weights */ - for( i = 0; i < psNLSF_CB->order; i++ ) { - W_tmp_Q9 = silk_SQRT_APPROX( silk_LSHIFT( (opus_int32)W_tmp_QW[ i ], 18 - NLSF_W_Q ) ); - res_Q10[ i ] = (opus_int16)silk_RSHIFT( silk_SMULBB( res_Q15[ i ], W_tmp_Q9 ), 14 
); - } - - /* Modify input weights accordingly */ - for( i = 0; i < psNLSF_CB->order; i++ ) { - W_adj_Q5[ i ] = silk_DIV32_16( silk_LSHIFT( (opus_int32)pW_QW[ i ], 5 ), W_tmp_QW[ i ] ); + W_tmp_Q9 = pCB_Wght_Q9[ i ]; + res_Q10[ i ] = (opus_int16)silk_RSHIFT( silk_SMULBB( pNLSF_Q15[ i ] - NLSF_tmp_Q15[ i ], W_tmp_Q9 ), 14 ); + W_adj_Q5[ i ] = silk_DIV32_varQ( (opus_int32)pW_Q2[ i ], silk_SMULBB( W_tmp_Q9, W_tmp_Q9 ), 21 ); } /* Unpack entropy table indices and predictor for current CB1 index */ diff --git a/libs/libopus/silk/NSQ.c b/libs/libopus/silk/NSQ.c index 43e3fee7e..45dd45ce8 100644 --- a/libs/libopus/silk/NSQ.c +++ b/libs/libopus/silk/NSQ.c @@ -37,7 +37,7 @@ POSSIBILITY OF SUCH DAMAGE. static OPUS_INLINE void silk_nsq_scale_states( const silk_encoder_state *psEncC, /* I Encoder State */ silk_nsq_state *NSQ, /* I/O NSQ state */ - const opus_int32 x_Q3[], /* I input in Q3 */ + const opus_int16 x16[], /* I input */ opus_int32 x_sc_Q10[], /* O input scaled with 1/Gain */ const opus_int16 sLTP[], /* I re-whitened LTP state in Q0 */ opus_int32 sLTP_Q15[], /* O LTP state matching scaled input */ @@ -75,21 +75,21 @@ static OPUS_INLINE void silk_noise_shape_quantizer( void silk_NSQ_c ( - const silk_encoder_state *psEncC, /* I/O Encoder State */ - silk_nsq_state *NSQ, /* I/O NSQ state */ - SideInfoIndices *psIndices, /* I/O Quantization Indices */ - const opus_int32 x_Q3[], /* I Prefiltered input signal */ - opus_int8 pulses[], /* O Quantized pulse signal */ - const opus_int16 PredCoef_Q12[ 2 * MAX_LPC_ORDER ], /* I Short term prediction coefs */ - const opus_int16 LTPCoef_Q14[ LTP_ORDER * MAX_NB_SUBFR ], /* I Long term prediction coefs */ - const opus_int16 AR2_Q13[ MAX_NB_SUBFR * MAX_SHAPE_LPC_ORDER ], /* I Noise shaping coefs */ - const opus_int HarmShapeGain_Q14[ MAX_NB_SUBFR ], /* I Long term shaping coefs */ - const opus_int Tilt_Q14[ MAX_NB_SUBFR ], /* I Spectral tilt */ - const opus_int32 LF_shp_Q14[ MAX_NB_SUBFR ], /* I Low frequency shaping coefs */ - const opus_int32 Gains_Q16[ MAX_NB_SUBFR ], /* I Quantization step sizes */ - const opus_int pitchL[ MAX_NB_SUBFR ], /* I Pitch lags */ - const opus_int Lambda_Q10, /* I Rate/distortion tradeoff */ - const opus_int LTP_scale_Q14 /* I LTP state scaling */ + const silk_encoder_state *psEncC, /* I Encoder State */ + silk_nsq_state *NSQ, /* I/O NSQ state */ + SideInfoIndices *psIndices, /* I/O Quantization Indices */ + const opus_int16 x16[], /* I Input */ + opus_int8 pulses[], /* O Quantized pulse signal */ + const opus_int16 PredCoef_Q12[ 2 * MAX_LPC_ORDER ], /* I Short term prediction coefs */ + const opus_int16 LTPCoef_Q14[ LTP_ORDER * MAX_NB_SUBFR ], /* I Long term prediction coefs */ + const opus_int16 AR_Q13[ MAX_NB_SUBFR * MAX_SHAPE_LPC_ORDER ], /* I Noise shaping coefs */ + const opus_int HarmShapeGain_Q14[ MAX_NB_SUBFR ], /* I Long term shaping coefs */ + const opus_int Tilt_Q14[ MAX_NB_SUBFR ], /* I Spectral tilt */ + const opus_int32 LF_shp_Q14[ MAX_NB_SUBFR ], /* I Low frequency shaping coefs */ + const opus_int32 Gains_Q16[ MAX_NB_SUBFR ], /* I Quantization step sizes */ + const opus_int pitchL[ MAX_NB_SUBFR ], /* I Pitch lags */ + const opus_int Lambda_Q10, /* I Rate/distortion tradeoff */ + const opus_int LTP_scale_Q14 /* I LTP state scaling */ ) { opus_int k, lag, start_idx, LSF_interpolation_flag; @@ -117,8 +117,7 @@ void silk_NSQ_c LSF_interpolation_flag = 1; } - ALLOC( sLTP_Q15, - psEncC->ltp_mem_length + psEncC->frame_length, opus_int32 ); + ALLOC( sLTP_Q15, psEncC->ltp_mem_length + psEncC->frame_length, opus_int32 ); 
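    /* Editorial note, not part of the upstream patch: the quantizer input is now the
       plain 16-bit signal x16[] instead of the Q3-scaled copy x_Q3[] used previously.
       The 1/gain scaling in silk_nsq_scale_states() changes accordingly from
       inv_gain_Q23 to inv_gain_Q26, so the scaled output stays in Q10:
           old: (x_Q3 * inv_gain_Q23) >> 16  ->  Q(3 + 23 - 16) = Q10
           new: (x16  * inv_gain_Q26) >> 16  ->  Q(0 + 26 - 16) = Q10
       i.e. x_sc_Q10[ i ] = silk_SMULWW( x16[ i ], inv_gain_Q26 ); */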
ALLOC( sLTP, psEncC->ltp_mem_length + psEncC->frame_length, opus_int16 ); ALLOC( x_sc_Q10, psEncC->subfr_length, opus_int32 ); /* Set up pointers to start of sub frame */ @@ -128,7 +127,7 @@ void silk_NSQ_c for( k = 0; k < psEncC->nb_subfr; k++ ) { A_Q12 = &PredCoef_Q12[ (( k >> 1 ) | ( 1 - LSF_interpolation_flag )) * MAX_LPC_ORDER ]; B_Q14 = <PCoef_Q14[ k * LTP_ORDER ]; - AR_shp_Q13 = &AR2_Q13[ k * MAX_SHAPE_LPC_ORDER ]; + AR_shp_Q13 = &AR_Q13[ k * MAX_SHAPE_LPC_ORDER ]; /* Noise shape parameters */ silk_assert( HarmShapeGain_Q14[ k ] >= 0 ); @@ -144,7 +143,7 @@ void silk_NSQ_c if( ( k & ( 3 - silk_LSHIFT( LSF_interpolation_flag, 1 ) ) ) == 0 ) { /* Rewhiten with new A coefs */ start_idx = psEncC->ltp_mem_length - lag - psEncC->predictLPCOrder - LTP_ORDER / 2; - silk_assert( start_idx > 0 ); + celt_assert( start_idx > 0 ); silk_LPC_analysis_filter( &sLTP[ start_idx ], &NSQ->xq[ start_idx + k * psEncC->subfr_length ], A_Q12, psEncC->ltp_mem_length - start_idx, psEncC->predictLPCOrder, psEncC->arch ); @@ -154,13 +153,13 @@ void silk_NSQ_c } } - silk_nsq_scale_states( psEncC, NSQ, x_Q3, x_sc_Q10, sLTP, sLTP_Q15, k, LTP_scale_Q14, Gains_Q16, pitchL, psIndices->signalType ); + silk_nsq_scale_states( psEncC, NSQ, x16, x_sc_Q10, sLTP, sLTP_Q15, k, LTP_scale_Q14, Gains_Q16, pitchL, psIndices->signalType ); silk_noise_shape_quantizer( NSQ, psIndices->signalType, x_sc_Q10, pulses, pxq, sLTP_Q15, A_Q12, B_Q14, AR_shp_Q13, lag, HarmShapeFIRPacked_Q14, Tilt_Q14[ k ], LF_shp_Q14[ k ], Gains_Q16[ k ], Lambda_Q10, offset_Q10, psEncC->subfr_length, psEncC->shapingLPCOrder, psEncC->predictLPCOrder, psEncC->arch ); - x_Q3 += psEncC->subfr_length; + x16 += psEncC->subfr_length; pulses += psEncC->subfr_length; pxq += psEncC->subfr_length; } @@ -169,15 +168,14 @@ void silk_NSQ_c NSQ->lagPrev = pitchL[ psEncC->nb_subfr - 1 ]; /* Save quantized speech and noise shaping signals */ - /* DEBUG_STORE_DATA( enc.pcm, &NSQ->xq[ psEncC->ltp_mem_length ], psEncC->frame_length * sizeof( opus_int16 ) ) */ silk_memmove( NSQ->xq, &NSQ->xq[ psEncC->frame_length ], psEncC->ltp_mem_length * sizeof( opus_int16 ) ); silk_memmove( NSQ->sLTP_shp_Q14, &NSQ->sLTP_shp_Q14[ psEncC->frame_length ], psEncC->ltp_mem_length * sizeof( opus_int32 ) ); RESTORE_STACK; } -/***********************************/ -/* silk_noise_shape_quantizer */ -/***********************************/ +/******************************/ +/* silk_noise_shape_quantizer */ +/******************************/ #if !defined(OPUS_X86_MAY_HAVE_SSE4_1) static OPUS_INLINE @@ -249,22 +247,22 @@ void silk_noise_shape_quantizer( } /* Noise shape feedback */ - silk_assert( ( shapingLPCOrder & 1 ) == 0 ); /* check that order is even */ - n_AR_Q12 = silk_NSQ_noise_shape_feedback_loop(psLPC_Q14, NSQ->sAR2_Q14, AR_shp_Q13, shapingLPCOrder, arch); + celt_assert( ( shapingLPCOrder & 1 ) == 0 ); /* check that order is even */ + n_AR_Q12 = silk_NSQ_noise_shape_feedback_loop(&NSQ->sDiff_shp_Q14, NSQ->sAR2_Q14, AR_shp_Q13, shapingLPCOrder, arch); n_AR_Q12 = silk_SMLAWB( n_AR_Q12, NSQ->sLF_AR_shp_Q14, Tilt_Q14 ); n_LF_Q12 = silk_SMULWB( NSQ->sLTP_shp_Q14[ NSQ->sLTP_shp_buf_idx - 1 ], LF_shp_Q14 ); n_LF_Q12 = silk_SMLAWT( n_LF_Q12, NSQ->sLF_AR_shp_Q14, LF_shp_Q14 ); - silk_assert( lag > 0 || signalType != TYPE_VOICED ); + celt_assert( lag > 0 || signalType != TYPE_VOICED ); /* Combine prediction and noise shaping signals */ tmp1 = silk_SUB32( silk_LSHIFT32( LPC_pred_Q10, 2 ), n_AR_Q12 ); /* Q12 */ tmp1 = silk_SUB32( tmp1, n_LF_Q12 ); /* Q12 */ if( lag > 0 ) { /* Symmetric, packed FIR 
coefficients */ - n_LTP_Q13 = silk_SMULWB( silk_ADD32( shp_lag_ptr[ 0 ], shp_lag_ptr[ -2 ] ), HarmShapeFIRPacked_Q14 ); + n_LTP_Q13 = silk_SMULWB( silk_ADD_SAT32( shp_lag_ptr[ 0 ], shp_lag_ptr[ -2 ] ), HarmShapeFIRPacked_Q14 ); n_LTP_Q13 = silk_SMLAWT( n_LTP_Q13, shp_lag_ptr[ -1 ], HarmShapeFIRPacked_Q14 ); n_LTP_Q13 = silk_LSHIFT( n_LTP_Q13, 1 ); shp_lag_ptr++; @@ -279,14 +277,27 @@ void silk_noise_shape_quantizer( r_Q10 = silk_SUB32( x_sc_Q10[ i ], tmp1 ); /* residual error Q10 */ /* Flip sign depending on dither */ - if ( NSQ->rand_seed < 0 ) { - r_Q10 = -r_Q10; + if( NSQ->rand_seed < 0 ) { + r_Q10 = -r_Q10; } r_Q10 = silk_LIMIT_32( r_Q10, -(31 << 10), 30 << 10 ); /* Find two quantization level candidates and measure their rate-distortion */ q1_Q10 = silk_SUB32( r_Q10, offset_Q10 ); q1_Q0 = silk_RSHIFT( q1_Q10, 10 ); + if (Lambda_Q10 > 2048) { + /* For aggressive RDO, the bias becomes more than one pulse. */ + int rdo_offset = Lambda_Q10/2 - 512; + if (q1_Q10 > rdo_offset) { + q1_Q0 = silk_RSHIFT( q1_Q10 - rdo_offset, 10 ); + } else if (q1_Q10 < -rdo_offset) { + q1_Q0 = silk_RSHIFT( q1_Q10 + rdo_offset, 10 ); + } else if (q1_Q10 < 0) { + q1_Q0 = -1; + } else { + q1_Q0 = 0; + } + } if( q1_Q0 > 0 ) { q1_Q10 = silk_SUB32( silk_LSHIFT( q1_Q0, 10 ), QUANT_LEVEL_ADJUST_Q10 ); q1_Q10 = silk_ADD32( q1_Q10, offset_Q10 ); @@ -337,7 +348,8 @@ void silk_noise_shape_quantizer( /* Update states */ psLPC_Q14++; *psLPC_Q14 = xq_Q14; - sLF_AR_shp_Q14 = silk_SUB_LSHIFT32( xq_Q14, n_AR_Q12, 2 ); + NSQ->sDiff_shp_Q14 = silk_SUB_LSHIFT32( xq_Q14, x_sc_Q10[ i ], 4 ); + sLF_AR_shp_Q14 = silk_SUB_LSHIFT32( NSQ->sDiff_shp_Q14, n_AR_Q12, 2 ); NSQ->sLF_AR_shp_Q14 = sLF_AR_shp_Q14; NSQ->sLTP_shp_Q14[ NSQ->sLTP_shp_buf_idx ] = silk_SUB_LSHIFT32( sLF_AR_shp_Q14, n_LF_Q12, 2 ); @@ -356,7 +368,7 @@ void silk_noise_shape_quantizer( static OPUS_INLINE void silk_nsq_scale_states( const silk_encoder_state *psEncC, /* I Encoder State */ silk_nsq_state *NSQ, /* I/O NSQ state */ - const opus_int32 x_Q3[], /* I input in Q3 */ + const opus_int16 x16[], /* I input */ opus_int32 x_sc_Q10[], /* O input scaled with 1/Gain */ const opus_int16 sLTP[], /* I re-whitened LTP state in Q0 */ opus_int32 sLTP_Q15[], /* O LTP state matching scaled input */ @@ -368,28 +380,18 @@ static OPUS_INLINE void silk_nsq_scale_states( ) { opus_int i, lag; - opus_int32 gain_adj_Q16, inv_gain_Q31, inv_gain_Q23; + opus_int32 gain_adj_Q16, inv_gain_Q31, inv_gain_Q26; lag = pitchL[ subfr ]; inv_gain_Q31 = silk_INVERSE32_varQ( silk_max( Gains_Q16[ subfr ], 1 ), 47 ); silk_assert( inv_gain_Q31 != 0 ); - /* Calculate gain adjustment factor */ - if( Gains_Q16[ subfr ] != NSQ->prev_gain_Q16 ) { - gain_adj_Q16 = silk_DIV32_varQ( NSQ->prev_gain_Q16, Gains_Q16[ subfr ], 16 ); - } else { - gain_adj_Q16 = (opus_int32)1 << 16; - } - /* Scale input */ - inv_gain_Q23 = silk_RSHIFT_ROUND( inv_gain_Q31, 8 ); + inv_gain_Q26 = silk_RSHIFT_ROUND( inv_gain_Q31, 5 ); for( i = 0; i < psEncC->subfr_length; i++ ) { - x_sc_Q10[ i ] = silk_SMULWW( x_Q3[ i ], inv_gain_Q23 ); + x_sc_Q10[ i ] = silk_SMULWW( x16[ i ], inv_gain_Q26 ); } - /* Save inverse gain */ - NSQ->prev_gain_Q16 = Gains_Q16[ subfr ]; - /* After rewhitening the LTP state is un-scaled, so scale with inv_gain_Q16 */ if( NSQ->rewhite_flag ) { if( subfr == 0 ) { @@ -403,7 +405,9 @@ static OPUS_INLINE void silk_nsq_scale_states( } /* Adjust for changing gain */ - if( gain_adj_Q16 != (opus_int32)1 << 16 ) { + if( Gains_Q16[ subfr ] != NSQ->prev_gain_Q16 ) { + gain_adj_Q16 = silk_DIV32_varQ( NSQ->prev_gain_Q16, Gains_Q16[ 
subfr ], 16 ); + /* Scale long-term shaping state */ for( i = NSQ->sLTP_shp_buf_idx - psEncC->ltp_mem_length; i < NSQ->sLTP_shp_buf_idx; i++ ) { NSQ->sLTP_shp_Q14[ i ] = silk_SMULWW( gain_adj_Q16, NSQ->sLTP_shp_Q14[ i ] ); @@ -417,6 +421,7 @@ static OPUS_INLINE void silk_nsq_scale_states( } NSQ->sLF_AR_shp_Q14 = silk_SMULWW( gain_adj_Q16, NSQ->sLF_AR_shp_Q14 ); + NSQ->sDiff_shp_Q14 = silk_SMULWW( gain_adj_Q16, NSQ->sDiff_shp_Q14 ); /* Scale short-term prediction and shaping states */ for( i = 0; i < NSQ_LPC_BUF_LENGTH; i++ ) { @@ -425,5 +430,8 @@ static OPUS_INLINE void silk_nsq_scale_states( for( i = 0; i < MAX_SHAPE_LPC_ORDER; i++ ) { NSQ->sAR2_Q14[ i ] = silk_SMULWW( gain_adj_Q16, NSQ->sAR2_Q14[ i ] ); } + + /* Save inverse gain */ + NSQ->prev_gain_Q16 = Gains_Q16[ subfr ]; } } diff --git a/libs/libopus/silk/NSQ_del_dec.c b/libs/libopus/silk/NSQ_del_dec.c index ab6feeac9..41f3fc93e 100644 --- a/libs/libopus/silk/NSQ_del_dec.c +++ b/libs/libopus/silk/NSQ_del_dec.c @@ -43,6 +43,7 @@ typedef struct { opus_int32 Shape_Q14[ DECISION_DELAY ]; opus_int32 sAR2_Q14[ MAX_SHAPE_LPC_ORDER ]; opus_int32 LF_AR_Q14; + opus_int32 Diff_Q14; opus_int32 Seed; opus_int32 SeedInit; opus_int32 RD_Q10; @@ -53,6 +54,7 @@ typedef struct { opus_int32 RD_Q10; opus_int32 xq_Q14; opus_int32 LF_AR_Q14; + opus_int32 Diff_Q14; opus_int32 sLTP_shp_Q14; opus_int32 LPC_exc_Q14; } NSQ_sample_struct; @@ -66,7 +68,7 @@ static OPUS_INLINE void silk_nsq_del_dec_scale_states( const silk_encoder_state *psEncC, /* I Encoder State */ silk_nsq_state *NSQ, /* I/O NSQ state */ NSQ_del_dec_struct psDelDec[], /* I/O Delayed decision states */ - const opus_int32 x_Q3[], /* I Input in Q3 */ + const opus_int16 x16[], /* I Input */ opus_int32 x_sc_Q10[], /* O Input scaled with 1/Gain in Q10 */ const opus_int16 sLTP[], /* I Re-whitened LTP state in Q0 */ opus_int32 sLTP_Q15[], /* O LTP state matching scaled input */ @@ -107,27 +109,27 @@ static OPUS_INLINE void silk_noise_shape_quantizer_del_dec( opus_int predictLPCOrder, /* I Prediction filter order */ opus_int warping_Q16, /* I */ opus_int nStatesDelayedDecision, /* I Number of states in decision tree */ - opus_int *smpl_buf_idx, /* I Index to newest samples in buffers */ + opus_int *smpl_buf_idx, /* I/O Index to newest samples in buffers */ opus_int decisionDelay, /* I */ int arch /* I */ ); void silk_NSQ_del_dec_c( - const silk_encoder_state *psEncC, /* I/O Encoder State */ - silk_nsq_state *NSQ, /* I/O NSQ state */ - SideInfoIndices *psIndices, /* I/O Quantization Indices */ - const opus_int32 x_Q3[], /* I Prefiltered input signal */ - opus_int8 pulses[], /* O Quantized pulse signal */ - const opus_int16 PredCoef_Q12[ 2 * MAX_LPC_ORDER ], /* I Short term prediction coefs */ - const opus_int16 LTPCoef_Q14[ LTP_ORDER * MAX_NB_SUBFR ], /* I Long term prediction coefs */ - const opus_int16 AR2_Q13[ MAX_NB_SUBFR * MAX_SHAPE_LPC_ORDER ], /* I Noise shaping coefs */ - const opus_int HarmShapeGain_Q14[ MAX_NB_SUBFR ], /* I Long term shaping coefs */ - const opus_int Tilt_Q14[ MAX_NB_SUBFR ], /* I Spectral tilt */ - const opus_int32 LF_shp_Q14[ MAX_NB_SUBFR ], /* I Low frequency shaping coefs */ - const opus_int32 Gains_Q16[ MAX_NB_SUBFR ], /* I Quantization step sizes */ - const opus_int pitchL[ MAX_NB_SUBFR ], /* I Pitch lags */ - const opus_int Lambda_Q10, /* I Rate/distortion tradeoff */ - const opus_int LTP_scale_Q14 /* I LTP state scaling */ + const silk_encoder_state *psEncC, /* I Encoder State */ + silk_nsq_state *NSQ, /* I/O NSQ state */ + SideInfoIndices *psIndices, /* I/O 
Quantization Indices */ + const opus_int16 x16[], /* I Input */ + opus_int8 pulses[], /* O Quantized pulse signal */ + const opus_int16 PredCoef_Q12[ 2 * MAX_LPC_ORDER ], /* I Short term prediction coefs */ + const opus_int16 LTPCoef_Q14[ LTP_ORDER * MAX_NB_SUBFR ], /* I Long term prediction coefs */ + const opus_int16 AR_Q13[ MAX_NB_SUBFR * MAX_SHAPE_LPC_ORDER ], /* I Noise shaping coefs */ + const opus_int HarmShapeGain_Q14[ MAX_NB_SUBFR ], /* I Long term shaping coefs */ + const opus_int Tilt_Q14[ MAX_NB_SUBFR ], /* I Spectral tilt */ + const opus_int32 LF_shp_Q14[ MAX_NB_SUBFR ], /* I Low frequency shaping coefs */ + const opus_int32 Gains_Q16[ MAX_NB_SUBFR ], /* I Quantization step sizes */ + const opus_int pitchL[ MAX_NB_SUBFR ], /* I Pitch lags */ + const opus_int Lambda_Q10, /* I Rate/distortion tradeoff */ + const opus_int LTP_scale_Q14 /* I LTP state scaling */ ) { opus_int i, k, lag, start_idx, LSF_interpolation_flag, Winner_ind, subfr; @@ -159,6 +161,7 @@ void silk_NSQ_del_dec_c( psDD->SeedInit = psDD->Seed; psDD->RD_Q10 = 0; psDD->LF_AR_Q14 = NSQ->sLF_AR_shp_Q14; + psDD->Diff_Q14 = NSQ->sDiff_shp_Q14; psDD->Shape_Q14[ 0 ] = NSQ->sLTP_shp_Q14[ psEncC->ltp_mem_length - 1 ]; silk_memcpy( psDD->sLPC_Q14, NSQ->sLPC_Q14, NSQ_LPC_BUF_LENGTH * sizeof( opus_int32 ) ); silk_memcpy( psDD->sAR2_Q14, NSQ->sAR2_Q14, sizeof( NSQ->sAR2_Q14 ) ); @@ -186,8 +189,7 @@ void silk_NSQ_del_dec_c( LSF_interpolation_flag = 1; } - ALLOC( sLTP_Q15, - psEncC->ltp_mem_length + psEncC->frame_length, opus_int32 ); + ALLOC( sLTP_Q15, psEncC->ltp_mem_length + psEncC->frame_length, opus_int32 ); ALLOC( sLTP, psEncC->ltp_mem_length + psEncC->frame_length, opus_int16 ); ALLOC( x_sc_Q10, psEncC->subfr_length, opus_int32 ); ALLOC( delayedGain_Q10, DECISION_DELAY, opus_int32 ); @@ -199,7 +201,7 @@ void silk_NSQ_del_dec_c( for( k = 0; k < psEncC->nb_subfr; k++ ) { A_Q12 = &PredCoef_Q12[ ( ( k >> 1 ) | ( 1 - LSF_interpolation_flag ) ) * MAX_LPC_ORDER ]; B_Q14 = <PCoef_Q14[ k * LTP_ORDER ]; - AR_shp_Q13 = &AR2_Q13[ k * MAX_SHAPE_LPC_ORDER ]; + AR_shp_Q13 = &AR_Q13[ k * MAX_SHAPE_LPC_ORDER ]; /* Noise shape parameters */ silk_assert( HarmShapeGain_Q14[ k ] >= 0 ); @@ -235,7 +237,8 @@ void silk_NSQ_del_dec_c( psDD = &psDelDec[ Winner_ind ]; last_smple_idx = smpl_buf_idx + decisionDelay; for( i = 0; i < decisionDelay; i++ ) { - last_smple_idx = ( last_smple_idx - 1 ) & DECISION_DELAY_MASK; + last_smple_idx = ( last_smple_idx - 1 ) % DECISION_DELAY; + if( last_smple_idx < 0 ) last_smple_idx += DECISION_DELAY; pulses[ i - decisionDelay ] = (opus_int8)silk_RSHIFT_ROUND( psDD->Q_Q10[ last_smple_idx ], 10 ); pxq[ i - decisionDelay ] = (opus_int16)silk_SAT16( silk_RSHIFT_ROUND( silk_SMULWW( psDD->Xq_Q14[ last_smple_idx ], Gains_Q16[ 1 ] ), 14 ) ); @@ -247,7 +250,7 @@ void silk_NSQ_del_dec_c( /* Rewhiten with new A coefs */ start_idx = psEncC->ltp_mem_length - lag - psEncC->predictLPCOrder - LTP_ORDER / 2; - silk_assert( start_idx > 0 ); + celt_assert( start_idx > 0 ); silk_LPC_analysis_filter( &sLTP[ start_idx ], &NSQ->xq[ start_idx + k * psEncC->subfr_length ], A_Q12, psEncC->ltp_mem_length - start_idx, psEncC->predictLPCOrder, psEncC->arch ); @@ -257,7 +260,7 @@ void silk_NSQ_del_dec_c( } } - silk_nsq_del_dec_scale_states( psEncC, NSQ, psDelDec, x_Q3, x_sc_Q10, sLTP, sLTP_Q15, k, + silk_nsq_del_dec_scale_states( psEncC, NSQ, psDelDec, x16, x_sc_Q10, sLTP, sLTP_Q15, k, psEncC->nStatesDelayedDecision, LTP_scale_Q14, Gains_Q16, pitchL, psIndices->signalType, decisionDelay ); silk_noise_shape_quantizer_del_dec( NSQ, psDelDec, 
psIndices->signalType, x_sc_Q10, pulses, pxq, sLTP_Q15, @@ -265,7 +268,7 @@ void silk_NSQ_del_dec_c( Gains_Q16[ k ], Lambda_Q10, offset_Q10, psEncC->subfr_length, subfr++, psEncC->shapingLPCOrder, psEncC->predictLPCOrder, psEncC->warping_Q16, psEncC->nStatesDelayedDecision, &smpl_buf_idx, decisionDelay, psEncC->arch ); - x_Q3 += psEncC->subfr_length; + x16 += psEncC->subfr_length; pulses += psEncC->subfr_length; pxq += psEncC->subfr_length; } @@ -286,7 +289,9 @@ void silk_NSQ_del_dec_c( last_smple_idx = smpl_buf_idx + decisionDelay; Gain_Q10 = silk_RSHIFT32( Gains_Q16[ psEncC->nb_subfr - 1 ], 6 ); for( i = 0; i < decisionDelay; i++ ) { - last_smple_idx = ( last_smple_idx - 1 ) & DECISION_DELAY_MASK; + last_smple_idx = ( last_smple_idx - 1 ) % DECISION_DELAY; + if( last_smple_idx < 0 ) last_smple_idx += DECISION_DELAY; + pulses[ i - decisionDelay ] = (opus_int8)silk_RSHIFT_ROUND( psDD->Q_Q10[ last_smple_idx ], 10 ); pxq[ i - decisionDelay ] = (opus_int16)silk_SAT16( silk_RSHIFT_ROUND( silk_SMULWW( psDD->Xq_Q14[ last_smple_idx ], Gain_Q10 ), 8 ) ); @@ -297,10 +302,10 @@ void silk_NSQ_del_dec_c( /* Update states */ NSQ->sLF_AR_shp_Q14 = psDD->LF_AR_Q14; + NSQ->sDiff_shp_Q14 = psDD->Diff_Q14; NSQ->lagPrev = pitchL[ psEncC->nb_subfr - 1 ]; /* Save quantized speech signal */ - /* DEBUG_STORE_DATA( enc.pcm, &NSQ->xq[psEncC->ltp_mem_length], psEncC->frame_length * sizeof( opus_int16 ) ) */ silk_memmove( NSQ->xq, &NSQ->xq[ psEncC->frame_length ], psEncC->ltp_mem_length * sizeof( opus_int16 ) ); silk_memmove( NSQ->sLTP_shp_Q14, &NSQ->sLTP_shp_Q14[ psEncC->frame_length ], psEncC->ltp_mem_length * sizeof( opus_int32 ) ); RESTORE_STACK; @@ -335,7 +340,7 @@ static OPUS_INLINE void silk_noise_shape_quantizer_del_dec( opus_int predictLPCOrder, /* I Prediction filter order */ opus_int warping_Q16, /* I */ opus_int nStatesDelayedDecision, /* I Number of states in decision tree */ - opus_int *smpl_buf_idx, /* I Index to newest samples in buffers */ + opus_int *smpl_buf_idx, /* I/O Index to newest samples in buffers */ opus_int decisionDelay, /* I */ int arch /* I */ ) @@ -356,7 +361,7 @@ static OPUS_INLINE void silk_noise_shape_quantizer_del_dec( NSQ_sample_struct *psSS; SAVE_STACK; - silk_assert( nStatesDelayedDecision > 0 ); + celt_assert( nStatesDelayedDecision > 0 ); ALLOC( psSampleState, nStatesDelayedDecision, NSQ_sample_pair ); shp_lag_ptr = &NSQ->sLTP_shp_Q14[ NSQ->sLTP_shp_buf_idx - lag + HARM_SHAPE_FIR_TAPS / 2 ]; @@ -389,8 +394,8 @@ static OPUS_INLINE void silk_noise_shape_quantizer_del_dec( /* Long-term shaping */ if( lag > 0 ) { /* Symmetric, packed FIR coefficients */ - n_LTP_Q14 = silk_SMULWB( silk_ADD32( shp_lag_ptr[ 0 ], shp_lag_ptr[ -2 ] ), HarmShapeFIRPacked_Q14 ); - n_LTP_Q14 = silk_SMLAWT( n_LTP_Q14, shp_lag_ptr[ -1 ], HarmShapeFIRPacked_Q14 ); + n_LTP_Q14 = silk_SMULWB( silk_ADD_SAT32( shp_lag_ptr[ 0 ], shp_lag_ptr[ -2 ] ), HarmShapeFIRPacked_Q14 ); + n_LTP_Q14 = silk_SMLAWT( n_LTP_Q14, shp_lag_ptr[ -1 ], HarmShapeFIRPacked_Q14 ); n_LTP_Q14 = silk_SUB_LSHIFT32( LTP_pred_Q14, n_LTP_Q14, 2 ); /* Q12 -> Q14 */ shp_lag_ptr++; } else { @@ -414,9 +419,9 @@ static OPUS_INLINE void silk_noise_shape_quantizer_del_dec( LPC_pred_Q14 = silk_LSHIFT( LPC_pred_Q14, 4 ); /* Q10 -> Q14 */ /* Noise shape feedback */ - silk_assert( ( shapingLPCOrder & 1 ) == 0 ); /* check that order is even */ + celt_assert( ( shapingLPCOrder & 1 ) == 0 ); /* check that order is even */ /* Output of lowpass section */ - tmp2 = silk_SMLAWB( psLPC_Q14[ 0 ], psDD->sAR2_Q14[ 0 ], warping_Q16 ); + tmp2 = silk_SMLAWB( 
psDD->Diff_Q14, psDD->sAR2_Q14[ 0 ], warping_Q16 ); /* Output of allpass section */ tmp1 = silk_SMLAWB( psDD->sAR2_Q14[ 0 ], psDD->sAR2_Q14[ 1 ] - tmp2, warping_Q16 ); psDD->sAR2_Q14[ 0 ] = tmp2; @@ -446,9 +451,9 @@ static OPUS_INLINE void silk_noise_shape_quantizer_del_dec( /* Input minus prediction plus noise feedback */ /* r = x[ i ] - LTP_pred - LPC_pred + n_AR + n_Tilt + n_LF + n_LTP */ - tmp1 = silk_ADD32( n_AR_Q14, n_LF_Q14 ); /* Q14 */ + tmp1 = silk_ADD_SAT32( n_AR_Q14, n_LF_Q14 ); /* Q14 */ tmp2 = silk_ADD32( n_LTP_Q14, LPC_pred_Q14 ); /* Q13 */ - tmp1 = silk_SUB32( tmp2, tmp1 ); /* Q13 */ + tmp1 = silk_SUB_SAT32( tmp2, tmp1 ); /* Q13 */ tmp1 = silk_RSHIFT_ROUND( tmp1, 4 ); /* Q10 */ r_Q10 = silk_SUB32( x_Q10[ i ], tmp1 ); /* residual error Q10 */ @@ -462,6 +467,19 @@ static OPUS_INLINE void silk_noise_shape_quantizer_del_dec( /* Find two quantization level candidates and measure their rate-distortion */ q1_Q10 = silk_SUB32( r_Q10, offset_Q10 ); q1_Q0 = silk_RSHIFT( q1_Q10, 10 ); + if (Lambda_Q10 > 2048) { + /* For aggressive RDO, the bias becomes more than one pulse. */ + int rdo_offset = Lambda_Q10/2 - 512; + if (q1_Q10 > rdo_offset) { + q1_Q0 = silk_RSHIFT( q1_Q10 - rdo_offset, 10 ); + } else if (q1_Q10 < -rdo_offset) { + q1_Q0 = silk_RSHIFT( q1_Q10 + rdo_offset, 10 ); + } else if (q1_Q10 < 0) { + q1_Q0 = -1; + } else { + q1_Q0 = 0; + } + } if( q1_Q0 > 0 ) { q1_Q10 = silk_SUB32( silk_LSHIFT( q1_Q0, 10 ), QUANT_LEVEL_ADJUST_Q10 ); q1_Q10 = silk_ADD32( q1_Q10, offset_Q10 ); @@ -515,8 +533,9 @@ static OPUS_INLINE void silk_noise_shape_quantizer_del_dec( xq_Q14 = silk_ADD32( LPC_exc_Q14, LPC_pred_Q14 ); /* Update states */ - sLF_AR_shp_Q14 = silk_SUB32( xq_Q14, n_AR_Q14 ); - psSS[ 0 ].sLTP_shp_Q14 = silk_SUB32( sLF_AR_shp_Q14, n_LF_Q14 ); + psSS[ 0 ].Diff_Q14 = silk_SUB_LSHIFT32( xq_Q14, x_Q10[ i ], 4 ); + sLF_AR_shp_Q14 = silk_SUB32( psSS[ 0 ].Diff_Q14, n_AR_Q14 ); + psSS[ 0 ].sLTP_shp_Q14 = silk_SUB_SAT32( sLF_AR_shp_Q14, n_LF_Q14 ); psSS[ 0 ].LF_AR_Q14 = sLF_AR_shp_Q14; psSS[ 0 ].LPC_exc_Q14 = LPC_exc_Q14; psSS[ 0 ].xq_Q14 = xq_Q14; @@ -529,21 +548,22 @@ static OPUS_INLINE void silk_noise_shape_quantizer_del_dec( exc_Q14 = -exc_Q14; } - /* Add predictions */ LPC_exc_Q14 = silk_ADD32( exc_Q14, LTP_pred_Q14 ); xq_Q14 = silk_ADD32( LPC_exc_Q14, LPC_pred_Q14 ); /* Update states */ - sLF_AR_shp_Q14 = silk_SUB32( xq_Q14, n_AR_Q14 ); - psSS[ 1 ].sLTP_shp_Q14 = silk_SUB32( sLF_AR_shp_Q14, n_LF_Q14 ); + psSS[ 1 ].Diff_Q14 = silk_SUB_LSHIFT32( xq_Q14, x_Q10[ i ], 4 ); + sLF_AR_shp_Q14 = silk_SUB32( psSS[ 1 ].Diff_Q14, n_AR_Q14 ); + psSS[ 1 ].sLTP_shp_Q14 = silk_SUB_SAT32( sLF_AR_shp_Q14, n_LF_Q14 ); psSS[ 1 ].LF_AR_Q14 = sLF_AR_shp_Q14; psSS[ 1 ].LPC_exc_Q14 = LPC_exc_Q14; psSS[ 1 ].xq_Q14 = xq_Q14; } - *smpl_buf_idx = ( *smpl_buf_idx - 1 ) & DECISION_DELAY_MASK; /* Index to newest samples */ - last_smple_idx = ( *smpl_buf_idx + decisionDelay ) & DECISION_DELAY_MASK; /* Index to decisionDelay old samples */ + *smpl_buf_idx = ( *smpl_buf_idx - 1 ) % DECISION_DELAY; + if( *smpl_buf_idx < 0 ) *smpl_buf_idx += DECISION_DELAY; + last_smple_idx = ( *smpl_buf_idx + decisionDelay ) % DECISION_DELAY; /* Find winner */ RDmin_Q10 = psSampleState[ 0 ][ 0 ].RD_Q10; @@ -607,6 +627,7 @@ static OPUS_INLINE void silk_noise_shape_quantizer_del_dec( psDD = &psDelDec[ k ]; psSS = &psSampleState[ k ][ 0 ]; psDD->LF_AR_Q14 = psSS->LF_AR_Q14; + psDD->Diff_Q14 = psSS->Diff_Q14; psDD->sLPC_Q14[ NSQ_LPC_BUF_LENGTH + i ] = psSS->xq_Q14; psDD->Xq_Q14[ *smpl_buf_idx ] = psSS->xq_Q14; psDD->Q_Q10[ *smpl_buf_idx ] = 
psSS->Q_Q10; @@ -631,7 +652,7 @@ static OPUS_INLINE void silk_nsq_del_dec_scale_states( const silk_encoder_state *psEncC, /* I Encoder State */ silk_nsq_state *NSQ, /* I/O NSQ state */ NSQ_del_dec_struct psDelDec[], /* I/O Delayed decision states */ - const opus_int32 x_Q3[], /* I Input in Q3 */ + const opus_int16 x16[], /* I Input */ opus_int32 x_sc_Q10[], /* O Input scaled with 1/Gain in Q10 */ const opus_int16 sLTP[], /* I Re-whitened LTP state in Q0 */ opus_int32 sLTP_Q15[], /* O LTP state matching scaled input */ @@ -645,29 +666,19 @@ static OPUS_INLINE void silk_nsq_del_dec_scale_states( ) { opus_int i, k, lag; - opus_int32 gain_adj_Q16, inv_gain_Q31, inv_gain_Q23; + opus_int32 gain_adj_Q16, inv_gain_Q31, inv_gain_Q26; NSQ_del_dec_struct *psDD; lag = pitchL[ subfr ]; inv_gain_Q31 = silk_INVERSE32_varQ( silk_max( Gains_Q16[ subfr ], 1 ), 47 ); silk_assert( inv_gain_Q31 != 0 ); - /* Calculate gain adjustment factor */ - if( Gains_Q16[ subfr ] != NSQ->prev_gain_Q16 ) { - gain_adj_Q16 = silk_DIV32_varQ( NSQ->prev_gain_Q16, Gains_Q16[ subfr ], 16 ); - } else { - gain_adj_Q16 = (opus_int32)1 << 16; - } - /* Scale input */ - inv_gain_Q23 = silk_RSHIFT_ROUND( inv_gain_Q31, 8 ); + inv_gain_Q26 = silk_RSHIFT_ROUND( inv_gain_Q31, 5 ); for( i = 0; i < psEncC->subfr_length; i++ ) { - x_sc_Q10[ i ] = silk_SMULWW( x_Q3[ i ], inv_gain_Q23 ); + x_sc_Q10[ i ] = silk_SMULWW( x16[ i ], inv_gain_Q26 ); } - /* Save inverse gain */ - NSQ->prev_gain_Q16 = Gains_Q16[ subfr ]; - /* After rewhitening the LTP state is un-scaled, so scale with inv_gain_Q16 */ if( NSQ->rewhite_flag ) { if( subfr == 0 ) { @@ -681,7 +692,9 @@ static OPUS_INLINE void silk_nsq_del_dec_scale_states( } /* Adjust for changing gain */ - if( gain_adj_Q16 != (opus_int32)1 << 16 ) { + if( Gains_Q16[ subfr ] != NSQ->prev_gain_Q16 ) { + gain_adj_Q16 = silk_DIV32_varQ( NSQ->prev_gain_Q16, Gains_Q16[ subfr ], 16 ); + /* Scale long-term shaping state */ for( i = NSQ->sLTP_shp_buf_idx - psEncC->ltp_mem_length; i < NSQ->sLTP_shp_buf_idx; i++ ) { NSQ->sLTP_shp_Q14[ i ] = silk_SMULWW( gain_adj_Q16, NSQ->sLTP_shp_Q14[ i ] ); @@ -699,6 +712,7 @@ static OPUS_INLINE void silk_nsq_del_dec_scale_states( /* Scale scalar states */ psDD->LF_AR_Q14 = silk_SMULWW( gain_adj_Q16, psDD->LF_AR_Q14 ); + psDD->Diff_Q14 = silk_SMULWW( gain_adj_Q16, psDD->Diff_Q14 ); /* Scale short-term prediction and shaping states */ for( i = 0; i < NSQ_LPC_BUF_LENGTH; i++ ) { @@ -712,5 +726,8 @@ static OPUS_INLINE void silk_nsq_del_dec_scale_states( psDD->Shape_Q14[ i ] = silk_SMULWW( gain_adj_Q16, psDD->Shape_Q14[ i ] ); } } + + /* Save inverse gain */ + NSQ->prev_gain_Q16 = Gains_Q16[ subfr ]; } } diff --git a/libs/libopus/silk/PLC.c b/libs/libopus/silk/PLC.c index fb6ea887b..4667440db 100644 --- a/libs/libopus/silk/PLC.c +++ b/libs/libopus/silk/PLC.c @@ -275,7 +275,7 @@ static OPUS_INLINE void silk_PLC_conceal( /* Reduce random noise for unvoiced frames with high LPC gain */ opus_int32 invGain_Q30, down_scale_Q30; - invGain_Q30 = silk_LPC_inverse_pred_gain( psPLC->prevLPC_Q12, psDec->LPC_order ); + invGain_Q30 = silk_LPC_inverse_pred_gain( psPLC->prevLPC_Q12, psDec->LPC_order, arch ); down_scale_Q30 = silk_min_32( silk_RSHIFT( (opus_int32)1 << 30, LOG2_INV_LPC_GAIN_HIGH_THRES ), invGain_Q30 ); down_scale_Q30 = silk_max_32( silk_RSHIFT( (opus_int32)1 << 30, LOG2_INV_LPC_GAIN_LOW_THRES ), down_scale_Q30 ); @@ -291,7 +291,7 @@ static OPUS_INLINE void silk_PLC_conceal( /* Rewhiten LTP state */ idx = psDec->ltp_mem_length - lag - psDec->LPC_order - LTP_ORDER / 2; - silk_assert( idx > 
0 ); + celt_assert( idx > 0 ); silk_LPC_analysis_filter( &sLTP[ idx ], &psDec->outBuf[ idx ], A_Q12, psDec->ltp_mem_length - idx, psDec->LPC_order, arch ); /* Scale LTP state */ inv_gain_Q30 = silk_INVERSE32_varQ( psPLC->prevGain_Q16[ 1 ], 46 ); @@ -345,7 +345,7 @@ static OPUS_INLINE void silk_PLC_conceal( /* Copy LPC state */ silk_memcpy( sLPC_Q14_ptr, psDec->sLPC_Q14_buf, MAX_LPC_ORDER * sizeof( opus_int32 ) ); - silk_assert( psDec->LPC_order >= 10 ); /* check that unrolling works */ + celt_assert( psDec->LPC_order >= 10 ); /* check that unrolling works */ for( i = 0; i < psDec->frame_length; i++ ) { /* partly unrolled */ /* Avoids introducing a bias because silk_SMLAWB() always rounds to -inf */ diff --git a/libs/libopus/silk/SigProc_FIX.h b/libs/libopus/silk/SigProc_FIX.h index b63299441..1d9bf2f16 100644 --- a/libs/libopus/silk/SigProc_FIX.h +++ b/libs/libopus/silk/SigProc_FIX.h @@ -35,7 +35,7 @@ extern "C" /*#define silk_MACRO_COUNT */ /* Used to enable WMOPS counting */ -#define SILK_MAX_ORDER_LPC 16 /* max order of the LPC analysis in schur() and k2a() */ +#define SILK_MAX_ORDER_LPC 24 /* max order of the LPC analysis in schur() and k2a() */ #include /* for memset(), memcpy(), memmove() */ #include "typedef.h" @@ -47,6 +47,11 @@ extern "C" #include "x86/SigProc_FIX_sse.h" #endif +#if (defined(OPUS_ARM_ASM) || defined(OPUS_ARM_MAY_HAVE_NEON_INTR)) +#include "arm/biquad_alt_arm.h" +#include "arm/LPC_inv_pred_gain_arm.h" +#endif + /********************************************************************/ /* SIGNAL PROCESSING FUNCTIONS */ /********************************************************************/ @@ -96,14 +101,22 @@ void silk_resampler_down2_3( * slower than biquad() but uses more precise coefficients * can handle (slowly) varying coefficients */ -void silk_biquad_alt( +void silk_biquad_alt_stride1( const opus_int16 *in, /* I input signal */ const opus_int32 *B_Q28, /* I MA coefficients [3] */ const opus_int32 *A_Q28, /* I AR coefficients [2] */ opus_int32 *S, /* I/O State vector [2] */ opus_int16 *out, /* O output signal */ - const opus_int32 len, /* I signal length (must be even) */ - opus_int stride /* I Operate on interleaved signal if > 1 */ + const opus_int32 len /* I signal length (must be even) */ +); + +void silk_biquad_alt_stride2_c( + const opus_int16 *in, /* I input signal */ + const opus_int32 *B_Q28, /* I MA coefficients [3] */ + const opus_int32 *A_Q28, /* I AR coefficients [2] */ + opus_int32 *S, /* I/O State vector [4] */ + opus_int16 *out, /* O output signal */ + const opus_int32 len /* I signal length (must be even) */ ); /* Variable order MA prediction error filter. 
*/ @@ -132,17 +145,11 @@ void silk_bwexpander_32( /* Compute inverse of LPC prediction gain, and */ /* test if LPC coefficients are stable (all poles within unit circle) */ -opus_int32 silk_LPC_inverse_pred_gain( /* O Returns inverse prediction gain in energy domain, Q30 */ +opus_int32 silk_LPC_inverse_pred_gain_c( /* O Returns inverse prediction gain in energy domain, Q30 */ const opus_int16 *A_Q12, /* I Prediction coefficients, Q12 [order] */ const opus_int order /* I Prediction order */ ); -/* For input in Q24 domain */ -opus_int32 silk_LPC_inverse_pred_gain_Q24( /* O Returns inverse prediction gain in energy domain, Q30 */ - const opus_int32 *A_Q24, /* I Prediction coefficients [order] */ - const opus_int order /* I Prediction order */ -); - /* Split signal in two decimated bands using first-order allpass filters */ void silk_ana_filt_bank_1( const opus_int16 *in, /* I Input signal [N] */ @@ -152,6 +159,14 @@ void silk_ana_filt_bank_1( const opus_int32 N /* I Number of input samples */ ); +#if !defined(OVERRIDE_silk_biquad_alt_stride2) +#define silk_biquad_alt_stride2(in, B_Q28, A_Q28, S, out, len, arch) ((void)(arch), silk_biquad_alt_stride2_c(in, B_Q28, A_Q28, S, out, len)) +#endif + +#if !defined(OVERRIDE_silk_LPC_inverse_pred_gain) +#define silk_LPC_inverse_pred_gain(A_Q12, order, arch) ((void)(arch), silk_LPC_inverse_pred_gain_c(A_Q12, order)) +#endif + /********************************************************************/ /* SCALAR FUNCTIONS */ /********************************************************************/ @@ -271,7 +286,17 @@ void silk_A2NLSF( void silk_NLSF2A( opus_int16 *a_Q12, /* O monic whitening filter coefficients in Q12, [ d ] */ const opus_int16 *NLSF, /* I normalized line spectral frequencies in Q15, [ d ] */ - const opus_int d /* I filter order (should be even) */ + const opus_int d, /* I filter order (should be even) */ + int arch /* I Run-time architecture */ +); + +/* Convert int32 coefficients to int16 coefs and make sure there's no wrap-around */ +void silk_LPC_fit( + opus_int16 *a_QOUT, /* O Output signal */ + opus_int32 *a_QIN, /* I/O Input signal */ + const opus_int QOUT, /* I Input Q domain */ + const opus_int QIN, /* I Input Q domain */ + const opus_int d /* I Filter order */ ); void silk_insertion_sort_increasing( @@ -356,7 +381,7 @@ opus_int32 silk_inner_prod_aligned_scale( const opus_int len /* I vector lengths */ ); -opus_int64 silk_inner_prod16_aligned_64_c( +opus_int64 silk_inner_prod16_c( const opus_int16 *inVec1, /* I input vector 1 */ const opus_int16 *inVec2, /* I input vector 2 */ const opus_int len /* I vector lengths */ @@ -471,8 +496,7 @@ static OPUS_INLINE opus_int32 silk_ROR32( opus_int32 a32, opus_int rot ) /* Add with saturation for positive input values */ #define silk_ADD_POS_SAT8(a, b) ((((a)+(b)) & 0x80) ? silk_int8_MAX : ((a)+(b))) #define silk_ADD_POS_SAT16(a, b) ((((a)+(b)) & 0x8000) ? silk_int16_MAX : ((a)+(b))) -#define silk_ADD_POS_SAT32(a, b) ((((a)+(b)) & 0x80000000) ? silk_int32_MAX : ((a)+(b))) -#define silk_ADD_POS_SAT64(a, b) ((((a)+(b)) & 0x8000000000000000LL) ? silk_int64_MAX : ((a)+(b))) +#define silk_ADD_POS_SAT32(a, b) ((((opus_uint32)(a)+(opus_uint32)(b)) & 0x80000000) ? 
silk_int32_MAX : ((a)+(b))) #define silk_LSHIFT8(a, shift) ((opus_int8)((opus_uint8)(a)<<(shift))) /* shift >= 0, shift < 8 */ #define silk_LSHIFT16(a, shift) ((opus_int16)((opus_uint16)(a)<<(shift))) /* shift >= 0, shift < 16 */ @@ -572,7 +596,9 @@ static OPUS_INLINE opus_int64 silk_max_64(opus_int64 a, opus_int64 b) /* Make sure to store the result as the seed for the next call (also in between */ /* frames), otherwise result won't be random at all. When only using some of the */ /* bits, take the most significant bits by right-shifting. */ -#define silk_RAND(seed) (silk_MLA_ovflw(907633515, (seed), 196314165)) +#define RAND_MULTIPLIER 196314165 +#define RAND_INCREMENT 907633515 +#define silk_RAND(seed) (silk_MLA_ovflw((RAND_INCREMENT), (seed), (RAND_MULTIPLIER))) /* Add some multiplication functions that can be easily mapped to ARM. */ @@ -587,8 +613,8 @@ static OPUS_INLINE opus_int64 silk_max_64(opus_int64 a, opus_int64 b) #define silk_burg_modified(res_nrg, res_nrg_Q, A_Q16, x, minInvGain_Q30, subfr_length, nb_subfr, D, arch) \ ((void)(arch), silk_burg_modified_c(res_nrg, res_nrg_Q, A_Q16, x, minInvGain_Q30, subfr_length, nb_subfr, D, arch)) -#define silk_inner_prod16_aligned_64(inVec1, inVec2, len, arch) \ - ((void)(arch),silk_inner_prod16_aligned_64_c(inVec1, inVec2, len)) +#define silk_inner_prod16(inVec1, inVec2, len, arch) \ + ((void)(arch),silk_inner_prod16_c(inVec1, inVec2, len)) #endif #include "Inlines.h" diff --git a/libs/libopus/silk/VAD.c b/libs/libopus/silk/VAD.c index 0a782af2f..d0cda5216 100644 --- a/libs/libopus/silk/VAD.c +++ b/libs/libopus/silk/VAD.c @@ -101,9 +101,9 @@ opus_int silk_VAD_GetSA_Q8_c( /* O Return v /* Safety checks */ silk_assert( VAD_N_BANDS == 4 ); - silk_assert( MAX_FRAME_LENGTH >= psEncC->frame_length ); - silk_assert( psEncC->frame_length <= 512 ); - silk_assert( psEncC->frame_length == 8 * silk_RSHIFT( psEncC->frame_length, 3 ) ); + celt_assert( MAX_FRAME_LENGTH >= psEncC->frame_length ); + celt_assert( psEncC->frame_length <= 512 ); + celt_assert( psEncC->frame_length == 8 * silk_RSHIFT( psEncC->frame_length, 3 ) ); /***********************/ /* Filter and Decimate */ @@ -252,15 +252,14 @@ opus_int silk_VAD_GetSA_Q8_c( /* O Return v speech_nrg += ( b + 1 ) * silk_RSHIFT( Xnrg[ b ] - psSilk_VAD->NL[ b ], 4 ); } + if( psEncC->frame_length == 20 * psEncC->fs_kHz ) { + speech_nrg = silk_RSHIFT32( speech_nrg, 1 ); + } /* Power scaling */ if( speech_nrg <= 0 ) { SA_Q15 = silk_RSHIFT( SA_Q15, 1 ); - } else if( speech_nrg < 32768 ) { - if( psEncC->frame_length == 10 * psEncC->fs_kHz ) { - speech_nrg = silk_LSHIFT_SAT32( speech_nrg, 16 ); - } else { - speech_nrg = silk_LSHIFT_SAT32( speech_nrg, 15 ); - } + } else if( speech_nrg < 16384 ) { + speech_nrg = silk_LSHIFT32( speech_nrg, 16 ); /* square-root */ speech_nrg = silk_SQRT_APPROX( speech_nrg ); @@ -313,6 +312,8 @@ void silk_VAD_GetNoiseLevels( /* Initially faster smoothing */ if( psSilk_VAD->counter < 1000 ) { /* 1000 = 20 sec */ min_coef = silk_DIV32_16( silk_int16_MAX, silk_RSHIFT( psSilk_VAD->counter, 4 ) + 1 ); + /* Increment frame counter */ + psSilk_VAD->counter++; } else { min_coef = 0; } @@ -356,7 +357,4 @@ void silk_VAD_GetNoiseLevels( /* Store as part of state */ psSilk_VAD->NL[ k ] = nl; } - - /* Increment frame counter */ - psSilk_VAD->counter++; } diff --git a/libs/libopus/silk/VQ_WMat_EC.c b/libs/libopus/silk/VQ_WMat_EC.c index 7983f1db8..245a7e4b0 100644 --- a/libs/libopus/silk/VQ_WMat_EC.c +++ b/libs/libopus/silk/VQ_WMat_EC.c @@ -34,84 +34,95 @@ POSSIBILITY OF SUCH DAMAGE. 
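A note on the silk_RAND macro above: it is a plain linear congruential generator, new_seed = seed * RAND_MULTIPLIER + RAND_INCREMENT, with intentional 32-bit wraparound (that is what the _ovflw multiply-accumulate is for). A minimal stand-alone sketch of the same recurrence, outside the patch; toy_silk_rand is a hypothetical name and nothing beyond <stdint.h> is assumed:

#include <stdint.h>

/* One step of the SILK pseudo-random recurrence: seed * 196314165 + 907633515,
   reduced modulo 2^32. Unsigned arithmetic keeps the wraparound well defined. */
static int32_t toy_silk_rand(int32_t seed)
{
    return (int32_t)((uint32_t)seed * 196314165u + 907633515u);
}

As the comment in SigProc_FIX.h notes, the result must be stored as the seed for the next call, and callers needing fewer random bits should take the most significant ones by right-shifting.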
/* Entropy constrained matrix-weighted VQ, hard-coded to 5-element vectors, for a single input data vector */ void silk_VQ_WMat_EC_c( opus_int8 *ind, /* O index of best codebook vector */ - opus_int32 *rate_dist_Q14, /* O best weighted quant error + mu * rate */ + opus_int32 *res_nrg_Q15, /* O best residual energy */ + opus_int32 *rate_dist_Q8, /* O best total bitrate */ opus_int *gain_Q7, /* O sum of absolute LTP coefficients */ - const opus_int16 *in_Q14, /* I input vector to be quantized */ - const opus_int32 *W_Q18, /* I weighting matrix */ + const opus_int32 *XX_Q17, /* I correlation matrix */ + const opus_int32 *xX_Q17, /* I correlation vector */ const opus_int8 *cb_Q7, /* I codebook */ const opus_uint8 *cb_gain_Q7, /* I codebook effective gain */ const opus_uint8 *cl_Q5, /* I code length for each codebook vector */ - const opus_int mu_Q9, /* I tradeoff betw. weighted error and rate */ + const opus_int subfr_len, /* I number of samples per subframe */ const opus_int32 max_gain_Q7, /* I maximum sum of absolute LTP coefficients */ - opus_int L /* I number of vectors in codebook */ + const opus_int L /* I number of vectors in codebook */ ) { opus_int k, gain_tmp_Q7; const opus_int8 *cb_row_Q7; - opus_int16 diff_Q14[ 5 ]; - opus_int32 sum1_Q14, sum2_Q16; + opus_int32 neg_xX_Q24[ 5 ]; + opus_int32 sum1_Q15, sum2_Q24; + opus_int32 bits_res_Q8, bits_tot_Q8; + + /* Negate and convert to new Q domain */ + neg_xX_Q24[ 0 ] = -silk_LSHIFT32( xX_Q17[ 0 ], 7 ); + neg_xX_Q24[ 1 ] = -silk_LSHIFT32( xX_Q17[ 1 ], 7 ); + neg_xX_Q24[ 2 ] = -silk_LSHIFT32( xX_Q17[ 2 ], 7 ); + neg_xX_Q24[ 3 ] = -silk_LSHIFT32( xX_Q17[ 3 ], 7 ); + neg_xX_Q24[ 4 ] = -silk_LSHIFT32( xX_Q17[ 4 ], 7 ); /* Loop over codebook */ - *rate_dist_Q14 = silk_int32_MAX; + *rate_dist_Q8 = silk_int32_MAX; + *res_nrg_Q15 = silk_int32_MAX; cb_row_Q7 = cb_Q7; + /* If things go really bad, at least *ind is set to something safe. 
*/ + *ind = 0; for( k = 0; k < L; k++ ) { + opus_int32 penalty; gain_tmp_Q7 = cb_gain_Q7[k]; - - diff_Q14[ 0 ] = in_Q14[ 0 ] - silk_LSHIFT( cb_row_Q7[ 0 ], 7 ); - diff_Q14[ 1 ] = in_Q14[ 1 ] - silk_LSHIFT( cb_row_Q7[ 1 ], 7 ); - diff_Q14[ 2 ] = in_Q14[ 2 ] - silk_LSHIFT( cb_row_Q7[ 2 ], 7 ); - diff_Q14[ 3 ] = in_Q14[ 3 ] - silk_LSHIFT( cb_row_Q7[ 3 ], 7 ); - diff_Q14[ 4 ] = in_Q14[ 4 ] - silk_LSHIFT( cb_row_Q7[ 4 ], 7 ); - /* Weighted rate */ - sum1_Q14 = silk_SMULBB( mu_Q9, cl_Q5[ k ] ); + /* Quantization error: 1 - 2 * xX * cb + cb' * XX * cb */ + sum1_Q15 = SILK_FIX_CONST( 1.001, 15 ); /* Penalty for too large gain */ - sum1_Q14 = silk_ADD_LSHIFT32( sum1_Q14, silk_max( silk_SUB32( gain_tmp_Q7, max_gain_Q7 ), 0 ), 10 ); - - silk_assert( sum1_Q14 >= 0 ); - - /* first row of W_Q18 */ - sum2_Q16 = silk_SMULWB( W_Q18[ 1 ], diff_Q14[ 1 ] ); - sum2_Q16 = silk_SMLAWB( sum2_Q16, W_Q18[ 2 ], diff_Q14[ 2 ] ); - sum2_Q16 = silk_SMLAWB( sum2_Q16, W_Q18[ 3 ], diff_Q14[ 3 ] ); - sum2_Q16 = silk_SMLAWB( sum2_Q16, W_Q18[ 4 ], diff_Q14[ 4 ] ); - sum2_Q16 = silk_LSHIFT( sum2_Q16, 1 ); - sum2_Q16 = silk_SMLAWB( sum2_Q16, W_Q18[ 0 ], diff_Q14[ 0 ] ); - sum1_Q14 = silk_SMLAWB( sum1_Q14, sum2_Q16, diff_Q14[ 0 ] ); - - /* second row of W_Q18 */ - sum2_Q16 = silk_SMULWB( W_Q18[ 7 ], diff_Q14[ 2 ] ); - sum2_Q16 = silk_SMLAWB( sum2_Q16, W_Q18[ 8 ], diff_Q14[ 3 ] ); - sum2_Q16 = silk_SMLAWB( sum2_Q16, W_Q18[ 9 ], diff_Q14[ 4 ] ); - sum2_Q16 = silk_LSHIFT( sum2_Q16, 1 ); - sum2_Q16 = silk_SMLAWB( sum2_Q16, W_Q18[ 6 ], diff_Q14[ 1 ] ); - sum1_Q14 = silk_SMLAWB( sum1_Q14, sum2_Q16, diff_Q14[ 1 ] ); - - /* third row of W_Q18 */ - sum2_Q16 = silk_SMULWB( W_Q18[ 13 ], diff_Q14[ 3 ] ); - sum2_Q16 = silk_SMLAWB( sum2_Q16, W_Q18[ 14 ], diff_Q14[ 4 ] ); - sum2_Q16 = silk_LSHIFT( sum2_Q16, 1 ); - sum2_Q16 = silk_SMLAWB( sum2_Q16, W_Q18[ 12 ], diff_Q14[ 2 ] ); - sum1_Q14 = silk_SMLAWB( sum1_Q14, sum2_Q16, diff_Q14[ 2 ] ); - - /* fourth row of W_Q18 */ - sum2_Q16 = silk_SMULWB( W_Q18[ 19 ], diff_Q14[ 4 ] ); - sum2_Q16 = silk_LSHIFT( sum2_Q16, 1 ); - sum2_Q16 = silk_SMLAWB( sum2_Q16, W_Q18[ 18 ], diff_Q14[ 3 ] ); - sum1_Q14 = silk_SMLAWB( sum1_Q14, sum2_Q16, diff_Q14[ 3 ] ); - - /* last row of W_Q18 */ - sum2_Q16 = silk_SMULWB( W_Q18[ 24 ], diff_Q14[ 4 ] ); - sum1_Q14 = silk_SMLAWB( sum1_Q14, sum2_Q16, diff_Q14[ 4 ] ); - - silk_assert( sum1_Q14 >= 0 ); + penalty = silk_LSHIFT32( silk_max( silk_SUB32( gain_tmp_Q7, max_gain_Q7 ), 0 ), 11 ); + + /* first row of XX_Q17 */ + sum2_Q24 = silk_MLA( neg_xX_Q24[ 0 ], XX_Q17[ 1 ], cb_row_Q7[ 1 ] ); + sum2_Q24 = silk_MLA( sum2_Q24, XX_Q17[ 2 ], cb_row_Q7[ 2 ] ); + sum2_Q24 = silk_MLA( sum2_Q24, XX_Q17[ 3 ], cb_row_Q7[ 3 ] ); + sum2_Q24 = silk_MLA( sum2_Q24, XX_Q17[ 4 ], cb_row_Q7[ 4 ] ); + sum2_Q24 = silk_LSHIFT32( sum2_Q24, 1 ); + sum2_Q24 = silk_MLA( sum2_Q24, XX_Q17[ 0 ], cb_row_Q7[ 0 ] ); + sum1_Q15 = silk_SMLAWB( sum1_Q15, sum2_Q24, cb_row_Q7[ 0 ] ); + + /* second row of XX_Q17 */ + sum2_Q24 = silk_MLA( neg_xX_Q24[ 1 ], XX_Q17[ 7 ], cb_row_Q7[ 2 ] ); + sum2_Q24 = silk_MLA( sum2_Q24, XX_Q17[ 8 ], cb_row_Q7[ 3 ] ); + sum2_Q24 = silk_MLA( sum2_Q24, XX_Q17[ 9 ], cb_row_Q7[ 4 ] ); + sum2_Q24 = silk_LSHIFT32( sum2_Q24, 1 ); + sum2_Q24 = silk_MLA( sum2_Q24, XX_Q17[ 6 ], cb_row_Q7[ 1 ] ); + sum1_Q15 = silk_SMLAWB( sum1_Q15, sum2_Q24, cb_row_Q7[ 1 ] ); + + /* third row of XX_Q17 */ + sum2_Q24 = silk_MLA( neg_xX_Q24[ 2 ], XX_Q17[ 13 ], cb_row_Q7[ 3 ] ); + sum2_Q24 = silk_MLA( sum2_Q24, XX_Q17[ 14 ], cb_row_Q7[ 4 ] ); + sum2_Q24 = silk_LSHIFT32( sum2_Q24, 1 ); + sum2_Q24 = silk_MLA( sum2_Q24, 
XX_Q17[ 12 ], cb_row_Q7[ 2 ] ); + sum1_Q15 = silk_SMLAWB( sum1_Q15, sum2_Q24, cb_row_Q7[ 2 ] ); + + /* fourth row of XX_Q17 */ + sum2_Q24 = silk_MLA( neg_xX_Q24[ 3 ], XX_Q17[ 19 ], cb_row_Q7[ 4 ] ); + sum2_Q24 = silk_LSHIFT32( sum2_Q24, 1 ); + sum2_Q24 = silk_MLA( sum2_Q24, XX_Q17[ 18 ], cb_row_Q7[ 3 ] ); + sum1_Q15 = silk_SMLAWB( sum1_Q15, sum2_Q24, cb_row_Q7[ 3 ] ); + + /* last row of XX_Q17 */ + sum2_Q24 = silk_LSHIFT32( neg_xX_Q24[ 4 ], 1 ); + sum2_Q24 = silk_MLA( sum2_Q24, XX_Q17[ 24 ], cb_row_Q7[ 4 ] ); + sum1_Q15 = silk_SMLAWB( sum1_Q15, sum2_Q24, cb_row_Q7[ 4 ] ); /* find best */ - if( sum1_Q14 < *rate_dist_Q14 ) { - *rate_dist_Q14 = sum1_Q14; - *ind = (opus_int8)k; - *gain_Q7 = gain_tmp_Q7; + if( sum1_Q15 >= 0 ) { + /* Translate residual energy to bits using high-rate assumption (6 dB ==> 1 bit/sample) */ + bits_res_Q8 = silk_SMULBB( subfr_len, silk_lin2log( sum1_Q15 + penalty) - (15 << 7) ); + /* In the following line we reduce the codelength component by half ("-1"); seems to slightly improve quality */ + bits_tot_Q8 = silk_ADD_LSHIFT32( bits_res_Q8, cl_Q5[ k ], 3-1 ); + if( bits_tot_Q8 <= *rate_dist_Q8 ) { + *rate_dist_Q8 = bits_tot_Q8; + *res_nrg_Q15 = sum1_Q15 + penalty; + *ind = (opus_int8)k; + *gain_Q7 = gain_tmp_Q7; + } } /* Go to next cbk vector */ diff --git a/libs/libopus/silk/arm/LPC_inv_pred_gain_arm.h b/libs/libopus/silk/arm/LPC_inv_pred_gain_arm.h new file mode 100644 index 000000000..9895b555c --- /dev/null +++ b/libs/libopus/silk/arm/LPC_inv_pred_gain_arm.h @@ -0,0 +1,57 @@ +/*********************************************************************** +Copyright (c) 2017 Google Inc. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: +- Redistributions of source code must retain the above copyright notice, +this list of conditions and the following disclaimer. +- Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. +- Neither the name of Internet Society, IETF or IETF Trust, nor the +names of specific contributors, may be used to endorse or promote +products derived from this software without specific prior written +permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. 
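A note on the rate/distortion conversion in the rewritten silk_VQ_WMat_EC_c above: the residual energy sum1_Q15 (plus the gain penalty) is mapped to an estimated bit count under the high-rate assumption that every 6 dB of extra residual energy costs one bit per sample, and the codebook entry's code length cl_Q5 is then added at half weight. A rough floating-point reference of the same estimate, assuming silk_lin2log() returns log2() in Q7 as its name suggests; toy_ltp_rate_bits is a hypothetical name, not part of the patch:

#include <math.h>

/* Estimated total rate in bits for one codebook candidate.
   res_nrg_q15/penalty_q15 correspond to sum1_Q15 and penalty above (Q15, > 0),
   codelength_q5 corresponds to cl_Q5[ k ] (Q5 bits). */
static double toy_ltp_rate_bits(double res_nrg_q15, double penalty_q15,
                                int subfr_len, int codelength_q5)
{
    /* 0.5 * log2(energy) == 1 bit per 6 dB; dividing by 2^15 matches the
       "- (15 << 7)" normalization of the Q15 energy in the fixed-point code. */
    double bits_res = 0.5 * subfr_len * log2((res_nrg_q15 + penalty_q15) / 32768.0);
    /* the fixed-point code shifts cl_Q5 by 3-1 instead of 3, i.e. half weight */
    double bits_cl = 0.5 * codelength_q5 / 32.0;
    return bits_res + bits_cl;
}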
+***********************************************************************/ + +#ifndef SILK_LPC_INV_PRED_GAIN_ARM_H +# define SILK_LPC_INV_PRED_GAIN_ARM_H + +# include "celt/arm/armcpu.h" + +# if defined(OPUS_ARM_MAY_HAVE_NEON_INTR) +opus_int32 silk_LPC_inverse_pred_gain_neon( /* O Returns inverse prediction gain in energy domain, Q30 */ + const opus_int16 *A_Q12, /* I Prediction coefficients, Q12 [order] */ + const opus_int order /* I Prediction order */ +); + +# if !defined(OPUS_HAVE_RTCD) && defined(OPUS_ARM_PRESUME_NEON) +# define OVERRIDE_silk_LPC_inverse_pred_gain (1) +# define silk_LPC_inverse_pred_gain(A_Q12, order, arch) ((void)(arch), PRESUME_NEON(silk_LPC_inverse_pred_gain)(A_Q12, order)) +# endif +# endif + +# if !defined(OVERRIDE_silk_LPC_inverse_pred_gain) +/*Is run-time CPU detection enabled on this platform?*/ +# if defined(OPUS_HAVE_RTCD) && (defined(OPUS_ARM_MAY_HAVE_NEON_INTR) && !defined(OPUS_ARM_PRESUME_NEON_INTR)) +extern opus_int32 (*const SILK_LPC_INVERSE_PRED_GAIN_IMPL[OPUS_ARCHMASK+1])(const opus_int16 *A_Q12, const opus_int order); +# define OVERRIDE_silk_LPC_inverse_pred_gain (1) +# define silk_LPC_inverse_pred_gain(A_Q12, order, arch) ((*SILK_LPC_INVERSE_PRED_GAIN_IMPL[(arch)&OPUS_ARCHMASK])(A_Q12, order)) +# elif defined(OPUS_ARM_PRESUME_NEON_INTR) +# define OVERRIDE_silk_LPC_inverse_pred_gain (1) +# define silk_LPC_inverse_pred_gain(A_Q12, order, arch) ((void)(arch), silk_LPC_inverse_pred_gain_neon(A_Q12, order)) +# endif +# endif + +#endif /* end SILK_LPC_INV_PRED_GAIN_ARM_H */ diff --git a/libs/libopus/silk/arm/LPC_inv_pred_gain_neon_intr.c b/libs/libopus/silk/arm/LPC_inv_pred_gain_neon_intr.c new file mode 100644 index 000000000..726e6667b --- /dev/null +++ b/libs/libopus/silk/arm/LPC_inv_pred_gain_neon_intr.c @@ -0,0 +1,288 @@ +/*********************************************************************** +Copyright (c) 2017 Google Inc. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: +- Redistributions of source code must retain the above copyright notice, +this list of conditions and the following disclaimer. +- Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. +- Neither the name of Internet Society, IETF or IETF Trust, nor the +names of specific contributors, may be used to endorse or promote +products derived from this software without specific prior written +permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. 
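The LPC_inv_pred_gain_arm.h header above follows the usual Opus run-time CPU detection (RTCD) pattern: when NEON support can only be detected at run time, silk_LPC_inverse_pred_gain() expands to an indirect call through an arch-indexed table of function pointers (the table is only declared extern here and defined elsewhere); when NEON is presumed at build time, it expands to a direct call to the NEON routine and the arch argument is discarded. A generic sketch of the table-dispatch idea, with hypothetical names:

/* Two interchangeable implementations; the second would use NEON intrinsics. */
static int toy_impl_c(const short *coefs, int order)    { (void)coefs; return order; }
static int toy_impl_neon(const short *coefs, int order) { (void)coefs; return order; }

/* One entry per architecture level, best implementation last; the macro picks
   the entry for the detected arch, mirroring SILK_LPC_INVERSE_PRED_GAIN_IMPL. */
static int (*const toy_impl_table[4])(const short *, int) = {
    toy_impl_c, toy_impl_c, toy_impl_c, toy_impl_neon
};
#define toy_lpc_inverse_pred_gain(coefs, order, arch) \
    ((*toy_impl_table[(arch) & 3])(coefs, order))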
+***********************************************************************/ + +#ifdef HAVE_CONFIG_H +#include "config.h" +#endif + +#include +#include "SigProc_FIX.h" +#include "define.h" + +#define QA 24 +#define A_LIMIT SILK_FIX_CONST( 0.99975, QA ) + +#define MUL32_FRAC_Q(a32, b32, Q) ((opus_int32)(silk_RSHIFT_ROUND64(silk_SMULL(a32, b32), Q))) + +/* The difficulty is how to judge a 64-bit signed integer tmp64 is 32-bit overflowed, + * since NEON has no 64-bit min, max or comparison instructions. + * A failed idea is to compare the results of vmovn(tmp64) and vqmovn(tmp64) whether they are equal or not. + * However, this idea fails when the tmp64 is something like 0xFFFFFFF980000000. + * Here we know that mult2Q >= 1, so the highest bit (bit 63, sign bit) of tmp64 must equal to bit 62. + * tmp64 was shifted left by 1 and we got tmp64'. If high_half(tmp64') != 0 and high_half(tmp64') != -1, + * then we know that bit 31 to bit 63 of tmp64 can not all be the sign bit, and therefore tmp64 is 32-bit overflowed. + * That is, we judge if tmp64' > 0x00000000FFFFFFFF, or tmp64' <= 0xFFFFFFFF00000000. + * We use narrowing shift right 31 bits to tmp32' to save data bandwidth and instructions. + * That is, we judge if tmp32' > 0x00000000, or tmp32' <= 0xFFFFFFFF. + */ + +/* Compute inverse of LPC prediction gain, and */ +/* test if LPC coefficients are stable (all poles within unit circle) */ +static OPUS_INLINE opus_int32 LPC_inverse_pred_gain_QA_neon( /* O Returns inverse prediction gain in energy domain, Q30 */ + opus_int32 A_QA[ SILK_MAX_ORDER_LPC ], /* I Prediction coefficients */ + const opus_int order /* I Prediction order */ +) +{ + opus_int k, n, mult2Q; + opus_int32 invGain_Q30, rc_Q31, rc_mult1_Q30, rc_mult2, tmp1, tmp2; + opus_int32 max, min; + int32x4_t max_s32x4, min_s32x4; + int32x2_t max_s32x2, min_s32x2; + + max_s32x4 = vdupq_n_s32( silk_int32_MIN ); + min_s32x4 = vdupq_n_s32( silk_int32_MAX ); + invGain_Q30 = SILK_FIX_CONST( 1, 30 ); + for( k = order - 1; k > 0; k-- ) { + int32x2_t rc_Q31_s32x2, rc_mult2_s32x2; + int64x2_t mult2Q_s64x2; + + /* Check for stability */ + if( ( A_QA[ k ] > A_LIMIT ) || ( A_QA[ k ] < -A_LIMIT ) ) { + return 0; + } + + /* Set RC equal to negated AR coef */ + rc_Q31 = -silk_LSHIFT( A_QA[ k ], 31 - QA ); + + /* rc_mult1_Q30 range: [ 1 : 2^30 ] */ + rc_mult1_Q30 = silk_SUB32( SILK_FIX_CONST( 1, 30 ), silk_SMMUL( rc_Q31, rc_Q31 ) ); + silk_assert( rc_mult1_Q30 > ( 1 << 15 ) ); /* reduce A_LIMIT if fails */ + silk_assert( rc_mult1_Q30 <= ( 1 << 30 ) ); + + /* Update inverse gain */ + /* invGain_Q30 range: [ 0 : 2^30 ] */ + invGain_Q30 = silk_LSHIFT( silk_SMMUL( invGain_Q30, rc_mult1_Q30 ), 2 ); + silk_assert( invGain_Q30 >= 0 ); + silk_assert( invGain_Q30 <= ( 1 << 30 ) ); + if( invGain_Q30 < SILK_FIX_CONST( 1.0f / MAX_PREDICTION_POWER_GAIN, 30 ) ) { + return 0; + } + + /* rc_mult2 range: [ 2^30 : silk_int32_MAX ] */ + mult2Q = 32 - silk_CLZ32( silk_abs( rc_mult1_Q30 ) ); + rc_mult2 = silk_INVERSE32_varQ( rc_mult1_Q30, mult2Q + 30 ); + + /* Update AR coefficient */ + rc_Q31_s32x2 = vdup_n_s32( rc_Q31 ); + mult2Q_s64x2 = vdupq_n_s64( -mult2Q ); + rc_mult2_s32x2 = vdup_n_s32( rc_mult2 ); + + for( n = 0; n < ( ( k + 1 ) >> 1 ) - 3; n += 4 ) { + /* We always calculate extra elements of A_QA buffer when ( k % 4 ) != 0, to take the advantage of SIMD parallelization. 
*/ + int32x4_t tmp1_s32x4, tmp2_s32x4, t0_s32x4, t1_s32x4, s0_s32x4, s1_s32x4, t_QA0_s32x4, t_QA1_s32x4; + int64x2_t t0_s64x2, t1_s64x2, t2_s64x2, t3_s64x2; + tmp1_s32x4 = vld1q_s32( A_QA + n ); + tmp2_s32x4 = vld1q_s32( A_QA + k - n - 4 ); + tmp2_s32x4 = vrev64q_s32( tmp2_s32x4 ); + tmp2_s32x4 = vcombine_s32( vget_high_s32( tmp2_s32x4 ), vget_low_s32( tmp2_s32x4 ) ); + t0_s32x4 = vqrdmulhq_lane_s32( tmp2_s32x4, rc_Q31_s32x2, 0 ); + t1_s32x4 = vqrdmulhq_lane_s32( tmp1_s32x4, rc_Q31_s32x2, 0 ); + t_QA0_s32x4 = vqsubq_s32( tmp1_s32x4, t0_s32x4 ); + t_QA1_s32x4 = vqsubq_s32( tmp2_s32x4, t1_s32x4 ); + t0_s64x2 = vmull_s32( vget_low_s32 ( t_QA0_s32x4 ), rc_mult2_s32x2 ); + t1_s64x2 = vmull_s32( vget_high_s32( t_QA0_s32x4 ), rc_mult2_s32x2 ); + t2_s64x2 = vmull_s32( vget_low_s32 ( t_QA1_s32x4 ), rc_mult2_s32x2 ); + t3_s64x2 = vmull_s32( vget_high_s32( t_QA1_s32x4 ), rc_mult2_s32x2 ); + t0_s64x2 = vrshlq_s64( t0_s64x2, mult2Q_s64x2 ); + t1_s64x2 = vrshlq_s64( t1_s64x2, mult2Q_s64x2 ); + t2_s64x2 = vrshlq_s64( t2_s64x2, mult2Q_s64x2 ); + t3_s64x2 = vrshlq_s64( t3_s64x2, mult2Q_s64x2 ); + t0_s32x4 = vcombine_s32( vmovn_s64( t0_s64x2 ), vmovn_s64( t1_s64x2 ) ); + t1_s32x4 = vcombine_s32( vmovn_s64( t2_s64x2 ), vmovn_s64( t3_s64x2 ) ); + s0_s32x4 = vcombine_s32( vshrn_n_s64( t0_s64x2, 31 ), vshrn_n_s64( t1_s64x2, 31 ) ); + s1_s32x4 = vcombine_s32( vshrn_n_s64( t2_s64x2, 31 ), vshrn_n_s64( t3_s64x2, 31 ) ); + max_s32x4 = vmaxq_s32( max_s32x4, s0_s32x4 ); + min_s32x4 = vminq_s32( min_s32x4, s0_s32x4 ); + max_s32x4 = vmaxq_s32( max_s32x4, s1_s32x4 ); + min_s32x4 = vminq_s32( min_s32x4, s1_s32x4 ); + t1_s32x4 = vrev64q_s32( t1_s32x4 ); + t1_s32x4 = vcombine_s32( vget_high_s32( t1_s32x4 ), vget_low_s32( t1_s32x4 ) ); + vst1q_s32( A_QA + n, t0_s32x4 ); + vst1q_s32( A_QA + k - n - 4, t1_s32x4 ); + } + for( ; n < (k + 1) >> 1; n++ ) { + opus_int64 tmp64; + tmp1 = A_QA[ n ]; + tmp2 = A_QA[ k - n - 1 ]; + tmp64 = silk_RSHIFT_ROUND64( silk_SMULL( silk_SUB_SAT32(tmp1, + MUL32_FRAC_Q( tmp2, rc_Q31, 31 ) ), rc_mult2 ), mult2Q); + if( tmp64 > silk_int32_MAX || tmp64 < silk_int32_MIN ) { + return 0; + } + A_QA[ n ] = ( opus_int32 )tmp64; + tmp64 = silk_RSHIFT_ROUND64( silk_SMULL( silk_SUB_SAT32(tmp2, + MUL32_FRAC_Q( tmp1, rc_Q31, 31 ) ), rc_mult2), mult2Q); + if( tmp64 > silk_int32_MAX || tmp64 < silk_int32_MIN ) { + return 0; + } + A_QA[ k - n - 1 ] = ( opus_int32 )tmp64; + } + } + + /* Check for stability */ + if( ( A_QA[ k ] > A_LIMIT ) || ( A_QA[ k ] < -A_LIMIT ) ) { + return 0; + } + + max_s32x2 = vmax_s32( vget_low_s32( max_s32x4 ), vget_high_s32( max_s32x4 ) ); + min_s32x2 = vmin_s32( vget_low_s32( min_s32x4 ), vget_high_s32( min_s32x4 ) ); + max_s32x2 = vmax_s32( max_s32x2, vreinterpret_s32_s64( vshr_n_s64( vreinterpret_s64_s32( max_s32x2 ), 32 ) ) ); + min_s32x2 = vmin_s32( min_s32x2, vreinterpret_s32_s64( vshr_n_s64( vreinterpret_s64_s32( min_s32x2 ), 32 ) ) ); + max = vget_lane_s32( max_s32x2, 0 ); + min = vget_lane_s32( min_s32x2, 0 ); + if( ( max > 0 ) || ( min < -1 ) ) { + return 0; + } + + /* Set RC equal to negated AR coef */ + rc_Q31 = -silk_LSHIFT( A_QA[ 0 ], 31 - QA ); + + /* Range: [ 1 : 2^30 ] */ + rc_mult1_Q30 = silk_SUB32( SILK_FIX_CONST( 1, 30 ), silk_SMMUL( rc_Q31, rc_Q31 ) ); + + /* Update inverse gain */ + /* Range: [ 0 : 2^30 ] */ + invGain_Q30 = silk_LSHIFT( silk_SMMUL( invGain_Q30, rc_mult1_Q30 ), 2 ); + silk_assert( invGain_Q30 >= 0 ); + silk_assert( invGain_Q30 <= ( 1 << 30 ) ); + if( invGain_Q30 < SILK_FIX_CONST( 1.0f / MAX_PREDICTION_POWER_GAIN, 30 ) ) { + return 0; + } + + return 
invGain_Q30; +} + +/* For input in Q12 domain */ +opus_int32 silk_LPC_inverse_pred_gain_neon( /* O Returns inverse prediction gain in energy domain, Q30 */ + const opus_int16 *A_Q12, /* I Prediction coefficients, Q12 [order] */ + const opus_int order /* I Prediction order */ +) +{ +#ifdef OPUS_CHECK_ASM + const opus_int32 invGain_Q30_c = silk_LPC_inverse_pred_gain_c( A_Q12, order ); +#endif + + opus_int32 invGain_Q30; + if( ( SILK_MAX_ORDER_LPC != 24 ) || ( order & 1 )) { + invGain_Q30 = silk_LPC_inverse_pred_gain_c( A_Q12, order ); + } + else { + opus_int32 Atmp_QA[ SILK_MAX_ORDER_LPC ]; + opus_int32 DC_resp; + int16x8_t t0_s16x8, t1_s16x8, t2_s16x8; + int32x4_t t0_s32x4; + const opus_int leftover = order & 7; + + /* Increase Q domain of the AR coefficients */ + t0_s16x8 = vld1q_s16( A_Q12 + 0 ); + t1_s16x8 = vld1q_s16( A_Q12 + 8 ); + if ( order > 16 ) { + t2_s16x8 = vld1q_s16( A_Q12 + 16 ); + } + t0_s32x4 = vpaddlq_s16( t0_s16x8 ); + + switch( order - leftover ) + { + case 24: + t0_s32x4 = vpadalq_s16( t0_s32x4, t2_s16x8 ); + vst1q_s32( Atmp_QA + 16, vshll_n_s16( vget_low_s16 ( t2_s16x8 ), QA - 12 ) ); + vst1q_s32( Atmp_QA + 20, vshll_n_s16( vget_high_s16( t2_s16x8 ), QA - 12 ) ); + /* FALLTHROUGH */ + + case 16: + t0_s32x4 = vpadalq_s16( t0_s32x4, t1_s16x8 ); + vst1q_s32( Atmp_QA + 8, vshll_n_s16( vget_low_s16 ( t1_s16x8 ), QA - 12 ) ); + vst1q_s32( Atmp_QA + 12, vshll_n_s16( vget_high_s16( t1_s16x8 ), QA - 12 ) ); + /* FALLTHROUGH */ + + case 8: + { + const int32x2_t t_s32x2 = vpadd_s32( vget_low_s32( t0_s32x4 ), vget_high_s32( t0_s32x4 ) ); + const int64x1_t t_s64x1 = vpaddl_s32( t_s32x2 ); + DC_resp = vget_lane_s32( vreinterpret_s32_s64( t_s64x1 ), 0 ); + vst1q_s32( Atmp_QA + 0, vshll_n_s16( vget_low_s16 ( t0_s16x8 ), QA - 12 ) ); + vst1q_s32( Atmp_QA + 4, vshll_n_s16( vget_high_s16( t0_s16x8 ), QA - 12 ) ); + } + break; + + default: + DC_resp = 0; + break; + } + A_Q12 += order - leftover; + + switch( leftover ) + { + case 6: + DC_resp += (opus_int32)A_Q12[ 5 ]; + DC_resp += (opus_int32)A_Q12[ 4 ]; + Atmp_QA[ order - leftover + 5 ] = silk_LSHIFT32( (opus_int32)A_Q12[ 5 ], QA - 12 ); + Atmp_QA[ order - leftover + 4 ] = silk_LSHIFT32( (opus_int32)A_Q12[ 4 ], QA - 12 ); + /* FALLTHROUGH */ + + case 4: + DC_resp += (opus_int32)A_Q12[ 3 ]; + DC_resp += (opus_int32)A_Q12[ 2 ]; + Atmp_QA[ order - leftover + 3 ] = silk_LSHIFT32( (opus_int32)A_Q12[ 3 ], QA - 12 ); + Atmp_QA[ order - leftover + 2 ] = silk_LSHIFT32( (opus_int32)A_Q12[ 2 ], QA - 12 ); + /* FALLTHROUGH */ + + case 2: + DC_resp += (opus_int32)A_Q12[ 1 ]; + DC_resp += (opus_int32)A_Q12[ 0 ]; + Atmp_QA[ order - leftover + 1 ] = silk_LSHIFT32( (opus_int32)A_Q12[ 1 ], QA - 12 ); + Atmp_QA[ order - leftover + 0 ] = silk_LSHIFT32( (opus_int32)A_Q12[ 0 ], QA - 12 ); + /* FALLTHROUGH */ + + default: + break; + } + + /* If the DC is unstable, we don't even need to do the full calculations */ + if( DC_resp >= 4096 ) { + invGain_Q30 = 0; + } else { + invGain_Q30 = LPC_inverse_pred_gain_QA_neon( Atmp_QA, order ); + } + } + +#ifdef OPUS_CHECK_ASM + silk_assert( invGain_Q30_c == invGain_Q30 ); +#endif + + return invGain_Q30; +} diff --git a/libs/libopus/silk/arm/NSQ_del_dec_arm.h b/libs/libopus/silk/arm/NSQ_del_dec_arm.h new file mode 100644 index 000000000..9e76e1692 --- /dev/null +++ b/libs/libopus/silk/arm/NSQ_del_dec_arm.h @@ -0,0 +1,100 @@ +/*********************************************************************** +Copyright (c) 2017 Google Inc. 
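The DC_resp shortcut at the end of silk_LPC_inverse_pred_gain_neon above deserves a word: A_Q12 holds the prediction coefficients in Q12, so 4096 represents 1.0, and DC_resp is simply their sum. If the coefficients sum to 1.0 or more, the prediction error filter 1 - A(z) is non-positive at z = 1 while it tends to 1 as z grows, so it has a real zero at |z| >= 1 and the filter cannot be stable; returning an inverse gain of 0 without running the full recursion is therefore safe. A plain-C sketch of just that test (toy_dc_unstable is a hypothetical name):

/* Returns nonzero when the Q12 prediction coefficients sum to >= 1.0 (4096 in Q12),
   i.e. when the LPC filter is guaranteed unstable and the inverse gain is 0. */
static int toy_dc_unstable(const short *A_Q12, int order)
{
    long dc = 0;
    int k;
    for (k = 0; k < order; k++) {
        dc += A_Q12[k];
    }
    return dc >= 4096;
}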
+Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: +- Redistributions of source code must retain the above copyright notice, +this list of conditions and the following disclaimer. +- Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. +- Neither the name of Internet Society, IETF or IETF Trust, nor the +names of specific contributors, may be used to endorse or promote +products derived from this software without specific prior written +permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. +***********************************************************************/ + +#ifndef SILK_NSQ_DEL_DEC_ARM_H +#define SILK_NSQ_DEL_DEC_ARM_H + +#include "celt/arm/armcpu.h" + +#if defined(OPUS_ARM_MAY_HAVE_NEON_INTR) +void silk_NSQ_del_dec_neon( + const silk_encoder_state *psEncC, silk_nsq_state *NSQ, + SideInfoIndices *psIndices, const opus_int16 x16[], opus_int8 pulses[], + const opus_int16 PredCoef_Q12[2 * MAX_LPC_ORDER], + const opus_int16 LTPCoef_Q14[LTP_ORDER * MAX_NB_SUBFR], + const opus_int16 AR_Q13[MAX_NB_SUBFR * MAX_SHAPE_LPC_ORDER], + const opus_int HarmShapeGain_Q14[MAX_NB_SUBFR], + const opus_int Tilt_Q14[MAX_NB_SUBFR], + const opus_int32 LF_shp_Q14[MAX_NB_SUBFR], + const opus_int32 Gains_Q16[MAX_NB_SUBFR], + const opus_int pitchL[MAX_NB_SUBFR], const opus_int Lambda_Q10, + const opus_int LTP_scale_Q14); + +#if !defined(OPUS_HAVE_RTCD) +#define OVERRIDE_silk_NSQ_del_dec (1) +#define silk_NSQ_del_dec(psEncC, NSQ, psIndices, x16, pulses, PredCoef_Q12, \ + LTPCoef_Q14, AR_Q13, HarmShapeGain_Q14, Tilt_Q14, \ + LF_shp_Q14, Gains_Q16, pitchL, Lambda_Q10, \ + LTP_scale_Q14, arch) \ + ((void)(arch), \ + PRESUME_NEON(silk_NSQ_del_dec)( \ + psEncC, NSQ, psIndices, x16, pulses, PredCoef_Q12, LTPCoef_Q14, \ + AR_Q13, HarmShapeGain_Q14, Tilt_Q14, LF_shp_Q14, Gains_Q16, pitchL, \ + Lambda_Q10, LTP_scale_Q14)) +#endif +#endif + +#if !defined(OVERRIDE_silk_NSQ_del_dec) +/*Is run-time CPU detection enabled on this platform?*/ +#if defined(OPUS_HAVE_RTCD) && (defined(OPUS_ARM_MAY_HAVE_NEON_INTR) && \ + !defined(OPUS_ARM_PRESUME_NEON_INTR)) +extern void (*const SILK_NSQ_DEL_DEC_IMPL[OPUS_ARCHMASK + 1])( + const silk_encoder_state *psEncC, silk_nsq_state *NSQ, + SideInfoIndices *psIndices, const opus_int16 x16[], opus_int8 pulses[], + const opus_int16 PredCoef_Q12[2 * MAX_LPC_ORDER], + const opus_int16 LTPCoef_Q14[LTP_ORDER * MAX_NB_SUBFR], + const opus_int16 AR_Q13[MAX_NB_SUBFR * MAX_SHAPE_LPC_ORDER], + const opus_int HarmShapeGain_Q14[MAX_NB_SUBFR], + const opus_int Tilt_Q14[MAX_NB_SUBFR], + const opus_int32 LF_shp_Q14[MAX_NB_SUBFR], + const opus_int32 
Gains_Q16[MAX_NB_SUBFR], + const opus_int pitchL[MAX_NB_SUBFR], const opus_int Lambda_Q10, + const opus_int LTP_scale_Q14); +#define OVERRIDE_silk_NSQ_del_dec (1) +#define silk_NSQ_del_dec(psEncC, NSQ, psIndices, x16, pulses, PredCoef_Q12, \ + LTPCoef_Q14, AR_Q13, HarmShapeGain_Q14, Tilt_Q14, \ + LF_shp_Q14, Gains_Q16, pitchL, Lambda_Q10, \ + LTP_scale_Q14, arch) \ + ((*SILK_NSQ_DEL_DEC_IMPL[(arch)&OPUS_ARCHMASK])( \ + psEncC, NSQ, psIndices, x16, pulses, PredCoef_Q12, LTPCoef_Q14, \ + AR_Q13, HarmShapeGain_Q14, Tilt_Q14, LF_shp_Q14, Gains_Q16, pitchL, \ + Lambda_Q10, LTP_scale_Q14)) +#elif defined(OPUS_ARM_PRESUME_NEON_INTR) +#define OVERRIDE_silk_NSQ_del_dec (1) +#define silk_NSQ_del_dec(psEncC, NSQ, psIndices, x16, pulses, PredCoef_Q12, \ + LTPCoef_Q14, AR_Q13, HarmShapeGain_Q14, Tilt_Q14, \ + LF_shp_Q14, Gains_Q16, pitchL, Lambda_Q10, \ + LTP_scale_Q14, arch) \ + ((void)(arch), \ + silk_NSQ_del_dec_neon(psEncC, NSQ, psIndices, x16, pulses, PredCoef_Q12, \ + LTPCoef_Q14, AR_Q13, HarmShapeGain_Q14, Tilt_Q14, \ + LF_shp_Q14, Gains_Q16, pitchL, Lambda_Q10, \ + LTP_scale_Q14)) +#endif +#endif + +#endif /* end SILK_NSQ_DEL_DEC_ARM_H */ diff --git a/libs/libopus/silk/arm/NSQ_del_dec_neon_intr.c b/libs/libopus/silk/arm/NSQ_del_dec_neon_intr.c new file mode 100644 index 000000000..212410f36 --- /dev/null +++ b/libs/libopus/silk/arm/NSQ_del_dec_neon_intr.c @@ -0,0 +1,1124 @@ +/*********************************************************************** +Copyright (c) 2017 Google Inc. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: +- Redistributions of source code must retain the above copyright notice, +this list of conditions and the following disclaimer. +- Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. +- Neither the name of Internet Society, IETF or IETF Trust, nor the +names of specific contributors, may be used to endorse or promote +products derived from this software without specific prior written +permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. +***********************************************************************/ + +#ifdef HAVE_CONFIG_H +#include "config.h" +#endif + +#include +#ifdef OPUS_CHECK_ASM +# include +#endif +#include "main.h" +#include "stack_alloc.h" + +/* NEON intrinsics optimization now can only parallelize up to 4 delay decision states. */ +/* If there are more states, C function is called, and this optimization must be expanded. 
*/ +#define NEON_MAX_DEL_DEC_STATES 4 + +typedef struct { + opus_int32 sLPC_Q14[ MAX_SUB_FRAME_LENGTH + NSQ_LPC_BUF_LENGTH ][ NEON_MAX_DEL_DEC_STATES ]; + opus_int32 RandState[ DECISION_DELAY ][ NEON_MAX_DEL_DEC_STATES ]; + opus_int32 Q_Q10[ DECISION_DELAY ][ NEON_MAX_DEL_DEC_STATES ]; + opus_int32 Xq_Q14[ DECISION_DELAY ][ NEON_MAX_DEL_DEC_STATES ]; + opus_int32 Pred_Q15[ DECISION_DELAY ][ NEON_MAX_DEL_DEC_STATES ]; + opus_int32 Shape_Q14[ DECISION_DELAY ][ NEON_MAX_DEL_DEC_STATES ]; + opus_int32 sAR2_Q14[ MAX_SHAPE_LPC_ORDER ][ NEON_MAX_DEL_DEC_STATES ]; + opus_int32 LF_AR_Q14[ NEON_MAX_DEL_DEC_STATES ]; + opus_int32 Diff_Q14[ NEON_MAX_DEL_DEC_STATES ]; + opus_int32 Seed[ NEON_MAX_DEL_DEC_STATES ]; + opus_int32 SeedInit[ NEON_MAX_DEL_DEC_STATES ]; + opus_int32 RD_Q10[ NEON_MAX_DEL_DEC_STATES ]; +} NSQ_del_decs_struct; + +typedef struct { + opus_int32 Q_Q10[ NEON_MAX_DEL_DEC_STATES ]; + opus_int32 RD_Q10[ NEON_MAX_DEL_DEC_STATES ]; + opus_int32 xq_Q14[ NEON_MAX_DEL_DEC_STATES ]; + opus_int32 LF_AR_Q14[ NEON_MAX_DEL_DEC_STATES ]; + opus_int32 Diff_Q14[ NEON_MAX_DEL_DEC_STATES ]; + opus_int32 sLTP_shp_Q14[ NEON_MAX_DEL_DEC_STATES ]; + opus_int32 LPC_exc_Q14[ NEON_MAX_DEL_DEC_STATES ]; +} NSQ_samples_struct; + +static OPUS_INLINE void silk_nsq_del_dec_scale_states_neon( + const silk_encoder_state *psEncC, /* I Encoder State */ + silk_nsq_state *NSQ, /* I/O NSQ state */ + NSQ_del_decs_struct psDelDec[], /* I/O Delayed decision states */ + const opus_int16 x16[], /* I Input */ + opus_int32 x_sc_Q10[], /* O Input scaled with 1/Gain in Q10 */ + const opus_int16 sLTP[], /* I Re-whitened LTP state in Q0 */ + opus_int32 sLTP_Q15[], /* O LTP state matching scaled input */ + opus_int subfr, /* I Subframe number */ + const opus_int LTP_scale_Q14, /* I LTP state scaling */ + const opus_int32 Gains_Q16[ MAX_NB_SUBFR ], /* I */ + const opus_int pitchL[ MAX_NB_SUBFR ], /* I Pitch lag */ + const opus_int signal_type, /* I Signal type */ + const opus_int decisionDelay /* I Decision delay */ +); + +/******************************************/ +/* Noise shape quantizer for one subframe */ +/******************************************/ +static OPUS_INLINE void silk_noise_shape_quantizer_del_dec_neon( + silk_nsq_state *NSQ, /* I/O NSQ state */ + NSQ_del_decs_struct psDelDec[], /* I/O Delayed decision states */ + opus_int signalType, /* I Signal type */ + const opus_int32 x_Q10[], /* I */ + opus_int8 pulses[], /* O */ + opus_int16 xq[], /* O */ + opus_int32 sLTP_Q15[], /* I/O LTP filter state */ + opus_int32 delayedGain_Q10[], /* I/O Gain delay buffer */ + const opus_int16 a_Q12[], /* I Short term prediction coefs */ + const opus_int16 b_Q14[], /* I Long term prediction coefs */ + const opus_int16 AR_shp_Q13[], /* I Noise shaping coefs */ + opus_int lag, /* I Pitch lag */ + opus_int32 HarmShapeFIRPacked_Q14, /* I */ + opus_int Tilt_Q14, /* I Spectral tilt */ + opus_int32 LF_shp_Q14, /* I */ + opus_int32 Gain_Q16, /* I */ + opus_int Lambda_Q10, /* I */ + opus_int offset_Q10, /* I */ + opus_int length, /* I Input length */ + opus_int subfr, /* I Subframe number */ + opus_int shapingLPCOrder, /* I Shaping LPC filter order */ + opus_int predictLPCOrder, /* I Prediction filter order */ + opus_int warping_Q16, /* I */ + opus_int nStatesDelayedDecision, /* I Number of states in decision tree */ + opus_int *smpl_buf_idx, /* I/O Index to newest samples in buffers */ + opus_int decisionDelay /* I */ +); + +static OPUS_INLINE void copy_winner_state_kernel( + const NSQ_del_decs_struct *psDelDec, + const opus_int offset, + 
const opus_int last_smple_idx, + const opus_int Winner_ind, + const int32x2_t gain_lo_s32x2, + const int32x2_t gain_hi_s32x2, + const int32x4_t shift_s32x4, + int32x4_t t0_s32x4, + int32x4_t t1_s32x4, + opus_int8 *const pulses, + opus_int16 *pxq, + silk_nsq_state *NSQ +) +{ + int16x8_t t_s16x8; + int32x4_t o0_s32x4, o1_s32x4; + + t0_s32x4 = vld1q_lane_s32( &psDelDec->Q_Q10[ last_smple_idx - 0 ][ Winner_ind ], t0_s32x4, 0 ); + t0_s32x4 = vld1q_lane_s32( &psDelDec->Q_Q10[ last_smple_idx - 1 ][ Winner_ind ], t0_s32x4, 1 ); + t0_s32x4 = vld1q_lane_s32( &psDelDec->Q_Q10[ last_smple_idx - 2 ][ Winner_ind ], t0_s32x4, 2 ); + t0_s32x4 = vld1q_lane_s32( &psDelDec->Q_Q10[ last_smple_idx - 3 ][ Winner_ind ], t0_s32x4, 3 ); + t1_s32x4 = vld1q_lane_s32( &psDelDec->Q_Q10[ last_smple_idx - 4 ][ Winner_ind ], t1_s32x4, 0 ); + t1_s32x4 = vld1q_lane_s32( &psDelDec->Q_Q10[ last_smple_idx - 5 ][ Winner_ind ], t1_s32x4, 1 ); + t1_s32x4 = vld1q_lane_s32( &psDelDec->Q_Q10[ last_smple_idx - 6 ][ Winner_ind ], t1_s32x4, 2 ); + t1_s32x4 = vld1q_lane_s32( &psDelDec->Q_Q10[ last_smple_idx - 7 ][ Winner_ind ], t1_s32x4, 3 ); + t_s16x8 = vcombine_s16( vrshrn_n_s32( t0_s32x4, 10 ), vrshrn_n_s32( t1_s32x4, 10 ) ); + vst1_s8( &pulses[ offset ], vmovn_s16( t_s16x8 ) ); + + t0_s32x4 = vld1q_lane_s32( &psDelDec->Xq_Q14[ last_smple_idx - 0 ][ Winner_ind ], t0_s32x4, 0 ); + t0_s32x4 = vld1q_lane_s32( &psDelDec->Xq_Q14[ last_smple_idx - 1 ][ Winner_ind ], t0_s32x4, 1 ); + t0_s32x4 = vld1q_lane_s32( &psDelDec->Xq_Q14[ last_smple_idx - 2 ][ Winner_ind ], t0_s32x4, 2 ); + t0_s32x4 = vld1q_lane_s32( &psDelDec->Xq_Q14[ last_smple_idx - 3 ][ Winner_ind ], t0_s32x4, 3 ); + t1_s32x4 = vld1q_lane_s32( &psDelDec->Xq_Q14[ last_smple_idx - 4 ][ Winner_ind ], t1_s32x4, 0 ); + t1_s32x4 = vld1q_lane_s32( &psDelDec->Xq_Q14[ last_smple_idx - 5 ][ Winner_ind ], t1_s32x4, 1 ); + t1_s32x4 = vld1q_lane_s32( &psDelDec->Xq_Q14[ last_smple_idx - 6 ][ Winner_ind ], t1_s32x4, 2 ); + t1_s32x4 = vld1q_lane_s32( &psDelDec->Xq_Q14[ last_smple_idx - 7 ][ Winner_ind ], t1_s32x4, 3 ); + o0_s32x4 = vqdmulhq_lane_s32( t0_s32x4, gain_lo_s32x2, 0 ); + o1_s32x4 = vqdmulhq_lane_s32( t1_s32x4, gain_lo_s32x2, 0 ); + o0_s32x4 = vmlaq_lane_s32( o0_s32x4, t0_s32x4, gain_hi_s32x2, 0 ); + o1_s32x4 = vmlaq_lane_s32( o1_s32x4, t1_s32x4, gain_hi_s32x2, 0 ); + o0_s32x4 = vrshlq_s32( o0_s32x4, shift_s32x4 ); + o1_s32x4 = vrshlq_s32( o1_s32x4, shift_s32x4 ); + vst1_s16( &pxq[ offset + 0 ], vqmovn_s32( o0_s32x4 ) ); + vst1_s16( &pxq[ offset + 4 ], vqmovn_s32( o1_s32x4 ) ); + + t0_s32x4 = vld1q_lane_s32( &psDelDec->Shape_Q14[ last_smple_idx - 0 ][ Winner_ind ], t0_s32x4, 0 ); + t0_s32x4 = vld1q_lane_s32( &psDelDec->Shape_Q14[ last_smple_idx - 1 ][ Winner_ind ], t0_s32x4, 1 ); + t0_s32x4 = vld1q_lane_s32( &psDelDec->Shape_Q14[ last_smple_idx - 2 ][ Winner_ind ], t0_s32x4, 2 ); + t0_s32x4 = vld1q_lane_s32( &psDelDec->Shape_Q14[ last_smple_idx - 3 ][ Winner_ind ], t0_s32x4, 3 ); + t1_s32x4 = vld1q_lane_s32( &psDelDec->Shape_Q14[ last_smple_idx - 4 ][ Winner_ind ], t1_s32x4, 0 ); + t1_s32x4 = vld1q_lane_s32( &psDelDec->Shape_Q14[ last_smple_idx - 5 ][ Winner_ind ], t1_s32x4, 1 ); + t1_s32x4 = vld1q_lane_s32( &psDelDec->Shape_Q14[ last_smple_idx - 6 ][ Winner_ind ], t1_s32x4, 2 ); + t1_s32x4 = vld1q_lane_s32( &psDelDec->Shape_Q14[ last_smple_idx - 7 ][ Winner_ind ], t1_s32x4, 3 ); + vst1q_s32( &NSQ->sLTP_shp_Q14[ NSQ->sLTP_shp_buf_idx + offset + 0 ], t0_s32x4 ); + vst1q_s32( &NSQ->sLTP_shp_Q14[ NSQ->sLTP_shp_buf_idx + offset + 4 ], t1_s32x4 ); +} + +static OPUS_INLINE void 
copy_winner_state( + const NSQ_del_decs_struct *psDelDec, + const opus_int decisionDelay, + const opus_int smpl_buf_idx, + const opus_int Winner_ind, + const opus_int32 gain, + const opus_int32 shift, + opus_int8 *const pulses, + opus_int16 *pxq, + silk_nsq_state *NSQ +) +{ + opus_int i, last_smple_idx; + const int32x2_t gain_lo_s32x2 = vdup_n_s32( silk_LSHIFT32( gain & 0x0000FFFF, 15 ) ); + const int32x2_t gain_hi_s32x2 = vdup_n_s32( gain >> 16 ); + const int32x4_t shift_s32x4 = vdupq_n_s32( -shift ); + int32x4_t t0_s32x4, t1_s32x4; + + t0_s32x4 = t1_s32x4 = vdupq_n_s32( 0 ); /* initialization */ + last_smple_idx = smpl_buf_idx + decisionDelay - 1 + DECISION_DELAY; + if( last_smple_idx >= DECISION_DELAY ) last_smple_idx -= DECISION_DELAY; + if( last_smple_idx >= DECISION_DELAY ) last_smple_idx -= DECISION_DELAY; + + for( i = 0; ( i < ( decisionDelay - 7 ) ) && ( last_smple_idx >= 7 ); i += 8, last_smple_idx -= 8 ) { + copy_winner_state_kernel( psDelDec, i - decisionDelay, last_smple_idx, Winner_ind, gain_lo_s32x2, gain_hi_s32x2, shift_s32x4, t0_s32x4, t1_s32x4, pulses, pxq, NSQ ); + } + for( ; ( i < decisionDelay ) && ( last_smple_idx >= 0 ); i++, last_smple_idx-- ) { + pulses[ i - decisionDelay ] = (opus_int8)silk_RSHIFT_ROUND( psDelDec->Q_Q10[ last_smple_idx ][ Winner_ind ], 10 ); + pxq[ i - decisionDelay ] = (opus_int16)silk_SAT16( silk_RSHIFT_ROUND( silk_SMULWW( psDelDec->Xq_Q14[ last_smple_idx ][ Winner_ind ], gain ), shift ) ); + NSQ->sLTP_shp_Q14[ NSQ->sLTP_shp_buf_idx - decisionDelay + i ] = psDelDec->Shape_Q14[ last_smple_idx ][ Winner_ind ]; + } + + last_smple_idx += DECISION_DELAY; + for( ; i < ( decisionDelay - 7 ); i++, last_smple_idx-- ) { + copy_winner_state_kernel( psDelDec, i - decisionDelay, last_smple_idx, Winner_ind, gain_lo_s32x2, gain_hi_s32x2, shift_s32x4, t0_s32x4, t1_s32x4, pulses, pxq, NSQ ); + } + for( ; i < decisionDelay; i++, last_smple_idx-- ) { + pulses[ i - decisionDelay ] = (opus_int8)silk_RSHIFT_ROUND( psDelDec->Q_Q10[ last_smple_idx ][ Winner_ind ], 10 ); + pxq[ i - decisionDelay ] = (opus_int16)silk_SAT16( silk_RSHIFT_ROUND( silk_SMULWW( psDelDec->Xq_Q14[ last_smple_idx ][ Winner_ind ], gain ), shift ) ); + NSQ->sLTP_shp_Q14[ NSQ->sLTP_shp_buf_idx - decisionDelay + i ] = psDelDec->Shape_Q14[ last_smple_idx ][ Winner_ind ]; + } +} + +void silk_NSQ_del_dec_neon( + const silk_encoder_state *psEncC, /* I Encoder State */ + silk_nsq_state *NSQ, /* I/O NSQ state */ + SideInfoIndices *psIndices, /* I/O Quantization Indices */ + const opus_int16 x16[], /* I Input */ + opus_int8 pulses[], /* O Quantized pulse signal */ + const opus_int16 PredCoef_Q12[ 2 * MAX_LPC_ORDER ], /* I Short term prediction coefs */ + const opus_int16 LTPCoef_Q14[ LTP_ORDER * MAX_NB_SUBFR ], /* I Long term prediction coefs */ + const opus_int16 AR_Q13[ MAX_NB_SUBFR * MAX_SHAPE_LPC_ORDER ], /* I Noise shaping coefs */ + const opus_int HarmShapeGain_Q14[ MAX_NB_SUBFR ], /* I Long term shaping coefs */ + const opus_int Tilt_Q14[ MAX_NB_SUBFR ], /* I Spectral tilt */ + const opus_int32 LF_shp_Q14[ MAX_NB_SUBFR ], /* I Low frequency shaping coefs */ + const opus_int32 Gains_Q16[ MAX_NB_SUBFR ], /* I Quantization step sizes */ + const opus_int pitchL[ MAX_NB_SUBFR ], /* I Pitch lags */ + const opus_int Lambda_Q10, /* I Rate/distortion tradeoff */ + const opus_int LTP_scale_Q14 /* I LTP state scaling */ +) +{ +#ifdef OPUS_CHECK_ASM + silk_nsq_state NSQ_c; + SideInfoIndices psIndices_c; + opus_int8 pulses_c[ MAX_FRAME_LENGTH ]; + const opus_int8 *const pulses_a = pulses; + + ( void )pulses_a; + 
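The #ifdef OPUS_CHECK_ASM block at the top of silk_NSQ_del_dec_neon uses the same self-checking pattern as the other NEON files in this patch (compare the silk_assert( invGain_Q30_c == invGain_Q30 ) in LPC_inv_pred_gain_neon_intr.c): the NSQ state, side-info indices and pulse buffer are copied, the reference silk_NSQ_del_dec_c implementation is run on the copies, and the NEON results are presumably asserted to be bit-exact against it at the end of the function. A reduced sketch of the idea, with hypothetical names:

#include <assert.h>

static int toy_ref_impl(const int *in, int n)  /* reference C implementation */
{
    int s = 0, k;
    for (k = 0; k < n; k++) s += in[k];
    return s;
}

static int toy_fast_impl(const int *in, int n) /* stands in for the NEON version */
{
    int s = 0, k;
    for (k = 0; k < n; k++) s += in[k];
    return s;
}

static int toy_checked_impl(const int *in, int n)
{
    int fast = toy_fast_impl(in, n);
#ifdef TOY_CHECK_ASM
    /* debug builds only: the optimized path must be bit-exact with the reference */
    assert(fast == toy_ref_impl(in, n));
#endif
    return fast;
}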
silk_memcpy( &NSQ_c, NSQ, sizeof( NSQ_c ) ); + silk_memcpy( &psIndices_c, psIndices, sizeof( psIndices_c ) ); + silk_memcpy( pulses_c, pulses, sizeof( pulses_c ) ); + silk_NSQ_del_dec_c( psEncC, &NSQ_c, &psIndices_c, x16, pulses_c, PredCoef_Q12, LTPCoef_Q14, AR_Q13, HarmShapeGain_Q14, Tilt_Q14, LF_shp_Q14, Gains_Q16, + pitchL, Lambda_Q10, LTP_scale_Q14 ); +#endif + + /* The optimization parallelizes the different delay decision states. */ + if(( psEncC->nStatesDelayedDecision > NEON_MAX_DEL_DEC_STATES ) || ( psEncC->nStatesDelayedDecision <= 2 )) { + /* NEON intrinsics optimization now can only parallelize up to 4 delay decision states. */ + /* If there are more states, C function is called, and this optimization must be expanded. */ + /* When the number of delay decision states is less than 3, there are penalties using this */ + /* optimization, and C function is called. */ + /* When the number of delay decision states is 2, it's better to specialize another */ + /* structure NSQ_del_dec2_struct and optimize with shorter NEON registers. (Low priority) */ + silk_NSQ_del_dec_c( psEncC, NSQ, psIndices, x16, pulses, PredCoef_Q12, LTPCoef_Q14, AR_Q13, HarmShapeGain_Q14, + Tilt_Q14, LF_shp_Q14, Gains_Q16, pitchL, Lambda_Q10, LTP_scale_Q14 ); + } else { + opus_int i, k, lag, start_idx, LSF_interpolation_flag, Winner_ind, subfr; + opus_int smpl_buf_idx, decisionDelay; + const opus_int16 *A_Q12, *B_Q14, *AR_shp_Q13; + opus_int16 *pxq; + VARDECL( opus_int32, sLTP_Q15 ); + VARDECL( opus_int16, sLTP ); + opus_int32 HarmShapeFIRPacked_Q14; + opus_int offset_Q10; + opus_int32 RDmin_Q10, Gain_Q10; + VARDECL( opus_int32, x_sc_Q10 ); + VARDECL( opus_int32, delayedGain_Q10 ); + VARDECL( NSQ_del_decs_struct, psDelDec ); + int32x4_t t_s32x4; + SAVE_STACK; + + /* Set unvoiced lag to the previous one, overwrite later for voiced */ + lag = NSQ->lagPrev; + + silk_assert( NSQ->prev_gain_Q16 != 0 ); + + /* Initialize delayed decision states */ + ALLOC( psDelDec, 1, NSQ_del_decs_struct ); + /* Only RandState and RD_Q10 need to be initialized to 0. 
*/ + silk_memset( psDelDec->RandState, 0, sizeof( psDelDec->RandState ) ); + vst1q_s32( psDelDec->RD_Q10, vdupq_n_s32( 0 ) ); + + for( k = 0; k < psEncC->nStatesDelayedDecision; k++ ) { + psDelDec->SeedInit[ k ] = psDelDec->Seed[ k ] = ( k + psIndices->Seed ) & 3; + } + vst1q_s32( psDelDec->LF_AR_Q14, vld1q_dup_s32( &NSQ->sLF_AR_shp_Q14 ) ); + vst1q_s32( psDelDec->Diff_Q14, vld1q_dup_s32( &NSQ->sDiff_shp_Q14 ) ); + vst1q_s32( psDelDec->Shape_Q14[ 0 ], vld1q_dup_s32( &NSQ->sLTP_shp_Q14[ psEncC->ltp_mem_length - 1 ] ) ); + for( i = 0; i < NSQ_LPC_BUF_LENGTH; i++ ) { + vst1q_s32( psDelDec->sLPC_Q14[ i ], vld1q_dup_s32( &NSQ->sLPC_Q14[ i ] ) ); + } + for( i = 0; i < (opus_int)( sizeof( NSQ->sAR2_Q14 ) / sizeof( NSQ->sAR2_Q14[ 0 ] ) ); i++ ) { + vst1q_s32( psDelDec->sAR2_Q14[ i ], vld1q_dup_s32( &NSQ->sAR2_Q14[ i ] ) ); + } + + offset_Q10 = silk_Quantization_Offsets_Q10[ psIndices->signalType >> 1 ][ psIndices->quantOffsetType ]; + smpl_buf_idx = 0; /* index of oldest samples */ + + decisionDelay = silk_min_int( DECISION_DELAY, psEncC->subfr_length ); + + /* For voiced frames limit the decision delay to lower than the pitch lag */ + if( psIndices->signalType == TYPE_VOICED ) { + opus_int pitch_min = pitchL[ 0 ]; + for( k = 1; k < psEncC->nb_subfr; k++ ) { + pitch_min = silk_min_int( pitch_min, pitchL[ k ] ); + } + decisionDelay = silk_min_int( decisionDelay, pitch_min - LTP_ORDER / 2 - 1 ); + } else { + if( lag > 0 ) { + decisionDelay = silk_min_int( decisionDelay, lag - LTP_ORDER / 2 - 1 ); + } + } + + if( psIndices->NLSFInterpCoef_Q2 == 4 ) { + LSF_interpolation_flag = 0; + } else { + LSF_interpolation_flag = 1; + } + + ALLOC( sLTP_Q15, psEncC->ltp_mem_length + psEncC->frame_length, opus_int32 ); + ALLOC( sLTP, psEncC->ltp_mem_length + psEncC->frame_length, opus_int16 ); + ALLOC( x_sc_Q10, psEncC->subfr_length, opus_int32 ); + ALLOC( delayedGain_Q10, DECISION_DELAY, opus_int32 ); + /* Set up pointers to start of sub frame */ + pxq = &NSQ->xq[ psEncC->ltp_mem_length ]; + NSQ->sLTP_shp_buf_idx = psEncC->ltp_mem_length; + NSQ->sLTP_buf_idx = psEncC->ltp_mem_length; + subfr = 0; + for( k = 0; k < psEncC->nb_subfr; k++ ) { + A_Q12 = &PredCoef_Q12[ ( ( k >> 1 ) | ( 1 - LSF_interpolation_flag ) ) * MAX_LPC_ORDER ]; + B_Q14 = &LTPCoef_Q14[ k * LTP_ORDER ]; + AR_shp_Q13 = &AR_Q13[ k * MAX_SHAPE_LPC_ORDER ]; + + /* Noise shape parameters */ + silk_assert( HarmShapeGain_Q14[ k ] >= 0 ); + HarmShapeFIRPacked_Q14 = silk_RSHIFT( HarmShapeGain_Q14[ k ], 2 ); + HarmShapeFIRPacked_Q14 |= silk_LSHIFT( (opus_int32)silk_RSHIFT( HarmShapeGain_Q14[ k ], 1 ), 16 ); + + NSQ->rewhite_flag = 0; + if( psIndices->signalType == TYPE_VOICED ) { + /* Voiced */ + lag = pitchL[ k ]; + + /* Re-whitening */ + if( ( k & ( 3 - silk_LSHIFT( LSF_interpolation_flag, 1 ) ) ) == 0 ) { + if( k == 2 ) { + /* RESET DELAYED DECISIONS */ + /* Find winner */ + int32x4_t RD_Q10_s32x4; + RDmin_Q10 = psDelDec->RD_Q10[ 0 ]; + Winner_ind = 0; + for( i = 1; i < psEncC->nStatesDelayedDecision; i++ ) { + if( psDelDec->RD_Q10[ i ] < RDmin_Q10 ) { + RDmin_Q10 = psDelDec->RD_Q10[ i ]; + Winner_ind = i; + } + } + psDelDec->RD_Q10[ Winner_ind ] -= ( silk_int32_MAX >> 4 ); + RD_Q10_s32x4 = vld1q_s32( psDelDec->RD_Q10 ); + RD_Q10_s32x4 = vaddq_s32( RD_Q10_s32x4, vdupq_n_s32( silk_int32_MAX >> 4 ) ); + vst1q_s32( psDelDec->RD_Q10, RD_Q10_s32x4 ); + + /* Copy final part of signals from winner state to output and long-term filter states */ + copy_winner_state( psDelDec, decisionDelay, smpl_buf_idx, Winner_ind, Gains_Q16[ 1 ], 14, pulses, pxq, NSQ ); + + subfr 
= 0; + } + + /* Rewhiten with new A coefs */ + start_idx = psEncC->ltp_mem_length - lag - psEncC->predictLPCOrder - LTP_ORDER / 2; + silk_assert( start_idx > 0 ); + + silk_LPC_analysis_filter( &sLTP[ start_idx ], &NSQ->xq[ start_idx + k * psEncC->subfr_length ], + A_Q12, psEncC->ltp_mem_length - start_idx, psEncC->predictLPCOrder, psEncC->arch ); + + NSQ->sLTP_buf_idx = psEncC->ltp_mem_length; + NSQ->rewhite_flag = 1; + } + } + + silk_nsq_del_dec_scale_states_neon( psEncC, NSQ, psDelDec, x16, x_sc_Q10, sLTP, sLTP_Q15, k, + LTP_scale_Q14, Gains_Q16, pitchL, psIndices->signalType, decisionDelay ); + + silk_noise_shape_quantizer_del_dec_neon( NSQ, psDelDec, psIndices->signalType, x_sc_Q10, pulses, pxq, sLTP_Q15, + delayedGain_Q10, A_Q12, B_Q14, AR_shp_Q13, lag, HarmShapeFIRPacked_Q14, Tilt_Q14[ k ], LF_shp_Q14[ k ], + Gains_Q16[ k ], Lambda_Q10, offset_Q10, psEncC->subfr_length, subfr++, psEncC->shapingLPCOrder, + psEncC->predictLPCOrder, psEncC->warping_Q16, psEncC->nStatesDelayedDecision, &smpl_buf_idx, decisionDelay ); + + x16 += psEncC->subfr_length; + pulses += psEncC->subfr_length; + pxq += psEncC->subfr_length; + } + + /* Find winner */ + RDmin_Q10 = psDelDec->RD_Q10[ 0 ]; + Winner_ind = 0; + for( k = 1; k < psEncC->nStatesDelayedDecision; k++ ) { + if( psDelDec->RD_Q10[ k ] < RDmin_Q10 ) { + RDmin_Q10 = psDelDec->RD_Q10[ k ]; + Winner_ind = k; + } + } + + /* Copy final part of signals from winner state to output and long-term filter states */ + psIndices->Seed = psDelDec->SeedInit[ Winner_ind ]; + Gain_Q10 = silk_RSHIFT32( Gains_Q16[ psEncC->nb_subfr - 1 ], 6 ); + copy_winner_state( psDelDec, decisionDelay, smpl_buf_idx, Winner_ind, Gain_Q10, 8, pulses, pxq, NSQ ); + + t_s32x4 = vdupq_n_s32( 0 ); /* initialization */ + for( i = 0; i < ( NSQ_LPC_BUF_LENGTH - 3 ); i += 4 ) { + t_s32x4 = vld1q_lane_s32( &psDelDec->sLPC_Q14[ i + 0 ][ Winner_ind ], t_s32x4, 0 ); + t_s32x4 = vld1q_lane_s32( &psDelDec->sLPC_Q14[ i + 1 ][ Winner_ind ], t_s32x4, 1 ); + t_s32x4 = vld1q_lane_s32( &psDelDec->sLPC_Q14[ i + 2 ][ Winner_ind ], t_s32x4, 2 ); + t_s32x4 = vld1q_lane_s32( &psDelDec->sLPC_Q14[ i + 3 ][ Winner_ind ], t_s32x4, 3 ); + vst1q_s32( &NSQ->sLPC_Q14[ i ], t_s32x4 ); + } + + for( ; i < NSQ_LPC_BUF_LENGTH; i++ ) { + NSQ->sLPC_Q14[ i ] = psDelDec->sLPC_Q14[ i ][ Winner_ind ]; + } + + for( i = 0; i < (opus_int)( sizeof( NSQ->sAR2_Q14 ) / sizeof( NSQ->sAR2_Q14[ 0 ] ) - 3 ); i += 4 ) { + t_s32x4 = vld1q_lane_s32( &psDelDec->sAR2_Q14[ i + 0 ][ Winner_ind ], t_s32x4, 0 ); + t_s32x4 = vld1q_lane_s32( &psDelDec->sAR2_Q14[ i + 1 ][ Winner_ind ], t_s32x4, 1 ); + t_s32x4 = vld1q_lane_s32( &psDelDec->sAR2_Q14[ i + 2 ][ Winner_ind ], t_s32x4, 2 ); + t_s32x4 = vld1q_lane_s32( &psDelDec->sAR2_Q14[ i + 3 ][ Winner_ind ], t_s32x4, 3 ); + vst1q_s32( &NSQ->sAR2_Q14[ i ], t_s32x4 ); + } + + for( ; i < (opus_int)( sizeof( NSQ->sAR2_Q14 ) / sizeof( NSQ->sAR2_Q14[ 0 ] ) ); i++ ) { + NSQ->sAR2_Q14[ i ] = psDelDec->sAR2_Q14[ i ][ Winner_ind ]; + } + + /* Update states */ + NSQ->sLF_AR_shp_Q14 = psDelDec->LF_AR_Q14[ Winner_ind ]; + NSQ->sDiff_shp_Q14 = psDelDec->Diff_Q14[ Winner_ind ]; + NSQ->lagPrev = pitchL[ psEncC->nb_subfr - 1 ]; + + /* Save quantized speech signal */ + silk_memmove( NSQ->xq, &NSQ->xq[ psEncC->frame_length ], psEncC->ltp_mem_length * sizeof( opus_int16 ) ); + silk_memmove( NSQ->sLTP_shp_Q14, &NSQ->sLTP_shp_Q14[ psEncC->frame_length ], psEncC->ltp_mem_length * sizeof( opus_int32 ) ); + RESTORE_STACK; + } + +#ifdef OPUS_CHECK_ASM + silk_assert( !memcmp( &NSQ_c, NSQ, sizeof( NSQ_c ) ) ); + silk_assert( 
!memcmp( &psIndices_c, psIndices, sizeof( psIndices_c ) ) ); + silk_assert( !memcmp( pulses_c, pulses_a, sizeof( pulses_c ) ) ); +#endif +} + +/******************************************/ +/* Noise shape quantizer for one subframe */ +/******************************************/ +/* Note: Function silk_short_prediction_create_arch_coef_neon() defined in NSQ_neon.h is actually a hacking C function. */ +/* Therefore here we append "_local" to the NEON function name to avoid confusion. */ +static OPUS_INLINE void silk_short_prediction_create_arch_coef_neon_local(opus_int32 *out, const opus_int16 *in, opus_int order) +{ + int16x8_t t_s16x8; + int32x4_t t0_s32x4, t1_s32x4, t2_s32x4, t3_s32x4; + silk_assert( order == 10 || order == 16 ); + + t_s16x8 = vld1q_s16( in + 0 ); /* 7 6 5 4 3 2 1 0 */ + t_s16x8 = vrev64q_s16( t_s16x8 ); /* 4 5 6 7 0 1 2 3 */ + t2_s32x4 = vshll_n_s16( vget_high_s16( t_s16x8 ), 15 ); /* 4 5 6 7 */ + t3_s32x4 = vshll_n_s16( vget_low_s16( t_s16x8 ), 15 ); /* 0 1 2 3 */ + + if( order == 16 ) { + t_s16x8 = vld1q_s16( in + 8 ); /* F E D C B A 9 8 */ + t_s16x8 = vrev64q_s16( t_s16x8 ); /* C D E F 8 9 A B */ + t0_s32x4 = vshll_n_s16( vget_high_s16( t_s16x8 ), 15 ); /* C D E F */ + t1_s32x4 = vshll_n_s16( vget_low_s16( t_s16x8 ), 15 ); /* 8 9 A B */ + } else { + int16x4_t t_s16x4; + + t0_s32x4 = vdupq_n_s32( 0 ); /* zero zero zero zero */ + t_s16x4 = vld1_s16( in + 6 ); /* 9 8 7 6 */ + t_s16x4 = vrev64_s16( t_s16x4 ); /* 6 7 8 9 */ + t1_s32x4 = vshll_n_s16( t_s16x4, 15 ); + t1_s32x4 = vcombine_s32( vget_low_s32(t0_s32x4), vget_low_s32( t1_s32x4 ) ); /* 8 9 zero zero */ + } + vst1q_s32( out + 0, t0_s32x4 ); + vst1q_s32( out + 4, t1_s32x4 ); + vst1q_s32( out + 8, t2_s32x4 ); + vst1q_s32( out + 12, t3_s32x4 ); +} + +static OPUS_INLINE int32x4_t silk_SMLAWB_lane0_neon( + const int32x4_t out_s32x4, + const int32x4_t in_s32x4, + const int32x2_t coef_s32x2 +) +{ + return vaddq_s32( out_s32x4, vqdmulhq_lane_s32( in_s32x4, coef_s32x2, 0 ) ); +} + +static OPUS_INLINE int32x4_t silk_SMLAWB_lane1_neon( + const int32x4_t out_s32x4, + const int32x4_t in_s32x4, + const int32x2_t coef_s32x2 +) +{ + return vaddq_s32( out_s32x4, vqdmulhq_lane_s32( in_s32x4, coef_s32x2, 1 ) ); +} + +/* Note: This function has different return value than silk_noise_shape_quantizer_short_prediction_neon(). */ +/* Therefore here we append "_local" to the function name to avoid confusion. 
*/ +static OPUS_INLINE int32x4_t silk_noise_shape_quantizer_short_prediction_neon_local(const opus_int32 *buf32, const opus_int32 *a_Q12_arch, opus_int order) +{ + const int32x4_t a_Q12_arch0_s32x4 = vld1q_s32( a_Q12_arch + 0 ); + const int32x4_t a_Q12_arch1_s32x4 = vld1q_s32( a_Q12_arch + 4 ); + const int32x4_t a_Q12_arch2_s32x4 = vld1q_s32( a_Q12_arch + 8 ); + const int32x4_t a_Q12_arch3_s32x4 = vld1q_s32( a_Q12_arch + 12 ); + int32x4_t LPC_pred_Q14_s32x4; + + silk_assert( order == 10 || order == 16 ); + /* Avoids introducing a bias because silk_SMLAWB() always rounds to -inf */ + LPC_pred_Q14_s32x4 = vdupq_n_s32( silk_RSHIFT( order, 1 ) ); + LPC_pred_Q14_s32x4 = silk_SMLAWB_lane0_neon( LPC_pred_Q14_s32x4, vld1q_s32( buf32 + 0 * NEON_MAX_DEL_DEC_STATES ), vget_low_s32( a_Q12_arch0_s32x4 ) ); + LPC_pred_Q14_s32x4 = silk_SMLAWB_lane1_neon( LPC_pred_Q14_s32x4, vld1q_s32( buf32 + 1 * NEON_MAX_DEL_DEC_STATES ), vget_low_s32( a_Q12_arch0_s32x4 ) ); + LPC_pred_Q14_s32x4 = silk_SMLAWB_lane0_neon( LPC_pred_Q14_s32x4, vld1q_s32( buf32 + 2 * NEON_MAX_DEL_DEC_STATES ), vget_high_s32( a_Q12_arch0_s32x4 ) ); + LPC_pred_Q14_s32x4 = silk_SMLAWB_lane1_neon( LPC_pred_Q14_s32x4, vld1q_s32( buf32 + 3 * NEON_MAX_DEL_DEC_STATES ), vget_high_s32( a_Q12_arch0_s32x4 ) ); + LPC_pred_Q14_s32x4 = silk_SMLAWB_lane0_neon( LPC_pred_Q14_s32x4, vld1q_s32( buf32 + 4 * NEON_MAX_DEL_DEC_STATES ), vget_low_s32( a_Q12_arch1_s32x4 ) ); + LPC_pred_Q14_s32x4 = silk_SMLAWB_lane1_neon( LPC_pred_Q14_s32x4, vld1q_s32( buf32 + 5 * NEON_MAX_DEL_DEC_STATES ), vget_low_s32( a_Q12_arch1_s32x4 ) ); + LPC_pred_Q14_s32x4 = silk_SMLAWB_lane0_neon( LPC_pred_Q14_s32x4, vld1q_s32( buf32 + 6 * NEON_MAX_DEL_DEC_STATES ), vget_high_s32( a_Q12_arch1_s32x4 ) ); + LPC_pred_Q14_s32x4 = silk_SMLAWB_lane1_neon( LPC_pred_Q14_s32x4, vld1q_s32( buf32 + 7 * NEON_MAX_DEL_DEC_STATES ), vget_high_s32( a_Q12_arch1_s32x4 ) ); + LPC_pred_Q14_s32x4 = silk_SMLAWB_lane0_neon( LPC_pred_Q14_s32x4, vld1q_s32( buf32 + 8 * NEON_MAX_DEL_DEC_STATES ), vget_low_s32( a_Q12_arch2_s32x4 ) ); + LPC_pred_Q14_s32x4 = silk_SMLAWB_lane1_neon( LPC_pred_Q14_s32x4, vld1q_s32( buf32 + 9 * NEON_MAX_DEL_DEC_STATES ), vget_low_s32( a_Q12_arch2_s32x4 ) ); + LPC_pred_Q14_s32x4 = silk_SMLAWB_lane0_neon( LPC_pred_Q14_s32x4, vld1q_s32( buf32 + 10 * NEON_MAX_DEL_DEC_STATES ), vget_high_s32( a_Q12_arch2_s32x4 ) ); + LPC_pred_Q14_s32x4 = silk_SMLAWB_lane1_neon( LPC_pred_Q14_s32x4, vld1q_s32( buf32 + 11 * NEON_MAX_DEL_DEC_STATES ), vget_high_s32( a_Q12_arch2_s32x4 ) ); + LPC_pred_Q14_s32x4 = silk_SMLAWB_lane0_neon( LPC_pred_Q14_s32x4, vld1q_s32( buf32 + 12 * NEON_MAX_DEL_DEC_STATES ), vget_low_s32( a_Q12_arch3_s32x4 ) ); + LPC_pred_Q14_s32x4 = silk_SMLAWB_lane1_neon( LPC_pred_Q14_s32x4, vld1q_s32( buf32 + 13 * NEON_MAX_DEL_DEC_STATES ), vget_low_s32( a_Q12_arch3_s32x4 ) ); + LPC_pred_Q14_s32x4 = silk_SMLAWB_lane0_neon( LPC_pred_Q14_s32x4, vld1q_s32( buf32 + 14 * NEON_MAX_DEL_DEC_STATES ), vget_high_s32( a_Q12_arch3_s32x4 ) ); + LPC_pred_Q14_s32x4 = silk_SMLAWB_lane1_neon( LPC_pred_Q14_s32x4, vld1q_s32( buf32 + 15 * NEON_MAX_DEL_DEC_STATES ), vget_high_s32( a_Q12_arch3_s32x4 ) ); + + return LPC_pred_Q14_s32x4; +} + +static OPUS_INLINE void silk_noise_shape_quantizer_del_dec_neon( + silk_nsq_state *NSQ, /* I/O NSQ state */ + NSQ_del_decs_struct psDelDec[], /* I/O Delayed decision states */ + opus_int signalType, /* I Signal type */ + const opus_int32 x_Q10[], /* I */ + opus_int8 pulses[], /* O */ + opus_int16 xq[], /* O */ + opus_int32 sLTP_Q15[], /* I/O LTP filter state */ + opus_int32 
delayedGain_Q10[], /* I/O Gain delay buffer */ + const opus_int16 a_Q12[], /* I Short term prediction coefs */ + const opus_int16 b_Q14[], /* I Long term prediction coefs */ + const opus_int16 AR_shp_Q13[], /* I Noise shaping coefs */ + opus_int lag, /* I Pitch lag */ + opus_int32 HarmShapeFIRPacked_Q14, /* I */ + opus_int Tilt_Q14, /* I Spectral tilt */ + opus_int32 LF_shp_Q14, /* I */ + opus_int32 Gain_Q16, /* I */ + opus_int Lambda_Q10, /* I */ + opus_int offset_Q10, /* I */ + opus_int length, /* I Input length */ + opus_int subfr, /* I Subframe number */ + opus_int shapingLPCOrder, /* I Shaping LPC filter order */ + opus_int predictLPCOrder, /* I Prediction filter order */ + opus_int warping_Q16, /* I */ + opus_int nStatesDelayedDecision, /* I Number of states in decision tree */ + opus_int *smpl_buf_idx, /* I/O Index to newest samples in buffers */ + opus_int decisionDelay /* I */ +) +{ + opus_int i, j, k, Winner_ind, RDmin_ind, RDmax_ind, last_smple_idx; + opus_int32 Winner_rand_state; + opus_int32 LTP_pred_Q14, n_LTP_Q14; + opus_int32 RDmin_Q10, RDmax_Q10; + opus_int32 Gain_Q10; + opus_int32 *pred_lag_ptr, *shp_lag_ptr; + opus_int32 a_Q12_arch[MAX_LPC_ORDER]; + const int32x2_t warping_Q16_s32x2 = vdup_n_s32( silk_LSHIFT32( warping_Q16, 16 ) >> 1 ); + const opus_int32 LF_shp_Q29 = silk_LSHIFT32( LF_shp_Q14, 16 ) >> 1; + opus_int32 AR_shp_Q28[ MAX_SHAPE_LPC_ORDER ]; + const uint32x4_t rand_multiplier_u32x4 = vdupq_n_u32( RAND_MULTIPLIER ); + const uint32x4_t rand_increment_u32x4 = vdupq_n_u32( RAND_INCREMENT ); + + VARDECL( NSQ_samples_struct, psSampleState ); + SAVE_STACK; + + silk_assert( nStatesDelayedDecision > 0 ); + silk_assert( ( shapingLPCOrder & 1 ) == 0 ); /* check that order is even */ + ALLOC( psSampleState, 2, NSQ_samples_struct ); + + shp_lag_ptr = &NSQ->sLTP_shp_Q14[ NSQ->sLTP_shp_buf_idx - lag + HARM_SHAPE_FIR_TAPS / 2 ]; + pred_lag_ptr = &sLTP_Q15[ NSQ->sLTP_buf_idx - lag + LTP_ORDER / 2 ]; + Gain_Q10 = silk_RSHIFT( Gain_Q16, 6 ); + + for( i = 0; i < ( MAX_SHAPE_LPC_ORDER - 7 ); i += 8 ) { + const int16x8_t t_s16x8 = vld1q_s16( AR_shp_Q13 + i ); + vst1q_s32( AR_shp_Q28 + i + 0, vshll_n_s16( vget_low_s16( t_s16x8 ), 15 ) ); + vst1q_s32( AR_shp_Q28 + i + 4, vshll_n_s16( vget_high_s16( t_s16x8 ), 15 ) ); + } + + for( ; i < MAX_SHAPE_LPC_ORDER; i++ ) { + AR_shp_Q28[i] = silk_LSHIFT32( AR_shp_Q13[i], 15 ); + } + + silk_short_prediction_create_arch_coef_neon_local( a_Q12_arch, a_Q12, predictLPCOrder ); + + for( i = 0; i < length; i++ ) { + int32x4_t Seed_s32x4, LPC_pred_Q14_s32x4; + int32x4_t sign_s32x4, tmp1_s32x4, tmp2_s32x4; + int32x4_t n_AR_Q14_s32x4, n_LF_Q14_s32x4; + int32x2_t AR_shp_Q28_s32x2; + int16x4_t r_Q10_s16x4, rr_Q10_s16x4; + + /* Perform common calculations used in all states */ + + /* Long-term prediction */ + if( signalType == TYPE_VOICED ) { + /* Unrolled loop */ + /* Avoids introducing a bias because silk_SMLAWB() always rounds to -inf */ + LTP_pred_Q14 = 2; + LTP_pred_Q14 = silk_SMLAWB( LTP_pred_Q14, pred_lag_ptr[ 0 ], b_Q14[ 0 ] ); + LTP_pred_Q14 = silk_SMLAWB( LTP_pred_Q14, pred_lag_ptr[ -1 ], b_Q14[ 1 ] ); + LTP_pred_Q14 = silk_SMLAWB( LTP_pred_Q14, pred_lag_ptr[ -2 ], b_Q14[ 2 ] ); + LTP_pred_Q14 = silk_SMLAWB( LTP_pred_Q14, pred_lag_ptr[ -3 ], b_Q14[ 3 ] ); + LTP_pred_Q14 = silk_SMLAWB( LTP_pred_Q14, pred_lag_ptr[ -4 ], b_Q14[ 4 ] ); + LTP_pred_Q14 = silk_LSHIFT( LTP_pred_Q14, 1 ); /* Q13 -> Q14 */ + pred_lag_ptr++; + } else { + LTP_pred_Q14 = 0; + } + + /* Long-term shaping */ + if( lag > 0 ) { + /* Symmetric, packed FIR coefficients */ + 
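/* HarmShapeFIRPacked_Q14 packs two Q14 taps into one 32-bit word: the low 16
   bits hold HarmShapeGain_Q14 / 4 and the high 16 bits hold
   HarmShapeGain_Q14 / 2 (see the packing in the subframe loop above).
   silk_SMULWB() below applies the low half to shp_lag_ptr[ 0 ] + shp_lag_ptr[ -2 ]
   and silk_SMLAWT() applies the high half to shp_lag_ptr[ -1 ], i.e. a symmetric
   3-tap harmonic shaping FIR with taps gain/4, gain/2, gain/4 centred on the
   pitch lag. */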
n_LTP_Q14 = silk_SMULWB( silk_ADD32( shp_lag_ptr[ 0 ], shp_lag_ptr[ -2 ] ), HarmShapeFIRPacked_Q14 ); + n_LTP_Q14 = silk_SMLAWT( n_LTP_Q14, shp_lag_ptr[ -1 ], HarmShapeFIRPacked_Q14 ); + n_LTP_Q14 = silk_SUB_LSHIFT32( LTP_pred_Q14, n_LTP_Q14, 2 ); /* Q12 -> Q14 */ + shp_lag_ptr++; + } else { + n_LTP_Q14 = 0; + } + + /* Generate dither */ + Seed_s32x4 = vld1q_s32( psDelDec->Seed ); + Seed_s32x4 = vreinterpretq_s32_u32( vmlaq_u32( rand_increment_u32x4, vreinterpretq_u32_s32( Seed_s32x4 ), rand_multiplier_u32x4 ) ); + vst1q_s32( psDelDec->Seed, Seed_s32x4 ); + + /* Short-term prediction */ + LPC_pred_Q14_s32x4 = silk_noise_shape_quantizer_short_prediction_neon_local(psDelDec->sLPC_Q14[ NSQ_LPC_BUF_LENGTH - 16 + i ], a_Q12_arch, predictLPCOrder); + LPC_pred_Q14_s32x4 = vshlq_n_s32( LPC_pred_Q14_s32x4, 4 ); /* Q10 -> Q14 */ + + /* Noise shape feedback */ + /* Output of lowpass section */ + tmp2_s32x4 = silk_SMLAWB_lane0_neon( vld1q_s32( psDelDec->Diff_Q14 ), vld1q_s32( psDelDec->sAR2_Q14[ 0 ] ), warping_Q16_s32x2 ); + /* Output of allpass section */ + tmp1_s32x4 = vsubq_s32( vld1q_s32( psDelDec->sAR2_Q14[ 1 ] ), tmp2_s32x4 ); + tmp1_s32x4 = silk_SMLAWB_lane0_neon( vld1q_s32( psDelDec->sAR2_Q14[ 0 ] ), tmp1_s32x4, warping_Q16_s32x2 ); + vst1q_s32( psDelDec->sAR2_Q14[ 0 ], tmp2_s32x4 ); + AR_shp_Q28_s32x2 = vld1_s32( AR_shp_Q28 ); + n_AR_Q14_s32x4 = vaddq_s32( vdupq_n_s32( silk_RSHIFT( shapingLPCOrder, 1 ) ), vqdmulhq_lane_s32( tmp2_s32x4, AR_shp_Q28_s32x2, 0 ) ); + + /* Loop over allpass sections */ + for( j = 2; j < shapingLPCOrder; j += 2 ) { + /* Output of allpass section */ + tmp2_s32x4 = vsubq_s32( vld1q_s32( psDelDec->sAR2_Q14[ j + 0 ] ), tmp1_s32x4 ); + tmp2_s32x4 = silk_SMLAWB_lane0_neon( vld1q_s32( psDelDec->sAR2_Q14[ j - 1 ] ), tmp2_s32x4, warping_Q16_s32x2 ); + vst1q_s32( psDelDec->sAR2_Q14[ j - 1 ], tmp1_s32x4 ); + n_AR_Q14_s32x4 = vaddq_s32( n_AR_Q14_s32x4, vqdmulhq_lane_s32( tmp1_s32x4, AR_shp_Q28_s32x2, 1 ) ); + /* Output of allpass section */ + tmp1_s32x4 = vsubq_s32( vld1q_s32( psDelDec->sAR2_Q14[ j + 1 ] ), tmp2_s32x4 ); + tmp1_s32x4 = silk_SMLAWB_lane0_neon( vld1q_s32( psDelDec->sAR2_Q14[ j + 0 ] ), tmp1_s32x4, warping_Q16_s32x2 ); + vst1q_s32( psDelDec->sAR2_Q14[ j + 0 ], tmp2_s32x4 ); + AR_shp_Q28_s32x2 = vld1_s32( &AR_shp_Q28[ j ] ); + n_AR_Q14_s32x4 = vaddq_s32( n_AR_Q14_s32x4, vqdmulhq_lane_s32( tmp2_s32x4, AR_shp_Q28_s32x2, 0 ) ); + } + vst1q_s32( psDelDec->sAR2_Q14[ shapingLPCOrder - 1 ], tmp1_s32x4 ); + n_AR_Q14_s32x4 = vaddq_s32( n_AR_Q14_s32x4, vqdmulhq_lane_s32( tmp1_s32x4, AR_shp_Q28_s32x2, 1 ) ); + n_AR_Q14_s32x4 = vshlq_n_s32( n_AR_Q14_s32x4, 1 ); /* Q11 -> Q12 */ + n_AR_Q14_s32x4 = vaddq_s32( n_AR_Q14_s32x4, vqdmulhq_n_s32( vld1q_s32( psDelDec->LF_AR_Q14 ), silk_LSHIFT32( Tilt_Q14, 16 ) >> 1 ) ); /* Q12 */ + n_AR_Q14_s32x4 = vshlq_n_s32( n_AR_Q14_s32x4, 2 ); /* Q12 -> Q14 */ + n_LF_Q14_s32x4 = vqdmulhq_n_s32( vld1q_s32( psDelDec->Shape_Q14[ *smpl_buf_idx ] ), LF_shp_Q29 ); /* Q12 */ + n_LF_Q14_s32x4 = vaddq_s32( n_LF_Q14_s32x4, vqdmulhq_n_s32( vld1q_s32( psDelDec->LF_AR_Q14 ), silk_LSHIFT32( LF_shp_Q14 >> 16 , 15 ) ) ); /* Q12 */ + n_LF_Q14_s32x4 = vshlq_n_s32( n_LF_Q14_s32x4, 2 ); /* Q12 -> Q14 */ + + /* Input minus prediction plus noise feedback */ + /* r = x[ i ] - LTP_pred - LPC_pred + n_AR + n_Tilt + n_LF + n_LTP */ + tmp1_s32x4 = vaddq_s32( n_AR_Q14_s32x4, n_LF_Q14_s32x4 ); /* Q14 */ + tmp2_s32x4 = vaddq_s32( vdupq_n_s32( n_LTP_Q14 ), LPC_pred_Q14_s32x4 ); /* Q13 */ + tmp1_s32x4 = vsubq_s32( tmp2_s32x4, tmp1_s32x4 ); /* Q13 */ + tmp1_s32x4 = vrshrq_n_s32( 
tmp1_s32x4, 4 ); /* Q10 */ + tmp1_s32x4 = vsubq_s32( vdupq_n_s32( x_Q10[ i ] ), tmp1_s32x4 ); /* residual error Q10 */ + + /* Flip sign depending on dither */ + sign_s32x4 = vreinterpretq_s32_u32( vcltq_s32( Seed_s32x4, vdupq_n_s32( 0 ) ) ); + tmp1_s32x4 = veorq_s32( tmp1_s32x4, sign_s32x4 ); + tmp1_s32x4 = vsubq_s32( tmp1_s32x4, sign_s32x4 ); + tmp1_s32x4 = vmaxq_s32( tmp1_s32x4, vdupq_n_s32( -( 31 << 10 ) ) ); + tmp1_s32x4 = vminq_s32( tmp1_s32x4, vdupq_n_s32( 30 << 10 ) ); + r_Q10_s16x4 = vmovn_s32( tmp1_s32x4 ); + + /* Find two quantization level candidates and measure their rate-distortion */ + { + int16x4_t q1_Q10_s16x4 = vsub_s16( r_Q10_s16x4, vdup_n_s16( offset_Q10 ) ); + int16x4_t q1_Q0_s16x4 = vshr_n_s16( q1_Q10_s16x4, 10 ); + int16x4_t q2_Q10_s16x4; + int32x4_t rd1_Q10_s32x4, rd2_Q10_s32x4; + uint32x4_t t_u32x4; + + if( Lambda_Q10 > 2048 ) { + /* For aggressive RDO, the bias becomes more than one pulse. */ + const int rdo_offset = Lambda_Q10/2 - 512; + const uint16x4_t greaterThanRdo = vcgt_s16( q1_Q10_s16x4, vdup_n_s16( rdo_offset ) ); + const uint16x4_t lessThanMinusRdo = vclt_s16( q1_Q10_s16x4, vdup_n_s16( -rdo_offset ) ); + /* If Lambda_Q10 > 32767, then q1_Q0, q1_Q10 and q2_Q10 must change to 32-bit. */ + silk_assert( Lambda_Q10 <= 32767 ); + + q1_Q0_s16x4 = vreinterpret_s16_u16( vclt_s16( q1_Q10_s16x4, vdup_n_s16( 0 ) ) ); + q1_Q0_s16x4 = vbsl_s16( greaterThanRdo, vsub_s16( q1_Q10_s16x4, vdup_n_s16( rdo_offset ) ), q1_Q0_s16x4 ); + q1_Q0_s16x4 = vbsl_s16( lessThanMinusRdo, vadd_s16( q1_Q10_s16x4, vdup_n_s16( rdo_offset ) ), q1_Q0_s16x4 ); + q1_Q0_s16x4 = vshr_n_s16( q1_Q0_s16x4, 10 ); + } + { + const uint16x4_t equal0_u16x4 = vceq_s16( q1_Q0_s16x4, vdup_n_s16( 0 ) ); + const uint16x4_t equalMinus1_u16x4 = vceq_s16( q1_Q0_s16x4, vdup_n_s16( -1 ) ); + const uint16x4_t lessThanMinus1_u16x4 = vclt_s16( q1_Q0_s16x4, vdup_n_s16( -1 ) ); + int16x4_t tmp1_s16x4, tmp2_s16x4; + + q1_Q10_s16x4 = vshl_n_s16( q1_Q0_s16x4, 10 ); + tmp1_s16x4 = vadd_s16( q1_Q10_s16x4, vdup_n_s16( offset_Q10 - QUANT_LEVEL_ADJUST_Q10 ) ); + q1_Q10_s16x4 = vadd_s16( q1_Q10_s16x4, vdup_n_s16( offset_Q10 + QUANT_LEVEL_ADJUST_Q10 ) ); + q1_Q10_s16x4 = vbsl_s16( lessThanMinus1_u16x4, q1_Q10_s16x4, tmp1_s16x4 ); + q1_Q10_s16x4 = vbsl_s16( equal0_u16x4, vdup_n_s16( offset_Q10 ), q1_Q10_s16x4 ); + q1_Q10_s16x4 = vbsl_s16( equalMinus1_u16x4, vdup_n_s16( offset_Q10 - ( 1024 - QUANT_LEVEL_ADJUST_Q10 ) ), q1_Q10_s16x4 ); + q2_Q10_s16x4 = vadd_s16( q1_Q10_s16x4, vdup_n_s16( 1024 ) ); + q2_Q10_s16x4 = vbsl_s16( equal0_u16x4, vdup_n_s16( offset_Q10 + 1024 - QUANT_LEVEL_ADJUST_Q10 ), q2_Q10_s16x4 ); + q2_Q10_s16x4 = vbsl_s16( equalMinus1_u16x4, vdup_n_s16( offset_Q10 ), q2_Q10_s16x4 ); + tmp1_s16x4 = q1_Q10_s16x4; + tmp2_s16x4 = q2_Q10_s16x4; + tmp1_s16x4 = vbsl_s16( vorr_u16( equalMinus1_u16x4, lessThanMinus1_u16x4 ), vneg_s16( tmp1_s16x4 ), tmp1_s16x4 ); + tmp2_s16x4 = vbsl_s16( lessThanMinus1_u16x4, vneg_s16( tmp2_s16x4 ), tmp2_s16x4 ); + rd1_Q10_s32x4 = vmull_s16( tmp1_s16x4, vdup_n_s16( Lambda_Q10 ) ); + rd2_Q10_s32x4 = vmull_s16( tmp2_s16x4, vdup_n_s16( Lambda_Q10 ) ); + } + + rr_Q10_s16x4 = vsub_s16( r_Q10_s16x4, q1_Q10_s16x4 ); + rd1_Q10_s32x4 = vmlal_s16( rd1_Q10_s32x4, rr_Q10_s16x4, rr_Q10_s16x4 ); + rd1_Q10_s32x4 = vshrq_n_s32( rd1_Q10_s32x4, 10 ); + + rr_Q10_s16x4 = vsub_s16( r_Q10_s16x4, q2_Q10_s16x4 ); + rd2_Q10_s32x4 = vmlal_s16( rd2_Q10_s32x4, rr_Q10_s16x4, rr_Q10_s16x4 ); + rd2_Q10_s32x4 = vshrq_n_s32( rd2_Q10_s32x4, 10 ); + + tmp2_s32x4 = vld1q_s32( psDelDec->RD_Q10 ); + tmp1_s32x4 = vaddq_s32( 
tmp2_s32x4, vminq_s32( rd1_Q10_s32x4, rd2_Q10_s32x4 ) ); + tmp2_s32x4 = vaddq_s32( tmp2_s32x4, vmaxq_s32( rd1_Q10_s32x4, rd2_Q10_s32x4 ) ); + vst1q_s32( psSampleState[ 0 ].RD_Q10, tmp1_s32x4 ); + vst1q_s32( psSampleState[ 1 ].RD_Q10, tmp2_s32x4 ); + t_u32x4 = vcltq_s32( rd1_Q10_s32x4, rd2_Q10_s32x4 ); + tmp1_s32x4 = vbslq_s32( t_u32x4, vmovl_s16( q1_Q10_s16x4 ), vmovl_s16( q2_Q10_s16x4 ) ); + tmp2_s32x4 = vbslq_s32( t_u32x4, vmovl_s16( q2_Q10_s16x4 ), vmovl_s16( q1_Q10_s16x4 ) ); + vst1q_s32( psSampleState[ 0 ].Q_Q10, tmp1_s32x4 ); + vst1q_s32( psSampleState[ 1 ].Q_Q10, tmp2_s32x4 ); + } + + { + /* Update states for best quantization */ + int32x4_t exc_Q14_s32x4, LPC_exc_Q14_s32x4, xq_Q14_s32x4, sLF_AR_shp_Q14_s32x4; + + /* Quantized excitation */ + exc_Q14_s32x4 = vshlq_n_s32( tmp1_s32x4, 4 ); + exc_Q14_s32x4 = veorq_s32( exc_Q14_s32x4, sign_s32x4 ); + exc_Q14_s32x4 = vsubq_s32( exc_Q14_s32x4, sign_s32x4 ); + + /* Add predictions */ + LPC_exc_Q14_s32x4 = vaddq_s32( exc_Q14_s32x4, vdupq_n_s32( LTP_pred_Q14 ) ); + xq_Q14_s32x4 = vaddq_s32( LPC_exc_Q14_s32x4, LPC_pred_Q14_s32x4 ); + + /* Update states */ + tmp1_s32x4 = vsubq_s32( xq_Q14_s32x4, vshlq_n_s32( vdupq_n_s32( x_Q10[ i ] ), 4 ) ); + vst1q_s32( psSampleState[ 0 ].Diff_Q14, tmp1_s32x4 ); + sLF_AR_shp_Q14_s32x4 = vsubq_s32( tmp1_s32x4, n_AR_Q14_s32x4 ); + vst1q_s32( psSampleState[ 0 ].sLTP_shp_Q14, vsubq_s32( sLF_AR_shp_Q14_s32x4, n_LF_Q14_s32x4 ) ); + vst1q_s32( psSampleState[ 0 ].LF_AR_Q14, sLF_AR_shp_Q14_s32x4 ); + vst1q_s32( psSampleState[ 0 ].LPC_exc_Q14, LPC_exc_Q14_s32x4 ); + vst1q_s32( psSampleState[ 0 ].xq_Q14, xq_Q14_s32x4 ); + + /* Quantized excitation */ + exc_Q14_s32x4 = vshlq_n_s32( tmp2_s32x4, 4 ); + exc_Q14_s32x4 = veorq_s32( exc_Q14_s32x4, sign_s32x4 ); + exc_Q14_s32x4 = vsubq_s32( exc_Q14_s32x4, sign_s32x4 ); + + /* Add predictions */ + LPC_exc_Q14_s32x4 = vaddq_s32( exc_Q14_s32x4, vdupq_n_s32( LTP_pred_Q14 ) ); + xq_Q14_s32x4 = vaddq_s32( LPC_exc_Q14_s32x4, LPC_pred_Q14_s32x4 ); + + /* Update states */ + tmp1_s32x4 = vsubq_s32( xq_Q14_s32x4, vshlq_n_s32( vdupq_n_s32( x_Q10[ i ] ), 4 ) ); + vst1q_s32( psSampleState[ 1 ].Diff_Q14, tmp1_s32x4 ); + sLF_AR_shp_Q14_s32x4 = vsubq_s32( tmp1_s32x4, n_AR_Q14_s32x4 ); + vst1q_s32( psSampleState[ 1 ].sLTP_shp_Q14, vsubq_s32( sLF_AR_shp_Q14_s32x4, n_LF_Q14_s32x4 ) ); + vst1q_s32( psSampleState[ 1 ].LF_AR_Q14, sLF_AR_shp_Q14_s32x4 ); + vst1q_s32( psSampleState[ 1 ].LPC_exc_Q14, LPC_exc_Q14_s32x4 ); + vst1q_s32( psSampleState[ 1 ].xq_Q14, xq_Q14_s32x4 ); + } + + *smpl_buf_idx = *smpl_buf_idx ? 
( *smpl_buf_idx - 1 ) : ( DECISION_DELAY - 1); + last_smple_idx = *smpl_buf_idx + decisionDelay + DECISION_DELAY; + if( last_smple_idx >= DECISION_DELAY ) last_smple_idx -= DECISION_DELAY; + if( last_smple_idx >= DECISION_DELAY ) last_smple_idx -= DECISION_DELAY; + + /* Find winner */ + RDmin_Q10 = psSampleState[ 0 ].RD_Q10[ 0 ]; + Winner_ind = 0; + for( k = 1; k < nStatesDelayedDecision; k++ ) { + if( psSampleState[ 0 ].RD_Q10[ k ] < RDmin_Q10 ) { + RDmin_Q10 = psSampleState[ 0 ].RD_Q10[ k ]; + Winner_ind = k; + } + } + + /* Increase RD values of expired states */ + { + uint32x4_t t_u32x4; + Winner_rand_state = psDelDec->RandState[ last_smple_idx ][ Winner_ind ]; + t_u32x4 = vceqq_s32( vld1q_s32( psDelDec->RandState[ last_smple_idx ] ), vdupq_n_s32( Winner_rand_state ) ); + t_u32x4 = vmvnq_u32( t_u32x4 ); + t_u32x4 = vshrq_n_u32( t_u32x4, 5 ); + tmp1_s32x4 = vld1q_s32( psSampleState[ 0 ].RD_Q10 ); + tmp2_s32x4 = vld1q_s32( psSampleState[ 1 ].RD_Q10 ); + tmp1_s32x4 = vaddq_s32( tmp1_s32x4, vreinterpretq_s32_u32( t_u32x4 ) ); + tmp2_s32x4 = vaddq_s32( tmp2_s32x4, vreinterpretq_s32_u32( t_u32x4 ) ); + vst1q_s32( psSampleState[ 0 ].RD_Q10, tmp1_s32x4 ); + vst1q_s32( psSampleState[ 1 ].RD_Q10, tmp2_s32x4 ); + + /* Find worst in first set and best in second set */ + RDmax_Q10 = psSampleState[ 0 ].RD_Q10[ 0 ]; + RDmin_Q10 = psSampleState[ 1 ].RD_Q10[ 0 ]; + RDmax_ind = 0; + RDmin_ind = 0; + for( k = 1; k < nStatesDelayedDecision; k++ ) { + /* find worst in first set */ + if( psSampleState[ 0 ].RD_Q10[ k ] > RDmax_Q10 ) { + RDmax_Q10 = psSampleState[ 0 ].RD_Q10[ k ]; + RDmax_ind = k; + } + /* find best in second set */ + if( psSampleState[ 1 ].RD_Q10[ k ] < RDmin_Q10 ) { + RDmin_Q10 = psSampleState[ 1 ].RD_Q10[ k ]; + RDmin_ind = k; + } + } + } + + /* Replace a state if best from second set outperforms worst in first set */ + if( RDmin_Q10 < RDmax_Q10 ) { + opus_int32 (*ptr)[NEON_MAX_DEL_DEC_STATES] = psDelDec->RandState; + const int numOthers = (int)( ( sizeof( NSQ_del_decs_struct ) - sizeof( ( (NSQ_del_decs_struct *)0 )->sLPC_Q14 ) ) + / ( NEON_MAX_DEL_DEC_STATES * sizeof( opus_int32 ) ) ); + /* Only ( predictLPCOrder - 1 ) of sLPC_Q14 buffer need to be updated, though the first several */ + /* useless sLPC_Q14[] will be different comparing with C when predictLPCOrder < NSQ_LPC_BUF_LENGTH. */ + /* Here just update constant ( NSQ_LPC_BUF_LENGTH - 1 ) for simplicity. 
*/ + for( j = i + 1; j < i + NSQ_LPC_BUF_LENGTH; j++ ) { + psDelDec->sLPC_Q14[ j ][ RDmax_ind ] = psDelDec->sLPC_Q14[ j ][ RDmin_ind ]; + } + for( j = 0; j < numOthers; j++ ) { + ptr[ j ][ RDmax_ind ] = ptr[ j ][ RDmin_ind ]; + } + + psSampleState[ 0 ].Q_Q10[ RDmax_ind ] = psSampleState[ 1 ].Q_Q10[ RDmin_ind ]; + psSampleState[ 0 ].RD_Q10[ RDmax_ind ] = psSampleState[ 1 ].RD_Q10[ RDmin_ind ]; + psSampleState[ 0 ].xq_Q14[ RDmax_ind ] = psSampleState[ 1 ].xq_Q14[ RDmin_ind ]; + psSampleState[ 0 ].LF_AR_Q14[ RDmax_ind ] = psSampleState[ 1 ].LF_AR_Q14[ RDmin_ind ]; + psSampleState[ 0 ].Diff_Q14[ RDmax_ind ] = psSampleState[ 1 ].Diff_Q14[ RDmin_ind ]; + psSampleState[ 0 ].sLTP_shp_Q14[ RDmax_ind ] = psSampleState[ 1 ].sLTP_shp_Q14[ RDmin_ind ]; + psSampleState[ 0 ].LPC_exc_Q14[ RDmax_ind ] = psSampleState[ 1 ].LPC_exc_Q14[ RDmin_ind ]; + } + + /* Write samples from winner to output and long-term filter states */ + if( subfr > 0 || i >= decisionDelay ) { + pulses[ i - decisionDelay ] = (opus_int8)silk_RSHIFT_ROUND( psDelDec->Q_Q10[ last_smple_idx ][ Winner_ind ], 10 ); + xq[ i - decisionDelay ] = (opus_int16)silk_SAT16( silk_RSHIFT_ROUND( + silk_SMULWW( psDelDec->Xq_Q14[ last_smple_idx ][ Winner_ind ], delayedGain_Q10[ last_smple_idx ] ), 8 ) ); + NSQ->sLTP_shp_Q14[ NSQ->sLTP_shp_buf_idx - decisionDelay ] = psDelDec->Shape_Q14[ last_smple_idx ][ Winner_ind ]; + sLTP_Q15[ NSQ->sLTP_buf_idx - decisionDelay ] = psDelDec->Pred_Q15[ last_smple_idx ][ Winner_ind ]; + } + NSQ->sLTP_shp_buf_idx++; + NSQ->sLTP_buf_idx++; + + /* Update states */ + vst1q_s32( psDelDec->LF_AR_Q14, vld1q_s32( psSampleState[ 0 ].LF_AR_Q14 ) ); + vst1q_s32( psDelDec->Diff_Q14, vld1q_s32( psSampleState[ 0 ].Diff_Q14 ) ); + vst1q_s32( psDelDec->sLPC_Q14[ NSQ_LPC_BUF_LENGTH + i ], vld1q_s32( psSampleState[ 0 ].xq_Q14 ) ); + vst1q_s32( psDelDec->Xq_Q14[ *smpl_buf_idx ], vld1q_s32( psSampleState[ 0 ].xq_Q14 ) ); + tmp1_s32x4 = vld1q_s32( psSampleState[ 0 ].Q_Q10 ); + vst1q_s32( psDelDec->Q_Q10[ *smpl_buf_idx ], tmp1_s32x4 ); + vst1q_s32( psDelDec->Pred_Q15[ *smpl_buf_idx ], vshlq_n_s32( vld1q_s32( psSampleState[ 0 ].LPC_exc_Q14 ), 1 ) ); + vst1q_s32( psDelDec->Shape_Q14[ *smpl_buf_idx ], vld1q_s32( psSampleState[ 0 ].sLTP_shp_Q14 ) ); + tmp1_s32x4 = vrshrq_n_s32( tmp1_s32x4, 10 ); + tmp1_s32x4 = vaddq_s32( vld1q_s32( psDelDec->Seed ), tmp1_s32x4 ); + vst1q_s32( psDelDec->Seed, tmp1_s32x4 ); + vst1q_s32( psDelDec->RandState[ *smpl_buf_idx ], tmp1_s32x4 ); + vst1q_s32( psDelDec->RD_Q10, vld1q_s32( psSampleState[ 0 ].RD_Q10 ) ); + delayedGain_Q10[ *smpl_buf_idx ] = Gain_Q10; + } + /* Update LPC states */ + silk_memcpy( psDelDec->sLPC_Q14[ 0 ], psDelDec->sLPC_Q14[ length ], NEON_MAX_DEL_DEC_STATES * NSQ_LPC_BUF_LENGTH * sizeof( opus_int32 ) ); + + RESTORE_STACK; +} + +static OPUS_INLINE void silk_SMULWB_8_neon( + const opus_int16 *a, + const int32x2_t b, + opus_int32 *o +) +{ + const int16x8_t a_s16x8 = vld1q_s16( a ); + int32x4_t o0_s32x4, o1_s32x4; + + o0_s32x4 = vshll_n_s16( vget_low_s16( a_s16x8 ), 15 ); + o1_s32x4 = vshll_n_s16( vget_high_s16( a_s16x8 ), 15 ); + o0_s32x4 = vqdmulhq_lane_s32( o0_s32x4, b, 0 ); + o1_s32x4 = vqdmulhq_lane_s32( o1_s32x4, b, 0 ); + vst1q_s32( o, o0_s32x4 ); + vst1q_s32( o + 4, o1_s32x4 ); +} + +/* Only works when ( b >= -65536 ) && ( b < 65536 ). 
*/ +static OPUS_INLINE void silk_SMULWW_small_b_4_neon( + opus_int32 *a, + const int32x2_t b_s32x2) +{ + int32x4_t o_s32x4; + + o_s32x4 = vld1q_s32( a ); + o_s32x4 = vqdmulhq_lane_s32( o_s32x4, b_s32x2, 0 ); + vst1q_s32( a, o_s32x4 ); +} + +/* Only works when ( b >= -65536 ) && ( b < 65536 ). */ +static OPUS_INLINE void silk_SMULWW_small_b_8_neon( + opus_int32 *a, + const int32x2_t b_s32x2 +) +{ + int32x4_t o0_s32x4, o1_s32x4; + + o0_s32x4 = vld1q_s32( a ); + o1_s32x4 = vld1q_s32( a + 4 ); + o0_s32x4 = vqdmulhq_lane_s32( o0_s32x4, b_s32x2, 0 ); + o1_s32x4 = vqdmulhq_lane_s32( o1_s32x4, b_s32x2, 0 ); + vst1q_s32( a, o0_s32x4 ); + vst1q_s32( a + 4, o1_s32x4 ); +} + +static OPUS_INLINE void silk_SMULWW_4_neon( + opus_int32 *a, + const int32x2_t b_s32x2) +{ + int32x4_t a_s32x4, o_s32x4; + + a_s32x4 = vld1q_s32( a ); + o_s32x4 = vqdmulhq_lane_s32( a_s32x4, b_s32x2, 0 ); + o_s32x4 = vmlaq_lane_s32( o_s32x4, a_s32x4, b_s32x2, 1 ); + vst1q_s32( a, o_s32x4 ); +} + +static OPUS_INLINE void silk_SMULWW_8_neon( + opus_int32 *a, + const int32x2_t b_s32x2 +) +{ + int32x4_t a0_s32x4, a1_s32x4, o0_s32x4, o1_s32x4; + + a0_s32x4 = vld1q_s32( a ); + a1_s32x4 = vld1q_s32( a + 4 ); + o0_s32x4 = vqdmulhq_lane_s32( a0_s32x4, b_s32x2, 0 ); + o1_s32x4 = vqdmulhq_lane_s32( a1_s32x4, b_s32x2, 0 ); + o0_s32x4 = vmlaq_lane_s32( o0_s32x4, a0_s32x4, b_s32x2, 1 ); + o1_s32x4 = vmlaq_lane_s32( o1_s32x4, a1_s32x4, b_s32x2, 1 ); + vst1q_s32( a, o0_s32x4 ); + vst1q_s32( a + 4, o1_s32x4 ); +} + +static OPUS_INLINE void silk_SMULWW_loop_neon( + const opus_int16 *a, + const opus_int32 b, + opus_int32 *o, + const opus_int loop_num +) +{ + opus_int i; + int32x2_t b_s32x2; + + b_s32x2 = vdup_n_s32( b ); + for( i = 0; i < loop_num - 7; i += 8 ) { + silk_SMULWB_8_neon( a + i, b_s32x2, o + i ); + } + for( ; i < loop_num; i++ ) { + o[ i ] = silk_SMULWW( a[ i ], b ); + } +} + +static OPUS_INLINE void silk_nsq_del_dec_scale_states_neon( + const silk_encoder_state *psEncC, /* I Encoder State */ + silk_nsq_state *NSQ, /* I/O NSQ state */ + NSQ_del_decs_struct psDelDec[], /* I/O Delayed decision states */ + const opus_int16 x16[], /* I Input */ + opus_int32 x_sc_Q10[], /* O Input scaled with 1/Gain in Q10 */ + const opus_int16 sLTP[], /* I Re-whitened LTP state in Q0 */ + opus_int32 sLTP_Q15[], /* O LTP state matching scaled input */ + opus_int subfr, /* I Subframe number */ + const opus_int LTP_scale_Q14, /* I LTP state scaling */ + const opus_int32 Gains_Q16[ MAX_NB_SUBFR ], /* I */ + const opus_int pitchL[ MAX_NB_SUBFR ], /* I Pitch lag */ + const opus_int signal_type, /* I Signal type */ + const opus_int decisionDelay /* I Decision delay */ +) +{ + opus_int i, lag; + opus_int32 gain_adj_Q16, inv_gain_Q31, inv_gain_Q26; + + lag = pitchL[ subfr ]; + inv_gain_Q31 = silk_INVERSE32_varQ( silk_max( Gains_Q16[ subfr ], 1 ), 47 ); + silk_assert( inv_gain_Q31 != 0 ); + + /* Scale input */ + inv_gain_Q26 = silk_RSHIFT_ROUND( inv_gain_Q31, 5 ); + silk_SMULWW_loop_neon( x16, inv_gain_Q26, x_sc_Q10, psEncC->subfr_length ); + + /* After rewhitening the LTP state is un-scaled, so scale with inv_gain_Q16 */ + if( NSQ->rewhite_flag ) { + if( subfr == 0 ) { + /* Do LTP downscaling */ + inv_gain_Q31 = silk_LSHIFT( silk_SMULWB( inv_gain_Q31, LTP_scale_Q14 ), 2 ); + } + silk_SMULWW_loop_neon( sLTP + NSQ->sLTP_buf_idx - lag - LTP_ORDER / 2, inv_gain_Q31, sLTP_Q15 + NSQ->sLTP_buf_idx - lag - LTP_ORDER / 2, lag + LTP_ORDER / 2 ); + } + + /* Adjust for changing gain */ + if( Gains_Q16[ subfr ] != NSQ->prev_gain_Q16 ) { + int32x2_t gain_adj_Q16_s32x2; + 
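The silk_SMULWW_small_b_*_neon() helpers above lean on vqdmulhq_lane_s32(), which keeps the high 32 bits of the doubled 64-bit product, i.e. (2*a*b) >> 32. Pre-shifting b left by 15 turns that into (a*b) >> 16, which is roughly what silk_SMULWW() computes; the pre-shift only fits in 32 bits when -65536 <= b < 65536, hence the restriction noted in the comments above and the separate low/high split used by silk_SMULWW_4_neon()/silk_SMULWW_8_neon() for general b. A scalar sketch of the identity (plain C with invented names, not code from the patch):

    #include <stdint.h>

    /* Reference: approximately what silk_SMULWW( a, b ) computes. */
    static int32_t smulww_ref( int32_t a, int32_t b )
    {
        return (int32_t)( ( (int64_t)a * b ) >> 16 );
    }

    /* Same value via the "keep the high half of the doubled product" step that
       vqdmulhq_lane_s32() performs, assuming -65536 <= b < 65536. */
    static int32_t smulww_via_doubling_high_half( int32_t a, int32_t b )
    {
        int32_t b_q15 = (int32_t)( (uint32_t)b << 15 );    /* b pre-shifted as in the NEON helpers  */
        return (int32_t)( ( (int64_t)a * b_q15 ) >> 31 );  /* == (2*a*b_q15) >> 32, saturation not modelled */
    }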
gain_adj_Q16 = silk_DIV32_varQ( NSQ->prev_gain_Q16, Gains_Q16[ subfr ], 16 ); + + /* Scale long-term shaping state */ + if( ( gain_adj_Q16 >= -65536 ) && ( gain_adj_Q16 < 65536 ) ) { + gain_adj_Q16_s32x2 = vdup_n_s32( silk_LSHIFT32( gain_adj_Q16, 15 ) ); + for( i = NSQ->sLTP_shp_buf_idx - psEncC->ltp_mem_length; i < NSQ->sLTP_shp_buf_idx - 7; i += 8 ) { + silk_SMULWW_small_b_8_neon( NSQ->sLTP_shp_Q14 + i, gain_adj_Q16_s32x2 ); + } + for( ; i < NSQ->sLTP_shp_buf_idx; i++ ) { + NSQ->sLTP_shp_Q14[ i ] = silk_SMULWW( gain_adj_Q16, NSQ->sLTP_shp_Q14[ i ] ); + } + + /* Scale long-term prediction state */ + if( signal_type == TYPE_VOICED && NSQ->rewhite_flag == 0 ) { + for( i = NSQ->sLTP_buf_idx - lag - LTP_ORDER / 2; i < NSQ->sLTP_buf_idx - decisionDelay - 7; i += 8 ) { + silk_SMULWW_small_b_8_neon( sLTP_Q15 + i, gain_adj_Q16_s32x2 ); + } + for( ; i < NSQ->sLTP_buf_idx - decisionDelay; i++ ) { + sLTP_Q15[ i ] = silk_SMULWW( gain_adj_Q16, sLTP_Q15[ i ] ); + } + } + + /* Scale scalar states */ + silk_SMULWW_small_b_4_neon( psDelDec->LF_AR_Q14, gain_adj_Q16_s32x2 ); + silk_SMULWW_small_b_4_neon( psDelDec->Diff_Q14, gain_adj_Q16_s32x2 ); + + /* Scale short-term prediction and shaping states */ + for( i = 0; i < NSQ_LPC_BUF_LENGTH; i++ ) { + silk_SMULWW_small_b_4_neon( psDelDec->sLPC_Q14[ i ], gain_adj_Q16_s32x2 ); + } + + for( i = 0; i < MAX_SHAPE_LPC_ORDER; i++ ) { + silk_SMULWW_small_b_4_neon( psDelDec->sAR2_Q14[ i ], gain_adj_Q16_s32x2 ); + } + + for( i = 0; i < DECISION_DELAY; i++ ) { + silk_SMULWW_small_b_4_neon( psDelDec->Pred_Q15[ i ], gain_adj_Q16_s32x2 ); + silk_SMULWW_small_b_4_neon( psDelDec->Shape_Q14[ i ], gain_adj_Q16_s32x2 ); + } + } else { + gain_adj_Q16_s32x2 = vdup_n_s32( silk_LSHIFT32( gain_adj_Q16 & 0x0000FFFF, 15 ) ); + gain_adj_Q16_s32x2 = vset_lane_s32( gain_adj_Q16 >> 16, gain_adj_Q16_s32x2, 1 ); + for( i = NSQ->sLTP_shp_buf_idx - psEncC->ltp_mem_length; i < NSQ->sLTP_shp_buf_idx - 7; i += 8 ) { + silk_SMULWW_8_neon( NSQ->sLTP_shp_Q14 + i, gain_adj_Q16_s32x2 ); + } + for( ; i < NSQ->sLTP_shp_buf_idx; i++ ) { + NSQ->sLTP_shp_Q14[ i ] = silk_SMULWW( gain_adj_Q16, NSQ->sLTP_shp_Q14[ i ] ); + } + + /* Scale long-term prediction state */ + if( signal_type == TYPE_VOICED && NSQ->rewhite_flag == 0 ) { + for( i = NSQ->sLTP_buf_idx - lag - LTP_ORDER / 2; i < NSQ->sLTP_buf_idx - decisionDelay - 7; i += 8 ) { + silk_SMULWW_8_neon( sLTP_Q15 + i, gain_adj_Q16_s32x2 ); + } + for( ; i < NSQ->sLTP_buf_idx - decisionDelay; i++ ) { + sLTP_Q15[ i ] = silk_SMULWW( gain_adj_Q16, sLTP_Q15[ i ] ); + } + } + + /* Scale scalar states */ + silk_SMULWW_4_neon( psDelDec->LF_AR_Q14, gain_adj_Q16_s32x2 ); + silk_SMULWW_4_neon( psDelDec->Diff_Q14, gain_adj_Q16_s32x2 ); + + /* Scale short-term prediction and shaping states */ + for( i = 0; i < NSQ_LPC_BUF_LENGTH; i++ ) { + silk_SMULWW_4_neon( psDelDec->sLPC_Q14[ i ], gain_adj_Q16_s32x2 ); + } + + for( i = 0; i < MAX_SHAPE_LPC_ORDER; i++ ) { + silk_SMULWW_4_neon( psDelDec->sAR2_Q14[ i ], gain_adj_Q16_s32x2 ); + } + + for( i = 0; i < DECISION_DELAY; i++ ) { + silk_SMULWW_4_neon( psDelDec->Pred_Q15[ i ], gain_adj_Q16_s32x2 ); + silk_SMULWW_4_neon( psDelDec->Shape_Q14[ i ], gain_adj_Q16_s32x2 ); + } + } + + /* Save inverse gain */ + NSQ->prev_gain_Q16 = Gains_Q16[ subfr ]; + } +} diff --git a/libs/libopus/silk/arm/NSQ_neon.h b/libs/libopus/silk/arm/NSQ_neon.h index 77c946af8..b31d9442d 100644 --- a/libs/libopus/silk/arm/NSQ_neon.h +++ b/libs/libopus/silk/arm/NSQ_neon.h @@ -28,30 +28,31 @@ POSSIBILITY OF SUCH DAMAGE. 
#define SILK_NSQ_NEON_H #include "cpu_support.h" +#include "SigProc_FIX.h" #undef silk_short_prediction_create_arch_coef /* For vectorized calc, reverse a_Q12 coefs, convert to 32-bit, and shift for vqdmulhq_s32. */ static OPUS_INLINE void silk_short_prediction_create_arch_coef_neon(opus_int32 *out, const opus_int16 *in, opus_int order) { - out[15] = in[0] << 15; - out[14] = in[1] << 15; - out[13] = in[2] << 15; - out[12] = in[3] << 15; - out[11] = in[4] << 15; - out[10] = in[5] << 15; - out[9] = in[6] << 15; - out[8] = in[7] << 15; - out[7] = in[8] << 15; - out[6] = in[9] << 15; + out[15] = silk_LSHIFT32(in[0], 15); + out[14] = silk_LSHIFT32(in[1], 15); + out[13] = silk_LSHIFT32(in[2], 15); + out[12] = silk_LSHIFT32(in[3], 15); + out[11] = silk_LSHIFT32(in[4], 15); + out[10] = silk_LSHIFT32(in[5], 15); + out[9] = silk_LSHIFT32(in[6], 15); + out[8] = silk_LSHIFT32(in[7], 15); + out[7] = silk_LSHIFT32(in[8], 15); + out[6] = silk_LSHIFT32(in[9], 15); if (order == 16) { - out[5] = in[10] << 15; - out[4] = in[11] << 15; - out[3] = in[12] << 15; - out[2] = in[13] << 15; - out[1] = in[14] << 15; - out[0] = in[15] << 15; + out[5] = silk_LSHIFT32(in[10], 15); + out[4] = silk_LSHIFT32(in[11], 15); + out[3] = silk_LSHIFT32(in[12], 15); + out[2] = silk_LSHIFT32(in[13], 15); + out[1] = silk_LSHIFT32(in[14], 15); + out[0] = silk_LSHIFT32(in[15], 15); } else { diff --git a/libs/libopus/silk/arm/arm_silk_map.c b/libs/libopus/silk/arm/arm_silk_map.c index 9bd86a7b2..0b9bfec2c 100644 --- a/libs/libopus/silk/arm/arm_silk_map.c +++ b/libs/libopus/silk/arm/arm_silk_map.c @@ -28,13 +28,62 @@ POSSIBILITY OF SUCH DAMAGE. # include "config.h" #endif +#include "main_FIX.h" #include "NSQ.h" +#include "SigProc_FIX.h" #if defined(OPUS_HAVE_RTCD) # if (defined(OPUS_ARM_MAY_HAVE_NEON_INTR) && \ !defined(OPUS_ARM_PRESUME_NEON_INTR)) +void (*const SILK_BIQUAD_ALT_STRIDE2_IMPL[OPUS_ARCHMASK + 1])( + const opus_int16 *in, /* I input signal */ + const opus_int32 *B_Q28, /* I MA coefficients [3] */ + const opus_int32 *A_Q28, /* I AR coefficients [2] */ + opus_int32 *S, /* I/O State vector [4] */ + opus_int16 *out, /* O output signal */ + const opus_int32 len /* I signal length (must be even) */ +) = { + silk_biquad_alt_stride2_c, /* ARMv4 */ + silk_biquad_alt_stride2_c, /* EDSP */ + silk_biquad_alt_stride2_c, /* Media */ + silk_biquad_alt_stride2_neon, /* Neon */ +}; + +opus_int32 (*const SILK_LPC_INVERSE_PRED_GAIN_IMPL[OPUS_ARCHMASK + 1])( /* O Returns inverse prediction gain in energy domain, Q30 */ + const opus_int16 *A_Q12, /* I Prediction coefficients, Q12 [order] */ + const opus_int order /* I Prediction order */ +) = { + silk_LPC_inverse_pred_gain_c, /* ARMv4 */ + silk_LPC_inverse_pred_gain_c, /* EDSP */ + silk_LPC_inverse_pred_gain_c, /* Media */ + silk_LPC_inverse_pred_gain_neon, /* Neon */ +}; + +void (*const SILK_NSQ_DEL_DEC_IMPL[OPUS_ARCHMASK + 1])( + const silk_encoder_state *psEncC, /* I Encoder State */ + silk_nsq_state *NSQ, /* I/O NSQ state */ + SideInfoIndices *psIndices, /* I/O Quantization Indices */ + const opus_int16 x16[], /* I Input */ + opus_int8 pulses[], /* O Quantized pulse signal */ + const opus_int16 PredCoef_Q12[ 2 * MAX_LPC_ORDER ], /* I Short term prediction coefs */ + const opus_int16 LTPCoef_Q14[ LTP_ORDER * MAX_NB_SUBFR ], /* I Long term prediction coefs */ + const opus_int16 AR_Q13[ MAX_NB_SUBFR * MAX_SHAPE_LPC_ORDER ], /* I Noise shaping coefs */ + const opus_int HarmShapeGain_Q14[ MAX_NB_SUBFR ], /* I Long term shaping coefs */ + const opus_int Tilt_Q14[ MAX_NB_SUBFR ], /* I Spectral 
tilt */ + const opus_int32 LF_shp_Q14[ MAX_NB_SUBFR ], /* I Low frequency shaping coefs */ + const opus_int32 Gains_Q16[ MAX_NB_SUBFR ], /* I Quantization step sizes */ + const opus_int pitchL[ MAX_NB_SUBFR ], /* I Pitch lags */ + const opus_int Lambda_Q10, /* I Rate/distortion tradeoff */ + const opus_int LTP_scale_Q14 /* I LTP state scaling */ +) = { + silk_NSQ_del_dec_c, /* ARMv4 */ + silk_NSQ_del_dec_c, /* EDSP */ + silk_NSQ_del_dec_c, /* Media */ + silk_NSQ_del_dec_neon, /* Neon */ +}; + /*There is no table for silk_noise_shape_quantizer_short_prediction because the NEON version takes different parameters than the C version. Instead RTCD is done via if statements at the call sites. @@ -52,4 +101,23 @@ opus_int32 # endif +# if defined(FIXED_POINT) && \ + defined(OPUS_ARM_MAY_HAVE_NEON_INTR) && !defined(OPUS_ARM_PRESUME_NEON_INTR) + +void (*const SILK_WARPED_AUTOCORRELATION_FIX_IMPL[OPUS_ARCHMASK + 1])( + opus_int32 *corr, /* O Result [order + 1] */ + opus_int *scale, /* O Scaling of the correlation vector */ + const opus_int16 *input, /* I Input data to correlate */ + const opus_int warping_Q16, /* I Warping coefficient */ + const opus_int length, /* I Length of input */ + const opus_int order /* I Correlation order (even) */ +) = { + silk_warped_autocorrelation_FIX_c, /* ARMv4 */ + silk_warped_autocorrelation_FIX_c, /* EDSP */ + silk_warped_autocorrelation_FIX_c, /* Media */ + silk_warped_autocorrelation_FIX_neon, /* Neon */ +}; + +# endif + #endif /* OPUS_HAVE_RTCD */ diff --git a/libs/libopus/silk/arm/biquad_alt_arm.h b/libs/libopus/silk/arm/biquad_alt_arm.h new file mode 100644 index 000000000..66ea9f43d --- /dev/null +++ b/libs/libopus/silk/arm/biquad_alt_arm.h @@ -0,0 +1,68 @@ +/*********************************************************************** +Copyright (c) 2017 Google Inc. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: +- Redistributions of source code must retain the above copyright notice, +this list of conditions and the following disclaimer. +- Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. +- Neither the name of Internet Society, IETF or IETF Trust, nor the +names of specific contributors, may be used to endorse or promote +products derived from this software without specific prior written +permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. 
+***********************************************************************/ + +#ifndef SILK_BIQUAD_ALT_ARM_H +# define SILK_BIQUAD_ALT_ARM_H + +# include "celt/arm/armcpu.h" + +# if defined(OPUS_ARM_MAY_HAVE_NEON_INTR) +void silk_biquad_alt_stride2_neon( + const opus_int16 *in, /* I input signal */ + const opus_int32 *B_Q28, /* I MA coefficients [3] */ + const opus_int32 *A_Q28, /* I AR coefficients [2] */ + opus_int32 *S, /* I/O State vector [4] */ + opus_int16 *out, /* O output signal */ + const opus_int32 len /* I signal length (must be even) */ +); + +# if !defined(OPUS_HAVE_RTCD) && defined(OPUS_ARM_PRESUME_NEON) +# define OVERRIDE_silk_biquad_alt_stride2 (1) +# define silk_biquad_alt_stride2(in, B_Q28, A_Q28, S, out, len, arch) ((void)(arch), PRESUME_NEON(silk_biquad_alt_stride2)(in, B_Q28, A_Q28, S, out, len)) +# endif +# endif + +# if !defined(OVERRIDE_silk_biquad_alt_stride2) +/*Is run-time CPU detection enabled on this platform?*/ +# if defined(OPUS_HAVE_RTCD) && (defined(OPUS_ARM_MAY_HAVE_NEON_INTR) && !defined(OPUS_ARM_PRESUME_NEON_INTR)) +extern void (*const SILK_BIQUAD_ALT_STRIDE2_IMPL[OPUS_ARCHMASK+1])( + const opus_int16 *in, /* I input signal */ + const opus_int32 *B_Q28, /* I MA coefficients [3] */ + const opus_int32 *A_Q28, /* I AR coefficients [2] */ + opus_int32 *S, /* I/O State vector [4] */ + opus_int16 *out, /* O output signal */ + const opus_int32 len /* I signal length (must be even) */ + ); +# define OVERRIDE_silk_biquad_alt_stride2 (1) +# define silk_biquad_alt_stride2(in, B_Q28, A_Q28, S, out, len, arch) ((*SILK_BIQUAD_ALT_STRIDE2_IMPL[(arch)&OPUS_ARCHMASK])(in, B_Q28, A_Q28, S, out, len)) +# elif defined(OPUS_ARM_PRESUME_NEON_INTR) +# define OVERRIDE_silk_biquad_alt_stride2 (1) +# define silk_biquad_alt_stride2(in, B_Q28, A_Q28, S, out, len, arch) ((void)(arch), silk_biquad_alt_stride2_neon(in, B_Q28, A_Q28, S, out, len)) +# endif +# endif + +#endif /* end SILK_BIQUAD_ALT_ARM_H */ diff --git a/libs/libopus/silk/arm/biquad_alt_neon_intr.c b/libs/libopus/silk/arm/biquad_alt_neon_intr.c new file mode 100644 index 000000000..971573318 --- /dev/null +++ b/libs/libopus/silk/arm/biquad_alt_neon_intr.c @@ -0,0 +1,156 @@ +/*********************************************************************** +Copyright (c) 2017 Google Inc. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: +- Redistributions of source code must retain the above copyright notice, +this list of conditions and the following disclaimer. +- Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. +- Neither the name of Internet Society, IETF or IETF Trust, nor the +names of specific contributors, may be used to endorse or promote +products derived from this software without specific prior written +permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. +***********************************************************************/ + +#ifdef HAVE_CONFIG_H +#include "config.h" +#endif + +#include <arm_neon.h> +#ifdef OPUS_CHECK_ASM +# include <string.h> +# include "stack_alloc.h" +#endif +#include "SigProc_FIX.h" + +static inline void silk_biquad_alt_stride2_kernel( const int32x4_t A_L_s32x4, const int32x4_t A_U_s32x4, const int32x4_t B_Q28_s32x4, const int32x2_t t_s32x2, const int32x4_t in_s32x4, int32x4_t *S_s32x4, int32x2_t *out32_Q14_s32x2 ) +{ + int32x4_t t_s32x4, out32_Q14_s32x4; + + *out32_Q14_s32x2 = vadd_s32( vget_low_s32( *S_s32x4 ), t_s32x2 ); /* silk_SMLAWB( S{0,1}, B_Q28[ 0 ], in{0,1} ) */ + *S_s32x4 = vcombine_s32( vget_high_s32( *S_s32x4 ), vdup_n_s32( 0 ) ); /* S{0,1} = S{2,3}; S{2,3} = 0; */ + *out32_Q14_s32x2 = vshl_n_s32( *out32_Q14_s32x2, 2 ); /* out32_Q14_{0,1} = silk_LSHIFT( silk_SMLAWB( S{0,1}, B_Q28[ 0 ], in{0,1} ), 2 ); */ + out32_Q14_s32x4 = vcombine_s32( *out32_Q14_s32x2, *out32_Q14_s32x2 ); /* out32_Q14_{0,1,0,1} */ + t_s32x4 = vqdmulhq_s32( out32_Q14_s32x4, A_L_s32x4 ); /* silk_SMULWB( out32_Q14_{0,1,0,1}, A{0,0,1,1}_L_Q28 ) */ + *S_s32x4 = vrsraq_n_s32( *S_s32x4, t_s32x4, 14 ); /* S{0,1} = S{2,3} + silk_RSHIFT_ROUND(); S{2,3} = silk_RSHIFT_ROUND(); */ + t_s32x4 = vqdmulhq_s32( out32_Q14_s32x4, A_U_s32x4 ); /* silk_SMULWB( out32_Q14_{0,1,0,1}, A{0,0,1,1}_U_Q28 ) */ + *S_s32x4 = vaddq_s32( *S_s32x4, t_s32x4 ); /* S0 = silk_SMLAWB( S{0,1,2,3}, out32_Q14_{0,1,0,1}, A{0,0,1,1}_U_Q28 ); */ + t_s32x4 = vqdmulhq_s32( in_s32x4, B_Q28_s32x4 ); /* silk_SMULWB( B_Q28[ {1,1,2,2} ], in{0,1,0,1} ) */ + *S_s32x4 = vaddq_s32( *S_s32x4, t_s32x4 ); /* S0 = silk_SMLAWB( S0, B_Q28[ {1,1,2,2} ], in{0,1,0,1} ); */ +} + +void silk_biquad_alt_stride2_neon( + const opus_int16 *in, /* I input signal */ + const opus_int32 *B_Q28, /* I MA coefficients [3] */ + const opus_int32 *A_Q28, /* I AR coefficients [2] */ + opus_int32 *S, /* I/O State vector [4] */ + opus_int16 *out, /* O output signal */ + const opus_int32 len /* I signal length (must be even) */ +) +{ + /* DIRECT FORM II TRANSPOSED (uses 2 element state vector) */ + opus_int k = 0; + const int32x2_t offset_s32x2 = vdup_n_s32( (1<<14) - 1 ); + const int32x4_t offset_s32x4 = vcombine_s32( offset_s32x2, offset_s32x2 ); + int16x4_t in_s16x4 = vdup_n_s16( 0 ); + int16x4_t out_s16x4; + int32x2_t A_Q28_s32x2, A_L_s32x2, A_U_s32x2, B_Q28_s32x2, t_s32x2; + int32x4_t A_L_s32x4, A_U_s32x4, B_Q28_s32x4, S_s32x4, out32_Q14_s32x4; + int32x2x2_t t0_s32x2x2, t1_s32x2x2, t2_s32x2x2, S_s32x2x2; + +#ifdef OPUS_CHECK_ASM + opus_int32 S_c[ 4 ]; + VARDECL( opus_int16, out_c ); + SAVE_STACK; + ALLOC( out_c, 2 * len, opus_int16 ); + + silk_memcpy( &S_c, S, sizeof( S_c ) ); + silk_biquad_alt_stride2_c( in, B_Q28, A_Q28, S_c, out_c, len ); +#endif + + /* Negate A_Q28 values and split in two parts */ + A_Q28_s32x2 = vld1_s32( A_Q28 ); + A_Q28_s32x2 = vneg_s32( A_Q28_s32x2 ); + A_L_s32x2 = vshl_n_s32( A_Q28_s32x2, 18 ); /* ( -A_Q28[] & 0x00003FFF ) << 18 */ + A_L_s32x2 = vreinterpret_s32_u32( vshr_n_u32( vreinterpret_u32_s32( 
A_L_s32x2 ), 3 ) ); /* ( -A_Q28[] & 0x00003FFF ) << 15 */ + A_U_s32x2 = vshr_n_s32( A_Q28_s32x2, 14 ); /* silk_RSHIFT( -A_Q28[], 14 ) */ + A_U_s32x2 = vshl_n_s32( A_U_s32x2, 16 ); /* silk_RSHIFT( -A_Q28[], 14 ) << 16 (Clip two leading bits to conform to C function.) */ + A_U_s32x2 = vshr_n_s32( A_U_s32x2, 1 ); /* silk_RSHIFT( -A_Q28[], 14 ) << 15 */ + + B_Q28_s32x2 = vld1_s32( B_Q28 ); + t_s32x2 = vld1_s32( B_Q28 + 1 ); + t0_s32x2x2 = vzip_s32( A_L_s32x2, A_L_s32x2 ); + t1_s32x2x2 = vzip_s32( A_U_s32x2, A_U_s32x2 ); + t2_s32x2x2 = vzip_s32( t_s32x2, t_s32x2 ); + A_L_s32x4 = vcombine_s32( t0_s32x2x2.val[ 0 ], t0_s32x2x2.val[ 1 ] ); /* A{0,0,1,1}_L_Q28 */ + A_U_s32x4 = vcombine_s32( t1_s32x2x2.val[ 0 ], t1_s32x2x2.val[ 1 ] ); /* A{0,0,1,1}_U_Q28 */ + B_Q28_s32x4 = vcombine_s32( t2_s32x2x2.val[ 0 ], t2_s32x2x2.val[ 1 ] ); /* B_Q28[ {1,1,2,2} ] */ + S_s32x4 = vld1q_s32( S ); /* S0 = S[ 0 ]; S3 = S[ 3 ]; */ + S_s32x2x2 = vtrn_s32( vget_low_s32( S_s32x4 ), vget_high_s32( S_s32x4 ) ); /* S2 = S[ 1 ]; S1 = S[ 2 ]; */ + S_s32x4 = vcombine_s32( S_s32x2x2.val[ 0 ], S_s32x2x2.val[ 1 ] ); + + for( ; k < len - 1; k += 2 ) { + int32x4_t in_s32x4[ 2 ], t_s32x4; + int32x2_t out32_Q14_s32x2[ 2 ]; + + /* S[ 2 * i + 0 ], S[ 2 * i + 1 ], S[ 2 * i + 2 ], S[ 2 * i + 3 ]: Q12 */ + in_s16x4 = vld1_s16( &in[ 2 * k ] ); /* in{0,1,2,3} = in[ 2 * k + {0,1,2,3} ]; */ + in_s32x4[ 0 ] = vshll_n_s16( in_s16x4, 15 ); /* in{0,1,2,3} << 15 */ + t_s32x4 = vqdmulhq_lane_s32( in_s32x4[ 0 ], B_Q28_s32x2, 0 ); /* silk_SMULWB( B_Q28[ 0 ], in{0,1,2,3} ) */ + in_s32x4[ 1 ] = vcombine_s32( vget_high_s32( in_s32x4[ 0 ] ), vget_high_s32( in_s32x4[ 0 ] ) ); /* in{2,3,2,3} << 15 */ + in_s32x4[ 0 ] = vcombine_s32( vget_low_s32 ( in_s32x4[ 0 ] ), vget_low_s32 ( in_s32x4[ 0 ] ) ); /* in{0,1,0,1} << 15 */ + silk_biquad_alt_stride2_kernel( A_L_s32x4, A_U_s32x4, B_Q28_s32x4, vget_low_s32 ( t_s32x4 ), in_s32x4[ 0 ], &S_s32x4, &out32_Q14_s32x2[ 0 ] ); + silk_biquad_alt_stride2_kernel( A_L_s32x4, A_U_s32x4, B_Q28_s32x4, vget_high_s32( t_s32x4 ), in_s32x4[ 1 ], &S_s32x4, &out32_Q14_s32x2[ 1 ] ); + + /* Scale back to Q0 and saturate */ + out32_Q14_s32x4 = vcombine_s32( out32_Q14_s32x2[ 0 ], out32_Q14_s32x2[ 1 ] ); /* out32_Q14_{0,1,2,3} */ + out32_Q14_s32x4 = vaddq_s32( out32_Q14_s32x4, offset_s32x4 ); /* out32_Q14_{0,1,2,3} + (1<<14) - 1 */ + out_s16x4 = vqshrn_n_s32( out32_Q14_s32x4, 14 ); /* (opus_int16)silk_SAT16( silk_RSHIFT( out32_Q14_{0,1,2,3} + (1<<14) - 1, 14 ) ) */ + vst1_s16( &out[ 2 * k ], out_s16x4 ); /* out[ 2 * k + {0,1,2,3} ] = (opus_int16)silk_SAT16( silk_RSHIFT( out32_Q14_{0,1,2,3} + (1<<14) - 1, 14 ) ); */ + } + + /* Process leftover. 
*/ + if( k < len ) { + int32x4_t in_s32x4; + int32x2_t out32_Q14_s32x2; + + /* S[ 2 * i + 0 ], S[ 2 * i + 1 ]: Q12 */ + in_s16x4 = vld1_lane_s16( &in[ 2 * k + 0 ], in_s16x4, 0 ); /* in{0,1} = in[ 2 * k + {0,1} ]; */ + in_s16x4 = vld1_lane_s16( &in[ 2 * k + 1 ], in_s16x4, 1 ); /* in{0,1} = in[ 2 * k + {0,1} ]; */ + in_s32x4 = vshll_n_s16( in_s16x4, 15 ); /* in{0,1} << 15 */ + t_s32x2 = vqdmulh_lane_s32( vget_low_s32( in_s32x4 ), B_Q28_s32x2, 0 ); /* silk_SMULWB( B_Q28[ 0 ], in{0,1} ) */ + in_s32x4 = vcombine_s32( vget_low_s32( in_s32x4 ), vget_low_s32( in_s32x4 ) ); /* in{0,1,0,1} << 15 */ + silk_biquad_alt_stride2_kernel( A_L_s32x4, A_U_s32x4, B_Q28_s32x4, t_s32x2, in_s32x4, &S_s32x4, &out32_Q14_s32x2 ); + + /* Scale back to Q0 and saturate */ + out32_Q14_s32x2 = vadd_s32( out32_Q14_s32x2, offset_s32x2 ); /* out32_Q14_{0,1} + (1<<14) - 1 */ + out32_Q14_s32x4 = vcombine_s32( out32_Q14_s32x2, out32_Q14_s32x2 ); /* out32_Q14_{0,1,0,1} + (1<<14) - 1 */ + out_s16x4 = vqshrn_n_s32( out32_Q14_s32x4, 14 ); /* (opus_int16)silk_SAT16( silk_RSHIFT( out32_Q14_{0,1,0,1} + (1<<14) - 1, 14 ) ) */ + vst1_lane_s16( &out[ 2 * k + 0 ], out_s16x4, 0 ); /* out[ 2 * k + 0 ] = (opus_int16)silk_SAT16( silk_RSHIFT( out32_Q14_0 + (1<<14) - 1, 14 ) ); */ + vst1_lane_s16( &out[ 2 * k + 1 ], out_s16x4, 1 ); /* out[ 2 * k + 1 ] = (opus_int16)silk_SAT16( silk_RSHIFT( out32_Q14_1 + (1<<14) - 1, 14 ) ); */ + } + + vst1q_lane_s32( &S[ 0 ], S_s32x4, 0 ); /* S[ 0 ] = S0; */ + vst1q_lane_s32( &S[ 1 ], S_s32x4, 2 ); /* S[ 1 ] = S2; */ + vst1q_lane_s32( &S[ 2 ], S_s32x4, 1 ); /* S[ 2 ] = S1; */ + vst1q_lane_s32( &S[ 3 ], S_s32x4, 3 ); /* S[ 3 ] = S3; */ + +#ifdef OPUS_CHECK_ASM + silk_assert( !memcmp( S_c, S, sizeof( S_c ) ) ); + silk_assert( !memcmp( out_c, out, 2 * len * sizeof( opus_int16 ) ) ); + RESTORE_STACK; +#endif +} diff --git a/libs/libopus/silk/arm/macros_armv4.h b/libs/libopus/silk/arm/macros_armv4.h index 3f30e9728..877eb18dd 100644 --- a/libs/libopus/silk/arm/macros_armv4.h +++ b/libs/libopus/silk/arm/macros_armv4.h @@ -28,6 +28,11 @@ POSSIBILITY OF SUCH DAMAGE. #ifndef SILK_MACROS_ARMv4_H #define SILK_MACROS_ARMv4_H +/* This macro only avoids the undefined behaviour from a left shift of + a negative value. It should only be used in macros that can't include + SigProc_FIX.h. In other cases, use silk_LSHIFT32(). 
*/ +#define SAFE_SHL(a,b) ((opus_int32)((opus_uint32)(a) << (b))) + /* (a32 * (opus_int32)((opus_int16)(b32))) >> 16 output have to be 32bit int */ #undef silk_SMULWB static OPUS_INLINE opus_int32 silk_SMULWB_armv4(opus_int32 a, opus_int16 b) @@ -38,7 +43,7 @@ static OPUS_INLINE opus_int32 silk_SMULWB_armv4(opus_int32 a, opus_int16 b) "#silk_SMULWB\n\t" "smull %0, %1, %2, %3\n\t" : "=&r"(rd_lo), "=&r"(rd_hi) - : "%r"(a), "r"(b<<16) + : "%r"(a), "r"(SAFE_SHL(b,16)) ); return rd_hi; } @@ -80,7 +85,7 @@ static OPUS_INLINE opus_int32 silk_SMULWW_armv4(opus_int32 a, opus_int32 b) : "=&r"(rd_lo), "=&r"(rd_hi) : "%r"(a), "r"(b) ); - return (rd_hi<<16)+(rd_lo>>16); + return SAFE_SHL(rd_hi,16)+(rd_lo>>16); } #define silk_SMULWW(a, b) (silk_SMULWW_armv4(a, b)) @@ -96,8 +101,10 @@ static OPUS_INLINE opus_int32 silk_SMLAWW_armv4(opus_int32 a, opus_int32 b, : "=&r"(rd_lo), "=&r"(rd_hi) : "%r"(b), "r"(c) ); - return a+(rd_hi<<16)+(rd_lo>>16); + return a+SAFE_SHL(rd_hi,16)+(rd_lo>>16); } #define silk_SMLAWW(a, b, c) (silk_SMLAWW_armv4(a, b, c)) +#undef SAFE_SHL + #endif /* SILK_MACROS_ARMv4_H */ diff --git a/libs/libopus/silk/arm/macros_armv5e.h b/libs/libopus/silk/arm/macros_armv5e.h index aad4117e4..b14ec65dd 100644 --- a/libs/libopus/silk/arm/macros_armv5e.h +++ b/libs/libopus/silk/arm/macros_armv5e.h @@ -29,6 +29,11 @@ POSSIBILITY OF SUCH DAMAGE. #ifndef SILK_MACROS_ARMv5E_H #define SILK_MACROS_ARMv5E_H +/* This macro only avoids the undefined behaviour from a left shift of + a negative value. It should only be used in macros that can't include + SigProc_FIX.h. In other cases, use silk_LSHIFT32(). */ +#define SAFE_SHL(a,b) ((opus_int32)((opus_uint32)(a) << (b))) + /* (a32 * (opus_int32)((opus_int16)(b32))) >> 16 output have to be 32bit int */ #undef silk_SMULWB static OPUS_INLINE opus_int32 silk_SMULWB_armv5e(opus_int32 a, opus_int16 b) @@ -190,7 +195,7 @@ static OPUS_INLINE opus_int32 silk_CLZ16_armv5(opus_int16 in16) "#silk_CLZ16\n\t" "clz %0, %1;\n" : "=r"(res) - : "r"(in16<<16|0x8000) + : "r"(SAFE_SHL(in16,16)|0x8000) ); return res; } @@ -210,4 +215,6 @@ static OPUS_INLINE opus_int32 silk_CLZ32_armv5(opus_int32 in32) } #define silk_CLZ32(in32) (silk_CLZ32_armv5(in32)) +#undef SAFE_SHL + #endif /* SILK_MACROS_ARMv5E_H */ diff --git a/libs/libopus/silk/biquad_alt.c b/libs/libopus/silk/biquad_alt.c index d55f5ee92..54566a43c 100644 --- a/libs/libopus/silk/biquad_alt.c +++ b/libs/libopus/silk/biquad_alt.c @@ -39,14 +39,13 @@ POSSIBILITY OF SUCH DAMAGE. 
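The SAFE_SHL helper introduced for the ARMv4/ARMv5E macros above exists only to avoid the undefined behaviour of left-shifting a negative signed value: the operand is shifted in unsigned arithmetic and then converted back. A minimal sketch of the same pattern, using standard <stdint.h> types in place of opus_int32/opus_uint32 and a made-up function name:

    #include <stdint.h>

    /* b << 16 is undefined behaviour in C when b is negative.              */
    /* SAFE_SHL's pattern: perform the shift on the unsigned representation */
    /* and convert back; the shift itself is then well defined, and the     */
    /* result has the expected bit pattern on two's-complement targets.     */
    static int32_t safe_shl_sketch(int32_t a, int shift)
    {
        return (int32_t)((uint32_t)a << shift);
    }

This is why operands such as b<<16, rd_hi<<16 and in16<<16 are wrapped in SAFE_SHL in the hunks above, and why each header #undef's the macro again at the end.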
#include "SigProc_FIX.h" /* Second order ARMA filter, alternative implementation */ -void silk_biquad_alt( +void silk_biquad_alt_stride1( const opus_int16 *in, /* I input signal */ const opus_int32 *B_Q28, /* I MA coefficients [3] */ const opus_int32 *A_Q28, /* I AR coefficients [2] */ opus_int32 *S, /* I/O State vector [2] */ opus_int16 *out, /* O output signal */ - const opus_int32 len, /* I signal length (must be even) */ - opus_int stride /* I Operate on interleaved signal if > 1 */ + const opus_int32 len /* I signal length (must be even) */ ) { /* DIRECT FORM II TRANSPOSED (uses 2 element state vector) */ @@ -61,7 +60,7 @@ void silk_biquad_alt( for( k = 0; k < len; k++ ) { /* S[ 0 ], S[ 1 ]: Q12 */ - inval = in[ k * stride ]; + inval = in[ k ]; out32_Q14 = silk_LSHIFT( silk_SMLAWB( S[ 0 ], B_Q28[ 0 ], inval ), 2 ); S[ 0 ] = S[1] + silk_RSHIFT_ROUND( silk_SMULWB( out32_Q14, A0_L_Q28 ), 14 ); @@ -73,6 +72,50 @@ void silk_biquad_alt( S[ 1 ] = silk_SMLAWB( S[ 1 ], B_Q28[ 2 ], inval ); /* Scale back to Q0 and saturate */ - out[ k * stride ] = (opus_int16)silk_SAT16( silk_RSHIFT( out32_Q14 + (1<<14) - 1, 14 ) ); + out[ k ] = (opus_int16)silk_SAT16( silk_RSHIFT( out32_Q14 + (1<<14) - 1, 14 ) ); + } +} + +void silk_biquad_alt_stride2_c( + const opus_int16 *in, /* I input signal */ + const opus_int32 *B_Q28, /* I MA coefficients [3] */ + const opus_int32 *A_Q28, /* I AR coefficients [2] */ + opus_int32 *S, /* I/O State vector [4] */ + opus_int16 *out, /* O output signal */ + const opus_int32 len /* I signal length (must be even) */ +) +{ + /* DIRECT FORM II TRANSPOSED (uses 2 element state vector) */ + opus_int k; + opus_int32 A0_U_Q28, A0_L_Q28, A1_U_Q28, A1_L_Q28, out32_Q14[ 2 ]; + + /* Negate A_Q28 values and split in two parts */ + A0_L_Q28 = ( -A_Q28[ 0 ] ) & 0x00003FFF; /* lower part */ + A0_U_Q28 = silk_RSHIFT( -A_Q28[ 0 ], 14 ); /* upper part */ + A1_L_Q28 = ( -A_Q28[ 1 ] ) & 0x00003FFF; /* lower part */ + A1_U_Q28 = silk_RSHIFT( -A_Q28[ 1 ], 14 ); /* upper part */ + + for( k = 0; k < len; k++ ) { + /* S[ 0 ], S[ 1 ], S[ 2 ], S[ 3 ]: Q12 */ + out32_Q14[ 0 ] = silk_LSHIFT( silk_SMLAWB( S[ 0 ], B_Q28[ 0 ], in[ 2 * k + 0 ] ), 2 ); + out32_Q14[ 1 ] = silk_LSHIFT( silk_SMLAWB( S[ 2 ], B_Q28[ 0 ], in[ 2 * k + 1 ] ), 2 ); + + S[ 0 ] = S[ 1 ] + silk_RSHIFT_ROUND( silk_SMULWB( out32_Q14[ 0 ], A0_L_Q28 ), 14 ); + S[ 2 ] = S[ 3 ] + silk_RSHIFT_ROUND( silk_SMULWB( out32_Q14[ 1 ], A0_L_Q28 ), 14 ); + S[ 0 ] = silk_SMLAWB( S[ 0 ], out32_Q14[ 0 ], A0_U_Q28 ); + S[ 2 ] = silk_SMLAWB( S[ 2 ], out32_Q14[ 1 ], A0_U_Q28 ); + S[ 0 ] = silk_SMLAWB( S[ 0 ], B_Q28[ 1 ], in[ 2 * k + 0 ] ); + S[ 2 ] = silk_SMLAWB( S[ 2 ], B_Q28[ 1 ], in[ 2 * k + 1 ] ); + + S[ 1 ] = silk_RSHIFT_ROUND( silk_SMULWB( out32_Q14[ 0 ], A1_L_Q28 ), 14 ); + S[ 3 ] = silk_RSHIFT_ROUND( silk_SMULWB( out32_Q14[ 1 ], A1_L_Q28 ), 14 ); + S[ 1 ] = silk_SMLAWB( S[ 1 ], out32_Q14[ 0 ], A1_U_Q28 ); + S[ 3 ] = silk_SMLAWB( S[ 3 ], out32_Q14[ 1 ], A1_U_Q28 ); + S[ 1 ] = silk_SMLAWB( S[ 1 ], B_Q28[ 2 ], in[ 2 * k + 0 ] ); + S[ 3 ] = silk_SMLAWB( S[ 3 ], B_Q28[ 2 ], in[ 2 * k + 1 ] ); + + /* Scale back to Q0 and saturate */ + out[ 2 * k + 0 ] = (opus_int16)silk_SAT16( silk_RSHIFT( out32_Q14[ 0 ] + (1<<14) - 1, 14 ) ); + out[ 2 * k + 1 ] = (opus_int16)silk_SAT16( silk_RSHIFT( out32_Q14[ 1 ] + (1<<14) - 1, 14 ) ); } } diff --git a/libs/libopus/silk/bwexpander.c b/libs/libopus/silk/bwexpander.c index 2eb445669..afa97907e 100644 --- a/libs/libopus/silk/bwexpander.c +++ b/libs/libopus/silk/bwexpander.c @@ -45,7 +45,7 @@ void silk_bwexpander( /* Bias in 
silk_SMULWB can lead to unstable filters */ for( i = 0; i < d - 1; i++ ) { ar[ i ] = (opus_int16)silk_RSHIFT_ROUND( silk_MUL( chirp_Q16, ar[ i ] ), 16 ); - chirp_Q16 += silk_RSHIFT_ROUND( silk_MUL( chirp_Q16, chirp_minus_one_Q16 ), 16 ); + chirp_Q16 += silk_RSHIFT_ROUND( silk_MUL( chirp_Q16, chirp_minus_one_Q16 ), 16 ); } ar[ d - 1 ] = (opus_int16)silk_RSHIFT_ROUND( silk_MUL( chirp_Q16, ar[ d - 1 ] ), 16 ); } diff --git a/libs/libopus/silk/bwexpander_32.c b/libs/libopus/silk/bwexpander_32.c index d0010f73d..0f32b9df1 100644 --- a/libs/libopus/silk/bwexpander_32.c +++ b/libs/libopus/silk/bwexpander_32.c @@ -31,7 +31,8 @@ POSSIBILITY OF SUCH DAMAGE. #include "SigProc_FIX.h" -/* Chirp (bandwidth expand) LP AR filter */ +/* Chirp (bandwidth expand) LP AR filter. + This logic is reused in _celt_lpc(). Any bug fixes should also be applied there. */ void silk_bwexpander_32( opus_int32 *ar, /* I/O AR filter to be expanded (without leading 1) */ const opus_int d, /* I Length of ar */ diff --git a/libs/libopus/silk/check_control_input.c b/libs/libopus/silk/check_control_input.c index b5de9ce48..739fb01f1 100644 --- a/libs/libopus/silk/check_control_input.c +++ b/libs/libopus/silk/check_control_input.c @@ -38,7 +38,7 @@ opus_int check_control_input( silk_EncControlStruct *encControl /* I Control structure */ ) { - silk_assert( encControl != NULL ); + celt_assert( encControl != NULL ); if( ( ( encControl->API_sampleRate != 8000 ) && ( encControl->API_sampleRate != 12000 ) && @@ -59,46 +59,46 @@ opus_int check_control_input( ( encControl->minInternalSampleRate > encControl->desiredInternalSampleRate ) || ( encControl->maxInternalSampleRate < encControl->desiredInternalSampleRate ) || ( encControl->minInternalSampleRate > encControl->maxInternalSampleRate ) ) { - silk_assert( 0 ); + celt_assert( 0 ); return SILK_ENC_FS_NOT_SUPPORTED; } if( encControl->payloadSize_ms != 10 && encControl->payloadSize_ms != 20 && encControl->payloadSize_ms != 40 && encControl->payloadSize_ms != 60 ) { - silk_assert( 0 ); + celt_assert( 0 ); return SILK_ENC_PACKET_SIZE_NOT_SUPPORTED; } if( encControl->packetLossPercentage < 0 || encControl->packetLossPercentage > 100 ) { - silk_assert( 0 ); + celt_assert( 0 ); return SILK_ENC_INVALID_LOSS_RATE; } if( encControl->useDTX < 0 || encControl->useDTX > 1 ) { - silk_assert( 0 ); + celt_assert( 0 ); return SILK_ENC_INVALID_DTX_SETTING; } if( encControl->useCBR < 0 || encControl->useCBR > 1 ) { - silk_assert( 0 ); + celt_assert( 0 ); return SILK_ENC_INVALID_CBR_SETTING; } if( encControl->useInBandFEC < 0 || encControl->useInBandFEC > 1 ) { - silk_assert( 0 ); + celt_assert( 0 ); return SILK_ENC_INVALID_INBAND_FEC_SETTING; } if( encControl->nChannelsAPI < 1 || encControl->nChannelsAPI > ENCODER_NUM_CHANNELS ) { - silk_assert( 0 ); + celt_assert( 0 ); return SILK_ENC_INVALID_NUMBER_OF_CHANNELS_ERROR; } if( encControl->nChannelsInternal < 1 || encControl->nChannelsInternal > ENCODER_NUM_CHANNELS ) { - silk_assert( 0 ); + celt_assert( 0 ); return SILK_ENC_INVALID_NUMBER_OF_CHANNELS_ERROR; } if( encControl->nChannelsInternal > encControl->nChannelsAPI ) { - silk_assert( 0 ); + celt_assert( 0 ); return SILK_ENC_INVALID_NUMBER_OF_CHANNELS_ERROR; } if( encControl->complexity < 0 || encControl->complexity > 10 ) { - silk_assert( 0 ); + celt_assert( 0 ); return SILK_ENC_INVALID_COMPLEXITY_SETTING; } diff --git a/libs/libopus/silk/control.h b/libs/libopus/silk/control.h index 747e5426a..b76ec33cd 100644 --- a/libs/libopus/silk/control.h +++ b/libs/libopus/silk/control.h @@ -77,6 +77,9 @@ 
typedef struct { /* I: Flag to enable in-band Forward Error Correction (FEC); 0/1 */ opus_int useInBandFEC; + /* I: Flag to actually code in-band Forward Error Correction (FEC) in the current packet; 0/1 */ + opus_int LBRR_coded; + /* I: Flag to enable discontinuous transmission (DTX); 0/1 */ opus_int useDTX; @@ -110,6 +113,11 @@ typedef struct { /* O: Tells the Opus encoder we're ready to switch */ opus_int switchReady; + /* O: SILK Signal type */ + opus_int signalType; + + /* O: SILK offset (dithering) */ + opus_int offset; } silk_EncControlStruct; /**************************************************************************/ diff --git a/libs/libopus/silk/control_SNR.c b/libs/libopus/silk/control_SNR.c index cee87eb0d..9a6db2754 100644 --- a/libs/libopus/silk/control_SNR.c +++ b/libs/libopus/silk/control_SNR.c @@ -32,45 +32,82 @@ POSSIBILITY OF SUCH DAMAGE. #include "main.h" #include "tuning_parameters.h" +/* These tables hold SNR values divided by 21 (so they fit in 8 bits) + for different target bitrates spaced at 400 bps interval. The first + 10 values are omitted (0-4 kb/s) because they're all zeros. + These tables were obtained by running different SNRs through the + encoder and measuring the active bitrate. */ +static const unsigned char silk_TargetRate_NB_21[117 - 10] = { + 0, 15, 39, 52, 61, 68, + 74, 79, 84, 88, 92, 95, 99,102,105,108,111,114,117,119,122,124, + 126,129,131,133,135,137,139,142,143,145,147,149,151,153,155,157, + 158,160,162,163,165,167,168,170,171,173,174,176,177,179,180,182, + 183,185,186,187,189,190,192,193,194,196,197,199,200,201,203,204, + 205,207,208,209,211,212,213,215,216,217,219,220,221,223,224,225, + 227,228,230,231,232,234,235,236,238,239,241,242,243,245,246,248, + 249,250,252,253,255 +}; + +static const unsigned char silk_TargetRate_MB_21[165 - 10] = { + 0, 0, 28, 43, 52, 59, + 65, 70, 74, 78, 81, 85, 87, 90, 93, 95, 98,100,102,105,107,109, + 111,113,115,116,118,120,122,123,125,127,128,130,131,133,134,136, + 137,138,140,141,143,144,145,147,148,149,151,152,153,154,156,157, + 158,159,160,162,163,164,165,166,167,168,169,171,172,173,174,175, + 176,177,178,179,180,181,182,183,184,185,186,187,188,188,189,190, + 191,192,193,194,195,196,197,198,199,200,201,202,203,203,204,205, + 206,207,208,209,210,211,212,213,214,214,215,216,217,218,219,220, + 221,222,223,224,224,225,226,227,228,229,230,231,232,233,234,235, + 236,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250, + 251,252,253,254,255 +}; + +static const unsigned char silk_TargetRate_WB_21[201 - 10] = { + 0, 0, 0, 8, 29, 41, + 49, 56, 62, 66, 70, 74, 77, 80, 83, 86, 88, 91, 93, 95, 97, 99, + 101,103,105,107,108,110,112,113,115,116,118,119,121,122,123,125, + 126,127,129,130,131,132,134,135,136,137,138,140,141,142,143,144, + 145,146,147,148,149,150,151,152,153,154,156,157,158,159,159,160, + 161,162,163,164,165,166,167,168,169,170,171,171,172,173,174,175, + 176,177,177,178,179,180,181,181,182,183,184,185,185,186,187,188, + 189,189,190,191,192,192,193,194,195,195,196,197,198,198,199,200, + 200,201,202,203,203,204,205,206,206,207,208,209,209,210,211,211, + 212,213,214,214,215,216,216,217,218,219,219,220,221,221,222,223, + 224,224,225,226,226,227,228,229,229,230,231,232,232,233,234,234, + 235,236,237,237,238,239,240,240,241,242,243,243,244,245,246,246, + 247,248,249,249,250,251,252,253,255 +}; + /* Control SNR of redidual quantizer */ opus_int silk_control_SNR( silk_encoder_state *psEncC, /* I/O Pointer to Silk encoder state */ opus_int32 TargetRate_bps /* I Target max bitrate (bps) */ ) { - opus_int k, ret 
= SILK_NO_ERROR; - opus_int32 frac_Q6; - const opus_int32 *rateTable; - - /* Set bitrate/coding quality */ - TargetRate_bps = silk_LIMIT( TargetRate_bps, MIN_TARGET_RATE_BPS, MAX_TARGET_RATE_BPS ); - if( TargetRate_bps != psEncC->TargetRate_bps ) { - psEncC->TargetRate_bps = TargetRate_bps; - - /* If new TargetRate_bps, translate to SNR_dB value */ - if( psEncC->fs_kHz == 8 ) { - rateTable = silk_TargetRate_table_NB; - } else if( psEncC->fs_kHz == 12 ) { - rateTable = silk_TargetRate_table_MB; - } else { - rateTable = silk_TargetRate_table_WB; - } + int id; + int bound; + const unsigned char *snr_table; - /* Reduce bitrate for 10 ms modes in these calculations */ - if( psEncC->nb_subfr == 2 ) { - TargetRate_bps -= REDUCE_BITRATE_10_MS_BPS; - } - - /* Find bitrate interval in table and interpolate */ - for( k = 1; k < TARGET_RATE_TAB_SZ; k++ ) { - if( TargetRate_bps <= rateTable[ k ] ) { - frac_Q6 = silk_DIV32( silk_LSHIFT( TargetRate_bps - rateTable[ k - 1 ], 6 ), - rateTable[ k ] - rateTable[ k - 1 ] ); - psEncC->SNR_dB_Q7 = silk_LSHIFT( silk_SNR_table_Q1[ k - 1 ], 6 ) + silk_MUL( frac_Q6, silk_SNR_table_Q1[ k ] - silk_SNR_table_Q1[ k - 1 ] ); - break; - } - } + psEncC->TargetRate_bps = TargetRate_bps; + if( psEncC->nb_subfr == 2 ) { + TargetRate_bps -= 2000 + psEncC->fs_kHz/16; } - - return ret; + if( psEncC->fs_kHz == 8 ) { + bound = sizeof(silk_TargetRate_NB_21); + snr_table = silk_TargetRate_NB_21; + } else if( psEncC->fs_kHz == 12 ) { + bound = sizeof(silk_TargetRate_MB_21); + snr_table = silk_TargetRate_MB_21; + } else { + bound = sizeof(silk_TargetRate_WB_21); + snr_table = silk_TargetRate_WB_21; + } + id = (TargetRate_bps+200)/400; + id = silk_min(id - 10, bound-1); + if( id <= 0 ) { + psEncC->SNR_dB_Q7 = 0; + } else { + psEncC->SNR_dB_Q7 = snr_table[id]*21; + } + return SILK_NO_ERROR; } diff --git a/libs/libopus/silk/control_audio_bandwidth.c b/libs/libopus/silk/control_audio_bandwidth.c index 4f9bc5cbd..f6d22d839 100644 --- a/libs/libopus/silk/control_audio_bandwidth.c +++ b/libs/libopus/silk/control_audio_bandwidth.c @@ -39,9 +39,15 @@ opus_int silk_control_audio_bandwidth( ) { opus_int fs_kHz; + opus_int orig_kHz; opus_int32 fs_Hz; - fs_kHz = psEncC->fs_kHz; + orig_kHz = psEncC->fs_kHz; + /* Handle a bandwidth-switching reset where we need to be aware what the last sampling rate was. */ + if( orig_kHz == 0 ) { + orig_kHz = psEncC->sLP.saved_fs_kHz; + } + fs_kHz = orig_kHz; fs_Hz = silk_SMULBB( fs_kHz, 1000 ); if( fs_Hz == 0 ) { /* Encoder has just been initialized */ @@ -61,7 +67,7 @@ opus_int silk_control_audio_bandwidth( } if( psEncC->allow_bandwidth_switch || encControl->opusCanSwitch ) { /* Check if we should switch down */ - if( silk_SMULBB( psEncC->fs_kHz, 1000 ) > psEncC->desiredInternal_fs_Hz ) + if( silk_SMULBB( orig_kHz, 1000 ) > psEncC->desiredInternal_fs_Hz ) { /* Switch down */ if( psEncC->sLP.mode == 0 ) { @@ -76,7 +82,7 @@ opus_int silk_control_audio_bandwidth( psEncC->sLP.mode = 0; /* Switch to a lower sample frequency */ - fs_kHz = psEncC->fs_kHz == 16 ? 12 : 8; + fs_kHz = orig_kHz == 16 ? 12 : 8; } else { if( psEncC->sLP.transition_frame_no <= 0 ) { encControl->switchReady = 1; @@ -90,12 +96,12 @@ opus_int silk_control_audio_bandwidth( } else /* Check if we should switch up */ - if( silk_SMULBB( psEncC->fs_kHz, 1000 ) < psEncC->desiredInternal_fs_Hz ) + if( silk_SMULBB( orig_kHz, 1000 ) < psEncC->desiredInternal_fs_Hz ) { /* Switch up */ if( encControl->opusCanSwitch ) { /* Switch to a higher sample frequency */ - fs_kHz = psEncC->fs_kHz == 8 ? 
12 : 16; + fs_kHz = orig_kHz == 8 ? 12 : 16; /* New transition */ psEncC->sLP.transition_frame_no = 0; diff --git a/libs/libopus/silk/control_codec.c b/libs/libopus/silk/control_codec.c index 044eea3f2..52aa8fded 100644 --- a/libs/libopus/silk/control_codec.c +++ b/libs/libopus/silk/control_codec.c @@ -57,7 +57,7 @@ static opus_int silk_setup_complexity( static OPUS_INLINE opus_int silk_setup_LBRR( silk_encoder_state *psEncC, /* I/O */ - const opus_int32 TargetRate_bps /* I */ + const silk_EncControlStruct *encControl /* I */ ); @@ -65,7 +65,6 @@ static OPUS_INLINE opus_int silk_setup_LBRR( opus_int silk_control_encoder( silk_encoder_state_Fxx *psEnc, /* I/O Pointer to Silk encoder state */ silk_EncControlStruct *encControl, /* I Control structure */ - const opus_int32 TargetRate_bps, /* I Target max bitrate (bps) */ const opus_int allow_bw_switch, /* I Flag to allow switching audio bandwidth */ const opus_int channelNb, /* I Channel number */ const opus_int force_fs_kHz @@ -125,7 +124,7 @@ opus_int silk_control_encoder( /********************************************/ /* Set LBRR usage */ /********************************************/ - ret += silk_setup_LBRR( &psEnc->sCmn, TargetRate_bps ); + ret += silk_setup_LBRR( &psEnc->sCmn, encControl ); psEnc->sCmn.controlled_since_last_payload = 1; @@ -239,12 +238,11 @@ static opus_int silk_setup_fs( } /* Set internal sampling frequency */ - silk_assert( fs_kHz == 8 || fs_kHz == 12 || fs_kHz == 16 ); - silk_assert( psEnc->sCmn.nb_subfr == 2 || psEnc->sCmn.nb_subfr == 4 ); + celt_assert( fs_kHz == 8 || fs_kHz == 12 || fs_kHz == 16 ); + celt_assert( psEnc->sCmn.nb_subfr == 2 || psEnc->sCmn.nb_subfr == 4 ); if( psEnc->sCmn.fs_kHz != fs_kHz ) { /* reset part of the state */ silk_memset( &psEnc->sShape, 0, sizeof( psEnc->sShape ) ); - silk_memset( &psEnc->sPrefilt, 0, sizeof( psEnc->sPrefilt ) ); silk_memset( &psEnc->sCmn.sNSQ, 0, sizeof( psEnc->sCmn.sNSQ ) ); silk_memset( psEnc->sCmn.prev_NLSFq_Q15, 0, sizeof( psEnc->sCmn.prev_NLSFq_Q15 ) ); silk_memset( &psEnc->sCmn.sLP.In_LP_State, 0, sizeof( psEnc->sCmn.sLP.In_LP_State ) ); @@ -255,7 +253,6 @@ static opus_int silk_setup_fs( /* Initialize non-zero parameters */ psEnc->sCmn.prevLag = 100; psEnc->sCmn.first_frame_after_reset = 1; - psEnc->sPrefilt.lagPrev = 100; psEnc->sShape.LastGainIndex = 10; psEnc->sCmn.sNSQ.lagPrev = 100; psEnc->sCmn.sNSQ.prev_gain_Q16 = 65536; @@ -293,19 +290,16 @@ static opus_int silk_setup_fs( psEnc->sCmn.pitch_LPC_win_length = silk_SMULBB( FIND_PITCH_LPC_WIN_MS_2_SF, fs_kHz ); } if( psEnc->sCmn.fs_kHz == 16 ) { - psEnc->sCmn.mu_LTP_Q9 = SILK_FIX_CONST( MU_LTP_QUANT_WB, 9 ); psEnc->sCmn.pitch_lag_low_bits_iCDF = silk_uniform8_iCDF; } else if( psEnc->sCmn.fs_kHz == 12 ) { - psEnc->sCmn.mu_LTP_Q9 = SILK_FIX_CONST( MU_LTP_QUANT_MB, 9 ); psEnc->sCmn.pitch_lag_low_bits_iCDF = silk_uniform6_iCDF; } else { - psEnc->sCmn.mu_LTP_Q9 = SILK_FIX_CONST( MU_LTP_QUANT_NB, 9 ); psEnc->sCmn.pitch_lag_low_bits_iCDF = silk_uniform4_iCDF; } } /* Check that settings are valid */ - silk_assert( ( psEnc->sCmn.subfr_length * psEnc->sCmn.nb_subfr ) == psEnc->sCmn.frame_length ); + celt_assert( ( psEnc->sCmn.subfr_length * psEnc->sCmn.nb_subfr ) == psEnc->sCmn.frame_length ); return ret; } @@ -318,61 +312,76 @@ static opus_int silk_setup_complexity( opus_int ret = 0; /* Set encoding complexity */ - silk_assert( Complexity >= 0 && Complexity <= 10 ); - if( Complexity < 2 ) { + celt_assert( Complexity >= 0 && Complexity <= 10 ); + if( Complexity < 1 ) { psEncC->pitchEstimationComplexity = 
SILK_PE_MIN_COMPLEX; psEncC->pitchEstimationThreshold_Q16 = SILK_FIX_CONST( 0.8, 16 ); psEncC->pitchEstimationLPCOrder = 6; - psEncC->shapingLPCOrder = 8; + psEncC->shapingLPCOrder = 12; psEncC->la_shape = 3 * psEncC->fs_kHz; psEncC->nStatesDelayedDecision = 1; psEncC->useInterpolatedNLSFs = 0; - psEncC->LTPQuantLowComplexity = 1; psEncC->NLSF_MSVQ_Survivors = 2; psEncC->warping_Q16 = 0; - } else if( Complexity < 4 ) { + } else if( Complexity < 2 ) { psEncC->pitchEstimationComplexity = SILK_PE_MID_COMPLEX; psEncC->pitchEstimationThreshold_Q16 = SILK_FIX_CONST( 0.76, 16 ); psEncC->pitchEstimationLPCOrder = 8; - psEncC->shapingLPCOrder = 10; + psEncC->shapingLPCOrder = 14; psEncC->la_shape = 5 * psEncC->fs_kHz; psEncC->nStatesDelayedDecision = 1; psEncC->useInterpolatedNLSFs = 0; - psEncC->LTPQuantLowComplexity = 0; + psEncC->NLSF_MSVQ_Survivors = 3; + psEncC->warping_Q16 = 0; + } else if( Complexity < 3 ) { + psEncC->pitchEstimationComplexity = SILK_PE_MIN_COMPLEX; + psEncC->pitchEstimationThreshold_Q16 = SILK_FIX_CONST( 0.8, 16 ); + psEncC->pitchEstimationLPCOrder = 6; + psEncC->shapingLPCOrder = 12; + psEncC->la_shape = 3 * psEncC->fs_kHz; + psEncC->nStatesDelayedDecision = 2; + psEncC->useInterpolatedNLSFs = 0; + psEncC->NLSF_MSVQ_Survivors = 2; + psEncC->warping_Q16 = 0; + } else if( Complexity < 4 ) { + psEncC->pitchEstimationComplexity = SILK_PE_MID_COMPLEX; + psEncC->pitchEstimationThreshold_Q16 = SILK_FIX_CONST( 0.76, 16 ); + psEncC->pitchEstimationLPCOrder = 8; + psEncC->shapingLPCOrder = 14; + psEncC->la_shape = 5 * psEncC->fs_kHz; + psEncC->nStatesDelayedDecision = 2; + psEncC->useInterpolatedNLSFs = 0; psEncC->NLSF_MSVQ_Survivors = 4; psEncC->warping_Q16 = 0; } else if( Complexity < 6 ) { psEncC->pitchEstimationComplexity = SILK_PE_MID_COMPLEX; psEncC->pitchEstimationThreshold_Q16 = SILK_FIX_CONST( 0.74, 16 ); psEncC->pitchEstimationLPCOrder = 10; - psEncC->shapingLPCOrder = 12; + psEncC->shapingLPCOrder = 16; psEncC->la_shape = 5 * psEncC->fs_kHz; psEncC->nStatesDelayedDecision = 2; psEncC->useInterpolatedNLSFs = 1; - psEncC->LTPQuantLowComplexity = 0; - psEncC->NLSF_MSVQ_Survivors = 8; + psEncC->NLSF_MSVQ_Survivors = 6; psEncC->warping_Q16 = psEncC->fs_kHz * SILK_FIX_CONST( WARPING_MULTIPLIER, 16 ); } else if( Complexity < 8 ) { psEncC->pitchEstimationComplexity = SILK_PE_MID_COMPLEX; psEncC->pitchEstimationThreshold_Q16 = SILK_FIX_CONST( 0.72, 16 ); psEncC->pitchEstimationLPCOrder = 12; - psEncC->shapingLPCOrder = 14; + psEncC->shapingLPCOrder = 20; psEncC->la_shape = 5 * psEncC->fs_kHz; psEncC->nStatesDelayedDecision = 3; psEncC->useInterpolatedNLSFs = 1; - psEncC->LTPQuantLowComplexity = 0; - psEncC->NLSF_MSVQ_Survivors = 16; + psEncC->NLSF_MSVQ_Survivors = 8; psEncC->warping_Q16 = psEncC->fs_kHz * SILK_FIX_CONST( WARPING_MULTIPLIER, 16 ); } else { psEncC->pitchEstimationComplexity = SILK_PE_MAX_COMPLEX; psEncC->pitchEstimationThreshold_Q16 = SILK_FIX_CONST( 0.7, 16 ); psEncC->pitchEstimationLPCOrder = 16; - psEncC->shapingLPCOrder = 16; + psEncC->shapingLPCOrder = 24; psEncC->la_shape = 5 * psEncC->fs_kHz; psEncC->nStatesDelayedDecision = MAX_DEL_DEC_STATES; psEncC->useInterpolatedNLSFs = 1; - psEncC->LTPQuantLowComplexity = 0; - psEncC->NLSF_MSVQ_Survivors = 32; + psEncC->NLSF_MSVQ_Survivors = 16; psEncC->warping_Q16 = psEncC->fs_kHz * SILK_FIX_CONST( WARPING_MULTIPLIER, 16 ); } @@ -381,46 +390,32 @@ static opus_int silk_setup_complexity( psEncC->shapeWinLength = SUB_FRAME_LENGTH_MS * psEncC->fs_kHz + 2 * psEncC->la_shape; psEncC->Complexity = Complexity; - silk_assert( 
psEncC->pitchEstimationLPCOrder <= MAX_FIND_PITCH_LPC_ORDER ); - silk_assert( psEncC->shapingLPCOrder <= MAX_SHAPE_LPC_ORDER ); - silk_assert( psEncC->nStatesDelayedDecision <= MAX_DEL_DEC_STATES ); - silk_assert( psEncC->warping_Q16 <= 32767 ); - silk_assert( psEncC->la_shape <= LA_SHAPE_MAX ); - silk_assert( psEncC->shapeWinLength <= SHAPE_LPC_WIN_MAX ); - silk_assert( psEncC->NLSF_MSVQ_Survivors <= NLSF_VQ_MAX_SURVIVORS ); + celt_assert( psEncC->pitchEstimationLPCOrder <= MAX_FIND_PITCH_LPC_ORDER ); + celt_assert( psEncC->shapingLPCOrder <= MAX_SHAPE_LPC_ORDER ); + celt_assert( psEncC->nStatesDelayedDecision <= MAX_DEL_DEC_STATES ); + celt_assert( psEncC->warping_Q16 <= 32767 ); + celt_assert( psEncC->la_shape <= LA_SHAPE_MAX ); + celt_assert( psEncC->shapeWinLength <= SHAPE_LPC_WIN_MAX ); return ret; } static OPUS_INLINE opus_int silk_setup_LBRR( silk_encoder_state *psEncC, /* I/O */ - const opus_int32 TargetRate_bps /* I */ + const silk_EncControlStruct *encControl /* I */ ) { opus_int LBRR_in_previous_packet, ret = SILK_NO_ERROR; - opus_int32 LBRR_rate_thres_bps; LBRR_in_previous_packet = psEncC->LBRR_enabled; - psEncC->LBRR_enabled = 0; - if( psEncC->useInBandFEC && psEncC->PacketLoss_perc > 0 ) { - if( psEncC->fs_kHz == 8 ) { - LBRR_rate_thres_bps = LBRR_NB_MIN_RATE_BPS; - } else if( psEncC->fs_kHz == 12 ) { - LBRR_rate_thres_bps = LBRR_MB_MIN_RATE_BPS; + psEncC->LBRR_enabled = encControl->LBRR_coded; + if( psEncC->LBRR_enabled ) { + /* Set gain increase for coding LBRR excitation */ + if( LBRR_in_previous_packet == 0 ) { + /* Previous packet did not have LBRR, and was therefore coded at a higher bitrate */ + psEncC->LBRR_GainIncreases = 7; } else { - LBRR_rate_thres_bps = LBRR_WB_MIN_RATE_BPS; - } - LBRR_rate_thres_bps = silk_SMULWB( silk_MUL( LBRR_rate_thres_bps, 125 - silk_min( psEncC->PacketLoss_perc, 25 ) ), SILK_FIX_CONST( 0.01, 16 ) ); - - if( TargetRate_bps > LBRR_rate_thres_bps ) { - /* Set gain increase for coding LBRR excitation */ - if( LBRR_in_previous_packet == 0 ) { - /* Previous packet did not have LBRR, and was therefore coded at a higher bitrate */ - psEncC->LBRR_GainIncreases = 7; - } else { - psEncC->LBRR_GainIncreases = silk_max_int( 7 - silk_SMULWB( (opus_int32)psEncC->PacketLoss_perc, SILK_FIX_CONST( 0.4, 16 ) ), 2 ); - } - psEncC->LBRR_enabled = 1; + psEncC->LBRR_GainIncreases = silk_max_int( 7 - silk_SMULWB( (opus_int32)psEncC->PacketLoss_perc, SILK_FIX_CONST( 0.4, 16 ) ), 2 ); } } diff --git a/libs/libopus/silk/debug.c b/libs/libopus/silk/debug.c index 9253faf71..eb0c36ef1 100644 --- a/libs/libopus/silk/debug.c +++ b/libs/libopus/silk/debug.c @@ -30,18 +30,20 @@ POSSIBILITY OF SUCH DAMAGE. 
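The rewritten silk_control_SNR() above drops the old 8-entry rate table plus interpolation in favour of a direct lookup: the (already 10 ms-reduced) target rate is rounded to the nearest 400 bps step, the first 10 steps (0-4 kb/s, all zero) are skipped, and the stored byte multiplied by 21 is the SNR in Q7. A rough standalone sketch of that index arithmetic (snr_q7_from_rate is a made-up name; snr_table and bound stand for whichever silk_TargetRate_{NB,MB,WB}_21 table and size were selected above):

    /* Mirrors the index math of the new silk_control_SNR() lookup. */
    static int snr_q7_from_rate(int target_rate_bps, const unsigned char *snr_table, int bound)
    {
        int id = (target_rate_bps + 200) / 400;   /* nearest 400 bps step             */
        id -= 10;                                 /* entries for 0-4 kb/s are omitted  */
        if (id >= bound)
            id = bound - 1;                       /* clamp to the table end            */
        if (id <= 0)
            return 0;                             /* at or below ~4 kb/s: SNR of 0     */
        return snr_table[id] * 21;                /* table value * 21 = SNR_dB in Q7   */
    }

For example, a 20 kb/s wideband target gives id = 20200/400 - 10 = 40, i.e. the 41st entry of silk_TargetRate_WB_21.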
#endif #include "debug.h" + +#if SILK_DEBUG || SILK_TIC_TOC #include "SigProc_FIX.h" +#endif #if SILK_TIC_TOC -#ifdef _WIN32 - #if (defined(_WIN32) || defined(_WINCE)) #include /* timer */ #else /* Linux or Mac*/ #include #endif +#ifdef _WIN32 unsigned long silk_GetHighResolutionTime(void) /* O time in usec*/ { /* Returns a time counter in microsec */ @@ -65,7 +67,7 @@ unsigned long GetHighResolutionTime(void) /* O time in usec*/ int silk_Timer_nTimers = 0; int silk_Timer_depth_ctr = 0; char silk_Timer_tags[silk_NUM_TIMERS_MAX][silk_NUM_TIMERS_MAX_TAG_LEN]; -#ifdef WIN32 +#ifdef _WIN32 LARGE_INTEGER silk_Timer_start[silk_NUM_TIMERS_MAX]; #else unsigned long silk_Timer_start[silk_NUM_TIMERS_MAX]; @@ -76,7 +78,7 @@ opus_int64 silk_Timer_sum[silk_NUM_TIMERS_MAX]; opus_int64 silk_Timer_max[silk_NUM_TIMERS_MAX]; opus_int64 silk_Timer_depth[silk_NUM_TIMERS_MAX]; -#ifdef WIN32 +#ifdef _WIN32 void silk_TimerSave(char *file_name) { if( silk_Timer_nTimers > 0 ) diff --git a/libs/libopus/silk/debug.h b/libs/libopus/silk/debug.h index efb6d3e99..36163e478 100644 --- a/libs/libopus/silk/debug.h +++ b/libs/libopus/silk/debug.h @@ -28,41 +28,29 @@ POSSIBILITY OF SUCH DAMAGE. #ifndef SILK_DEBUG_H #define SILK_DEBUG_H +/* Set to 1 to enable DEBUG_STORE_DATA() macros for dumping + * intermediate signals from the codec. + */ +#define SILK_DEBUG 0 + +/* Flag for using timers */ +#define SILK_TIC_TOC 0 + +#if SILK_DEBUG || SILK_TIC_TOC #include "typedef.h" -#include /* file writing */ #include /* strcpy, strcmp */ +#include /* file writing */ +#endif #ifdef __cplusplus extern "C" { #endif -unsigned long GetHighResolutionTime(void); /* O time in usec*/ - -/* make SILK_DEBUG dependent on compiler's _DEBUG */ -#if defined _WIN32 - #ifdef _DEBUG - #define SILK_DEBUG 1 - #else - #define SILK_DEBUG 0 - #endif - - /* overrule the above */ - #if 0 - /* #define NO_ASSERTS*/ - #undef SILK_DEBUG - #define SILK_DEBUG 1 - #endif -#else - #define SILK_DEBUG 0 -#endif - -/* Flag for using timers */ -#define SILK_TIC_TOC 0 - - #if SILK_TIC_TOC +unsigned long GetHighResolutionTime(void); /* O time in usec*/ + #if (defined(_WIN32) || defined(_WINCE)) #include /* timer */ #else /* Linux or Mac*/ diff --git a/libs/libopus/silk/dec_API.c b/libs/libopus/silk/dec_API.c index b7d8ed48d..7d5ca7fb9 100644 --- a/libs/libopus/silk/dec_API.c +++ b/libs/libopus/silk/dec_API.c @@ -104,7 +104,7 @@ opus_int silk_Decode( /* O Returns error co int delay_stack_alloc; SAVE_STACK; - silk_assert( decControl->nChannelsInternal == 1 || decControl->nChannelsInternal == 2 ); + celt_assert( decControl->nChannelsInternal == 1 || decControl->nChannelsInternal == 2 ); /**********************************/ /* Test if first frame in payload */ @@ -143,13 +143,13 @@ opus_int silk_Decode( /* O Returns error co channel_state[ n ].nFramesPerPacket = 3; channel_state[ n ].nb_subfr = 4; } else { - silk_assert( 0 ); + celt_assert( 0 ); RESTORE_STACK; return SILK_DEC_INVALID_FRAME_SIZE; } fs_kHz_dec = ( decControl->internalSampleRate >> 10 ) + 1; if( fs_kHz_dec != 8 && fs_kHz_dec != 12 && fs_kHz_dec != 16 ) { - silk_assert( 0 ); + celt_assert( 0 ); RESTORE_STACK; return SILK_DEC_INVALID_SAMPLING_FREQUENCY; } diff --git a/libs/libopus/silk/decode_core.c b/libs/libopus/silk/decode_core.c index e569c0e72..1c352a652 100644 --- a/libs/libopus/silk/decode_core.c +++ b/libs/libopus/silk/decode_core.c @@ -141,7 +141,7 @@ void silk_decode_core( if( k == 0 || ( k == 2 && NLSF_interpolation_flag ) ) { /* Rewhiten with new A coefs */ start_idx = psDec->ltp_mem_length - lag - 
psDec->LPC_order - LTP_ORDER / 2; - silk_assert( start_idx > 0 ); + celt_assert( start_idx > 0 ); if( k == 2 ) { silk_memcpy( &psDec->outBuf[ psDec->ltp_mem_length ], xq, 2 * psDec->subfr_length * sizeof( opus_int16 ) ); @@ -196,7 +196,7 @@ void silk_decode_core( for( i = 0; i < psDec->subfr_length; i++ ) { /* Short-term prediction */ - silk_assert( psDec->LPC_order == 10 || psDec->LPC_order == 16 ); + celt_assert( psDec->LPC_order == 10 || psDec->LPC_order == 16 ); /* Avoids introducing a bias because silk_SMLAWB() always rounds to -inf */ LPC_pred_Q10 = silk_RSHIFT( psDec->LPC_order, 1 ); LPC_pred_Q10 = silk_SMLAWB( LPC_pred_Q10, sLPC_Q14[ MAX_LPC_ORDER + i - 1 ], A_Q12_tmp[ 0 ] ); @@ -225,8 +225,6 @@ void silk_decode_core( pxq[ i ] = (opus_int16)silk_SAT16( silk_RSHIFT_ROUND( silk_SMULWW( sLPC_Q14[ MAX_LPC_ORDER + i ], Gain_Q10 ), 8 ) ); } - /* DEBUG_STORE_DATA( dec.pcm, pxq, psDec->subfr_length * sizeof( opus_int16 ) ) */ - /* Update LPC filter state */ silk_memcpy( sLPC_Q14, &sLPC_Q14[ psDec->subfr_length ], MAX_LPC_ORDER * sizeof( opus_int32 ) ); pexc_Q14 += psDec->subfr_length; diff --git a/libs/libopus/silk/decode_frame.c b/libs/libopus/silk/decode_frame.c index a605d95ac..4f36f854c 100644 --- a/libs/libopus/silk/decode_frame.c +++ b/libs/libopus/silk/decode_frame.c @@ -55,7 +55,7 @@ opus_int silk_decode_frame( psDecCtrl->LTP_scale_Q14 = 0; /* Safety checks */ - silk_assert( L > 0 && L <= MAX_FRAME_LENGTH ); + celt_assert( L > 0 && L <= MAX_FRAME_LENGTH ); if( lostFlag == FLAG_DECODE_NORMAL || ( lostFlag == FLAG_DECODE_LBRR && psDec->LBRR_flags[ psDec->nFramesDecoded ] == 1 ) ) @@ -91,7 +91,7 @@ opus_int silk_decode_frame( psDec->lossCnt = 0; psDec->prevSignalType = psDec->indices.signalType; - silk_assert( psDec->prevSignalType >= 0 && psDec->prevSignalType <= 2 ); + celt_assert( psDec->prevSignalType >= 0 && psDec->prevSignalType <= 2 ); /* A frame has been decoded without errors */ psDec->first_frame_after_reset = 0; @@ -103,7 +103,7 @@ opus_int silk_decode_frame( /*************************/ /* Update output buffer. 
*/ /*************************/ - silk_assert( psDec->ltp_mem_length >= psDec->frame_length ); + celt_assert( psDec->ltp_mem_length >= psDec->frame_length ); mv_len = psDec->ltp_mem_length - psDec->frame_length; silk_memmove( psDec->outBuf, &psDec->outBuf[ psDec->frame_length ], mv_len * sizeof(opus_int16) ); silk_memcpy( &psDec->outBuf[ mv_len ], pOut, psDec->frame_length * sizeof( opus_int16 ) ); diff --git a/libs/libopus/silk/decode_indices.c b/libs/libopus/silk/decode_indices.c index 7afe5c26c..0bb4a997a 100644 --- a/libs/libopus/silk/decode_indices.c +++ b/libs/libopus/silk/decode_indices.c @@ -79,7 +79,7 @@ void silk_decode_indices( /**********************/ psDec->indices.NLSFIndices[ 0 ] = (opus_int8)ec_dec_icdf( psRangeDec, &psDec->psNLSF_CB->CB1_iCDF[ ( psDec->indices.signalType >> 1 ) * psDec->psNLSF_CB->nVectors ], 8 ); silk_NLSF_unpack( ec_ix, pred_Q8, psDec->psNLSF_CB, psDec->indices.NLSFIndices[ 0 ] ); - silk_assert( psDec->psNLSF_CB->order == psDec->LPC_order ); + celt_assert( psDec->psNLSF_CB->order == psDec->LPC_order ); for( i = 0; i < psDec->psNLSF_CB->order; i++ ) { Ix = ec_dec_icdf( psRangeDec, &psDec->psNLSF_CB->ec_iCDF[ ec_ix[ i ] ], 8 ); if( Ix == 0 ) { diff --git a/libs/libopus/silk/decode_parameters.c b/libs/libopus/silk/decode_parameters.c index e345b1dce..a56a40985 100644 --- a/libs/libopus/silk/decode_parameters.c +++ b/libs/libopus/silk/decode_parameters.c @@ -52,7 +52,7 @@ void silk_decode_parameters( silk_NLSF_decode( pNLSF_Q15, psDec->indices.NLSFIndices, psDec->psNLSF_CB ); /* Convert NLSF parameters to AR prediction filter coefficients */ - silk_NLSF2A( psDecCtrl->PredCoef_Q12[ 1 ], pNLSF_Q15, psDec->LPC_order ); + silk_NLSF2A( psDecCtrl->PredCoef_Q12[ 1 ], pNLSF_Q15, psDec->LPC_order, psDec->arch ); /* If just reset, e.g., because internal Fs changed, do not allow interpolation */ /* improves the case of packet loss in the first frame after a switch */ @@ -69,7 +69,7 @@ void silk_decode_parameters( } /* Convert NLSF parameters to AR prediction filter coefficients */ - silk_NLSF2A( psDecCtrl->PredCoef_Q12[ 0 ], pNLSF0_Q15, psDec->LPC_order ); + silk_NLSF2A( psDecCtrl->PredCoef_Q12[ 0 ], pNLSF0_Q15, psDec->LPC_order, psDec->arch ); } else { /* Copy LPC coefficients for first half from second half */ silk_memcpy( psDecCtrl->PredCoef_Q12[ 0 ], psDecCtrl->PredCoef_Q12[ 1 ], psDec->LPC_order * sizeof( opus_int16 ) ); diff --git a/libs/libopus/silk/decode_pitch.c b/libs/libopus/silk/decode_pitch.c index fedbc6a52..fd1b6bf55 100644 --- a/libs/libopus/silk/decode_pitch.c +++ b/libs/libopus/silk/decode_pitch.c @@ -51,7 +51,7 @@ void silk_decode_pitch( Lag_CB_ptr = &silk_CB_lags_stage2[ 0 ][ 0 ]; cbk_size = PE_NB_CBKS_STAGE2_EXT; } else { - silk_assert( nb_subfr == PE_MAX_NB_SUBFR >> 1 ); + celt_assert( nb_subfr == PE_MAX_NB_SUBFR >> 1 ); Lag_CB_ptr = &silk_CB_lags_stage2_10_ms[ 0 ][ 0 ]; cbk_size = PE_NB_CBKS_STAGE2_10MS; } @@ -60,7 +60,7 @@ void silk_decode_pitch( Lag_CB_ptr = &silk_CB_lags_stage3[ 0 ][ 0 ]; cbk_size = PE_NB_CBKS_STAGE3_MAX; } else { - silk_assert( nb_subfr == PE_MAX_NB_SUBFR >> 1 ); + celt_assert( nb_subfr == PE_MAX_NB_SUBFR >> 1 ); Lag_CB_ptr = &silk_CB_lags_stage3_10_ms[ 0 ][ 0 ]; cbk_size = PE_NB_CBKS_STAGE3_10MS; } diff --git a/libs/libopus/silk/decode_pulses.c b/libs/libopus/silk/decode_pulses.c index d6bbec922..a56d2d307 100644 --- a/libs/libopus/silk/decode_pulses.c +++ b/libs/libopus/silk/decode_pulses.c @@ -56,7 +56,7 @@ void silk_decode_pulses( silk_assert( 1 << LOG2_SHELL_CODEC_FRAME_LENGTH == SHELL_CODEC_FRAME_LENGTH ); iter = 
silk_RSHIFT( frame_length, LOG2_SHELL_CODEC_FRAME_LENGTH ); if( iter * SHELL_CODEC_FRAME_LENGTH < frame_length ) { - silk_assert( frame_length == 12 * 10 ); /* Make sure only happens for 10 ms @ 12 kHz */ + celt_assert( frame_length == 12 * 10 ); /* Make sure only happens for 10 ms @ 12 kHz */ iter++; } diff --git a/libs/libopus/silk/decoder_set_fs.c b/libs/libopus/silk/decoder_set_fs.c index eef0fd25e..d9a13d0f0 100644 --- a/libs/libopus/silk/decoder_set_fs.c +++ b/libs/libopus/silk/decoder_set_fs.c @@ -40,8 +40,8 @@ opus_int silk_decoder_set_fs( { opus_int frame_length, ret = 0; - silk_assert( fs_kHz == 8 || fs_kHz == 12 || fs_kHz == 16 ); - silk_assert( psDec->nb_subfr == MAX_NB_SUBFR || psDec->nb_subfr == MAX_NB_SUBFR/2 ); + celt_assert( fs_kHz == 8 || fs_kHz == 12 || fs_kHz == 16 ); + celt_assert( psDec->nb_subfr == MAX_NB_SUBFR || psDec->nb_subfr == MAX_NB_SUBFR/2 ); /* New (sub)frame length */ psDec->subfr_length = silk_SMULBB( SUB_FRAME_LENGTH_MS, fs_kHz ); @@ -86,7 +86,7 @@ opus_int silk_decoder_set_fs( psDec->pitch_lag_low_bits_iCDF = silk_uniform4_iCDF; } else { /* unsupported sampling rate */ - silk_assert( 0 ); + celt_assert( 0 ); } psDec->first_frame_after_reset = 1; psDec->lagPrev = 100; @@ -101,7 +101,7 @@ opus_int silk_decoder_set_fs( } /* Check that settings are valid */ - silk_assert( psDec->frame_length > 0 && psDec->frame_length <= MAX_FRAME_LENGTH ); + celt_assert( psDec->frame_length > 0 && psDec->frame_length <= MAX_FRAME_LENGTH ); return ret; } diff --git a/libs/libopus/silk/define.h b/libs/libopus/silk/define.h index 19c9b00e2..491c86f33 100644 --- a/libs/libopus/silk/define.h +++ b/libs/libopus/silk/define.h @@ -46,7 +46,6 @@ extern "C" /* Limits on bitrate */ #define MIN_TARGET_RATE_BPS 5000 #define MAX_TARGET_RATE_BPS 80000 -#define TARGET_RATE_TAB_SZ 8 /* LBRR thresholds */ #define LBRR_NB_MIN_RATE_BPS 12000 @@ -56,6 +55,12 @@ extern "C" /* DTX settings */ #define NB_SPEECH_FRAMES_BEFORE_DTX 10 /* eq 200 ms */ #define MAX_CONSECUTIVE_DTX 20 /* eq 400 ms */ +#define DTX_ACTIVITY_THRESHOLD 0.1f + +/* VAD decision */ +#define VAD_NO_DECISION -1 +#define VAD_NO_ACTIVITY 0 +#define VAD_ACTIVITY 1 /* Maximum sampling frequency */ #define MAX_FS_KHZ 16 @@ -147,7 +152,7 @@ extern "C" #define USE_HARM_SHAPING 1 /* Max LPC order of noise shaping filters */ -#define MAX_SHAPE_LPC_ORDER 16 +#define MAX_SHAPE_LPC_ORDER 24 #define HARM_SHAPE_FIR_TAPS 3 @@ -157,8 +162,7 @@ extern "C" #define LTP_BUF_LENGTH 512 #define LTP_MASK ( LTP_BUF_LENGTH - 1 ) -#define DECISION_DELAY 32 -#define DECISION_DELAY_MASK ( DECISION_DELAY - 1 ) +#define DECISION_DELAY 40 /* Number of subframes for excitation entropy coding */ #define SHELL_CODEC_FRAME_LENGTH 16 @@ -173,11 +177,7 @@ extern "C" #define MAX_MATRIX_SIZE MAX_LPC_ORDER /* Max of LPC Order and LTP order */ -#if( MAX_LPC_ORDER > DECISION_DELAY ) # define NSQ_LPC_BUF_LENGTH MAX_LPC_ORDER -#else -# define NSQ_LPC_BUF_LENGTH DECISION_DELAY -#endif /***************************/ /* Voice activity detector */ @@ -205,7 +205,6 @@ extern "C" /******************/ #define NLSF_W_Q 2 #define NLSF_VQ_MAX_VECTORS 32 -#define NLSF_VQ_MAX_SURVIVORS 32 #define NLSF_QUANT_MAX_AMPLITUDE 4 #define NLSF_QUANT_MAX_AMPLITUDE_EXT 10 #define NLSF_QUANT_LEVEL_ADJ 0.1 @@ -226,6 +225,7 @@ extern "C" /* Defines for CN generation */ #define CNG_BUF_MASK_MAX 255 /* 2^floor(log2(MAX_FRAME_LENGTH))-1 */ #define CNG_GAIN_SMTH_Q16 4634 /* 0.25^(1/4) */ +#define CNG_GAIN_SMTH_THRESHOLD_Q16 46396 /* -3 dB */ #define CNG_NLSF_SMTH_Q16 16348 /* 0.25 */ #ifdef __cplusplus 
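The celt_assert( frame_length == 12 * 10 ) in the silk_decode_pulses() hunk above guards the one frame size that is not a multiple of the 16-sample shell-codec block: a 10 ms frame at 12 kHz is 120 samples, so the shift yields 7 whole blocks and the remainder forces one extra iteration (the matching change in encode_pulses.c later in this patch zero-pads the tail for the same case). A small illustrative helper (the name is made up) showing that count:

    /* Shell-codec block count as computed in silk_decode_pulses():          */
    /* SHELL_CODEC_FRAME_LENGTH = 16, so the shift amount is log2(16) = 4.   */
    static int shell_block_count(int frame_length)
    {
        int iter = frame_length >> 4;        /* whole 16-sample blocks        */
        if (16 * iter < frame_length)
            iter++;                          /* only 10 ms @ 12 kHz: 120 -> 8 */
        return iter;                         /* e.g. 120 -> 7 + 1 = 8 blocks  */
    }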
diff --git a/libs/libopus/silk/enc_API.c b/libs/libopus/silk/enc_API.c index f8060286d..55a33f37e 100644 --- a/libs/libopus/silk/enc_API.c +++ b/libs/libopus/silk/enc_API.c @@ -82,7 +82,7 @@ opus_int silk_InitEncoder( /* O Returns error co silk_memset( psEnc, 0, sizeof( silk_encoder ) ); for( n = 0; n < ENCODER_NUM_CHANNELS; n++ ) { if( ret += silk_init_encoder( &psEnc->state_Fxx[ n ], arch ) ) { - silk_assert( 0 ); + celt_assert( 0 ); } } @@ -91,7 +91,7 @@ opus_int silk_InitEncoder( /* O Returns error co /* Read control structure */ if( ret += silk_QueryEncoder( encState, encStatus ) ) { - silk_assert( 0 ); + celt_assert( 0 ); } return ret; @@ -144,7 +144,8 @@ opus_int silk_Encode( /* O Returns error co opus_int nSamplesIn, /* I Number of samples in input vector */ ec_enc *psRangeEnc, /* I/O Compressor data structure */ opus_int32 *nBytesOut, /* I/O Number of bytes in payload (input: Max bytes) */ - const opus_int prefillFlag /* I Flag to indicate prefilling buffers no coding */ + const opus_int prefillFlag, /* I Flag to indicate prefilling buffers no coding */ + opus_int activity /* I Decision of Opus voice activity detector */ ) { opus_int n, i, nBits, flags, tmp_payloadSize_ms = 0, tmp_complexity = 0, ret = 0; @@ -166,7 +167,7 @@ opus_int silk_Encode( /* O Returns error co /* Check values in encoder control structure */ if( ( ret = check_control_input( encControl ) ) != 0 ) { - silk_assert( 0 ); + celt_assert( 0 ); RESTORE_STACK; return ret; } @@ -199,16 +200,26 @@ opus_int silk_Encode( /* O Returns error co tot_blocks = ( nBlocksOf10ms > 1 ) ? nBlocksOf10ms >> 1 : 1; curr_block = 0; if( prefillFlag ) { + silk_LP_state save_LP; /* Only accept input length of 10 ms */ if( nBlocksOf10ms != 1 ) { - silk_assert( 0 ); + celt_assert( 0 ); RESTORE_STACK; return SILK_ENC_INPUT_INVALID_NO_OF_SAMPLES; } + if ( prefillFlag == 2 ) { + save_LP = psEnc->state_Fxx[ 0 ].sCmn.sLP; + /* Save the sampling rate so the bandwidth switching code can keep handling transitions. */ + save_LP.saved_fs_kHz = psEnc->state_Fxx[ 0 ].sCmn.fs_kHz; + } /* Reset Encoder */ for( n = 0; n < encControl->nChannelsInternal; n++ ) { ret = silk_init_encoder( &psEnc->state_Fxx[ n ], psEnc->state_Fxx[ n ].sCmn.arch ); - silk_assert( !ret ); + /* Restore the variable LP state. */ + if ( prefillFlag == 2 ) { + psEnc->state_Fxx[ n ].sCmn.sLP = save_LP; + } + celt_assert( !ret ); } tmp_payloadSize_ms = encControl->payloadSize_ms; encControl->payloadSize_ms = 10; @@ -221,23 +232,22 @@ opus_int silk_Encode( /* O Returns error co } else { /* Only accept input lengths that are a multiple of 10 ms */ if( nBlocksOf10ms * encControl->API_sampleRate != 100 * nSamplesIn || nSamplesIn < 0 ) { - silk_assert( 0 ); + celt_assert( 0 ); RESTORE_STACK; return SILK_ENC_INPUT_INVALID_NO_OF_SAMPLES; } /* Make sure no more than one packet can be produced */ if( 1000 * (opus_int32)nSamplesIn > encControl->payloadSize_ms * encControl->API_sampleRate ) { - silk_assert( 0 ); + celt_assert( 0 ); RESTORE_STACK; return SILK_ENC_INPUT_INVALID_NO_OF_SAMPLES; } } - TargetRate_bps = silk_RSHIFT32( encControl->bitRate, encControl->nChannelsInternal - 1 ); for( n = 0; n < encControl->nChannelsInternal; n++ ) { /* Force the side channel to the same rate as the mid */ opus_int force_fs_kHz = (n==1) ? 
psEnc->state_Fxx[0].sCmn.fs_kHz : 0; - if( ( ret = silk_control_encoder( &psEnc->state_Fxx[ n ], encControl, TargetRate_bps, psEnc->allowBandwidthSwitch, n, force_fs_kHz ) ) != 0 ) { + if( ( ret = silk_control_encoder( &psEnc->state_Fxx[ n ], encControl, psEnc->allowBandwidthSwitch, n, force_fs_kHz ) ) != 0 ) { silk_assert( 0 ); RESTORE_STACK; return ret; @@ -249,7 +259,7 @@ opus_int silk_Encode( /* O Returns error co } psEnc->state_Fxx[ n ].sCmn.inDTX = psEnc->state_Fxx[ n ].sCmn.useDTX; } - silk_assert( encControl->nChannelsInternal == 1 || psEnc->state_Fxx[ 0 ].sCmn.fs_kHz == psEnc->state_Fxx[ 1 ].sCmn.fs_kHz ); + celt_assert( encControl->nChannelsInternal == 1 || psEnc->state_Fxx[ 0 ].sCmn.fs_kHz == psEnc->state_Fxx[ 1 ].sCmn.fs_kHz ); /* Input buffering/resampling and encoding */ nSamplesToBufferMax = @@ -307,7 +317,7 @@ opus_int silk_Encode( /* O Returns error co } psEnc->state_Fxx[ 0 ].sCmn.inputBufIx += nSamplesToBuffer; } else { - silk_assert( encControl->nChannelsAPI == 1 && encControl->nChannelsInternal == 1 ); + celt_assert( encControl->nChannelsAPI == 1 && encControl->nChannelsInternal == 1 ); silk_memcpy(buf, samplesIn, nSamplesFromInput*sizeof(opus_int16)); ret += silk_resampler( &psEnc->state_Fxx[ 0 ].sCmn.resampler_state, &psEnc->state_Fxx[ 0 ].sCmn.inputBuf[ psEnc->state_Fxx[ 0 ].sCmn.inputBufIx + 2 ], buf, nSamplesFromInput ); @@ -323,8 +333,8 @@ opus_int silk_Encode( /* O Returns error co /* Silk encoder */ if( psEnc->state_Fxx[ 0 ].sCmn.inputBufIx >= psEnc->state_Fxx[ 0 ].sCmn.frame_length ) { /* Enough data in input buffer, so encode */ - silk_assert( psEnc->state_Fxx[ 0 ].sCmn.inputBufIx == psEnc->state_Fxx[ 0 ].sCmn.frame_length ); - silk_assert( encControl->nChannelsInternal == 1 || psEnc->state_Fxx[ 1 ].sCmn.inputBufIx == psEnc->state_Fxx[ 1 ].sCmn.frame_length ); + celt_assert( psEnc->state_Fxx[ 0 ].sCmn.inputBufIx == psEnc->state_Fxx[ 0 ].sCmn.frame_length ); + celt_assert( encControl->nChannelsInternal == 1 || psEnc->state_Fxx[ 1 ].sCmn.inputBufIx == psEnc->state_Fxx[ 1 ].sCmn.frame_length ); /* Deal with LBRR data */ if( psEnc->state_Fxx[ 0 ].sCmn.nFramesEncoded == 0 && !prefillFlag ) { @@ -416,7 +426,6 @@ opus_int silk_Encode( /* O Returns error co /* Reset side channel encoder memory for first frame with side coding */ if( psEnc->prev_decode_only_middle == 1 ) { silk_memset( &psEnc->state_Fxx[ 1 ].sShape, 0, sizeof( psEnc->state_Fxx[ 1 ].sShape ) ); - silk_memset( &psEnc->state_Fxx[ 1 ].sPrefilt, 0, sizeof( psEnc->state_Fxx[ 1 ].sPrefilt ) ); silk_memset( &psEnc->state_Fxx[ 1 ].sCmn.sNSQ, 0, sizeof( psEnc->state_Fxx[ 1 ].sCmn.sNSQ ) ); silk_memset( psEnc->state_Fxx[ 1 ].sCmn.prev_NLSFq_Q15, 0, sizeof( psEnc->state_Fxx[ 1 ].sCmn.prev_NLSFq_Q15 ) ); silk_memset( &psEnc->state_Fxx[ 1 ].sCmn.sLP.In_LP_State, 0, sizeof( psEnc->state_Fxx[ 1 ].sCmn.sLP.In_LP_State ) ); @@ -427,7 +436,7 @@ opus_int silk_Encode( /* O Returns error co psEnc->state_Fxx[ 1 ].sCmn.sNSQ.prev_gain_Q16 = 65536; psEnc->state_Fxx[ 1 ].sCmn.first_frame_after_reset = 1; } - silk_encode_do_VAD_Fxx( &psEnc->state_Fxx[ 1 ] ); + silk_encode_do_VAD_Fxx( &psEnc->state_Fxx[ 1 ], activity ); } else { psEnc->state_Fxx[ 1 ].sCmn.VAD_flags[ psEnc->state_Fxx[ 0 ].sCmn.nFramesEncoded ] = 0; } @@ -442,7 +451,7 @@ opus_int silk_Encode( /* O Returns error co silk_memcpy( psEnc->state_Fxx[ 0 ].sCmn.inputBuf, psEnc->sStereo.sMid, 2 * sizeof( opus_int16 ) ); silk_memcpy( psEnc->sStereo.sMid, &psEnc->state_Fxx[ 0 ].sCmn.inputBuf[ psEnc->state_Fxx[ 0 ].sCmn.frame_length ], 2 * sizeof( opus_int16 ) ); } - 
silk_encode_do_VAD_Fxx( &psEnc->state_Fxx[ 0 ] ); + silk_encode_do_VAD_Fxx( &psEnc->state_Fxx[ 0 ], activity ); /* Encode */ for( n = 0; n < encControl->nChannelsInternal; n++ ) { @@ -557,6 +566,10 @@ opus_int silk_Encode( /* O Returns error co } } + encControl->signalType = psEnc->state_Fxx[0].sCmn.indices.signalType; + encControl->offset = silk_Quantization_Offsets_Q10 + [ psEnc->state_Fxx[0].sCmn.indices.signalType >> 1 ] + [ psEnc->state_Fxx[0].sCmn.indices.quantOffsetType ]; RESTORE_STACK; return ret; } diff --git a/libs/libopus/silk/encode_indices.c b/libs/libopus/silk/encode_indices.c index 666c8c0b1..4bcbc3347 100644 --- a/libs/libopus/silk/encode_indices.c +++ b/libs/libopus/silk/encode_indices.c @@ -56,8 +56,8 @@ void silk_encode_indices( /* Encode signal type and quantizer offset */ /*******************************************/ typeOffset = 2 * psIndices->signalType + psIndices->quantOffsetType; - silk_assert( typeOffset >= 0 && typeOffset < 6 ); - silk_assert( encode_LBRR == 0 || typeOffset >= 2 ); + celt_assert( typeOffset >= 0 && typeOffset < 6 ); + celt_assert( encode_LBRR == 0 || typeOffset >= 2 ); if( encode_LBRR || typeOffset >= 2 ) { ec_enc_icdf( psRangeEnc, typeOffset - 2, silk_type_offset_VAD_iCDF, 8 ); } else { @@ -90,7 +90,7 @@ void silk_encode_indices( /****************/ ec_enc_icdf( psRangeEnc, psIndices->NLSFIndices[ 0 ], &psEncC->psNLSF_CB->CB1_iCDF[ ( psIndices->signalType >> 1 ) * psEncC->psNLSF_CB->nVectors ], 8 ); silk_NLSF_unpack( ec_ix, pred_Q8, psEncC->psNLSF_CB, psIndices->NLSFIndices[ 0 ] ); - silk_assert( psEncC->psNLSF_CB->order == psEncC->predictLPCOrder ); + celt_assert( psEncC->psNLSF_CB->order == psEncC->predictLPCOrder ); for( i = 0; i < psEncC->psNLSF_CB->order; i++ ) { if( psIndices->NLSFIndices[ i+1 ] >= NLSF_QUANT_MAX_AMPLITUDE ) { ec_enc_icdf( psRangeEnc, 2 * NLSF_QUANT_MAX_AMPLITUDE, &psEncC->psNLSF_CB->ec_iCDF[ ec_ix[ i ] ], 8 ); diff --git a/libs/libopus/silk/encode_pulses.c b/libs/libopus/silk/encode_pulses.c index ab00264f9..8a1999138 100644 --- a/libs/libopus/silk/encode_pulses.c +++ b/libs/libopus/silk/encode_pulses.c @@ -86,7 +86,7 @@ void silk_encode_pulses( silk_assert( 1 << LOG2_SHELL_CODEC_FRAME_LENGTH == SHELL_CODEC_FRAME_LENGTH ); iter = silk_RSHIFT( frame_length, LOG2_SHELL_CODEC_FRAME_LENGTH ); if( iter * SHELL_CODEC_FRAME_LENGTH < frame_length ) { - silk_assert( frame_length == 12 * 10 ); /* Make sure only happens for 10 ms @ 12 kHz */ + celt_assert( frame_length == 12 * 10 ); /* Make sure only happens for 10 ms @ 12 kHz */ iter++; silk_memset( &pulses[ frame_length ], 0, SHELL_CODEC_FRAME_LENGTH * sizeof(opus_int8)); } diff --git a/libs/libopus/silk/fixed/apply_sine_window_FIX.c b/libs/libopus/silk/fixed/apply_sine_window_FIX.c index 4502b7130..03e088a6d 100644 --- a/libs/libopus/silk/fixed/apply_sine_window_FIX.c +++ b/libs/libopus/silk/fixed/apply_sine_window_FIX.c @@ -57,15 +57,15 @@ void silk_apply_sine_window( opus_int k, f_Q16, c_Q16; opus_int32 S0_Q16, S1_Q16; - silk_assert( win_type == 1 || win_type == 2 ); + celt_assert( win_type == 1 || win_type == 2 ); /* Length must be in a range from 16 to 120 and a multiple of 4 */ - silk_assert( length >= 16 && length <= 120 ); - silk_assert( ( length & 3 ) == 0 ); + celt_assert( length >= 16 && length <= 120 ); + celt_assert( ( length & 3 ) == 0 ); /* Frequency */ k = ( length >> 2 ) - 4; - silk_assert( k >= 0 && k <= 26 ); + celt_assert( k >= 0 && k <= 26 ); f_Q16 = (opus_int)freq_table_Q16[ k ]; /* Factor used for cosine approximation */ diff --git 
a/libs/libopus/silk/fixed/arm/warped_autocorrelation_FIX_arm.h b/libs/libopus/silk/fixed/arm/warped_autocorrelation_FIX_arm.h new file mode 100644 index 000000000..1992e4328 --- /dev/null +++ b/libs/libopus/silk/fixed/arm/warped_autocorrelation_FIX_arm.h @@ -0,0 +1,68 @@ +/*********************************************************************** +Copyright (c) 2017 Google Inc. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: +- Redistributions of source code must retain the above copyright notice, +this list of conditions and the following disclaimer. +- Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. +- Neither the name of Internet Society, IETF or IETF Trust, nor the +names of specific contributors, may be used to endorse or promote +products derived from this software without specific prior written +permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. 
+***********************************************************************/ + +#ifndef SILK_WARPED_AUTOCORRELATION_FIX_ARM_H +# define SILK_WARPED_AUTOCORRELATION_FIX_ARM_H + +# include "celt/arm/armcpu.h" + +# if defined(FIXED_POINT) + +# if defined(OPUS_ARM_MAY_HAVE_NEON_INTR) +void silk_warped_autocorrelation_FIX_neon( + opus_int32 *corr, /* O Result [order + 1] */ + opus_int *scale, /* O Scaling of the correlation vector */ + const opus_int16 *input, /* I Input data to correlate */ + const opus_int warping_Q16, /* I Warping coefficient */ + const opus_int length, /* I Length of input */ + const opus_int order /* I Correlation order (even) */ +); + +# if !defined(OPUS_HAVE_RTCD) && defined(OPUS_ARM_PRESUME_NEON) +# define OVERRIDE_silk_warped_autocorrelation_FIX (1) +# define silk_warped_autocorrelation_FIX(corr, scale, input, warping_Q16, length, order, arch) \ + ((void)(arch), PRESUME_NEON(silk_warped_autocorrelation_FIX)(corr, scale, input, warping_Q16, length, order)) +# endif +# endif + +# if !defined(OVERRIDE_silk_warped_autocorrelation_FIX) +/*Is run-time CPU detection enabled on this platform?*/ +# if defined(OPUS_HAVE_RTCD) && (defined(OPUS_ARM_MAY_HAVE_NEON_INTR) && !defined(OPUS_ARM_PRESUME_NEON_INTR)) +extern void (*const SILK_WARPED_AUTOCORRELATION_FIX_IMPL[OPUS_ARCHMASK+1])(opus_int32*, opus_int*, const opus_int16*, const opus_int, const opus_int, const opus_int); +# define OVERRIDE_silk_warped_autocorrelation_FIX (1) +# define silk_warped_autocorrelation_FIX(corr, scale, input, warping_Q16, length, order, arch) \ + ((*SILK_WARPED_AUTOCORRELATION_FIX_IMPL[(arch)&OPUS_ARCHMASK])(corr, scale, input, warping_Q16, length, order)) +# elif defined(OPUS_ARM_PRESUME_NEON_INTR) +# define OVERRIDE_silk_warped_autocorrelation_FIX (1) +# define silk_warped_autocorrelation_FIX(corr, scale, input, warping_Q16, length, order, arch) \ + ((void)(arch), silk_warped_autocorrelation_FIX_neon(corr, scale, input, warping_Q16, length, order)) +# endif +# endif + +# endif /* end FIXED_POINT */ + +#endif /* end SILK_WARPED_AUTOCORRELATION_FIX_ARM_H */ diff --git a/libs/libopus/silk/fixed/arm/warped_autocorrelation_FIX_neon_intr.c b/libs/libopus/silk/fixed/arm/warped_autocorrelation_FIX_neon_intr.c new file mode 100644 index 000000000..6f3be025c --- /dev/null +++ b/libs/libopus/silk/fixed/arm/warped_autocorrelation_FIX_neon_intr.c @@ -0,0 +1,265 @@ +/*********************************************************************** +Copyright (c) 2017 Google Inc., Jean-Marc Valin +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: +- Redistributions of source code must retain the above copyright notice, +this list of conditions and the following disclaimer. +- Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. +- Neither the name of Internet Society, IETF or IETF Trust, nor the +names of specific contributors, may be used to endorse or promote +products derived from this software without specific prior written +permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. +***********************************************************************/ + +#ifdef HAVE_CONFIG_H +#include "config.h" +#endif + +#include +#ifdef OPUS_CHECK_ASM +# include +#endif +#include "stack_alloc.h" +#include "main_FIX.h" + +static OPUS_INLINE void calc_corr( const opus_int32 *const input_QS, opus_int64 *const corr_QC, const opus_int offset, const int32x4_t state_QS_s32x4 ) +{ + int64x2_t corr_QC_s64x2[ 2 ], t_s64x2[ 2 ]; + const int32x4_t input_QS_s32x4 = vld1q_s32( input_QS + offset ); + corr_QC_s64x2[ 0 ] = vld1q_s64( corr_QC + offset + 0 ); + corr_QC_s64x2[ 1 ] = vld1q_s64( corr_QC + offset + 2 ); + t_s64x2[ 0 ] = vmull_s32( vget_low_s32( state_QS_s32x4 ), vget_low_s32( input_QS_s32x4 ) ); + t_s64x2[ 1 ] = vmull_s32( vget_high_s32( state_QS_s32x4 ), vget_high_s32( input_QS_s32x4 ) ); + corr_QC_s64x2[ 0 ] = vsraq_n_s64( corr_QC_s64x2[ 0 ], t_s64x2[ 0 ], 2 * QS - QC ); + corr_QC_s64x2[ 1 ] = vsraq_n_s64( corr_QC_s64x2[ 1 ], t_s64x2[ 1 ], 2 * QS - QC ); + vst1q_s64( corr_QC + offset + 0, corr_QC_s64x2[ 0 ] ); + vst1q_s64( corr_QC + offset + 2, corr_QC_s64x2[ 1 ] ); +} + +static OPUS_INLINE int32x4_t calc_state( const int32x4_t state_QS0_s32x4, const int32x4_t state_QS0_1_s32x4, const int32x4_t state_QS1_1_s32x4, const int32x4_t warping_Q16_s32x4 ) +{ + int32x4_t t_s32x4 = vsubq_s32( state_QS0_s32x4, state_QS0_1_s32x4 ); + t_s32x4 = vqdmulhq_s32( t_s32x4, warping_Q16_s32x4 ); + return vaddq_s32( state_QS1_1_s32x4, t_s32x4 ); +} + +void silk_warped_autocorrelation_FIX_neon( + opus_int32 *corr, /* O Result [order + 1] */ + opus_int *scale, /* O Scaling of the correlation vector */ + const opus_int16 *input, /* I Input data to correlate */ + const opus_int warping_Q16, /* I Warping coefficient */ + const opus_int length, /* I Length of input */ + const opus_int order /* I Correlation order (even) */ +) +{ + if( ( MAX_SHAPE_LPC_ORDER > 24 ) || ( order < 6 ) ) { + silk_warped_autocorrelation_FIX_c( corr, scale, input, warping_Q16, length, order ); + } else { + opus_int n, i, lsh; + opus_int64 corr_QC[ MAX_SHAPE_LPC_ORDER + 1 ] = { 0 }; /* In reverse order */ + opus_int64 corr_QC_orderT; + int64x2_t lsh_s64x2; + const opus_int orderT = ( order + 3 ) & ~3; + opus_int64 *corr_QCT; + opus_int32 *input_QS; + VARDECL( opus_int32, input_QST ); + VARDECL( opus_int32, state ); + SAVE_STACK; + + /* Order must be even */ + silk_assert( ( order & 1 ) == 0 ); + silk_assert( 2 * QS - QC >= 0 ); + + /* The additional +4 is to ensure a later vld1q_s32 call does not overflow. */ + /* Strictly, only +3 is needed but +4 simplifies initialization using the 4x32 neon load. */ + ALLOC( input_QST, length + 2 * MAX_SHAPE_LPC_ORDER + 4, opus_int32 ); + + input_QS = input_QST; + /* input_QS has zero paddings in the beginning and end. 
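   The six 4-lane stores just below write 24 zeros ahead of the samples, and the seven stores after the sample loop write another 28 zeros behind them, which together with the samples fills the length + 2 * MAX_SHAPE_LPC_ORDER + 4 allocation above.  The trailing zeros are what allow the correlation loops further down to run their extra epilogue iterations without disturbing corr_QC.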
*/ + vst1q_s32( input_QS, vdupq_n_s32( 0 ) ); + input_QS += 4; + vst1q_s32( input_QS, vdupq_n_s32( 0 ) ); + input_QS += 4; + vst1q_s32( input_QS, vdupq_n_s32( 0 ) ); + input_QS += 4; + vst1q_s32( input_QS, vdupq_n_s32( 0 ) ); + input_QS += 4; + vst1q_s32( input_QS, vdupq_n_s32( 0 ) ); + input_QS += 4; + vst1q_s32( input_QS, vdupq_n_s32( 0 ) ); + input_QS += 4; + + /* Loop over samples */ + for( n = 0; n < length - 7; n += 8, input_QS += 8 ) { + const int16x8_t t0_s16x4 = vld1q_s16( input + n ); + vst1q_s32( input_QS + 0, vshll_n_s16( vget_low_s16( t0_s16x4 ), QS ) ); + vst1q_s32( input_QS + 4, vshll_n_s16( vget_high_s16( t0_s16x4 ), QS ) ); + } + for( ; n < length; n++, input_QS++ ) { + input_QS[ 0 ] = silk_LSHIFT32( (opus_int32)input[ n ], QS ); + } + vst1q_s32( input_QS, vdupq_n_s32( 0 ) ); + input_QS += 4; + vst1q_s32( input_QS, vdupq_n_s32( 0 ) ); + input_QS += 4; + vst1q_s32( input_QS, vdupq_n_s32( 0 ) ); + input_QS += 4; + vst1q_s32( input_QS, vdupq_n_s32( 0 ) ); + input_QS += 4; + vst1q_s32( input_QS, vdupq_n_s32( 0 ) ); + input_QS += 4; + vst1q_s32( input_QS, vdupq_n_s32( 0 ) ); + input_QS += 4; + vst1q_s32( input_QS, vdupq_n_s32( 0 ) ); + input_QS = input_QST + MAX_SHAPE_LPC_ORDER - orderT; + + /* The following loop runs ( length + order ) times, with ( order ) extra epilogues. */ + /* The zero paddings in input_QS guarantee corr_QC's correctness even with the extra epilogues. */ + /* The values of state_QS will be polluted by the extra epilogues, however they are temporary values. */ + + /* Keep the C code here to help understand the intrinsics optimization. */ + /* + { + opus_int32 state_QS[ 2 ][ MAX_SHAPE_LPC_ORDER + 1 ] = { 0 }; + opus_int32 *state_QST[ 3 ]; + state_QST[ 0 ] = state_QS[ 0 ]; + state_QST[ 1 ] = state_QS[ 1 ]; + for( n = 0; n < length + order; n++, input_QS++ ) { + state_QST[ 0 ][ orderT ] = input_QS[ orderT ]; + for( i = 0; i < orderT; i++ ) { + corr_QC[ i ] += silk_RSHIFT64( silk_SMULL( state_QST[ 0 ][ i ], input_QS[ i ] ), 2 * QS - QC ); + state_QST[ 1 ][ i ] = silk_SMLAWB( state_QST[ 1 ][ i + 1 ], state_QST[ 0 ][ i ] - state_QST[ 0 ][ i + 1 ], warping_Q16 ); + } + state_QST[ 2 ] = state_QST[ 0 ]; + state_QST[ 0 ] = state_QST[ 1 ]; + state_QST[ 1 ] = state_QST[ 2 ]; + } + } + */ + + { + const int32x4_t warping_Q16_s32x4 = vdupq_n_s32( warping_Q16 << 15 ); + const opus_int32 *in = input_QS + orderT; + opus_int o = orderT; + int32x4_t state_QS_s32x4[ 3 ][ 2 ]; + + /* The additional +4 is to ensure a later vld1q_s32 call does not overflow. */ + ALLOC( state, length + order + 4, opus_int32 ); + state_QS_s32x4[ 2 ][ 1 ] = vdupq_n_s32( 0 ); + + /* Calculate 8 taps of all inputs in each loop. 
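   Each pass of the outer do-loop handles 8 correlation taps (offsets o - 8 to o - 1): the inner loop walks all length + order samples, accumulates into corr_QC through calc_corr(), and streams the warped filter state out to the scratch buffer state one lane per iteration; that buffer then becomes the input (in = state) for the next 8-tap pass, and a final 4-tap pass below handles the remaining taps once o has been reduced to 4.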
*/ + do { + state_QS_s32x4[ 0 ][ 0 ] = state_QS_s32x4[ 0 ][ 1 ] = + state_QS_s32x4[ 1 ][ 0 ] = state_QS_s32x4[ 1 ][ 1 ] = vdupq_n_s32( 0 ); + n = 0; + do { + calc_corr( input_QS + n, corr_QC, o - 8, state_QS_s32x4[ 0 ][ 0 ] ); + calc_corr( input_QS + n, corr_QC, o - 4, state_QS_s32x4[ 0 ][ 1 ] ); + state_QS_s32x4[ 2 ][ 1 ] = vld1q_s32( in + n ); + vst1q_lane_s32( state + n, state_QS_s32x4[ 0 ][ 0 ], 0 ); + state_QS_s32x4[ 2 ][ 0 ] = vextq_s32( state_QS_s32x4[ 0 ][ 0 ], state_QS_s32x4[ 0 ][ 1 ], 1 ); + state_QS_s32x4[ 2 ][ 1 ] = vextq_s32( state_QS_s32x4[ 0 ][ 1 ], state_QS_s32x4[ 2 ][ 1 ], 1 ); + state_QS_s32x4[ 0 ][ 0 ] = calc_state( state_QS_s32x4[ 0 ][ 0 ], state_QS_s32x4[ 2 ][ 0 ], state_QS_s32x4[ 1 ][ 0 ], warping_Q16_s32x4 ); + state_QS_s32x4[ 0 ][ 1 ] = calc_state( state_QS_s32x4[ 0 ][ 1 ], state_QS_s32x4[ 2 ][ 1 ], state_QS_s32x4[ 1 ][ 1 ], warping_Q16_s32x4 ); + state_QS_s32x4[ 1 ][ 0 ] = state_QS_s32x4[ 2 ][ 0 ]; + state_QS_s32x4[ 1 ][ 1 ] = state_QS_s32x4[ 2 ][ 1 ]; + } while( ++n < ( length + order ) ); + in = state; + o -= 8; + } while( o > 4 ); + + if( o ) { + /* Calculate the last 4 taps of all inputs. */ + opus_int32 *stateT = state; + silk_assert( o == 4 ); + state_QS_s32x4[ 0 ][ 0 ] = state_QS_s32x4[ 1 ][ 0 ] = vdupq_n_s32( 0 ); + n = length + order; + do { + calc_corr( input_QS, corr_QC, 0, state_QS_s32x4[ 0 ][ 0 ] ); + state_QS_s32x4[ 2 ][ 0 ] = vld1q_s32( stateT ); + vst1q_lane_s32( stateT, state_QS_s32x4[ 0 ][ 0 ], 0 ); + state_QS_s32x4[ 2 ][ 0 ] = vextq_s32( state_QS_s32x4[ 0 ][ 0 ], state_QS_s32x4[ 2 ][ 0 ], 1 ); + state_QS_s32x4[ 0 ][ 0 ] = calc_state( state_QS_s32x4[ 0 ][ 0 ], state_QS_s32x4[ 2 ][ 0 ], state_QS_s32x4[ 1 ][ 0 ], warping_Q16_s32x4 ); + state_QS_s32x4[ 1 ][ 0 ] = state_QS_s32x4[ 2 ][ 0 ]; + input_QS++; + stateT++; + } while( --n ); + } + } + + { + const opus_int16 *inputT = input; + int32x4_t t_s32x4; + int64x1_t t_s64x1; + int64x2_t t_s64x2 = vdupq_n_s64( 0 ); + for( n = 0; n <= length - 8; n += 8 ) { + int16x8_t input_s16x8 = vld1q_s16( inputT ); + t_s32x4 = vmull_s16( vget_low_s16( input_s16x8 ), vget_low_s16( input_s16x8 ) ); + t_s32x4 = vmlal_s16( t_s32x4, vget_high_s16( input_s16x8 ), vget_high_s16( input_s16x8 ) ); + t_s64x2 = vaddw_s32( t_s64x2, vget_low_s32( t_s32x4 ) ); + t_s64x2 = vaddw_s32( t_s64x2, vget_high_s32( t_s32x4 ) ); + inputT += 8; + } + t_s64x1 = vadd_s64( vget_low_s64( t_s64x2 ), vget_high_s64( t_s64x2 ) ); + corr_QC_orderT = vget_lane_s64( t_s64x1, 0 ); + for( ; n < length; n++ ) { + corr_QC_orderT += silk_SMULL( input[ n ], input[ n ] ); + } + corr_QC_orderT = silk_LSHIFT64( corr_QC_orderT, QC ); + corr_QC[ orderT ] = corr_QC_orderT; + } + + corr_QCT = corr_QC + orderT - order; + lsh = silk_CLZ64( corr_QC_orderT ) - 35; + lsh = silk_LIMIT( lsh, -12 - QC, 30 - QC ); + *scale = -( QC + lsh ); + silk_assert( *scale >= -30 && *scale <= 12 ); + lsh_s64x2 = vdupq_n_s64( lsh ); + for( i = 0; i <= order - 3; i += 4 ) { + int32x4_t corr_s32x4; + int64x2_t corr_QC0_s64x2, corr_QC1_s64x2; + corr_QC0_s64x2 = vld1q_s64( corr_QCT + i ); + corr_QC1_s64x2 = vld1q_s64( corr_QCT + i + 2 ); + corr_QC0_s64x2 = vshlq_s64( corr_QC0_s64x2, lsh_s64x2 ); + corr_QC1_s64x2 = vshlq_s64( corr_QC1_s64x2, lsh_s64x2 ); + corr_s32x4 = vcombine_s32( vmovn_s64( corr_QC1_s64x2 ), vmovn_s64( corr_QC0_s64x2 ) ); + corr_s32x4 = vrev64q_s32( corr_s32x4 ); + vst1q_s32( corr + order - i - 3, corr_s32x4 ); + } + if( lsh >= 0 ) { + for( ; i < order + 1; i++ ) { + corr[ order - i ] = (opus_int32)silk_CHECK_FIT32( silk_LSHIFT64( corr_QCT[ i ], lsh ) ); + } + } else { + 
for( ; i < order + 1; i++ ) { + corr[ order - i ] = (opus_int32)silk_CHECK_FIT32( silk_RSHIFT64( corr_QCT[ i ], -lsh ) ); + } + } + silk_assert( corr_QCT[ order ] >= 0 ); /* If breaking, decrease QC*/ + RESTORE_STACK; + } + +#ifdef OPUS_CHECK_ASM + { + opus_int32 corr_c[ MAX_SHAPE_LPC_ORDER + 1 ]; + opus_int scale_c; + silk_warped_autocorrelation_FIX_c( corr_c, &scale_c, input, warping_Q16, length, order ); + silk_assert( !memcmp( corr_c, corr, sizeof( corr_c[ 0 ] ) * ( order + 1 ) ) ); + silk_assert( scale_c == *scale ); + } +#endif +} diff --git a/libs/libopus/silk/fixed/burg_modified_FIX.c b/libs/libopus/silk/fixed/burg_modified_FIX.c index 17d0e0993..185a12b17 100644 --- a/libs/libopus/silk/fixed/burg_modified_FIX.c +++ b/libs/libopus/silk/fixed/burg_modified_FIX.c @@ -37,7 +37,7 @@ POSSIBILITY OF SUCH DAMAGE. #define MAX_FRAME_SIZE 384 /* subfr_length * nb_subfr = ( 0.005 * 16000 + 16 ) * 4 = 384 */ #define QA 25 -#define N_BITS_HEAD_ROOM 2 +#define N_BITS_HEAD_ROOM 3 #define MIN_RSHIFTS -16 #define MAX_RSHIFTS (32 - QA) @@ -65,10 +65,10 @@ void silk_burg_modified_c( opus_int32 xcorr[ SILK_MAX_ORDER_LPC ]; opus_int64 C0_64; - silk_assert( subfr_length * nb_subfr <= MAX_FRAME_SIZE ); + celt_assert( subfr_length * nb_subfr <= MAX_FRAME_SIZE ); /* Compute autocorrelations, added over subframes */ - C0_64 = silk_inner_prod16_aligned_64( x, x, subfr_length*nb_subfr, arch ); + C0_64 = silk_inner_prod16( x, x, subfr_length*nb_subfr, arch ); lz = silk_CLZ64(C0_64); rshifts = 32 + 1 + N_BITS_HEAD_ROOM - lz; if (rshifts > MAX_RSHIFTS) rshifts = MAX_RSHIFTS; @@ -87,7 +87,7 @@ void silk_burg_modified_c( x_ptr = x + s * subfr_length; for( n = 1; n < D + 1; n++ ) { C_first_row[ n - 1 ] += (opus_int32)silk_RSHIFT64( - silk_inner_prod16_aligned_64( x_ptr, x_ptr + n, subfr_length - n, arch ), rshifts ); + silk_inner_prod16( x_ptr, x_ptr + n, subfr_length - n, arch ), rshifts ); } } } else { @@ -150,7 +150,7 @@ void silk_burg_modified_c( C_first_row[ k ] = silk_MLA( C_first_row[ k ], x1, x_ptr[ n - k - 1 ] ); /* Q( -rshifts ) */ C_last_row[ k ] = silk_MLA( C_last_row[ k ], x2, x_ptr[ subfr_length - n + k ] ); /* Q( -rshifts ) */ Atmp1 = silk_RSHIFT_ROUND( Af_QA[ k ], QA - 17 ); /* Q17 */ - /* We sometimes have get overflows in the multiplications (even beyond +/- 2^32), + /* We sometimes get overflows in the multiplications (even beyond +/- 2^32), but they cancel each other and the real result seems to always fit in a 32-bit signed integer. This was determined experimentally, not theoretically (unfortunately). 
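   The _ovflw macros (silk_MLA_ovflw(), silk_ADD32_ovflw(), ...) appear to be defined via unsigned (opus_uint32) arithmetic precisely so that this wrap-around is well defined: the running sum stays exact modulo 2^32.  As a small illustration, silk_ADD32_ovflw( 0x7FFFFFFF, 10 ) wraps to a negative value, but subtracting 10 again through silk_SUB32_ovflw() recovers 0x7FFFFFFF, so an overflow that cancels before the end does no harm as long as the final sum fits in 32 bits.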
*/ tmp1 = silk_MLA_ovflw( tmp1, x_ptr[ n - k - 1 ], Atmp1 ); /* Q17 */ @@ -253,7 +253,7 @@ void silk_burg_modified_c( if( rshifts > 0 ) { for( s = 0; s < nb_subfr; s++ ) { x_ptr = x + s * subfr_length; - C0 -= (opus_int32)silk_RSHIFT64( silk_inner_prod16_aligned_64( x_ptr, x_ptr, D, arch ), rshifts ); + C0 -= (opus_int32)silk_RSHIFT64( silk_inner_prod16( x_ptr, x_ptr, D, arch ), rshifts ); } } else { for( s = 0; s < nb_subfr; s++ ) { diff --git a/libs/libopus/silk/fixed/corrMatrix_FIX.c b/libs/libopus/silk/fixed/corrMatrix_FIX.c index c1d437c78..1b4a29c23 100644 --- a/libs/libopus/silk/fixed/corrMatrix_FIX.c +++ b/libs/libopus/silk/fixed/corrMatrix_FIX.c @@ -58,7 +58,7 @@ void silk_corrVector_FIX( for( lag = 0; lag < order; lag++ ) { inner_prod = 0; for( i = 0; i < L; i++ ) { - inner_prod += silk_RSHIFT32( silk_SMULBB( ptr1[ i ], ptr2[i] ), rshifts ); + inner_prod = silk_ADD_RSHIFT32( inner_prod, silk_SMULBB( ptr1[ i ], ptr2[i] ), rshifts ); } Xt[ lag ] = inner_prod; /* X[:,lag]'*t */ ptr1--; /* Go to next column of X */ @@ -77,61 +77,54 @@ void silk_corrMatrix_FIX( const opus_int16 *x, /* I x vector [L + order - 1] used to form data matrix X */ const opus_int L, /* I Length of vectors */ const opus_int order, /* I Max lag for correlation */ - const opus_int head_room, /* I Desired headroom */ opus_int32 *XX, /* O Pointer to X'*X correlation matrix [ order x order ] */ - opus_int *rshifts, /* I/O Right shifts of correlations */ + opus_int32 *nrg, /* O Energy of x vector */ + opus_int *rshifts, /* O Right shifts of correlations and energy */ int arch /* I Run-time architecture */ ) { - opus_int i, j, lag, rshifts_local, head_room_rshifts; + opus_int i, j, lag; opus_int32 energy; const opus_int16 *ptr1, *ptr2; /* Calculate energy to find shift used to fit in 32 bits */ - silk_sum_sqr_shift( &energy, &rshifts_local, x, L + order - 1 ); - /* Add shifts to get the desired head room */ - head_room_rshifts = silk_max( head_room - silk_CLZ32( energy ), 0 ); - - energy = silk_RSHIFT32( energy, head_room_rshifts ); - rshifts_local += head_room_rshifts; + silk_sum_sqr_shift( nrg, rshifts, x, L + order - 1 ); + energy = *nrg; /* Calculate energy of first column (0) of X: X[:,0]'*X[:,0] */ /* Remove contribution of first order - 1 samples */ for( i = 0; i < order - 1; i++ ) { - energy -= silk_RSHIFT32( silk_SMULBB( x[ i ], x[ i ] ), rshifts_local ); - } - if( rshifts_local < *rshifts ) { - /* Adjust energy */ - energy = silk_RSHIFT32( energy, *rshifts - rshifts_local ); - rshifts_local = *rshifts; + energy -= silk_RSHIFT32( silk_SMULBB( x[ i ], x[ i ] ), *rshifts ); } /* Calculate energy of remaining columns of X: X[:,j]'*X[:,j] */ /* Fill out the diagonal of the correlation matrix */ matrix_ptr( XX, 0, 0, order ) = energy; + silk_assert( energy >= 0 ); ptr1 = &x[ order - 1 ]; /* First sample of column 0 of X */ for( j = 1; j < order; j++ ) { - energy = silk_SUB32( energy, silk_RSHIFT32( silk_SMULBB( ptr1[ L - j ], ptr1[ L - j ] ), rshifts_local ) ); - energy = silk_ADD32( energy, silk_RSHIFT32( silk_SMULBB( ptr1[ -j ], ptr1[ -j ] ), rshifts_local ) ); + energy = silk_SUB32( energy, silk_RSHIFT32( silk_SMULBB( ptr1[ L - j ], ptr1[ L - j ] ), *rshifts ) ); + energy = silk_ADD32( energy, silk_RSHIFT32( silk_SMULBB( ptr1[ -j ], ptr1[ -j ] ), *rshifts ) ); matrix_ptr( XX, j, j, order ) = energy; + silk_assert( energy >= 0 ); } ptr2 = &x[ order - 2 ]; /* First sample of column 1 of X */ /* Calculate the remaining elements of the correlation matrix */ - if( rshifts_local > 0 ) { + if( *rshifts > 0 ) { /* 
Right shifting used */ for( lag = 1; lag < order; lag++ ) { /* Inner product of column 0 and column lag: X[:,0]'*X[:,lag] */ energy = 0; for( i = 0; i < L; i++ ) { - energy += silk_RSHIFT32( silk_SMULBB( ptr1[ i ], ptr2[i] ), rshifts_local ); + energy += silk_RSHIFT32( silk_SMULBB( ptr1[ i ], ptr2[i] ), *rshifts ); } /* Calculate remaining off diagonal: X[:,j]'*X[:,j + lag] */ matrix_ptr( XX, lag, 0, order ) = energy; matrix_ptr( XX, 0, lag, order ) = energy; for( j = 1; j < ( order - lag ); j++ ) { - energy = silk_SUB32( energy, silk_RSHIFT32( silk_SMULBB( ptr1[ L - j ], ptr2[ L - j ] ), rshifts_local ) ); - energy = silk_ADD32( energy, silk_RSHIFT32( silk_SMULBB( ptr1[ -j ], ptr2[ -j ] ), rshifts_local ) ); + energy = silk_SUB32( energy, silk_RSHIFT32( silk_SMULBB( ptr1[ L - j ], ptr2[ L - j ] ), *rshifts ) ); + energy = silk_ADD32( energy, silk_RSHIFT32( silk_SMULBB( ptr1[ -j ], ptr2[ -j ] ), *rshifts ) ); matrix_ptr( XX, lag + j, j, order ) = energy; matrix_ptr( XX, j, lag + j, order ) = energy; } @@ -153,6 +146,5 @@ void silk_corrMatrix_FIX( ptr2--;/* Update pointer to first sample of next column (lag) in X */ } } - *rshifts = rshifts_local; } diff --git a/libs/libopus/silk/fixed/encode_frame_FIX.c b/libs/libopus/silk/fixed/encode_frame_FIX.c index 5ef44b03f..a02bf87db 100644 --- a/libs/libopus/silk/fixed/encode_frame_FIX.c +++ b/libs/libopus/silk/fixed/encode_frame_FIX.c @@ -29,6 +29,7 @@ POSSIBILITY OF SUCH DAMAGE. #include "config.h" #endif +#include #include "main_FIX.h" #include "stack_alloc.h" #include "tuning_parameters.h" @@ -37,26 +38,33 @@ POSSIBILITY OF SUCH DAMAGE. static OPUS_INLINE void silk_LBRR_encode_FIX( silk_encoder_state_FIX *psEnc, /* I/O Pointer to Silk FIX encoder state */ silk_encoder_control_FIX *psEncCtrl, /* I/O Pointer to Silk FIX encoder control struct */ - const opus_int32 xfw_Q3[], /* I Input signal */ + const opus_int16 x16[], /* I Input signal */ opus_int condCoding /* I The type of conditional coding used so far for this frame */ ); void silk_encode_do_VAD_FIX( - silk_encoder_state_FIX *psEnc /* I/O Pointer to Silk FIX encoder state */ + silk_encoder_state_FIX *psEnc, /* I/O Pointer to Silk FIX encoder state */ + opus_int activity /* I Decision of Opus voice activity detector */ ) { + const opus_int activity_threshold = SILK_FIX_CONST( SPEECH_ACTIVITY_DTX_THRES, 8 ); + /****************************/ /* Voice Activity Detection */ /****************************/ silk_VAD_GetSA_Q8( &psEnc->sCmn, psEnc->sCmn.inputBuf + 1, psEnc->sCmn.arch ); + /* If Opus VAD is inactive and Silk VAD is active: lower Silk VAD to just under the threshold */ + if( activity == VAD_NO_ACTIVITY && psEnc->sCmn.speech_activity_Q8 >= activity_threshold ) { + psEnc->sCmn.speech_activity_Q8 = activity_threshold - 1; + } /**************************************************/ /* Convert speech activity into VAD and DTX flags */ /**************************************************/ - if( psEnc->sCmn.speech_activity_Q8 < SILK_FIX_CONST( SPEECH_ACTIVITY_DTX_THRES, 8 ) ) { + if( psEnc->sCmn.speech_activity_Q8 < activity_threshold ) { psEnc->sCmn.indices.signalType = TYPE_NO_VOICE_ACTIVITY; psEnc->sCmn.noSpeechCounter++; - if( psEnc->sCmn.noSpeechCounter < NB_SPEECH_FRAMES_BEFORE_DTX ) { + if( psEnc->sCmn.noSpeechCounter <= NB_SPEECH_FRAMES_BEFORE_DTX ) { psEnc->sCmn.inDTX = 0; } else if( psEnc->sCmn.noSpeechCounter > MAX_CONSECUTIVE_DTX + NB_SPEECH_FRAMES_BEFORE_DTX ) { psEnc->sCmn.noSpeechCounter = NB_SPEECH_FRAMES_BEFORE_DTX; @@ -94,6 +102,9 @@ opus_int silk_encode_frame_FIX( opus_int16 
ec_prevLagIndex_copy; opus_int ec_prevSignalType_copy; opus_int8 LastGainIndex_copy2; + opus_int gain_lock[ MAX_NB_SUBFR ] = {0}; + opus_int16 best_gain_mult[ MAX_NB_SUBFR ]; + opus_int best_sum[ MAX_NB_SUBFR ]; SAVE_STACK; /* This is totally unnecessary but many compilers (including gcc) are too dumb to realise it */ @@ -118,7 +129,6 @@ opus_int silk_encode_frame_FIX( silk_memcpy( x_frame + LA_SHAPE_MS * psEnc->sCmn.fs_kHz, psEnc->sCmn.inputBuf + 1, psEnc->sCmn.frame_length * sizeof( opus_int16 ) ); if( !psEnc->sCmn.prefillFlag ) { - VARDECL( opus_int32, xfw_Q3 ); VARDECL( opus_int16, res_pitch ); VARDECL( opus_uint8, ec_buf_copy ); opus_int16 *res_pitch_frame; @@ -132,7 +142,7 @@ opus_int silk_encode_frame_FIX( /*****************************************/ /* Find pitch lags, initial LPC analysis */ /*****************************************/ - silk_find_pitch_lags_FIX( psEnc, &sEncCtrl, res_pitch, x_frame, psEnc->sCmn.arch ); + silk_find_pitch_lags_FIX( psEnc, &sEncCtrl, res_pitch, x_frame - psEnc->sCmn.ltp_mem_length, psEnc->sCmn.arch ); /************************/ /* Noise shape analysis */ @@ -142,23 +152,17 @@ opus_int silk_encode_frame_FIX( /***************************************************/ /* Find linear prediction coefficients (LPC + LTP) */ /***************************************************/ - silk_find_pred_coefs_FIX( psEnc, &sEncCtrl, res_pitch, x_frame, condCoding ); + silk_find_pred_coefs_FIX( psEnc, &sEncCtrl, res_pitch_frame, x_frame, condCoding ); /****************************************/ /* Process gains */ /****************************************/ silk_process_gains_FIX( psEnc, &sEncCtrl, condCoding ); - /*****************************************/ - /* Prefiltering for noise shaper */ - /*****************************************/ - ALLOC( xfw_Q3, psEnc->sCmn.frame_length, opus_int32 ); - silk_prefilter_FIX( psEnc, &sEncCtrl, xfw_Q3, x_frame ); - /****************************************/ /* Low Bitrate Redundant Encoding */ /****************************************/ - silk_LBRR_encode_FIX( psEnc, &sEncCtrl, xfw_Q3, condCoding ); + silk_LBRR_encode_FIX( psEnc, &sEncCtrl, x_frame, condCoding ); /* Loop over quantizer and entropy coding to control bitrate */ maxIter = 6; @@ -194,17 +198,21 @@ opus_int silk_encode_frame_FIX( /* Noise shaping quantization */ /*****************************************/ if( psEnc->sCmn.nStatesDelayedDecision > 1 || psEnc->sCmn.warping_Q16 > 0 ) { - silk_NSQ_del_dec( &psEnc->sCmn, &psEnc->sCmn.sNSQ, &psEnc->sCmn.indices, xfw_Q3, psEnc->sCmn.pulses, - sEncCtrl.PredCoef_Q12[ 0 ], sEncCtrl.LTPCoef_Q14, sEncCtrl.AR2_Q13, sEncCtrl.HarmShapeGain_Q14, + silk_NSQ_del_dec( &psEnc->sCmn, &psEnc->sCmn.sNSQ, &psEnc->sCmn.indices, x_frame, psEnc->sCmn.pulses, + sEncCtrl.PredCoef_Q12[ 0 ], sEncCtrl.LTPCoef_Q14, sEncCtrl.AR_Q13, sEncCtrl.HarmShapeGain_Q14, sEncCtrl.Tilt_Q14, sEncCtrl.LF_shp_Q14, sEncCtrl.Gains_Q16, sEncCtrl.pitchL, sEncCtrl.Lambda_Q10, sEncCtrl.LTP_scale_Q14, psEnc->sCmn.arch ); } else { - silk_NSQ( &psEnc->sCmn, &psEnc->sCmn.sNSQ, &psEnc->sCmn.indices, xfw_Q3, psEnc->sCmn.pulses, - sEncCtrl.PredCoef_Q12[ 0 ], sEncCtrl.LTPCoef_Q14, sEncCtrl.AR2_Q13, sEncCtrl.HarmShapeGain_Q14, + silk_NSQ( &psEnc->sCmn, &psEnc->sCmn.sNSQ, &psEnc->sCmn.indices, x_frame, psEnc->sCmn.pulses, + sEncCtrl.PredCoef_Q12[ 0 ], sEncCtrl.LTPCoef_Q14, sEncCtrl.AR_Q13, sEncCtrl.HarmShapeGain_Q14, sEncCtrl.Tilt_Q14, sEncCtrl.LF_shp_Q14, sEncCtrl.Gains_Q16, sEncCtrl.pitchL, sEncCtrl.Lambda_Q10, sEncCtrl.LTP_scale_Q14, psEnc->sCmn.arch); } + if ( iter == maxIter && 
!found_lower ) { + silk_memcpy( &sRangeEnc_copy2, psRangeEnc, sizeof( ec_enc ) ); + } + /****************************************/ /* Encode Parameters */ /****************************************/ @@ -218,6 +226,33 @@ opus_int silk_encode_frame_FIX( nBits = ec_tell( psRangeEnc ); + /* If we still bust after the last iteration, do some damage control. */ + if ( iter == maxIter && !found_lower && nBits > maxBits ) { + silk_memcpy( psRangeEnc, &sRangeEnc_copy2, sizeof( ec_enc ) ); + + /* Keep gains the same as the last frame. */ + psEnc->sShape.LastGainIndex = sEncCtrl.lastGainIndexPrev; + for ( i = 0; i < psEnc->sCmn.nb_subfr; i++ ) { + psEnc->sCmn.indices.GainsIndices[ i ] = 4; + } + if (condCoding != CODE_CONDITIONALLY) { + psEnc->sCmn.indices.GainsIndices[ 0 ] = sEncCtrl.lastGainIndexPrev; + } + psEnc->sCmn.ec_prevLagIndex = ec_prevLagIndex_copy; + psEnc->sCmn.ec_prevSignalType = ec_prevSignalType_copy; + /* Clear all pulses. */ + for ( i = 0; i < psEnc->sCmn.frame_length; i++ ) { + psEnc->sCmn.pulses[ i ] = 0; + } + + silk_encode_indices( &psEnc->sCmn, psRangeEnc, psEnc->sCmn.nFramesEncoded, 0, condCoding ); + + silk_encode_pulses( psRangeEnc, psEnc->sCmn.indices.signalType, psEnc->sCmn.indices.quantOffsetType, + psEnc->sCmn.pulses, psEnc->sCmn.frame_length ); + + nBits = ec_tell( psRangeEnc ); + } + if( useCBR == 0 && iter == 0 && nBits <= maxBits ) { break; } @@ -227,7 +262,7 @@ opus_int silk_encode_frame_FIX( if( found_lower && ( gainsID == gainsID_lower || nBits > maxBits ) ) { /* Restore output state from earlier iteration that did meet the bitrate budget */ silk_memcpy( psRangeEnc, &sRangeEnc_copy2, sizeof( ec_enc ) ); - silk_assert( sRangeEnc_copy2.offs <= 1275 ); + celt_assert( sRangeEnc_copy2.offs <= 1275 ); silk_memcpy( psRangeEnc->buf, ec_buf_copy, sRangeEnc_copy2.offs ); silk_memcpy( &psEnc->sCmn.sNSQ, &sNSQ_copy2, sizeof( silk_nsq_state ) ); psEnc->sShape.LastGainIndex = LastGainIndex_copy2; @@ -255,7 +290,7 @@ opus_int silk_encode_frame_FIX( gainsID_lower = gainsID; /* Copy part of the output state */ silk_memcpy( &sRangeEnc_copy2, psRangeEnc, sizeof( ec_enc ) ); - silk_assert( psRangeEnc->offs <= 1275 ); + celt_assert( psRangeEnc->offs <= 1275 ); silk_memcpy( ec_buf_copy, psRangeEnc->buf, psRangeEnc->offs ); silk_memcpy( &sNSQ_copy2, &psEnc->sCmn.sNSQ, sizeof( silk_nsq_state ) ); LastGainIndex_copy2 = psEnc->sShape.LastGainIndex; @@ -265,15 +300,35 @@ opus_int silk_encode_frame_FIX( break; } + if ( !found_lower && nBits > maxBits ) { + int j; + for ( i = 0; i < psEnc->sCmn.nb_subfr; i++ ) { + int sum=0; + for ( j = i*psEnc->sCmn.subfr_length; j < (i+1)*psEnc->sCmn.subfr_length; j++ ) { + sum += abs( psEnc->sCmn.pulses[j] ); + } + if ( iter == 0 || (sum < best_sum[i] && !gain_lock[i]) ) { + best_sum[i] = sum; + best_gain_mult[i] = gainMult_Q8; + } else { + gain_lock[i] = 1; + } + } + } if( ( found_lower & found_upper ) == 0 ) { /* Adjust gain according to high-rate rate/distortion curve */ - opus_int32 gain_factor_Q16; - gain_factor_Q16 = silk_log2lin( silk_LSHIFT( nBits - maxBits, 7 ) / psEnc->sCmn.frame_length + SILK_FIX_CONST( 16, 7 ) ); - gain_factor_Q16 = silk_min_32( gain_factor_Q16, SILK_FIX_CONST( 2, 16 ) ); if( nBits > maxBits ) { - gain_factor_Q16 = silk_max_32( gain_factor_Q16, SILK_FIX_CONST( 1.3, 16 ) ); + if (gainMult_Q8 < 16384) { + gainMult_Q8 *= 2; + } else { + gainMult_Q8 = 32767; + } + } else { + opus_int32 gain_factor_Q16; + gain_factor_Q16 = silk_log2lin( silk_LSHIFT( nBits - maxBits, 7 ) / psEnc->sCmn.frame_length + SILK_FIX_CONST( 16, 7 ) ); + 
gainMult_Q8 = silk_SMULWB( gain_factor_Q16, gainMult_Q8 ); } - gainMult_Q8 = silk_SMULWB( gain_factor_Q16, gainMult_Q8 ); + } else { /* Adjust gain by interpolating */ gainMult_Q8 = gainMult_lower + silk_DIV32_16( silk_MUL( gainMult_upper - gainMult_lower, maxBits - nBits_lower ), nBits_upper - nBits_lower ); @@ -287,7 +342,13 @@ opus_int silk_encode_frame_FIX( } for( i = 0; i < psEnc->sCmn.nb_subfr; i++ ) { - sEncCtrl.Gains_Q16[ i ] = silk_LSHIFT_SAT32( silk_SMULWB( sEncCtrl.GainsUnq_Q16[ i ], gainMult_Q8 ), 8 ); + opus_int16 tmp; + if ( gain_lock[i] ) { + tmp = best_gain_mult[i]; + } else { + tmp = gainMult_Q8; + } + sEncCtrl.Gains_Q16[ i ] = silk_LSHIFT_SAT32( silk_SMULWB( sEncCtrl.GainsUnq_Q16[ i ], tmp ), 8 ); } /* Quantize gains */ @@ -331,7 +392,7 @@ opus_int silk_encode_frame_FIX( static OPUS_INLINE void silk_LBRR_encode_FIX( silk_encoder_state_FIX *psEnc, /* I/O Pointer to Silk FIX encoder state */ silk_encoder_control_FIX *psEncCtrl, /* I/O Pointer to Silk FIX encoder control struct */ - const opus_int32 xfw_Q3[], /* I Input signal */ + const opus_int16 x16[], /* I Input signal */ opus_int condCoding /* I The type of conditional coding used so far for this frame */ ) { @@ -370,14 +431,14 @@ static OPUS_INLINE void silk_LBRR_encode_FIX( /* Noise shaping quantization */ /*****************************************/ if( psEnc->sCmn.nStatesDelayedDecision > 1 || psEnc->sCmn.warping_Q16 > 0 ) { - silk_NSQ_del_dec( &psEnc->sCmn, &sNSQ_LBRR, psIndices_LBRR, xfw_Q3, + silk_NSQ_del_dec( &psEnc->sCmn, &sNSQ_LBRR, psIndices_LBRR, x16, psEnc->sCmn.pulses_LBRR[ psEnc->sCmn.nFramesEncoded ], psEncCtrl->PredCoef_Q12[ 0 ], psEncCtrl->LTPCoef_Q14, - psEncCtrl->AR2_Q13, psEncCtrl->HarmShapeGain_Q14, psEncCtrl->Tilt_Q14, psEncCtrl->LF_shp_Q14, + psEncCtrl->AR_Q13, psEncCtrl->HarmShapeGain_Q14, psEncCtrl->Tilt_Q14, psEncCtrl->LF_shp_Q14, psEncCtrl->Gains_Q16, psEncCtrl->pitchL, psEncCtrl->Lambda_Q10, psEncCtrl->LTP_scale_Q14, psEnc->sCmn.arch ); } else { - silk_NSQ( &psEnc->sCmn, &sNSQ_LBRR, psIndices_LBRR, xfw_Q3, + silk_NSQ( &psEnc->sCmn, &sNSQ_LBRR, psIndices_LBRR, x16, psEnc->sCmn.pulses_LBRR[ psEnc->sCmn.nFramesEncoded ], psEncCtrl->PredCoef_Q12[ 0 ], psEncCtrl->LTPCoef_Q14, - psEncCtrl->AR2_Q13, psEncCtrl->HarmShapeGain_Q14, psEncCtrl->Tilt_Q14, psEncCtrl->LF_shp_Q14, + psEncCtrl->AR_Q13, psEncCtrl->HarmShapeGain_Q14, psEncCtrl->Tilt_Q14, psEncCtrl->LF_shp_Q14, psEncCtrl->Gains_Q16, psEncCtrl->pitchL, psEncCtrl->Lambda_Q10, psEncCtrl->LTP_scale_Q14, psEnc->sCmn.arch ); } diff --git a/libs/libopus/silk/fixed/find_LPC_FIX.c b/libs/libopus/silk/fixed/find_LPC_FIX.c index e11cdc86e..c762a0f2a 100644 --- a/libs/libopus/silk/fixed/find_LPC_FIX.c +++ b/libs/libopus/silk/fixed/find_LPC_FIX.c @@ -92,7 +92,7 @@ void silk_find_LPC_FIX( silk_interpolate( NLSF0_Q15, psEncC->prev_NLSFq_Q15, NLSF_Q15, k, psEncC->predictLPCOrder ); /* Convert to LPC for residual energy evaluation */ - silk_NLSF2A( a_tmp_Q12, NLSF0_Q15, psEncC->predictLPCOrder ); + silk_NLSF2A( a_tmp_Q12, NLSF0_Q15, psEncC->predictLPCOrder, psEncC->arch ); /* Calculate residual energy with NLSF interpolation */ silk_LPC_analysis_filter( LPC_res, x, a_tmp_Q12, 2 * subfr_length, psEncC->predictLPCOrder, psEncC->arch ); @@ -146,6 +146,6 @@ void silk_find_LPC_FIX( silk_A2NLSF( NLSF_Q15, a_Q16, psEncC->predictLPCOrder ); } - silk_assert( psEncC->indices.NLSFInterpCoef_Q2 == 4 || ( psEncC->useInterpolatedNLSFs && !psEncC->first_frame_after_reset && psEncC->nb_subfr == MAX_NB_SUBFR ) ); + celt_assert( psEncC->indices.NLSFInterpCoef_Q2 == 4 || ( 
psEncC->useInterpolatedNLSFs && !psEncC->first_frame_after_reset && psEncC->nb_subfr == MAX_NB_SUBFR ) ); RESTORE_STACK; } diff --git a/libs/libopus/silk/fixed/find_LTP_FIX.c b/libs/libopus/silk/fixed/find_LTP_FIX.c index 1314a2813..62d4afb25 100644 --- a/libs/libopus/silk/fixed/find_LTP_FIX.c +++ b/libs/libopus/silk/fixed/find_LTP_FIX.c @@ -32,214 +32,68 @@ POSSIBILITY OF SUCH DAMAGE. #include "main_FIX.h" #include "tuning_parameters.h" -/* Head room for correlations */ -#define LTP_CORRS_HEAD_ROOM 2 - -void silk_fit_LTP( - opus_int32 LTP_coefs_Q16[ LTP_ORDER ], - opus_int16 LTP_coefs_Q14[ LTP_ORDER ] -); - void silk_find_LTP_FIX( - opus_int16 b_Q14[ MAX_NB_SUBFR * LTP_ORDER ], /* O LTP coefs */ - opus_int32 WLTP[ MAX_NB_SUBFR * LTP_ORDER * LTP_ORDER ], /* O Weight for LTP quantization */ - opus_int *LTPredCodGain_Q7, /* O LTP coding gain */ - const opus_int16 r_lpc[], /* I residual signal after LPC signal + state for first 10 ms */ + opus_int32 XXLTP_Q17[ MAX_NB_SUBFR * LTP_ORDER * LTP_ORDER ], /* O Correlation matrix */ + opus_int32 xXLTP_Q17[ MAX_NB_SUBFR * LTP_ORDER ], /* O Correlation vector */ + const opus_int16 r_ptr[], /* I Residual signal after LPC */ const opus_int lag[ MAX_NB_SUBFR ], /* I LTP lags */ - const opus_int32 Wght_Q15[ MAX_NB_SUBFR ], /* I weights */ - const opus_int subfr_length, /* I subframe length */ - const opus_int nb_subfr, /* I number of subframes */ - const opus_int mem_offset, /* I number of samples in LTP memory */ - opus_int corr_rshifts[ MAX_NB_SUBFR ], /* O right shifts applied to correlations */ + const opus_int subfr_length, /* I Subframe length */ + const opus_int nb_subfr, /* I Number of subframes */ int arch /* I Run-time architecture */ ) { - opus_int i, k, lshift; - const opus_int16 *r_ptr, *lag_ptr; - opus_int16 *b_Q14_ptr; - - opus_int32 regu; - opus_int32 *WLTP_ptr; - opus_int32 b_Q16[ LTP_ORDER ], delta_b_Q14[ LTP_ORDER ], d_Q14[ MAX_NB_SUBFR ], nrg[ MAX_NB_SUBFR ], g_Q26; - opus_int32 w[ MAX_NB_SUBFR ], WLTP_max, max_abs_d_Q14, max_w_bits; - - opus_int32 temp32, denom32; - opus_int extra_shifts; - opus_int rr_shifts, maxRshifts, maxRshifts_wxtra, LZs; - opus_int32 LPC_res_nrg, LPC_LTP_res_nrg, div_Q16; - opus_int32 Rr[ LTP_ORDER ], rr[ MAX_NB_SUBFR ]; - opus_int32 wd, m_Q12; - - b_Q14_ptr = b_Q14; - WLTP_ptr = WLTP; - r_ptr = &r_lpc[ mem_offset ]; + opus_int i, k, extra_shifts; + opus_int xx_shifts, xX_shifts, XX_shifts; + const opus_int16 *lag_ptr; + opus_int32 *XXLTP_Q17_ptr, *xXLTP_Q17_ptr; + opus_int32 xx, nrg, temp; + + xXLTP_Q17_ptr = xXLTP_Q17; + XXLTP_Q17_ptr = XXLTP_Q17; for( k = 0; k < nb_subfr; k++ ) { lag_ptr = r_ptr - ( lag[ k ] + LTP_ORDER / 2 ); - silk_sum_sqr_shift( &rr[ k ], &rr_shifts, r_ptr, subfr_length ); /* rr[ k ] in Q( -rr_shifts ) */ - - /* Assure headroom */ - LZs = silk_CLZ32( rr[k] ); - if( LZs < LTP_CORRS_HEAD_ROOM ) { - rr[ k ] = silk_RSHIFT_ROUND( rr[ k ], LTP_CORRS_HEAD_ROOM - LZs ); - rr_shifts += ( LTP_CORRS_HEAD_ROOM - LZs ); - } - corr_rshifts[ k ] = rr_shifts; - silk_corrMatrix_FIX( lag_ptr, subfr_length, LTP_ORDER, LTP_CORRS_HEAD_ROOM, WLTP_ptr, &corr_rshifts[ k ], arch ); /* WLTP_fix_ptr in Q( -corr_rshifts[ k ] ) */ - - /* The correlation vector always has lower max abs value than rr and/or RR so head room is assured */ - silk_corrVector_FIX( lag_ptr, r_ptr, subfr_length, LTP_ORDER, Rr, corr_rshifts[ k ], arch ); /* Rr_fix_ptr in Q( -corr_rshifts[ k ] ) */ - if( corr_rshifts[ k ] > rr_shifts ) { - rr[ k ] = silk_RSHIFT( rr[ k ], corr_rshifts[ k ] - rr_shifts ); /* rr[ k ] in Q( -corr_rshifts[ k ] ) */ 
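/* In the block below, xx is the residual energy over subfr_length + LTP_ORDER samples and nrg is the energy of the lagged signal returned by silk_corrMatrix_FIX(); after aligning both to the common Q( -xX_shifts ) domain, every entry of XXLTP_Q17 and xXLTP_Q17 is divided by temp = max( xx, nrg * LTP_CORR_INV_MAX + 1 ) and shifted up by 17 bits, so the correlations are handed back normalized to Q17 relative to the signal energy. */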
+ silk_sum_sqr_shift( &xx, &xx_shifts, r_ptr, subfr_length + LTP_ORDER ); /* xx in Q( -xx_shifts ) */ + silk_corrMatrix_FIX( lag_ptr, subfr_length, LTP_ORDER, XXLTP_Q17_ptr, &nrg, &XX_shifts, arch ); /* XXLTP_Q17_ptr and nrg in Q( -XX_shifts ) */ + extra_shifts = xx_shifts - XX_shifts; + if( extra_shifts > 0 ) { + /* Shift XX */ + xX_shifts = xx_shifts; + for( i = 0; i < LTP_ORDER * LTP_ORDER; i++ ) { + XXLTP_Q17_ptr[ i ] = silk_RSHIFT32( XXLTP_Q17_ptr[ i ], extra_shifts ); /* Q( -xX_shifts ) */ + } + nrg = silk_RSHIFT32( nrg, extra_shifts ); /* Q( -xX_shifts ) */ + } else if( extra_shifts < 0 ) { + /* Shift xx */ + xX_shifts = XX_shifts; + xx = silk_RSHIFT32( xx, -extra_shifts ); /* Q( -xX_shifts ) */ + } else { + xX_shifts = xx_shifts; } - silk_assert( rr[ k ] >= 0 ); - - regu = 1; - regu = silk_SMLAWB( regu, rr[ k ], SILK_FIX_CONST( LTP_DAMPING/3, 16 ) ); - regu = silk_SMLAWB( regu, matrix_ptr( WLTP_ptr, 0, 0, LTP_ORDER ), SILK_FIX_CONST( LTP_DAMPING/3, 16 ) ); - regu = silk_SMLAWB( regu, matrix_ptr( WLTP_ptr, LTP_ORDER-1, LTP_ORDER-1, LTP_ORDER ), SILK_FIX_CONST( LTP_DAMPING/3, 16 ) ); - silk_regularize_correlations_FIX( WLTP_ptr, &rr[k], regu, LTP_ORDER ); - - silk_solve_LDL_FIX( WLTP_ptr, LTP_ORDER, Rr, b_Q16 ); /* WLTP_fix_ptr and Rr_fix_ptr both in Q(-corr_rshifts[k]) */ - - /* Limit and store in Q14 */ - silk_fit_LTP( b_Q16, b_Q14_ptr ); - - /* Calculate residual energy */ - nrg[ k ] = silk_residual_energy16_covar_FIX( b_Q14_ptr, WLTP_ptr, Rr, rr[ k ], LTP_ORDER, 14 ); /* nrg_fix in Q( -corr_rshifts[ k ] ) */ - - /* temp = Wght[ k ] / ( nrg[ k ] * Wght[ k ] + 0.01f * subfr_length ); */ - extra_shifts = silk_min_int( corr_rshifts[ k ], LTP_CORRS_HEAD_ROOM ); - denom32 = silk_LSHIFT_SAT32( silk_SMULWB( nrg[ k ], Wght_Q15[ k ] ), 1 + extra_shifts ) + /* Q( -corr_rshifts[ k ] + extra_shifts ) */ - silk_RSHIFT( silk_SMULWB( (opus_int32)subfr_length, 655 ), corr_rshifts[ k ] - extra_shifts ); /* Q( -corr_rshifts[ k ] + extra_shifts ) */ - denom32 = silk_max( denom32, 1 ); - silk_assert( ((opus_int64)Wght_Q15[ k ] << 16 ) < silk_int32_MAX ); /* Wght always < 0.5 in Q0 */ - temp32 = silk_DIV32( silk_LSHIFT( (opus_int32)Wght_Q15[ k ], 16 ), denom32 ); /* Q( 15 + 16 + corr_rshifts[k] - extra_shifts ) */ - temp32 = silk_RSHIFT( temp32, 31 + corr_rshifts[ k ] - extra_shifts - 26 ); /* Q26 */ + silk_corrVector_FIX( lag_ptr, r_ptr, subfr_length, LTP_ORDER, xXLTP_Q17_ptr, xX_shifts, arch ); /* xXLTP_Q17_ptr in Q( -xX_shifts ) */ - /* Limit temp such that the below scaling never wraps around */ - WLTP_max = 0; + /* At this point all correlations are in Q(-xX_shifts) */ + temp = silk_SMLAWB( 1, nrg, SILK_FIX_CONST( LTP_CORR_INV_MAX, 16 ) ); + temp = silk_max( temp, xx ); +TIC(div) +#if 0 for( i = 0; i < LTP_ORDER * LTP_ORDER; i++ ) { - WLTP_max = silk_max( WLTP_ptr[ i ], WLTP_max ); + XXLTP_Q17_ptr[ i ] = silk_DIV32_varQ( XXLTP_Q17_ptr[ i ], temp, 17 ); } - lshift = silk_CLZ32( WLTP_max ) - 1 - 3; /* keep 3 bits free for vq_nearest_neighbor_fix */ - silk_assert( 26 - 18 + lshift >= 0 ); - if( 26 - 18 + lshift < 31 ) { - temp32 = silk_min_32( temp32, silk_LSHIFT( (opus_int32)1, 26 - 18 + lshift ) ); - } - - silk_scale_vector32_Q26_lshift_18( WLTP_ptr, temp32, LTP_ORDER * LTP_ORDER ); /* WLTP_ptr in Q( 18 - corr_rshifts[ k ] ) */ - - w[ k ] = matrix_ptr( WLTP_ptr, LTP_ORDER/2, LTP_ORDER/2, LTP_ORDER ); /* w in Q( 18 - corr_rshifts[ k ] ) */ - silk_assert( w[k] >= 0 ); - - r_ptr += subfr_length; - b_Q14_ptr += LTP_ORDER; - WLTP_ptr += LTP_ORDER * LTP_ORDER; - } - - maxRshifts = 0; - for( k = 0; k < 
nb_subfr; k++ ) { - maxRshifts = silk_max_int( corr_rshifts[ k ], maxRshifts ); - } - - /* Compute LTP coding gain */ - if( LTPredCodGain_Q7 != NULL ) { - LPC_LTP_res_nrg = 0; - LPC_res_nrg = 0; - silk_assert( LTP_CORRS_HEAD_ROOM >= 2 ); /* Check that no overflow will happen when adding */ - for( k = 0; k < nb_subfr; k++ ) { - LPC_res_nrg = silk_ADD32( LPC_res_nrg, silk_RSHIFT( silk_ADD32( silk_SMULWB( rr[ k ], Wght_Q15[ k ] ), 1 ), 1 + ( maxRshifts - corr_rshifts[ k ] ) ) ); /* Q( -maxRshifts ) */ - LPC_LTP_res_nrg = silk_ADD32( LPC_LTP_res_nrg, silk_RSHIFT( silk_ADD32( silk_SMULWB( nrg[ k ], Wght_Q15[ k ] ), 1 ), 1 + ( maxRshifts - corr_rshifts[ k ] ) ) ); /* Q( -maxRshifts ) */ - } - LPC_LTP_res_nrg = silk_max( LPC_LTP_res_nrg, 1 ); /* avoid division by zero */ - - div_Q16 = silk_DIV32_varQ( LPC_res_nrg, LPC_LTP_res_nrg, 16 ); - *LTPredCodGain_Q7 = ( opus_int )silk_SMULBB( 3, silk_lin2log( div_Q16 ) - ( 16 << 7 ) ); - - silk_assert( *LTPredCodGain_Q7 == ( opus_int )silk_SAT16( silk_MUL( 3, silk_lin2log( div_Q16 ) - ( 16 << 7 ) ) ) ); - } - - /* smoothing */ - /* d = sum( B, 1 ); */ - b_Q14_ptr = b_Q14; - for( k = 0; k < nb_subfr; k++ ) { - d_Q14[ k ] = 0; for( i = 0; i < LTP_ORDER; i++ ) { - d_Q14[ k ] += b_Q14_ptr[ i ]; - } - b_Q14_ptr += LTP_ORDER; - } - - /* m = ( w * d' ) / ( sum( w ) + 1e-3 ); */ - - /* Find maximum absolute value of d_Q14 and the bits used by w in Q0 */ - max_abs_d_Q14 = 0; - max_w_bits = 0; - for( k = 0; k < nb_subfr; k++ ) { - max_abs_d_Q14 = silk_max_32( max_abs_d_Q14, silk_abs( d_Q14[ k ] ) ); - /* w[ k ] is in Q( 18 - corr_rshifts[ k ] ) */ - /* Find bits needed in Q( 18 - maxRshifts ) */ - max_w_bits = silk_max_32( max_w_bits, 32 - silk_CLZ32( w[ k ] ) + corr_rshifts[ k ] - maxRshifts ); - } - - /* max_abs_d_Q14 = (5 << 15); worst case, i.e. 
LTP_ORDER * -silk_int16_MIN */ - silk_assert( max_abs_d_Q14 <= ( 5 << 15 ) ); - - /* How many bits is needed for w*d' in Q( 18 - maxRshifts ) in the worst case, of all d_Q14's being equal to max_abs_d_Q14 */ - extra_shifts = max_w_bits + 32 - silk_CLZ32( max_abs_d_Q14 ) - 14; - - /* Subtract what we got available; bits in output var plus maxRshifts */ - extra_shifts -= ( 32 - 1 - 2 + maxRshifts ); /* Keep sign bit free as well as 2 bits for accumulation */ - extra_shifts = silk_max_int( extra_shifts, 0 ); - - maxRshifts_wxtra = maxRshifts + extra_shifts; - - temp32 = silk_RSHIFT( 262, maxRshifts + extra_shifts ) + 1; /* 1e-3f in Q( 18 - (maxRshifts + extra_shifts) ) */ - wd = 0; - for( k = 0; k < nb_subfr; k++ ) { - /* w has at least 2 bits of headroom so no overflow should happen */ - temp32 = silk_ADD32( temp32, silk_RSHIFT( w[ k ], maxRshifts_wxtra - corr_rshifts[ k ] ) ); /* Q( 18 - maxRshifts_wxtra ) */ - wd = silk_ADD32( wd, silk_LSHIFT( silk_SMULWW( silk_RSHIFT( w[ k ], maxRshifts_wxtra - corr_rshifts[ k ] ), d_Q14[ k ] ), 2 ) ); /* Q( 18 - maxRshifts_wxtra ) */ - } - m_Q12 = silk_DIV32_varQ( wd, temp32, 12 ); - - b_Q14_ptr = b_Q14; - for( k = 0; k < nb_subfr; k++ ) { - /* w_fix[ k ] from Q( 18 - corr_rshifts[ k ] ) to Q( 16 ) */ - if( 2 - corr_rshifts[k] > 0 ) { - temp32 = silk_RSHIFT( w[ k ], 2 - corr_rshifts[ k ] ); - } else { - temp32 = silk_LSHIFT_SAT32( w[ k ], corr_rshifts[ k ] - 2 ); + xXLTP_Q17_ptr[ i ] = silk_DIV32_varQ( xXLTP_Q17_ptr[ i ], temp, 17 ); } - - g_Q26 = silk_MUL( - silk_DIV32( - SILK_FIX_CONST( LTP_SMOOTHING, 26 ), - silk_RSHIFT( SILK_FIX_CONST( LTP_SMOOTHING, 26 ), 10 ) + temp32 ), /* Q10 */ - silk_LSHIFT_SAT32( silk_SUB_SAT32( (opus_int32)m_Q12, silk_RSHIFT( d_Q14[ k ], 2 ) ), 4 ) ); /* Q16 */ - - temp32 = 0; - for( i = 0; i < LTP_ORDER; i++ ) { - delta_b_Q14[ i ] = silk_max_16( b_Q14_ptr[ i ], 1638 ); /* 1638_Q14 = 0.1_Q0 */ - temp32 += delta_b_Q14[ i ]; /* Q14 */ +#else + for( i = 0; i < LTP_ORDER * LTP_ORDER; i++ ) { + XXLTP_Q17_ptr[ i ] = (opus_int32)( silk_LSHIFT64( (opus_int64)XXLTP_Q17_ptr[ i ], 17 ) / temp ); } - temp32 = silk_DIV32( g_Q26, temp32 ); /* Q14 -> Q12 */ for( i = 0; i < LTP_ORDER; i++ ) { - b_Q14_ptr[ i ] = silk_LIMIT_32( (opus_int32)b_Q14_ptr[ i ] + silk_SMULWB( silk_LSHIFT_SAT32( temp32, 4 ), delta_b_Q14[ i ] ), -16000, 28000 ); + xXLTP_Q17_ptr[ i ] = (opus_int32)( silk_LSHIFT64( (opus_int64)xXLTP_Q17_ptr[ i ], 17 ) / temp ); } - b_Q14_ptr += LTP_ORDER; - } -} - -void silk_fit_LTP( - opus_int32 LTP_coefs_Q16[ LTP_ORDER ], - opus_int16 LTP_coefs_Q14[ LTP_ORDER ] -) -{ - opus_int i; - - for( i = 0; i < LTP_ORDER; i++ ) { - LTP_coefs_Q14[ i ] = (opus_int16)silk_SAT16( silk_RSHIFT_ROUND( LTP_coefs_Q16[ i ], 2 ) ); +#endif +TOC(div) + r_ptr += subfr_length; + XXLTP_Q17_ptr += LTP_ORDER * LTP_ORDER; + xXLTP_Q17_ptr += LTP_ORDER; } } diff --git a/libs/libopus/silk/fixed/find_pitch_lags_FIX.c b/libs/libopus/silk/fixed/find_pitch_lags_FIX.c index b8440a824..6c3379f2b 100644 --- a/libs/libopus/silk/fixed/find_pitch_lags_FIX.c +++ b/libs/libopus/silk/fixed/find_pitch_lags_FIX.c @@ -44,7 +44,7 @@ void silk_find_pitch_lags_FIX( { opus_int buf_len, i, scale; opus_int32 thrhld_Q13, res_nrg; - const opus_int16 *x_buf, *x_buf_ptr; + const opus_int16 *x_ptr; VARDECL( opus_int16, Wsig ); opus_int16 *Wsig_ptr; opus_int32 auto_corr[ MAX_FIND_PITCH_LPC_ORDER + 1 ]; @@ -59,9 +59,7 @@ void silk_find_pitch_lags_FIX( buf_len = psEnc->sCmn.la_pitch + psEnc->sCmn.frame_length + psEnc->sCmn.ltp_mem_length; /* Safety check */ - silk_assert( buf_len >= 
psEnc->sCmn.pitch_LPC_win_length ); - - x_buf = x - psEnc->sCmn.ltp_mem_length; + celt_assert( buf_len >= psEnc->sCmn.pitch_LPC_win_length ); /*************************************/ /* Estimate LPC AR coefficients */ @@ -72,19 +70,19 @@ void silk_find_pitch_lags_FIX( ALLOC( Wsig, psEnc->sCmn.pitch_LPC_win_length, opus_int16 ); /* First LA_LTP samples */ - x_buf_ptr = x_buf + buf_len - psEnc->sCmn.pitch_LPC_win_length; + x_ptr = x + buf_len - psEnc->sCmn.pitch_LPC_win_length; Wsig_ptr = Wsig; - silk_apply_sine_window( Wsig_ptr, x_buf_ptr, 1, psEnc->sCmn.la_pitch ); + silk_apply_sine_window( Wsig_ptr, x_ptr, 1, psEnc->sCmn.la_pitch ); /* Middle un - windowed samples */ Wsig_ptr += psEnc->sCmn.la_pitch; - x_buf_ptr += psEnc->sCmn.la_pitch; - silk_memcpy( Wsig_ptr, x_buf_ptr, ( psEnc->sCmn.pitch_LPC_win_length - silk_LSHIFT( psEnc->sCmn.la_pitch, 1 ) ) * sizeof( opus_int16 ) ); + x_ptr += psEnc->sCmn.la_pitch; + silk_memcpy( Wsig_ptr, x_ptr, ( psEnc->sCmn.pitch_LPC_win_length - silk_LSHIFT( psEnc->sCmn.la_pitch, 1 ) ) * sizeof( opus_int16 ) ); /* Last LA_LTP samples */ Wsig_ptr += psEnc->sCmn.pitch_LPC_win_length - silk_LSHIFT( psEnc->sCmn.la_pitch, 1 ); - x_buf_ptr += psEnc->sCmn.pitch_LPC_win_length - silk_LSHIFT( psEnc->sCmn.la_pitch, 1 ); - silk_apply_sine_window( Wsig_ptr, x_buf_ptr, 2, psEnc->sCmn.la_pitch ); + x_ptr += psEnc->sCmn.pitch_LPC_win_length - silk_LSHIFT( psEnc->sCmn.la_pitch, 1 ); + silk_apply_sine_window( Wsig_ptr, x_ptr, 2, psEnc->sCmn.la_pitch ); /* Calculate autocorrelation sequence */ silk_autocorr( auto_corr, &scale, Wsig, psEnc->sCmn.pitch_LPC_win_length, psEnc->sCmn.pitchEstimationLPCOrder + 1, arch ); @@ -112,7 +110,7 @@ void silk_find_pitch_lags_FIX( /*****************************************/ /* LPC analysis filtering */ /*****************************************/ - silk_LPC_analysis_filter( res, x_buf, A_Q12, buf_len, psEnc->sCmn.pitchEstimationLPCOrder, psEnc->sCmn.arch ); + silk_LPC_analysis_filter( res, x, A_Q12, buf_len, psEnc->sCmn.pitchEstimationLPCOrder, psEnc->sCmn.arch ); if( psEnc->sCmn.indices.signalType != TYPE_NO_VOICE_ACTIVITY && psEnc->sCmn.first_frame_after_reset == 0 ) { /* Threshold for pitch estimator */ diff --git a/libs/libopus/silk/fixed/find_pred_coefs_FIX.c b/libs/libopus/silk/fixed/find_pred_coefs_FIX.c index d308e9cf5..606d86334 100644 --- a/libs/libopus/silk/fixed/find_pred_coefs_FIX.c +++ b/libs/libopus/silk/fixed/find_pred_coefs_FIX.c @@ -41,13 +41,12 @@ void silk_find_pred_coefs_FIX( ) { opus_int i; - opus_int32 invGains_Q16[ MAX_NB_SUBFR ], local_gains[ MAX_NB_SUBFR ], Wght_Q15[ MAX_NB_SUBFR ]; + opus_int32 invGains_Q16[ MAX_NB_SUBFR ], local_gains[ MAX_NB_SUBFR ]; opus_int16 NLSF_Q15[ MAX_LPC_ORDER ]; const opus_int16 *x_ptr; opus_int16 *x_pre_ptr; VARDECL( opus_int16, LPC_in_pre ); - opus_int32 tmp, min_gain_Q16, minInvGain_Q30; - opus_int LTP_corrs_rshift[ MAX_NB_SUBFR ]; + opus_int32 min_gain_Q16, minInvGain_Q30; SAVE_STACK; /* weighting for weighted least squares */ @@ -61,13 +60,11 @@ void silk_find_pred_coefs_FIX( /* Invert and normalize gains, and ensure that maximum invGains_Q16 is within range of a 16 bit int */ invGains_Q16[ i ] = silk_DIV32_varQ( min_gain_Q16, psEncCtrl->Gains_Q16[ i ], 16 - 2 ); - /* Ensure Wght_Q15 a minimum value 1 */ - invGains_Q16[ i ] = silk_max( invGains_Q16[ i ], 363 ); + /* Limit inverse */ + invGains_Q16[ i ] = silk_max( invGains_Q16[ i ], 100 ); /* Square the inverted gains */ silk_assert( invGains_Q16[ i ] == silk_SAT16( invGains_Q16[ i ] ) ); - tmp = silk_SMULWB( invGains_Q16[ i ], 
invGains_Q16[ i ] ); - Wght_Q15[ i ] = silk_RSHIFT( tmp, 1 ); /* Invert the inverted and normalized gains */ local_gains[ i ] = silk_DIV32( ( (opus_int32)1 << 16 ), invGains_Q16[ i ] ); @@ -77,24 +74,24 @@ void silk_find_pred_coefs_FIX( psEnc->sCmn.nb_subfr * psEnc->sCmn.predictLPCOrder + psEnc->sCmn.frame_length, opus_int16 ); if( psEnc->sCmn.indices.signalType == TYPE_VOICED ) { - VARDECL( opus_int32, WLTP ); + VARDECL( opus_int32, xXLTP_Q17 ); + VARDECL( opus_int32, XXLTP_Q17 ); /**********/ /* VOICED */ /**********/ - silk_assert( psEnc->sCmn.ltp_mem_length - psEnc->sCmn.predictLPCOrder >= psEncCtrl->pitchL[ 0 ] + LTP_ORDER / 2 ); + celt_assert( psEnc->sCmn.ltp_mem_length - psEnc->sCmn.predictLPCOrder >= psEncCtrl->pitchL[ 0 ] + LTP_ORDER / 2 ); - ALLOC( WLTP, psEnc->sCmn.nb_subfr * LTP_ORDER * LTP_ORDER, opus_int32 ); + ALLOC( xXLTP_Q17, psEnc->sCmn.nb_subfr * LTP_ORDER, opus_int32 ); + ALLOC( XXLTP_Q17, psEnc->sCmn.nb_subfr * LTP_ORDER * LTP_ORDER, opus_int32 ); /* LTP analysis */ - silk_find_LTP_FIX( psEncCtrl->LTPCoef_Q14, WLTP, &psEncCtrl->LTPredCodGain_Q7, - res_pitch, psEncCtrl->pitchL, Wght_Q15, psEnc->sCmn.subfr_length, - psEnc->sCmn.nb_subfr, psEnc->sCmn.ltp_mem_length, LTP_corrs_rshift, psEnc->sCmn.arch ); + silk_find_LTP_FIX( XXLTP_Q17, xXLTP_Q17, res_pitch, + psEncCtrl->pitchL, psEnc->sCmn.subfr_length, psEnc->sCmn.nb_subfr, psEnc->sCmn.arch ); /* Quantize LTP gain parameters */ silk_quant_LTP_gains( psEncCtrl->LTPCoef_Q14, psEnc->sCmn.indices.LTPIndex, &psEnc->sCmn.indices.PERIndex, - &psEnc->sCmn.sum_log_gain_Q7, WLTP, psEnc->sCmn.mu_LTP_Q9, psEnc->sCmn.LTPQuantLowComplexity, psEnc->sCmn.nb_subfr, - psEnc->sCmn.arch); + &psEnc->sCmn.sum_log_gain_Q7, &psEncCtrl->LTPredCodGain_Q7, XXLTP_Q17, xXLTP_Q17, psEnc->sCmn.subfr_length, psEnc->sCmn.nb_subfr, psEnc->sCmn.arch ); /* Control LTP scaling */ silk_LTP_scale_ctrl_FIX( psEnc, psEncCtrl, condCoding ); diff --git a/libs/libopus/silk/fixed/k2a_FIX.c b/libs/libopus/silk/fixed/k2a_FIX.c index 5fee599bc..549f6eada 100644 --- a/libs/libopus/silk/fixed/k2a_FIX.c +++ b/libs/libopus/silk/fixed/k2a_FIX.c @@ -39,14 +39,15 @@ void silk_k2a( ) { opus_int k, n; - opus_int32 Atmp[ SILK_MAX_ORDER_LPC ]; + opus_int32 rc, tmp1, tmp2; for( k = 0; k < order; k++ ) { - for( n = 0; n < k; n++ ) { - Atmp[ n ] = A_Q24[ n ]; - } - for( n = 0; n < k; n++ ) { - A_Q24[ n ] = silk_SMLAWB( A_Q24[ n ], silk_LSHIFT( Atmp[ k - n - 1 ], 1 ), rc_Q15[ k ] ); + rc = rc_Q15[ k ]; + for( n = 0; n < (k + 1) >> 1; n++ ) { + tmp1 = A_Q24[ n ]; + tmp2 = A_Q24[ k - n - 1 ]; + A_Q24[ n ] = silk_SMLAWB( tmp1, silk_LSHIFT( tmp2, 1 ), rc ); + A_Q24[ k - n - 1 ] = silk_SMLAWB( tmp2, silk_LSHIFT( tmp1, 1 ), rc ); } A_Q24[ k ] = -silk_LSHIFT( (opus_int32)rc_Q15[ k ], 9 ); } diff --git a/libs/libopus/silk/fixed/k2a_Q16_FIX.c b/libs/libopus/silk/fixed/k2a_Q16_FIX.c index 3b0398754..1595aa621 100644 --- a/libs/libopus/silk/fixed/k2a_Q16_FIX.c +++ b/libs/libopus/silk/fixed/k2a_Q16_FIX.c @@ -39,15 +39,16 @@ void silk_k2a_Q16( ) { opus_int k, n; - opus_int32 Atmp[ SILK_MAX_ORDER_LPC ]; + opus_int32 rc, tmp1, tmp2; for( k = 0; k < order; k++ ) { - for( n = 0; n < k; n++ ) { - Atmp[ n ] = A_Q24[ n ]; + rc = rc_Q16[ k ]; + for( n = 0; n < (k + 1) >> 1; n++ ) { + tmp1 = A_Q24[ n ]; + tmp2 = A_Q24[ k - n - 1 ]; + A_Q24[ n ] = silk_SMLAWW( tmp1, tmp2, rc ); + A_Q24[ k - n - 1 ] = silk_SMLAWW( tmp2, tmp1, rc ); } - for( n = 0; n < k; n++ ) { - A_Q24[ n ] = silk_SMLAWW( A_Q24[ n ], Atmp[ k - n - 1 ], rc_Q16[ k ] ); - } - A_Q24[ k ] = -silk_LSHIFT( rc_Q16[ k ], 8 ); + A_Q24[ k ] = 
-silk_LSHIFT( rc, 8 ); } } diff --git a/libs/libopus/silk/fixed/main_FIX.h b/libs/libopus/silk/fixed/main_FIX.h index 375b5eb32..6d2112e51 100644 --- a/libs/libopus/silk/fixed/main_FIX.h +++ b/libs/libopus/silk/fixed/main_FIX.h @@ -36,6 +36,11 @@ POSSIBILITY OF SUCH DAMAGE. #include "debug.h" #include "entenc.h" +#if ((defined(OPUS_ARM_ASM) && defined(FIXED_POINT)) \ + || defined(OPUS_ARM_MAY_HAVE_NEON_INTR)) +#include "fixed/arm/warped_autocorrelation_FIX_arm.h" +#endif + #ifndef FORCE_CPP_BUILD #ifdef __cplusplus extern "C" @@ -47,6 +52,9 @@ extern "C" #define silk_encode_do_VAD_Fxx silk_encode_do_VAD_FIX #define silk_encode_frame_Fxx silk_encode_frame_FIX +#define QC 10 +#define QS 13 + /*********************/ /* Encoder Functions */ /*********************/ @@ -58,7 +66,8 @@ void silk_HP_variable_cutoff( /* Encoder main function */ void silk_encode_do_VAD_FIX( - silk_encoder_state_FIX *psEnc /* I/O Pointer to Silk FIX encoder state */ + silk_encoder_state_FIX *psEnc, /* I/O Pointer to Silk FIX encoder state */ + opus_int activity /* I Decision of Opus voice activity detector */ ); /* Encoder main function */ @@ -81,33 +90,11 @@ opus_int silk_init_encoder( opus_int silk_control_encoder( silk_encoder_state_Fxx *psEnc, /* I/O Pointer to Silk encoder state */ silk_EncControlStruct *encControl, /* I Control structure */ - const opus_int32 TargetRate_bps, /* I Target max bitrate (bps) */ const opus_int allow_bw_switch, /* I Flag to allow switching audio bandwidth */ const opus_int channelNb, /* I Channel number */ const opus_int force_fs_kHz ); -/****************/ -/* Prefiltering */ -/****************/ -void silk_prefilter_FIX( - silk_encoder_state_FIX *psEnc, /* I/O Encoder state */ - const silk_encoder_control_FIX *psEncCtrl, /* I Encoder control */ - opus_int32 xw_Q10[], /* O Weighted signal */ - const opus_int16 x[] /* I Speech signal */ -); - -void silk_warped_LPC_analysis_filter_FIX_c( - opus_int32 state[], /* I/O State [order + 1] */ - opus_int32 res_Q2[], /* O Residual signal [length] */ - const opus_int16 coef_Q13[], /* I Coefficients [order] */ - const opus_int16 input[], /* I Input signal [length] */ - const opus_int16 lambda_Q16, /* I Warping factor */ - const opus_int length, /* I Length of input signal */ - const opus_int order /* I Filter order (even) */ -); - - /**************************/ /* Noise shaping analysis */ /**************************/ @@ -121,7 +108,7 @@ void silk_noise_shape_analysis_FIX( ); /* Autocorrelations for a warped frequency axis */ -void silk_warped_autocorrelation_FIX( +void silk_warped_autocorrelation_FIX_c( opus_int32 *corr, /* O Result [order + 1] */ opus_int *scale, /* O Scaling of the correlation vector */ const opus_int16 *input, /* I Input data to correlate */ @@ -130,6 +117,11 @@ void silk_warped_autocorrelation_FIX( const opus_int order /* I Correlation order (even) */ ); +#if !defined(OVERRIDE_silk_warped_autocorrelation_FIX) +#define silk_warped_autocorrelation_FIX(corr, scale, input, warping_Q16, length, order, arch) \ + ((void)(arch), silk_warped_autocorrelation_FIX_c(corr, scale, input, warping_Q16, length, order)) +#endif + /* Calculation of LTP state scaling */ void silk_LTP_scale_ctrl_FIX( silk_encoder_state_FIX *psEnc, /* I/O encoder state */ @@ -168,16 +160,12 @@ void silk_find_LPC_FIX( /* LTP analysis */ void silk_find_LTP_FIX( - opus_int16 b_Q14[ MAX_NB_SUBFR * LTP_ORDER ], /* O LTP coefs */ - opus_int32 WLTP[ MAX_NB_SUBFR * LTP_ORDER * LTP_ORDER ], /* O Weight for LTP quantization */ - opus_int *LTPredCodGain_Q7, /* O LTP coding 
gain */ - const opus_int16 r_lpc[], /* I residual signal after LPC signal + state for first 10 ms */ + opus_int32 XXLTP_Q17[ MAX_NB_SUBFR * LTP_ORDER * LTP_ORDER ], /* O Correlation matrix */ + opus_int32 xXLTP_Q17[ MAX_NB_SUBFR * LTP_ORDER ], /* O Correlation vector */ + const opus_int16 r_lpc[], /* I Residual signal after LPC */ const opus_int lag[ MAX_NB_SUBFR ], /* I LTP lags */ - const opus_int32 Wght_Q15[ MAX_NB_SUBFR ], /* I weights */ - const opus_int subfr_length, /* I subframe length */ - const opus_int nb_subfr, /* I number of subframes */ - const opus_int mem_offset, /* I number of samples in LTP memory */ - opus_int corr_rshifts[ MAX_NB_SUBFR ], /* O right shifts applied to correlations */ + const opus_int subfr_length, /* I Subframe length */ + const opus_int nb_subfr, /* I Number of subframes */ int arch /* I Run-time architecture */ ); @@ -231,9 +219,9 @@ void silk_corrMatrix_FIX( const opus_int16 *x, /* I x vector [L + order - 1] used to form data matrix X */ const opus_int L, /* I Length of vectors */ const opus_int order, /* I Max lag for correlation */ - const opus_int head_room, /* I Desired headroom */ opus_int32 *XX, /* O Pointer to X'*X correlation matrix [ order x order ] */ - opus_int *rshifts, /* I/O Right shifts of correlations */ + opus_int32 *nrg, /* O Energy of x vector */ + opus_int *rshifts, /* O Right shifts of correlations */ int arch /* I Run-time architecture */ ); @@ -248,22 +236,6 @@ void silk_corrVector_FIX( int arch /* I Run-time architecture */ ); -/* Add noise to matrix diagonal */ -void silk_regularize_correlations_FIX( - opus_int32 *XX, /* I/O Correlation matrices */ - opus_int32 *xx, /* I/O Correlation values */ - opus_int32 noise, /* I Noise to add */ - opus_int D /* I Dimension of XX */ -); - -/* Solves Ax = b, assuming A is symmetric */ -void silk_solve_LDL_FIX( - opus_int32 *A, /* I Pointer to symetric square matrix A */ - opus_int M, /* I Size of matrix */ - const opus_int32 *b, /* I Pointer to b vector */ - opus_int32 *x_Q16 /* O Pointer to x solution vector */ -); - #ifndef FORCE_CPP_BUILD #ifdef __cplusplus } diff --git a/libs/libopus/silk/fixed/mips/noise_shape_analysis_FIX_mipsr1.h b/libs/libopus/silk/fixed/mips/noise_shape_analysis_FIX_mipsr1.h index c30481e43..3999b5bd0 100644 --- a/libs/libopus/silk/fixed/mips/noise_shape_analysis_FIX_mipsr1.h +++ b/libs/libopus/silk/fixed/mips/noise_shape_analysis_FIX_mipsr1.h @@ -169,7 +169,7 @@ void silk_noise_shape_analysis_FIX( if( psEnc->sCmn.warping_Q16 > 0 ) { /* Calculate warped auto correlation */ - silk_warped_autocorrelation_FIX( auto_corr, &scale, x_windowed, warping_Q16, psEnc->sCmn.shapeWinLength, psEnc->sCmn.shapingLPCOrder ); + silk_warped_autocorrelation_FIX( auto_corr, &scale, x_windowed, warping_Q16, psEnc->sCmn.shapeWinLength, psEnc->sCmn.shapingLPCOrder, arch ); } else { /* Calculate regular auto correlation */ silk_autocorr( auto_corr, &scale, x_windowed, psEnc->sCmn.shapeWinLength, psEnc->sCmn.shapingLPCOrder + 1, arch ); @@ -224,8 +224,8 @@ void silk_noise_shape_analysis_FIX( silk_bwexpander_32( AR1_Q24, psEnc->sCmn.shapingLPCOrder, BWExp1_Q16 ); /* Ratio of prediction gains, in energy domain */ - pre_nrg_Q30 = silk_LPC_inverse_pred_gain_Q24( AR2_Q24, psEnc->sCmn.shapingLPCOrder ); - nrg = silk_LPC_inverse_pred_gain_Q24( AR1_Q24, psEnc->sCmn.shapingLPCOrder ); + pre_nrg_Q30 = silk_LPC_inverse_pred_gain_Q24( AR2_Q24, psEnc->sCmn.shapingLPCOrder, arch ); + nrg = silk_LPC_inverse_pred_gain_Q24( AR1_Q24, psEnc->sCmn.shapingLPCOrder, arch ); /*psEncCtrl->GainsPre[ k ] = 1.0f - 
0.7f * ( 1.0f - pre_nrg / nrg ) = 0.3f + 0.7f * pre_nrg / nrg;*/ pre_nrg_Q30 = silk_LSHIFT32( silk_SMULWB( pre_nrg_Q30, SILK_FIX_CONST( 0.7, 15 ) ), 1 ); diff --git a/libs/libopus/silk/fixed/mips/prefilter_FIX_mipsr1.h b/libs/libopus/silk/fixed/mips/prefilter_FIX_mipsr1.h deleted file mode 100644 index 21b256885..000000000 --- a/libs/libopus/silk/fixed/mips/prefilter_FIX_mipsr1.h +++ /dev/null @@ -1,184 +0,0 @@ -/*********************************************************************** -Copyright (c) 2006-2011, Skype Limited. All rights reserved. -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions -are met: -- Redistributions of source code must retain the above copyright notice, -this list of conditions and the following disclaimer. -- Redistributions in binary form must reproduce the above copyright -notice, this list of conditions and the following disclaimer in the -documentation and/or other materials provided with the distribution. -- Neither the name of Internet Society, IETF or IETF Trust, nor the -names of specific contributors, may be used to endorse or promote -products derived from this software without specific prior written -permission. -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE -LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -POSSIBILITY OF SUCH DAMAGE. 
-***********************************************************************/ -#ifndef __PREFILTER_FIX_MIPSR1_H__ -#define __PREFILTER_FIX_MIPSR1_H__ - -#ifdef HAVE_CONFIG_H -#include "config.h" -#endif - -#include "main_FIX.h" -#include "stack_alloc.h" -#include "tuning_parameters.h" - -#define OVERRIDE_silk_warped_LPC_analysis_filter_FIX -void silk_warped_LPC_analysis_filter_FIX( - opus_int32 state[], /* I/O State [order + 1] */ - opus_int32 res_Q2[], /* O Residual signal [length] */ - const opus_int16 coef_Q13[], /* I Coefficients [order] */ - const opus_int16 input[], /* I Input signal [length] */ - const opus_int16 lambda_Q16, /* I Warping factor */ - const opus_int length, /* I Length of input signal */ - const opus_int order, /* I Filter order (even) */ - int arch -) -{ - opus_int n, i; - opus_int32 acc_Q11, acc_Q22, tmp1, tmp2, tmp3, tmp4; - opus_int32 state_cur, state_next; - - (void)arch; - - /* Order must be even */ - /* Length must be even */ - - silk_assert( ( order & 1 ) == 0 ); - silk_assert( ( length & 1 ) == 0 ); - - for( n = 0; n < length; n+=2 ) { - /* Output of lowpass section */ - tmp2 = silk_SMLAWB( state[ 0 ], state[ 1 ], lambda_Q16 ); - state_cur = silk_LSHIFT( input[ n ], 14 ); - /* Output of allpass section */ - tmp1 = silk_SMLAWB( state[ 1 ], state[ 2 ] - tmp2, lambda_Q16 ); - state_next = tmp2; - acc_Q11 = silk_RSHIFT( order, 1 ); - acc_Q11 = silk_SMLAWB( acc_Q11, tmp2, coef_Q13[ 0 ] ); - - - /* Output of lowpass section */ - tmp4 = silk_SMLAWB( state_cur, state_next, lambda_Q16 ); - state[ 0 ] = silk_LSHIFT( input[ n+1 ], 14 ); - /* Output of allpass section */ - tmp3 = silk_SMLAWB( state_next, tmp1 - tmp4, lambda_Q16 ); - state[ 1 ] = tmp4; - acc_Q22 = silk_RSHIFT( order, 1 ); - acc_Q22 = silk_SMLAWB( acc_Q22, tmp4, coef_Q13[ 0 ] ); - - /* Loop over allpass sections */ - for( i = 2; i < order; i += 2 ) { - /* Output of allpass section */ - tmp2 = silk_SMLAWB( state[ i ], state[ i + 1 ] - tmp1, lambda_Q16 ); - state_cur = tmp1; - acc_Q11 = silk_SMLAWB( acc_Q11, tmp1, coef_Q13[ i - 1 ] ); - /* Output of allpass section */ - tmp1 = silk_SMLAWB( state[ i + 1 ], state[ i + 2 ] - tmp2, lambda_Q16 ); - state_next = tmp2; - acc_Q11 = silk_SMLAWB( acc_Q11, tmp2, coef_Q13[ i ] ); - - - /* Output of allpass section */ - tmp4 = silk_SMLAWB( state_cur, state_next - tmp3, lambda_Q16 ); - state[ i ] = tmp3; - acc_Q22 = silk_SMLAWB( acc_Q22, tmp3, coef_Q13[ i - 1 ] ); - /* Output of allpass section */ - tmp3 = silk_SMLAWB( state_next, tmp1 - tmp4, lambda_Q16 ); - state[ i + 1 ] = tmp4; - acc_Q22 = silk_SMLAWB( acc_Q22, tmp4, coef_Q13[ i ] ); - } - acc_Q11 = silk_SMLAWB( acc_Q11, tmp1, coef_Q13[ order - 1 ] ); - res_Q2[ n ] = silk_LSHIFT( (opus_int32)input[ n ], 2 ) - silk_RSHIFT_ROUND( acc_Q11, 9 ); - - state[ order ] = tmp3; - acc_Q22 = silk_SMLAWB( acc_Q22, tmp3, coef_Q13[ order - 1 ] ); - res_Q2[ n+1 ] = silk_LSHIFT( (opus_int32)input[ n+1 ], 2 ) - silk_RSHIFT_ROUND( acc_Q22, 9 ); - } -} - - - -/* Prefilter for finding Quantizer input signal */ -#define OVERRIDE_silk_prefilt_FIX -static inline void silk_prefilt_FIX( - silk_prefilter_state_FIX *P, /* I/O state */ - opus_int32 st_res_Q12[], /* I short term residual signal */ - opus_int32 xw_Q3[], /* O prefiltered signal */ - opus_int32 HarmShapeFIRPacked_Q12, /* I Harmonic shaping coeficients */ - opus_int Tilt_Q14, /* I Tilt shaping coeficient */ - opus_int32 LF_shp_Q14, /* I Low-frequancy shaping coeficients */ - opus_int lag, /* I Lag for harmonic shaping */ - opus_int length /* I Length of signals */ -) -{ - opus_int i, idx, 
LTP_shp_buf_idx; - opus_int32 n_LTP_Q12, n_Tilt_Q10, n_LF_Q10; - opus_int32 sLF_MA_shp_Q12, sLF_AR_shp_Q12; - opus_int16 *LTP_shp_buf; - - /* To speed up use temp variables instead of using the struct */ - LTP_shp_buf = P->sLTP_shp; - LTP_shp_buf_idx = P->sLTP_shp_buf_idx; - sLF_AR_shp_Q12 = P->sLF_AR_shp_Q12; - sLF_MA_shp_Q12 = P->sLF_MA_shp_Q12; - - if( lag > 0 ) { - for( i = 0; i < length; i++ ) { - /* unrolled loop */ - silk_assert( HARM_SHAPE_FIR_TAPS == 3 ); - idx = lag + LTP_shp_buf_idx; - n_LTP_Q12 = silk_SMULBB( LTP_shp_buf[ ( idx - HARM_SHAPE_FIR_TAPS / 2 - 1) & LTP_MASK ], HarmShapeFIRPacked_Q12 ); - n_LTP_Q12 = silk_SMLABT( n_LTP_Q12, LTP_shp_buf[ ( idx - HARM_SHAPE_FIR_TAPS / 2 ) & LTP_MASK ], HarmShapeFIRPacked_Q12 ); - n_LTP_Q12 = silk_SMLABB( n_LTP_Q12, LTP_shp_buf[ ( idx - HARM_SHAPE_FIR_TAPS / 2 + 1) & LTP_MASK ], HarmShapeFIRPacked_Q12 ); - - n_Tilt_Q10 = silk_SMULWB( sLF_AR_shp_Q12, Tilt_Q14 ); - n_LF_Q10 = silk_SMLAWB( silk_SMULWT( sLF_AR_shp_Q12, LF_shp_Q14 ), sLF_MA_shp_Q12, LF_shp_Q14 ); - - sLF_AR_shp_Q12 = silk_SUB32( st_res_Q12[ i ], silk_LSHIFT( n_Tilt_Q10, 2 ) ); - sLF_MA_shp_Q12 = silk_SUB32( sLF_AR_shp_Q12, silk_LSHIFT( n_LF_Q10, 2 ) ); - - LTP_shp_buf_idx = ( LTP_shp_buf_idx - 1 ) & LTP_MASK; - LTP_shp_buf[ LTP_shp_buf_idx ] = (opus_int16)silk_SAT16( silk_RSHIFT_ROUND( sLF_MA_shp_Q12, 12 ) ); - - xw_Q3[i] = silk_RSHIFT_ROUND( silk_SUB32( sLF_MA_shp_Q12, n_LTP_Q12 ), 9 ); - } - } - else - { - for( i = 0; i < length; i++ ) { - - n_LTP_Q12 = 0; - - n_Tilt_Q10 = silk_SMULWB( sLF_AR_shp_Q12, Tilt_Q14 ); - n_LF_Q10 = silk_SMLAWB( silk_SMULWT( sLF_AR_shp_Q12, LF_shp_Q14 ), sLF_MA_shp_Q12, LF_shp_Q14 ); - - sLF_AR_shp_Q12 = silk_SUB32( st_res_Q12[ i ], silk_LSHIFT( n_Tilt_Q10, 2 ) ); - sLF_MA_shp_Q12 = silk_SUB32( sLF_AR_shp_Q12, silk_LSHIFT( n_LF_Q10, 2 ) ); - - LTP_shp_buf_idx = ( LTP_shp_buf_idx - 1 ) & LTP_MASK; - LTP_shp_buf[ LTP_shp_buf_idx ] = (opus_int16)silk_SAT16( silk_RSHIFT_ROUND( sLF_MA_shp_Q12, 12 ) ); - - xw_Q3[i] = silk_RSHIFT_ROUND( sLF_MA_shp_Q12, 9 ); - } - } - - /* Copy temp variable back to state */ - P->sLF_AR_shp_Q12 = sLF_AR_shp_Q12; - P->sLF_MA_shp_Q12 = sLF_MA_shp_Q12; - P->sLTP_shp_buf_idx = LTP_shp_buf_idx; -} - -#endif /* __PREFILTER_FIX_MIPSR1_H__ */ diff --git a/libs/libopus/silk/fixed/mips/warped_autocorrelation_FIX_mipsr1.h b/libs/libopus/silk/fixed/mips/warped_autocorrelation_FIX_mipsr1.h index e803ef0fc..66eb2ed26 100644 --- a/libs/libopus/silk/fixed/mips/warped_autocorrelation_FIX_mipsr1.h +++ b/libs/libopus/silk/fixed/mips/warped_autocorrelation_FIX_mipsr1.h @@ -41,8 +41,8 @@ POSSIBILITY OF SUCH DAMAGE. 
#define QS 14 /* Autocorrelations for a warped frequency axis */ -#define OVERRIDE_silk_warped_autocorrelation_FIX -void silk_warped_autocorrelation_FIX( +#define OVERRIDE_silk_warped_autocorrelation_FIX_c +void silk_warped_autocorrelation_FIX_c( opus_int32 *corr, /* O Result [order + 1] */ opus_int *scale, /* O Scaling of the correlation vector */ const opus_int16 *input, /* I Input data to correlate */ diff --git a/libs/libopus/silk/fixed/noise_shape_analysis_FIX.c b/libs/libopus/silk/fixed/noise_shape_analysis_FIX.c index 22a89f75a..85fea0bf0 100644 --- a/libs/libopus/silk/fixed/noise_shape_analysis_FIX.c +++ b/libs/libopus/silk/fixed/noise_shape_analysis_FIX.c @@ -57,88 +57,79 @@ static OPUS_INLINE opus_int32 warped_gain( /* gain in Q16*/ /* Convert warped filter coefficients to monic pseudo-warped coefficients and limit maximum */ /* amplitude of monic warped coefficients by using bandwidth expansion on the true coefficients */ static OPUS_INLINE void limit_warped_coefs( - opus_int32 *coefs_syn_Q24, - opus_int32 *coefs_ana_Q24, + opus_int32 *coefs_Q24, opus_int lambda_Q16, opus_int32 limit_Q24, opus_int order ) { opus_int i, iter, ind = 0; - opus_int32 tmp, maxabs_Q24, chirp_Q16, gain_syn_Q16, gain_ana_Q16; + opus_int32 tmp, maxabs_Q24, chirp_Q16, gain_Q16; opus_int32 nom_Q16, den_Q24; + opus_int32 limit_Q20, maxabs_Q20; /* Convert to monic coefficients */ lambda_Q16 = -lambda_Q16; for( i = order - 1; i > 0; i-- ) { - coefs_syn_Q24[ i - 1 ] = silk_SMLAWB( coefs_syn_Q24[ i - 1 ], coefs_syn_Q24[ i ], lambda_Q16 ); - coefs_ana_Q24[ i - 1 ] = silk_SMLAWB( coefs_ana_Q24[ i - 1 ], coefs_ana_Q24[ i ], lambda_Q16 ); + coefs_Q24[ i - 1 ] = silk_SMLAWB( coefs_Q24[ i - 1 ], coefs_Q24[ i ], lambda_Q16 ); } lambda_Q16 = -lambda_Q16; - nom_Q16 = silk_SMLAWB( SILK_FIX_CONST( 1.0, 16 ), -(opus_int32)lambda_Q16, lambda_Q16 ); - den_Q24 = silk_SMLAWB( SILK_FIX_CONST( 1.0, 24 ), coefs_syn_Q24[ 0 ], lambda_Q16 ); - gain_syn_Q16 = silk_DIV32_varQ( nom_Q16, den_Q24, 24 ); - den_Q24 = silk_SMLAWB( SILK_FIX_CONST( 1.0, 24 ), coefs_ana_Q24[ 0 ], lambda_Q16 ); - gain_ana_Q16 = silk_DIV32_varQ( nom_Q16, den_Q24, 24 ); + nom_Q16 = silk_SMLAWB( SILK_FIX_CONST( 1.0, 16 ), -(opus_int32)lambda_Q16, lambda_Q16 ); + den_Q24 = silk_SMLAWB( SILK_FIX_CONST( 1.0, 24 ), coefs_Q24[ 0 ], lambda_Q16 ); + gain_Q16 = silk_DIV32_varQ( nom_Q16, den_Q24, 24 ); for( i = 0; i < order; i++ ) { - coefs_syn_Q24[ i ] = silk_SMULWW( gain_syn_Q16, coefs_syn_Q24[ i ] ); - coefs_ana_Q24[ i ] = silk_SMULWW( gain_ana_Q16, coefs_ana_Q24[ i ] ); + coefs_Q24[ i ] = silk_SMULWW( gain_Q16, coefs_Q24[ i ] ); } - + limit_Q20 = silk_RSHIFT(limit_Q24, 4); for( iter = 0; iter < 10; iter++ ) { /* Find maximum absolute value */ maxabs_Q24 = -1; for( i = 0; i < order; i++ ) { - tmp = silk_max( silk_abs_int32( coefs_syn_Q24[ i ] ), silk_abs_int32( coefs_ana_Q24[ i ] ) ); + tmp = silk_abs_int32( coefs_Q24[ i ] ); if( tmp > maxabs_Q24 ) { maxabs_Q24 = tmp; ind = i; } } - if( maxabs_Q24 <= limit_Q24 ) { + /* Use Q20 to avoid any overflow when multiplying by (ind + 1) later. 
*/ + maxabs_Q20 = silk_RSHIFT(maxabs_Q24, 4); + if( maxabs_Q20 <= limit_Q20 ) { /* Coefficients are within range - done */ return; } /* Convert back to true warped coefficients */ for( i = 1; i < order; i++ ) { - coefs_syn_Q24[ i - 1 ] = silk_SMLAWB( coefs_syn_Q24[ i - 1 ], coefs_syn_Q24[ i ], lambda_Q16 ); - coefs_ana_Q24[ i - 1 ] = silk_SMLAWB( coefs_ana_Q24[ i - 1 ], coefs_ana_Q24[ i ], lambda_Q16 ); + coefs_Q24[ i - 1 ] = silk_SMLAWB( coefs_Q24[ i - 1 ], coefs_Q24[ i ], lambda_Q16 ); } - gain_syn_Q16 = silk_INVERSE32_varQ( gain_syn_Q16, 32 ); - gain_ana_Q16 = silk_INVERSE32_varQ( gain_ana_Q16, 32 ); + gain_Q16 = silk_INVERSE32_varQ( gain_Q16, 32 ); for( i = 0; i < order; i++ ) { - coefs_syn_Q24[ i ] = silk_SMULWW( gain_syn_Q16, coefs_syn_Q24[ i ] ); - coefs_ana_Q24[ i ] = silk_SMULWW( gain_ana_Q16, coefs_ana_Q24[ i ] ); + coefs_Q24[ i ] = silk_SMULWW( gain_Q16, coefs_Q24[ i ] ); } /* Apply bandwidth expansion */ chirp_Q16 = SILK_FIX_CONST( 0.99, 16 ) - silk_DIV32_varQ( - silk_SMULWB( maxabs_Q24 - limit_Q24, silk_SMLABB( SILK_FIX_CONST( 0.8, 10 ), SILK_FIX_CONST( 0.1, 10 ), iter ) ), - silk_MUL( maxabs_Q24, ind + 1 ), 22 ); - silk_bwexpander_32( coefs_syn_Q24, order, chirp_Q16 ); - silk_bwexpander_32( coefs_ana_Q24, order, chirp_Q16 ); + silk_SMULWB( maxabs_Q20 - limit_Q20, silk_SMLABB( SILK_FIX_CONST( 0.8, 10 ), SILK_FIX_CONST( 0.1, 10 ), iter ) ), + silk_MUL( maxabs_Q20, ind + 1 ), 22 ); + silk_bwexpander_32( coefs_Q24, order, chirp_Q16 ); /* Convert to monic warped coefficients */ lambda_Q16 = -lambda_Q16; for( i = order - 1; i > 0; i-- ) { - coefs_syn_Q24[ i - 1 ] = silk_SMLAWB( coefs_syn_Q24[ i - 1 ], coefs_syn_Q24[ i ], lambda_Q16 ); - coefs_ana_Q24[ i - 1 ] = silk_SMLAWB( coefs_ana_Q24[ i - 1 ], coefs_ana_Q24[ i ], lambda_Q16 ); + coefs_Q24[ i - 1 ] = silk_SMLAWB( coefs_Q24[ i - 1 ], coefs_Q24[ i ], lambda_Q16 ); } lambda_Q16 = -lambda_Q16; nom_Q16 = silk_SMLAWB( SILK_FIX_CONST( 1.0, 16 ), -(opus_int32)lambda_Q16, lambda_Q16 ); - den_Q24 = silk_SMLAWB( SILK_FIX_CONST( 1.0, 24 ), coefs_syn_Q24[ 0 ], lambda_Q16 ); - gain_syn_Q16 = silk_DIV32_varQ( nom_Q16, den_Q24, 24 ); - den_Q24 = silk_SMLAWB( SILK_FIX_CONST( 1.0, 24 ), coefs_ana_Q24[ 0 ], lambda_Q16 ); - gain_ana_Q16 = silk_DIV32_varQ( nom_Q16, den_Q24, 24 ); + den_Q24 = silk_SMLAWB( SILK_FIX_CONST( 1.0, 24 ), coefs_Q24[ 0 ], lambda_Q16 ); + gain_Q16 = silk_DIV32_varQ( nom_Q16, den_Q24, 24 ); for( i = 0; i < order; i++ ) { - coefs_syn_Q24[ i ] = silk_SMULWW( gain_syn_Q16, coefs_syn_Q24[ i ] ); - coefs_ana_Q24[ i ] = silk_SMULWW( gain_ana_Q16, coefs_ana_Q24[ i ] ); + coefs_Q24[ i ] = silk_SMULWW( gain_Q16, coefs_Q24[ i ] ); } } silk_assert( 0 ); } -#if defined(MIPSr1_ASM) +/* Disable MIPS version until it's updated. 
*/ +#if 0 && defined(MIPSr1_ASM) #include "mips/noise_shape_analysis_FIX_mipsr1.h" #endif @@ -155,14 +146,13 @@ void silk_noise_shape_analysis_FIX( ) { silk_shape_state_FIX *psShapeSt = &psEnc->sShape; - opus_int k, i, nSamples, Qnrg, b_Q14, warping_Q16, scale = 0; - opus_int32 SNR_adj_dB_Q7, HarmBoost_Q16, HarmShapeGain_Q16, Tilt_Q16, tmp32; - opus_int32 nrg, pre_nrg_Q30, log_energy_Q7, log_energy_prev_Q7, energy_variation_Q7; - opus_int32 delta_Q16, BWExp1_Q16, BWExp2_Q16, gain_mult_Q16, gain_add_Q16, strength_Q16, b_Q8; + opus_int k, i, nSamples, nSegs, Qnrg, b_Q14, warping_Q16, scale = 0; + opus_int32 SNR_adj_dB_Q7, HarmShapeGain_Q16, Tilt_Q16, tmp32; + opus_int32 nrg, log_energy_Q7, log_energy_prev_Q7, energy_variation_Q7; + opus_int32 BWExp_Q16, gain_mult_Q16, gain_add_Q16, strength_Q16, b_Q8; opus_int32 auto_corr[ MAX_SHAPE_LPC_ORDER + 1 ]; opus_int32 refl_coef_Q16[ MAX_SHAPE_LPC_ORDER ]; - opus_int32 AR1_Q24[ MAX_SHAPE_LPC_ORDER ]; - opus_int32 AR2_Q24[ MAX_SHAPE_LPC_ORDER ]; + opus_int32 AR_Q24[ MAX_SHAPE_LPC_ORDER ]; VARDECL( opus_int16, x_windowed ); const opus_int16 *x_ptr, *pitch_res_ptr; SAVE_STACK; @@ -209,14 +199,14 @@ void silk_noise_shape_analysis_FIX( if( psEnc->sCmn.indices.signalType == TYPE_VOICED ) { /* Initially set to 0; may be overruled in process_gains(..) */ psEnc->sCmn.indices.quantOffsetType = 0; - psEncCtrl->sparseness_Q8 = 0; } else { /* Sparseness measure, based on relative fluctuations of energy per 2 milliseconds */ nSamples = silk_LSHIFT( psEnc->sCmn.fs_kHz, 1 ); energy_variation_Q7 = 0; log_energy_prev_Q7 = 0; pitch_res_ptr = pitch_res; - for( k = 0; k < silk_SMULBB( SUB_FRAME_LENGTH_MS, psEnc->sCmn.nb_subfr ) / 2; k++ ) { + nSegs = silk_SMULBB( SUB_FRAME_LENGTH_MS, psEnc->sCmn.nb_subfr ) / 2; + for( k = 0; k < nSegs; k++ ) { silk_sum_sqr_shift( &nrg, &scale, pitch_res_ptr, nSamples ); nrg += silk_RSHIFT( nSamples, scale ); /* Q(-scale)*/ @@ -228,18 +218,12 @@ void silk_noise_shape_analysis_FIX( pitch_res_ptr += nSamples; } - psEncCtrl->sparseness_Q8 = silk_RSHIFT( silk_sigm_Q15( silk_SMULWB( energy_variation_Q7 - - SILK_FIX_CONST( 5.0, 7 ), SILK_FIX_CONST( 0.1, 16 ) ) ), 7 ); - /* Set quantization offset depending on sparseness measure */ - if( psEncCtrl->sparseness_Q8 > SILK_FIX_CONST( SPARSENESS_THRESHOLD_QNT_OFFSET, 8 ) ) { + if( energy_variation_Q7 > SILK_FIX_CONST( ENERGY_VARIATION_THRESHOLD_QNT_OFFSET, 7 ) * (nSegs-1) ) { psEnc->sCmn.indices.quantOffsetType = 0; } else { psEnc->sCmn.indices.quantOffsetType = 1; } - - /* Increase coding SNR for sparse signals */ - SNR_adj_dB_Q7 = silk_SMLAWB( SNR_adj_dB_Q7, SILK_FIX_CONST( SPARSE_SNR_INCR_dB, 15 ), psEncCtrl->sparseness_Q8 - SILK_FIX_CONST( 0.5, 8 ) ); } /*******************************/ @@ -247,14 +231,8 @@ void silk_noise_shape_analysis_FIX( /*******************************/ /* More BWE for signals with high prediction gain */ strength_Q16 = silk_SMULWB( psEncCtrl->predGain_Q16, SILK_FIX_CONST( FIND_PITCH_WHITE_NOISE_FRACTION, 16 ) ); - BWExp1_Q16 = BWExp2_Q16 = silk_DIV32_varQ( SILK_FIX_CONST( BANDWIDTH_EXPANSION, 16 ), + BWExp_Q16 = silk_DIV32_varQ( SILK_FIX_CONST( BANDWIDTH_EXPANSION, 16 ), silk_SMLAWW( SILK_FIX_CONST( 1.0, 16 ), strength_Q16, strength_Q16 ), 16 ); - delta_Q16 = silk_SMULWB( SILK_FIX_CONST( 1.0, 16 ) - silk_SMULBB( 3, psEncCtrl->coding_quality_Q14 ), - SILK_FIX_CONST( LOW_RATE_BANDWIDTH_EXPANSION_DELTA, 16 ) ); - BWExp1_Q16 = silk_SUB32( BWExp1_Q16, delta_Q16 ); - BWExp2_Q16 = silk_ADD32( BWExp2_Q16, delta_Q16 ); - /* BWExp1 will be applied after BWExp2, so make it relative 
*/ - BWExp1_Q16 = silk_DIV32_16( silk_LSHIFT( BWExp1_Q16, 14 ), silk_RSHIFT( BWExp2_Q16, 2 ) ); if( psEnc->sCmn.warping_Q16 > 0 ) { /* Slightly more warping in analysis will move quantization noise up in frequency, where it's better masked */ @@ -284,7 +262,7 @@ void silk_noise_shape_analysis_FIX( if( psEnc->sCmn.warping_Q16 > 0 ) { /* Calculate warped auto correlation */ - silk_warped_autocorrelation_FIX( auto_corr, &scale, x_windowed, warping_Q16, psEnc->sCmn.shapeWinLength, psEnc->sCmn.shapingLPCOrder ); + silk_warped_autocorrelation_FIX( auto_corr, &scale, x_windowed, warping_Q16, psEnc->sCmn.shapeWinLength, psEnc->sCmn.shapingLPCOrder, arch ); } else { /* Calculate regular auto correlation */ silk_autocorr( auto_corr, &scale, x_windowed, psEnc->sCmn.shapeWinLength, psEnc->sCmn.shapingLPCOrder + 1, arch ); @@ -299,7 +277,7 @@ void silk_noise_shape_analysis_FIX( silk_assert( nrg >= 0 ); /* Convert reflection coefficients to prediction coefficients */ - silk_k2a_Q16( AR2_Q24, refl_coef_Q16, psEnc->sCmn.shapingLPCOrder ); + silk_k2a_Q16( AR_Q24, refl_coef_Q16, psEnc->sCmn.shapingLPCOrder ); Qnrg = -scale; /* range: -12...30*/ silk_assert( Qnrg >= -12 ); @@ -318,40 +296,34 @@ void silk_noise_shape_analysis_FIX( if( psEnc->sCmn.warping_Q16 > 0 ) { /* Adjust gain for warping */ - gain_mult_Q16 = warped_gain( AR2_Q24, warping_Q16, psEnc->sCmn.shapingLPCOrder ); - silk_assert( psEncCtrl->Gains_Q16[ k ] >= 0 ); - if ( silk_SMULWW( silk_RSHIFT_ROUND( psEncCtrl->Gains_Q16[ k ], 1 ), gain_mult_Q16 ) >= ( silk_int32_MAX >> 1 ) ) { - psEncCtrl->Gains_Q16[ k ] = silk_int32_MAX; + gain_mult_Q16 = warped_gain( AR_Q24, warping_Q16, psEnc->sCmn.shapingLPCOrder ); + silk_assert( psEncCtrl->Gains_Q16[ k ] > 0 ); + if( psEncCtrl->Gains_Q16[ k ] < SILK_FIX_CONST( 0.25, 16 ) ) { + psEncCtrl->Gains_Q16[ k ] = silk_SMULWW( psEncCtrl->Gains_Q16[ k ], gain_mult_Q16 ); } else { - psEncCtrl->Gains_Q16[ k ] = silk_SMULWW( psEncCtrl->Gains_Q16[ k ], gain_mult_Q16 ); + psEncCtrl->Gains_Q16[ k ] = silk_SMULWW( silk_RSHIFT_ROUND( psEncCtrl->Gains_Q16[ k ], 1 ), gain_mult_Q16 ); + if ( psEncCtrl->Gains_Q16[ k ] >= ( silk_int32_MAX >> 1 ) ) { + psEncCtrl->Gains_Q16[ k ] = silk_int32_MAX; + } else { + psEncCtrl->Gains_Q16[ k ] = silk_LSHIFT32( psEncCtrl->Gains_Q16[ k ], 1 ); + } } + silk_assert( psEncCtrl->Gains_Q16[ k ] > 0 ); } - /* Bandwidth expansion for synthesis filter shaping */ - silk_bwexpander_32( AR2_Q24, psEnc->sCmn.shapingLPCOrder, BWExp2_Q16 ); - - /* Compute noise shaping filter coefficients */ - silk_memcpy( AR1_Q24, AR2_Q24, psEnc->sCmn.shapingLPCOrder * sizeof( opus_int32 ) ); - - /* Bandwidth expansion for analysis filter shaping */ - silk_assert( BWExp1_Q16 <= SILK_FIX_CONST( 1.0, 16 ) ); - silk_bwexpander_32( AR1_Q24, psEnc->sCmn.shapingLPCOrder, BWExp1_Q16 ); - - /* Ratio of prediction gains, in energy domain */ - pre_nrg_Q30 = silk_LPC_inverse_pred_gain_Q24( AR2_Q24, psEnc->sCmn.shapingLPCOrder ); - nrg = silk_LPC_inverse_pred_gain_Q24( AR1_Q24, psEnc->sCmn.shapingLPCOrder ); - - /*psEncCtrl->GainsPre[ k ] = 1.0f - 0.7f * ( 1.0f - pre_nrg / nrg ) = 0.3f + 0.7f * pre_nrg / nrg;*/ - pre_nrg_Q30 = silk_LSHIFT32( silk_SMULWB( pre_nrg_Q30, SILK_FIX_CONST( 0.7, 15 ) ), 1 ); - psEncCtrl->GainsPre_Q14[ k ] = ( opus_int ) SILK_FIX_CONST( 0.3, 14 ) + silk_DIV32_varQ( pre_nrg_Q30, nrg, 14 ); + /* Bandwidth expansion */ + silk_bwexpander_32( AR_Q24, psEnc->sCmn.shapingLPCOrder, BWExp_Q16 ); - /* Convert to monic warped prediction coefficients and limit absolute values */ - limit_warped_coefs( AR2_Q24, AR1_Q24, 
warping_Q16, SILK_FIX_CONST( 3.999, 24 ), psEnc->sCmn.shapingLPCOrder ); + if( psEnc->sCmn.warping_Q16 > 0 ) { + /* Convert to monic warped prediction coefficients and limit absolute values */ + limit_warped_coefs( AR_Q24, warping_Q16, SILK_FIX_CONST( 3.999, 24 ), psEnc->sCmn.shapingLPCOrder ); - /* Convert from Q24 to Q13 and store in int16 */ - for( i = 0; i < psEnc->sCmn.shapingLPCOrder; i++ ) { - psEncCtrl->AR1_Q13[ k * MAX_SHAPE_LPC_ORDER + i ] = (opus_int16)silk_SAT16( silk_RSHIFT_ROUND( AR1_Q24[ i ], 11 ) ); - psEncCtrl->AR2_Q13[ k * MAX_SHAPE_LPC_ORDER + i ] = (opus_int16)silk_SAT16( silk_RSHIFT_ROUND( AR2_Q24[ i ], 11 ) ); + /* Convert from Q24 to Q13 and store in int16 */ + for( i = 0; i < psEnc->sCmn.shapingLPCOrder; i++ ) { + psEncCtrl->AR_Q13[ k * MAX_SHAPE_LPC_ORDER + i ] = (opus_int16)silk_SAT16( silk_RSHIFT_ROUND( AR_Q24[ i ], 11 ) ); + } + } else { + silk_LPC_fit( &psEncCtrl->AR_Q13[ k * MAX_SHAPE_LPC_ORDER ], AR_Q24, 13, 24, psEnc->sCmn.shapingLPCOrder ); } } @@ -368,11 +340,6 @@ void silk_noise_shape_analysis_FIX( psEncCtrl->Gains_Q16[ k ] = silk_ADD_POS_SAT32( psEncCtrl->Gains_Q16[ k ], gain_add_Q16 ); } - gain_mult_Q16 = SILK_FIX_CONST( 1.0, 16 ) + silk_RSHIFT_ROUND( silk_MLA( SILK_FIX_CONST( INPUT_TILT, 26 ), - psEncCtrl->coding_quality_Q14, SILK_FIX_CONST( HIGH_RATE_INPUT_TILT, 12 ) ), 10 ); - for( k = 0; k < psEnc->sCmn.nb_subfr; k++ ) { - psEncCtrl->GainsPre_Q14[ k ] = silk_SMULWB( gain_mult_Q16, psEncCtrl->GainsPre_Q14[ k ] ); - } /************************************************/ /* Control low-frequency shaping and noise tilt */ @@ -410,14 +377,6 @@ void silk_noise_shape_analysis_FIX( /****************************/ /* HARMONIC SHAPING CONTROL */ /****************************/ - /* Control boosting of harmonic frequencies */ - HarmBoost_Q16 = silk_SMULWB( silk_SMULWB( SILK_FIX_CONST( 1.0, 17 ) - silk_LSHIFT( psEncCtrl->coding_quality_Q14, 3 ), - psEnc->LTPCorr_Q15 ), SILK_FIX_CONST( LOW_RATE_HARMONIC_BOOST, 16 ) ); - - /* More harmonic boost for noisy input signals */ - HarmBoost_Q16 = silk_SMLAWB( HarmBoost_Q16, - SILK_FIX_CONST( 1.0, 16 ) - silk_LSHIFT( psEncCtrl->input_quality_Q14, 2 ), SILK_FIX_CONST( LOW_INPUT_QUALITY_HARMONIC_BOOST, 16 ) ); - if( USE_HARM_SHAPING && psEnc->sCmn.indices.signalType == TYPE_VOICED ) { /* More harmonic noise shaping for high bitrates or noisy input */ HarmShapeGain_Q16 = silk_SMLAWB( SILK_FIX_CONST( HARMONIC_SHAPING, 16 ), @@ -435,14 +394,11 @@ void silk_noise_shape_analysis_FIX( /* Smooth over subframes */ /*************************/ for( k = 0; k < MAX_NB_SUBFR; k++ ) { - psShapeSt->HarmBoost_smth_Q16 = - silk_SMLAWB( psShapeSt->HarmBoost_smth_Q16, HarmBoost_Q16 - psShapeSt->HarmBoost_smth_Q16, SILK_FIX_CONST( SUBFR_SMTH_COEF, 16 ) ); psShapeSt->HarmShapeGain_smth_Q16 = silk_SMLAWB( psShapeSt->HarmShapeGain_smth_Q16, HarmShapeGain_Q16 - psShapeSt->HarmShapeGain_smth_Q16, SILK_FIX_CONST( SUBFR_SMTH_COEF, 16 ) ); psShapeSt->Tilt_smth_Q16 = silk_SMLAWB( psShapeSt->Tilt_smth_Q16, Tilt_Q16 - psShapeSt->Tilt_smth_Q16, SILK_FIX_CONST( SUBFR_SMTH_COEF, 16 ) ); - psEncCtrl->HarmBoost_Q14[ k ] = ( opus_int )silk_RSHIFT_ROUND( psShapeSt->HarmBoost_smth_Q16, 2 ); psEncCtrl->HarmShapeGain_Q14[ k ] = ( opus_int )silk_RSHIFT_ROUND( psShapeSt->HarmShapeGain_smth_Q16, 2 ); psEncCtrl->Tilt_Q14[ k ] = ( opus_int )silk_RSHIFT_ROUND( psShapeSt->Tilt_smth_Q16, 2 ); } diff --git a/libs/libopus/silk/fixed/pitch_analysis_core_FIX.c b/libs/libopus/silk/fixed/pitch_analysis_core_FIX.c index 01bb9fc0a..14729046d 100644 --- 
a/libs/libopus/silk/fixed/pitch_analysis_core_FIX.c +++ b/libs/libopus/silk/fixed/pitch_analysis_core_FIX.c @@ -80,7 +80,7 @@ static void silk_P_Ana_calc_energy_st3( /* FIXED POINT CORE PITCH ANALYSIS FUNCTION */ /*************************************************************/ opus_int silk_pitch_analysis_core( /* O Voicing estimate: 0 voiced, 1 unvoiced */ - const opus_int16 *frame, /* I Signal of length PE_FRAME_LENGTH_MS*Fs_kHz */ + const opus_int16 *frame_unscaled, /* I Signal of length PE_FRAME_LENGTH_MS*Fs_kHz */ opus_int *pitch_out, /* O 4 pitch lag values */ opus_int16 *lagIndex, /* O Lag Index */ opus_int8 *contourIndex, /* O Pitch contour Index */ @@ -94,16 +94,17 @@ opus_int silk_pitch_analysis_core( /* O Voicing estimate: 0 int arch /* I Run-time architecture */ ) { - VARDECL( opus_int16, frame_8kHz ); + VARDECL( opus_int16, frame_8kHz_buf ); VARDECL( opus_int16, frame_4kHz ); + VARDECL( opus_int16, frame_scaled ); opus_int32 filt_state[ 6 ]; - const opus_int16 *input_frame_ptr; + const opus_int16 *frame, *frame_8kHz; opus_int i, k, d, j; VARDECL( opus_int16, C ); VARDECL( opus_int32, xcorr32 ); const opus_int16 *target_ptr, *basis_ptr; - opus_int32 cross_corr, normalizer, energy, shift, energy_basis, energy_target; - opus_int d_srch[ PE_D_SRCH_LENGTH ], Cmax, length_d_srch, length_d_comp; + opus_int32 cross_corr, normalizer, energy, energy_basis, energy_target; + opus_int d_srch[ PE_D_SRCH_LENGTH ], Cmax, length_d_srch, length_d_comp, shift; VARDECL( opus_int16, d_comp ); opus_int32 sum, threshold, lag_counter; opus_int CBimax, CBimax_new, CBimax_old, lag, start_lag, end_lag, lag_new; @@ -119,12 +120,13 @@ opus_int silk_pitch_analysis_core( /* O Voicing estimate: 0 opus_int32 delta_lag_log2_sqr_Q7, lag_log2_Q7, prevLag_log2_Q7, prev_lag_bias_Q13; const opus_int8 *Lag_CB_ptr; SAVE_STACK; + /* Check for valid sampling frequency */ - silk_assert( Fs_kHz == 8 || Fs_kHz == 12 || Fs_kHz == 16 ); + celt_assert( Fs_kHz == 8 || Fs_kHz == 12 || Fs_kHz == 16 ); /* Check for valid complexity setting */ - silk_assert( complexity >= SILK_PE_MIN_COMPLEX ); - silk_assert( complexity <= SILK_PE_MAX_COMPLEX ); + celt_assert( complexity >= SILK_PE_MIN_COMPLEX ); + celt_assert( complexity <= SILK_PE_MAX_COMPLEX ); silk_assert( search_thres1_Q16 >= 0 && search_thres1_Q16 <= (1<<16) ); silk_assert( search_thres2_Q13 >= 0 && search_thres2_Q13 <= (1<<13) ); @@ -137,17 +139,33 @@ opus_int silk_pitch_analysis_core( /* O Voicing estimate: 0 min_lag = PE_MIN_LAG_MS * Fs_kHz; max_lag = PE_MAX_LAG_MS * Fs_kHz - 1; + /* Downscale input if necessary */ + silk_sum_sqr_shift( &energy, &shift, frame_unscaled, frame_length ); + shift += 3 - silk_CLZ32( energy ); /* at least two bits headroom */ + ALLOC( frame_scaled, frame_length, opus_int16 ); + if( shift > 0 ) { + shift = silk_RSHIFT( shift + 1, 1 ); + for( i = 0; i < frame_length; i++ ) { + frame_scaled[ i ] = silk_RSHIFT( frame_unscaled[ i ], shift ); + } + frame = frame_scaled; + } else { + frame = frame_unscaled; + } + + ALLOC( frame_8kHz_buf, ( Fs_kHz == 8 ) ? 
1 : frame_length_8kHz, opus_int16 ); /* Resample from input sampled at Fs_kHz to 8 kHz */ - ALLOC( frame_8kHz, frame_length_8kHz, opus_int16 ); if( Fs_kHz == 16 ) { silk_memset( filt_state, 0, 2 * sizeof( opus_int32 ) ); - silk_resampler_down2( filt_state, frame_8kHz, frame, frame_length ); + silk_resampler_down2( filt_state, frame_8kHz_buf, frame, frame_length ); + frame_8kHz = frame_8kHz_buf; } else if( Fs_kHz == 12 ) { silk_memset( filt_state, 0, 6 * sizeof( opus_int32 ) ); - silk_resampler_down2_3( filt_state, frame_8kHz, frame, frame_length ); + silk_resampler_down2_3( filt_state, frame_8kHz_buf, frame, frame_length ); + frame_8kHz = frame_8kHz_buf; } else { - silk_assert( Fs_kHz == 8 ); - silk_memcpy( frame_8kHz, frame, frame_length_8kHz * sizeof(opus_int16) ); + celt_assert( Fs_kHz == 8 ); + frame_8kHz = frame; } /* Decimate again to 4 kHz */ @@ -160,19 +178,6 @@ opus_int silk_pitch_analysis_core( /* O Voicing estimate: 0 frame_4kHz[ i ] = silk_ADD_SAT16( frame_4kHz[ i ], frame_4kHz[ i - 1 ] ); } - /******************************************************************************* - ** Scale 4 kHz signal down to prevent correlations measures from overflowing - ** find scaling as max scaling for each 8kHz(?) subframe - *******************************************************************************/ - - /* Inner product is calculated with different lengths, so scale for the worst case */ - silk_sum_sqr_shift( &energy, &shift, frame_4kHz, frame_length_4kHz ); - if( shift > 0 ) { - shift = silk_RSHIFT( shift, 1 ); - for( i = 0; i < frame_length_4kHz; i++ ) { - frame_4kHz[ i ] = silk_RSHIFT( frame_4kHz[ i ], shift ); - } - } /****************************************************************************** * FIRST STAGE, operating in 4 khz @@ -183,14 +188,14 @@ opus_int silk_pitch_analysis_core( /* O Voicing estimate: 0 target_ptr = &frame_4kHz[ silk_LSHIFT( SF_LENGTH_4KHZ, 2 ) ]; for( k = 0; k < nb_subfr >> 1; k++ ) { /* Check that we are within range of the array */ - silk_assert( target_ptr >= frame_4kHz ); - silk_assert( target_ptr + SF_LENGTH_8KHZ <= frame_4kHz + frame_length_4kHz ); + celt_assert( target_ptr >= frame_4kHz ); + celt_assert( target_ptr + SF_LENGTH_8KHZ <= frame_4kHz + frame_length_4kHz ); basis_ptr = target_ptr - MIN_LAG_4KHZ; /* Check that we are within range of the array */ - silk_assert( basis_ptr >= frame_4kHz ); - silk_assert( basis_ptr + SF_LENGTH_8KHZ <= frame_4kHz + frame_length_4kHz ); + celt_assert( basis_ptr >= frame_4kHz ); + celt_assert( basis_ptr + SF_LENGTH_8KHZ <= frame_4kHz + frame_length_4kHz ); celt_pitch_xcorr( target_ptr, target_ptr - MAX_LAG_4KHZ, xcorr32, SF_LENGTH_8KHZ, MAX_LAG_4KHZ - MIN_LAG_4KHZ + 1, arch ); @@ -244,7 +249,7 @@ opus_int silk_pitch_analysis_core( /* O Voicing estimate: 0 /* Sort */ length_d_srch = silk_ADD_LSHIFT32( 4, complexity, 1 ); - silk_assert( 3 * length_d_srch <= PE_D_SRCH_LENGTH ); + celt_assert( 3 * length_d_srch <= PE_D_SRCH_LENGTH ); silk_insertion_sort_decreasing_int16( C, d_srch, CSTRIDE_4KHZ, length_d_srch ); @@ -269,7 +274,7 @@ opus_int silk_pitch_analysis_core( /* O Voicing estimate: 0 break; } } - silk_assert( length_d_srch > 0 ); + celt_assert( length_d_srch > 0 ); ALLOC( d_comp, D_COMP_STRIDE, opus_int16 ); for( i = D_COMP_MIN; i < D_COMP_MAX; i++ ) { @@ -311,18 +316,6 @@ opus_int silk_pitch_analysis_core( /* O Voicing estimate: 0 ** SECOND STAGE, operating at 8 kHz, on lag sections with high correlation *************************************************************************************/ - 
/****************************************************************************** - ** Scale signal down to avoid correlations measures from overflowing - *******************************************************************************/ - /* find scaling as max scaling for each subframe */ - silk_sum_sqr_shift( &energy, &shift, frame_8kHz, frame_length_8kHz ); - if( shift > 0 ) { - shift = silk_RSHIFT( shift, 1 ); - for( i = 0; i < frame_length_8kHz; i++ ) { - frame_8kHz[ i ] = silk_RSHIFT( frame_8kHz[ i ], shift ); - } - } - /********************************************************************************* * Find energy of each subframe projected onto its history, for a range of delays *********************************************************************************/ @@ -332,8 +325,8 @@ opus_int silk_pitch_analysis_core( /* O Voicing estimate: 0 for( k = 0; k < nb_subfr; k++ ) { /* Check that we are within range of the array */ - silk_assert( target_ptr >= frame_8kHz ); - silk_assert( target_ptr + SF_LENGTH_8KHZ <= frame_8kHz + frame_length_8kHz ); + celt_assert( target_ptr >= frame_8kHz ); + celt_assert( target_ptr + SF_LENGTH_8KHZ <= frame_8kHz + frame_length_8kHz ); energy_target = silk_ADD32( silk_inner_prod_aligned( target_ptr, target_ptr, SF_LENGTH_8KHZ, arch ), 1 ); for( j = 0; j < length_d_comp; j++ ) { @@ -462,24 +455,6 @@ opus_int silk_pitch_analysis_core( /* O Voicing estimate: 0 silk_assert( *LTPCorr_Q15 >= 0 ); if( Fs_kHz > 8 ) { - VARDECL( opus_int16, scratch_mem ); - /***************************************************************************/ - /* Scale input signal down to avoid correlations measures from overflowing */ - /***************************************************************************/ - /* find scaling as max scaling for each subframe */ - silk_sum_sqr_shift( &energy, &shift, frame, frame_length ); - ALLOC( scratch_mem, shift > 0 ? 
frame_length : ALLOC_NONE, opus_int16 ); - if( shift > 0 ) { - /* Move signal to scratch mem because the input signal should be unchanged */ - shift = silk_RSHIFT( shift, 1 ); - for( i = 0; i < frame_length; i++ ) { - scratch_mem[ i ] = silk_RSHIFT( frame[ i ], shift ); - } - input_frame_ptr = scratch_mem; - } else { - input_frame_ptr = frame; - } - /* Search in original signal */ CBimax_old = CBimax; @@ -519,14 +494,14 @@ opus_int silk_pitch_analysis_core( /* O Voicing estimate: 0 /* Calculate the correlations and energies needed in stage 3 */ ALLOC( energies_st3, nb_subfr * nb_cbk_search, silk_pe_stage3_vals ); ALLOC( cross_corr_st3, nb_subfr * nb_cbk_search, silk_pe_stage3_vals ); - silk_P_Ana_calc_corr_st3( cross_corr_st3, input_frame_ptr, start_lag, sf_length, nb_subfr, complexity, arch ); - silk_P_Ana_calc_energy_st3( energies_st3, input_frame_ptr, start_lag, sf_length, nb_subfr, complexity, arch ); + silk_P_Ana_calc_corr_st3( cross_corr_st3, frame, start_lag, sf_length, nb_subfr, complexity, arch ); + silk_P_Ana_calc_energy_st3( energies_st3, frame, start_lag, sf_length, nb_subfr, complexity, arch ); lag_counter = 0; silk_assert( lag == silk_SAT16( lag ) ); contour_bias_Q15 = silk_DIV32_16( SILK_FIX_CONST( PE_FLATCONTOUR_BIAS, 15 ), lag ); - target_ptr = &input_frame_ptr[ PE_LTP_MEM_LENGTH_MS * Fs_kHz ]; + target_ptr = &frame[ PE_LTP_MEM_LENGTH_MS * Fs_kHz ]; energy_target = silk_ADD32( silk_inner_prod_aligned( target_ptr, target_ptr, nb_subfr * sf_length, arch ), 1 ); for( d = start_lag; d <= end_lag; d++ ) { for( j = 0; j < nb_cbk_search; j++ ) { @@ -575,7 +550,7 @@ opus_int silk_pitch_analysis_core( /* O Voicing estimate: 0 *lagIndex = (opus_int16)( lag - MIN_LAG_8KHZ ); *contourIndex = (opus_int8)CBimax; } - silk_assert( *lagIndex >= 0 ); + celt_assert( *lagIndex >= 0 ); /* return as voiced */ RESTORE_STACK; return 0; @@ -612,8 +587,8 @@ static void silk_P_Ana_calc_corr_st3( const opus_int8 *Lag_range_ptr, *Lag_CB_ptr; SAVE_STACK; - silk_assert( complexity >= SILK_PE_MIN_COMPLEX ); - silk_assert( complexity <= SILK_PE_MAX_COMPLEX ); + celt_assert( complexity >= SILK_PE_MIN_COMPLEX ); + celt_assert( complexity <= SILK_PE_MAX_COMPLEX ); if( nb_subfr == PE_MAX_NB_SUBFR ) { Lag_range_ptr = &silk_Lag_range_stage3[ complexity ][ 0 ][ 0 ]; @@ -621,7 +596,7 @@ static void silk_P_Ana_calc_corr_st3( nb_cbk_search = silk_nb_cbk_searchs_stage3[ complexity ]; cbk_size = PE_NB_CBKS_STAGE3_MAX; } else { - silk_assert( nb_subfr == PE_MAX_NB_SUBFR >> 1); + celt_assert( nb_subfr == PE_MAX_NB_SUBFR >> 1); Lag_range_ptr = &silk_Lag_range_stage3_10_ms[ 0 ][ 0 ]; Lag_CB_ptr = &silk_CB_lags_stage3_10_ms[ 0 ][ 0 ]; nb_cbk_search = PE_NB_CBKS_STAGE3_10MS; @@ -637,7 +612,7 @@ static void silk_P_Ana_calc_corr_st3( /* Calculate the correlations for each subframe */ lag_low = matrix_ptr( Lag_range_ptr, k, 0, 2 ); lag_high = matrix_ptr( Lag_range_ptr, k, 1, 2 ); - silk_assert(lag_high-lag_low+1 <= SCRATCH_SIZE); + celt_assert(lag_high-lag_low+1 <= SCRATCH_SIZE); celt_pitch_xcorr( target_ptr, target_ptr - start_lag - lag_high, xcorr32, sf_length, lag_high - lag_low + 1, arch ); for( j = lag_low; j <= lag_high; j++ ) { silk_assert( lag_counter < SCRATCH_SIZE ); @@ -684,8 +659,8 @@ static void silk_P_Ana_calc_energy_st3( const opus_int8 *Lag_range_ptr, *Lag_CB_ptr; SAVE_STACK; - silk_assert( complexity >= SILK_PE_MIN_COMPLEX ); - silk_assert( complexity <= SILK_PE_MAX_COMPLEX ); + celt_assert( complexity >= SILK_PE_MIN_COMPLEX ); + celt_assert( complexity <= SILK_PE_MAX_COMPLEX ); if( nb_subfr == 
PE_MAX_NB_SUBFR ) { Lag_range_ptr = &silk_Lag_range_stage3[ complexity ][ 0 ][ 0 ]; @@ -693,7 +668,7 @@ static void silk_P_Ana_calc_energy_st3( nb_cbk_search = silk_nb_cbk_searchs_stage3[ complexity ]; cbk_size = PE_NB_CBKS_STAGE3_MAX; } else { - silk_assert( nb_subfr == PE_MAX_NB_SUBFR >> 1); + celt_assert( nb_subfr == PE_MAX_NB_SUBFR >> 1); Lag_range_ptr = &silk_Lag_range_stage3_10_ms[ 0 ][ 0 ]; Lag_CB_ptr = &silk_CB_lags_stage3_10_ms[ 0 ][ 0 ]; nb_cbk_search = PE_NB_CBKS_STAGE3_10MS; diff --git a/libs/libopus/silk/fixed/prefilter_FIX.c b/libs/libopus/silk/fixed/prefilter_FIX.c deleted file mode 100644 index 6a8e35152..000000000 --- a/libs/libopus/silk/fixed/prefilter_FIX.c +++ /dev/null @@ -1,221 +0,0 @@ -/*********************************************************************** -Copyright (c) 2006-2011, Skype Limited. All rights reserved. -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions -are met: -- Redistributions of source code must retain the above copyright notice, -this list of conditions and the following disclaimer. -- Redistributions in binary form must reproduce the above copyright -notice, this list of conditions and the following disclaimer in the -documentation and/or other materials provided with the distribution. -- Neither the name of Internet Society, IETF or IETF Trust, nor the -names of specific contributors, may be used to endorse or promote -products derived from this software without specific prior written -permission. -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE -LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -POSSIBILITY OF SUCH DAMAGE. 
-***********************************************************************/ - -#ifdef HAVE_CONFIG_H -#include "config.h" -#endif - -#include "main_FIX.h" -#include "stack_alloc.h" -#include "tuning_parameters.h" - -#if defined(MIPSr1_ASM) -#include "mips/prefilter_FIX_mipsr1.h" -#endif - - -#if !defined(OVERRIDE_silk_warped_LPC_analysis_filter_FIX) -#define silk_warped_LPC_analysis_filter_FIX(state, res_Q2, coef_Q13, input, lambda_Q16, length, order, arch) \ - ((void)(arch),silk_warped_LPC_analysis_filter_FIX_c(state, res_Q2, coef_Q13, input, lambda_Q16, length, order)) -#endif - -/* Prefilter for finding Quantizer input signal */ -static OPUS_INLINE void silk_prefilt_FIX( - silk_prefilter_state_FIX *P, /* I/O state */ - opus_int32 st_res_Q12[], /* I short term residual signal */ - opus_int32 xw_Q3[], /* O prefiltered signal */ - opus_int32 HarmShapeFIRPacked_Q12, /* I Harmonic shaping coeficients */ - opus_int Tilt_Q14, /* I Tilt shaping coeficient */ - opus_int32 LF_shp_Q14, /* I Low-frequancy shaping coeficients */ - opus_int lag, /* I Lag for harmonic shaping */ - opus_int length /* I Length of signals */ -); - -void silk_warped_LPC_analysis_filter_FIX_c( - opus_int32 state[], /* I/O State [order + 1] */ - opus_int32 res_Q2[], /* O Residual signal [length] */ - const opus_int16 coef_Q13[], /* I Coefficients [order] */ - const opus_int16 input[], /* I Input signal [length] */ - const opus_int16 lambda_Q16, /* I Warping factor */ - const opus_int length, /* I Length of input signal */ - const opus_int order /* I Filter order (even) */ -) -{ - opus_int n, i; - opus_int32 acc_Q11, tmp1, tmp2; - - /* Order must be even */ - silk_assert( ( order & 1 ) == 0 ); - - for( n = 0; n < length; n++ ) { - /* Output of lowpass section */ - tmp2 = silk_SMLAWB( state[ 0 ], state[ 1 ], lambda_Q16 ); - state[ 0 ] = silk_LSHIFT( input[ n ], 14 ); - /* Output of allpass section */ - tmp1 = silk_SMLAWB( state[ 1 ], state[ 2 ] - tmp2, lambda_Q16 ); - state[ 1 ] = tmp2; - acc_Q11 = silk_RSHIFT( order, 1 ); - acc_Q11 = silk_SMLAWB( acc_Q11, tmp2, coef_Q13[ 0 ] ); - /* Loop over allpass sections */ - for( i = 2; i < order; i += 2 ) { - /* Output of allpass section */ - tmp2 = silk_SMLAWB( state[ i ], state[ i + 1 ] - tmp1, lambda_Q16 ); - state[ i ] = tmp1; - acc_Q11 = silk_SMLAWB( acc_Q11, tmp1, coef_Q13[ i - 1 ] ); - /* Output of allpass section */ - tmp1 = silk_SMLAWB( state[ i + 1 ], state[ i + 2 ] - tmp2, lambda_Q16 ); - state[ i + 1 ] = tmp2; - acc_Q11 = silk_SMLAWB( acc_Q11, tmp2, coef_Q13[ i ] ); - } - state[ order ] = tmp1; - acc_Q11 = silk_SMLAWB( acc_Q11, tmp1, coef_Q13[ order - 1 ] ); - res_Q2[ n ] = silk_LSHIFT( (opus_int32)input[ n ], 2 ) - silk_RSHIFT_ROUND( acc_Q11, 9 ); - } -} - -void silk_prefilter_FIX( - silk_encoder_state_FIX *psEnc, /* I/O Encoder state */ - const silk_encoder_control_FIX *psEncCtrl, /* I Encoder control */ - opus_int32 xw_Q3[], /* O Weighted signal */ - const opus_int16 x[] /* I Speech signal */ -) -{ - silk_prefilter_state_FIX *P = &psEnc->sPrefilt; - opus_int j, k, lag; - opus_int32 tmp_32; - const opus_int16 *AR1_shp_Q13; - const opus_int16 *px; - opus_int32 *pxw_Q3; - opus_int HarmShapeGain_Q12, Tilt_Q14; - opus_int32 HarmShapeFIRPacked_Q12, LF_shp_Q14; - VARDECL( opus_int32, x_filt_Q12 ); - VARDECL( opus_int32, st_res_Q2 ); - opus_int16 B_Q10[ 2 ]; - SAVE_STACK; - - /* Set up pointers */ - px = x; - pxw_Q3 = xw_Q3; - lag = P->lagPrev; - ALLOC( x_filt_Q12, psEnc->sCmn.subfr_length, opus_int32 ); - ALLOC( st_res_Q2, psEnc->sCmn.subfr_length, opus_int32 ); - for( k = 0; k < 
psEnc->sCmn.nb_subfr; k++ ) { - /* Update Variables that change per sub frame */ - if( psEnc->sCmn.indices.signalType == TYPE_VOICED ) { - lag = psEncCtrl->pitchL[ k ]; - } - - /* Noise shape parameters */ - HarmShapeGain_Q12 = silk_SMULWB( (opus_int32)psEncCtrl->HarmShapeGain_Q14[ k ], 16384 - psEncCtrl->HarmBoost_Q14[ k ] ); - silk_assert( HarmShapeGain_Q12 >= 0 ); - HarmShapeFIRPacked_Q12 = silk_RSHIFT( HarmShapeGain_Q12, 2 ); - HarmShapeFIRPacked_Q12 |= silk_LSHIFT( (opus_int32)silk_RSHIFT( HarmShapeGain_Q12, 1 ), 16 ); - Tilt_Q14 = psEncCtrl->Tilt_Q14[ k ]; - LF_shp_Q14 = psEncCtrl->LF_shp_Q14[ k ]; - AR1_shp_Q13 = &psEncCtrl->AR1_Q13[ k * MAX_SHAPE_LPC_ORDER ]; - - /* Short term FIR filtering*/ - silk_warped_LPC_analysis_filter_FIX( P->sAR_shp, st_res_Q2, AR1_shp_Q13, px, - psEnc->sCmn.warping_Q16, psEnc->sCmn.subfr_length, psEnc->sCmn.shapingLPCOrder, psEnc->sCmn.arch ); - - /* Reduce (mainly) low frequencies during harmonic emphasis */ - B_Q10[ 0 ] = silk_RSHIFT_ROUND( psEncCtrl->GainsPre_Q14[ k ], 4 ); - tmp_32 = silk_SMLABB( SILK_FIX_CONST( INPUT_TILT, 26 ), psEncCtrl->HarmBoost_Q14[ k ], HarmShapeGain_Q12 ); /* Q26 */ - tmp_32 = silk_SMLABB( tmp_32, psEncCtrl->coding_quality_Q14, SILK_FIX_CONST( HIGH_RATE_INPUT_TILT, 12 ) ); /* Q26 */ - tmp_32 = silk_SMULWB( tmp_32, -psEncCtrl->GainsPre_Q14[ k ] ); /* Q24 */ - tmp_32 = silk_RSHIFT_ROUND( tmp_32, 14 ); /* Q10 */ - B_Q10[ 1 ]= silk_SAT16( tmp_32 ); - x_filt_Q12[ 0 ] = silk_MLA( silk_MUL( st_res_Q2[ 0 ], B_Q10[ 0 ] ), P->sHarmHP_Q2, B_Q10[ 1 ] ); - for( j = 1; j < psEnc->sCmn.subfr_length; j++ ) { - x_filt_Q12[ j ] = silk_MLA( silk_MUL( st_res_Q2[ j ], B_Q10[ 0 ] ), st_res_Q2[ j - 1 ], B_Q10[ 1 ] ); - } - P->sHarmHP_Q2 = st_res_Q2[ psEnc->sCmn.subfr_length - 1 ]; - - silk_prefilt_FIX( P, x_filt_Q12, pxw_Q3, HarmShapeFIRPacked_Q12, Tilt_Q14, LF_shp_Q14, lag, psEnc->sCmn.subfr_length ); - - px += psEnc->sCmn.subfr_length; - pxw_Q3 += psEnc->sCmn.subfr_length; - } - - P->lagPrev = psEncCtrl->pitchL[ psEnc->sCmn.nb_subfr - 1 ]; - RESTORE_STACK; -} - -#ifndef OVERRIDE_silk_prefilt_FIX -/* Prefilter for finding Quantizer input signal */ -static OPUS_INLINE void silk_prefilt_FIX( - silk_prefilter_state_FIX *P, /* I/O state */ - opus_int32 st_res_Q12[], /* I short term residual signal */ - opus_int32 xw_Q3[], /* O prefiltered signal */ - opus_int32 HarmShapeFIRPacked_Q12, /* I Harmonic shaping coeficients */ - opus_int Tilt_Q14, /* I Tilt shaping coeficient */ - opus_int32 LF_shp_Q14, /* I Low-frequancy shaping coeficients */ - opus_int lag, /* I Lag for harmonic shaping */ - opus_int length /* I Length of signals */ -) -{ - opus_int i, idx, LTP_shp_buf_idx; - opus_int32 n_LTP_Q12, n_Tilt_Q10, n_LF_Q10; - opus_int32 sLF_MA_shp_Q12, sLF_AR_shp_Q12; - opus_int16 *LTP_shp_buf; - - /* To speed up use temp variables instead of using the struct */ - LTP_shp_buf = P->sLTP_shp; - LTP_shp_buf_idx = P->sLTP_shp_buf_idx; - sLF_AR_shp_Q12 = P->sLF_AR_shp_Q12; - sLF_MA_shp_Q12 = P->sLF_MA_shp_Q12; - - for( i = 0; i < length; i++ ) { - if( lag > 0 ) { - /* unrolled loop */ - silk_assert( HARM_SHAPE_FIR_TAPS == 3 ); - idx = lag + LTP_shp_buf_idx; - n_LTP_Q12 = silk_SMULBB( LTP_shp_buf[ ( idx - HARM_SHAPE_FIR_TAPS / 2 - 1) & LTP_MASK ], HarmShapeFIRPacked_Q12 ); - n_LTP_Q12 = silk_SMLABT( n_LTP_Q12, LTP_shp_buf[ ( idx - HARM_SHAPE_FIR_TAPS / 2 ) & LTP_MASK ], HarmShapeFIRPacked_Q12 ); - n_LTP_Q12 = silk_SMLABB( n_LTP_Q12, LTP_shp_buf[ ( idx - HARM_SHAPE_FIR_TAPS / 2 + 1) & LTP_MASK ], HarmShapeFIRPacked_Q12 ); - } else { - n_LTP_Q12 = 0; - } - - 
n_Tilt_Q10 = silk_SMULWB( sLF_AR_shp_Q12, Tilt_Q14 ); - n_LF_Q10 = silk_SMLAWB( silk_SMULWT( sLF_AR_shp_Q12, LF_shp_Q14 ), sLF_MA_shp_Q12, LF_shp_Q14 ); - - sLF_AR_shp_Q12 = silk_SUB32( st_res_Q12[ i ], silk_LSHIFT( n_Tilt_Q10, 2 ) ); - sLF_MA_shp_Q12 = silk_SUB32( sLF_AR_shp_Q12, silk_LSHIFT( n_LF_Q10, 2 ) ); - - LTP_shp_buf_idx = ( LTP_shp_buf_idx - 1 ) & LTP_MASK; - LTP_shp_buf[ LTP_shp_buf_idx ] = (opus_int16)silk_SAT16( silk_RSHIFT_ROUND( sLF_MA_shp_Q12, 12 ) ); - - xw_Q3[i] = silk_RSHIFT_ROUND( silk_SUB32( sLF_MA_shp_Q12, n_LTP_Q12 ), 9 ); - } - - /* Copy temp variable back to state */ - P->sLF_AR_shp_Q12 = sLF_AR_shp_Q12; - P->sLF_MA_shp_Q12 = sLF_MA_shp_Q12; - P->sLTP_shp_buf_idx = LTP_shp_buf_idx; -} -#endif /* OVERRIDE_silk_prefilt_FIX */ diff --git a/libs/libopus/silk/fixed/residual_energy16_FIX.c b/libs/libopus/silk/fixed/residual_energy16_FIX.c index ebffb2a66..7f130f3d3 100644 --- a/libs/libopus/silk/fixed/residual_energy16_FIX.c +++ b/libs/libopus/silk/fixed/residual_energy16_FIX.c @@ -47,10 +47,10 @@ opus_int32 silk_residual_energy16_covar_FIX( const opus_int32 *pRow; /* Safety checks */ - silk_assert( D >= 0 ); - silk_assert( D <= 16 ); - silk_assert( cQ > 0 ); - silk_assert( cQ < 16 ); + celt_assert( D >= 0 ); + celt_assert( D <= 16 ); + celt_assert( cQ > 0 ); + celt_assert( cQ < 16 ); lshifts = 16 - cQ; Qxtra = lshifts; diff --git a/libs/libopus/silk/fixed/residual_energy_FIX.c b/libs/libopus/silk/fixed/residual_energy_FIX.c index 41f74778e..6c7cade9a 100644 --- a/libs/libopus/silk/fixed/residual_energy_FIX.c +++ b/libs/libopus/silk/fixed/residual_energy_FIX.c @@ -58,7 +58,7 @@ void silk_residual_energy_FIX( /* Filter input to create the LPC residual for each frame half, and measure subframe energies */ ALLOC( LPC_res, ( MAX_NB_SUBFR >> 1 ) * offset, opus_int16 ); - silk_assert( ( nb_subfr >> 1 ) * ( MAX_NB_SUBFR >> 1 ) == nb_subfr ); + celt_assert( ( nb_subfr >> 1 ) * ( MAX_NB_SUBFR >> 1 ) == nb_subfr ); for( i = 0; i < nb_subfr >> 1; i++ ) { /* Calculate half frame LPC residual signal including preceding samples */ silk_LPC_analysis_filter( LPC_res, x_ptr, a_Q12[ i ], ( MAX_NB_SUBFR >> 1 ) * offset, LPC_order, arch ); diff --git a/libs/libopus/silk/fixed/schur64_FIX.c b/libs/libopus/silk/fixed/schur64_FIX.c index 764a10ef3..4b7e19ea5 100644 --- a/libs/libopus/silk/fixed/schur64_FIX.c +++ b/libs/libopus/silk/fixed/schur64_FIX.c @@ -43,7 +43,7 @@ opus_int32 silk_schur64( /* O returns residual ene opus_int32 C[ SILK_MAX_ORDER_LPC + 1 ][ 2 ]; opus_int32 Ctmp1_Q30, Ctmp2_Q30, rc_tmp_Q31; - silk_assert( order==6||order==8||order==10||order==12||order==14||order==16 ); + celt_assert( order >= 0 && order <= SILK_MAX_ORDER_LPC ); /* Check for invalid input */ if( c[ 0 ] <= 0 ) { @@ -51,9 +51,10 @@ opus_int32 silk_schur64( /* O returns residual ene return 0; } - for( k = 0; k < order + 1; k++ ) { + k = 0; + do { C[ k ][ 0 ] = C[ k ][ 1 ] = c[ k ]; - } + } while( ++k <= order ); for( k = 0; k < order; k++ ) { /* Check that we won't be getting an unstable rc, otherwise stop here. 
*/ diff --git a/libs/libopus/silk/fixed/schur_FIX.c b/libs/libopus/silk/fixed/schur_FIX.c index c4c0ef23b..2840f6b1a 100644 --- a/libs/libopus/silk/fixed/schur_FIX.c +++ b/libs/libopus/silk/fixed/schur_FIX.c @@ -43,28 +43,29 @@ opus_int32 silk_schur( /* O Returns residual ene opus_int32 C[ SILK_MAX_ORDER_LPC + 1 ][ 2 ]; opus_int32 Ctmp1, Ctmp2, rc_tmp_Q15; - silk_assert( order==6||order==8||order==10||order==12||order==14||order==16 ); + celt_assert( order >= 0 && order <= SILK_MAX_ORDER_LPC ); /* Get number of leading zeros */ lz = silk_CLZ32( c[ 0 ] ); /* Copy correlations and adjust level to Q30 */ + k = 0; if( lz < 2 ) { /* lz must be 1, so shift one to the right */ - for( k = 0; k < order + 1; k++ ) { + do { C[ k ][ 0 ] = C[ k ][ 1 ] = silk_RSHIFT( c[ k ], 1 ); - } + } while( ++k <= order ); } else if( lz > 2 ) { /* Shift to the left */ lz -= 2; - for( k = 0; k < order + 1; k++ ) { + do { C[ k ][ 0 ] = C[ k ][ 1 ] = silk_LSHIFT( c[ k ], lz ); - } + } while( ++k <= order ); } else { /* No need to shift */ - for( k = 0; k < order + 1; k++ ) { + do { C[ k ][ 0 ] = C[ k ][ 1 ] = c[ k ]; - } + } while( ++k <= order ); } for( k = 0; k < order; k++ ) { diff --git a/libs/libopus/silk/fixed/solve_LS_FIX.c b/libs/libopus/silk/fixed/solve_LS_FIX.c deleted file mode 100644 index 51d7d49d0..000000000 --- a/libs/libopus/silk/fixed/solve_LS_FIX.c +++ /dev/null @@ -1,249 +0,0 @@ -/*********************************************************************** -Copyright (c) 2006-2011, Skype Limited. All rights reserved. -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions -are met: -- Redistributions of source code must retain the above copyright notice, -this list of conditions and the following disclaimer. -- Redistributions in binary form must reproduce the above copyright -notice, this list of conditions and the following disclaimer in the -documentation and/or other materials provided with the distribution. -- Neither the name of Internet Society, IETF or IETF Trust, nor the -names of specific contributors, may be used to endorse or promote -products derived from this software without specific prior written -permission. -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE -LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -POSSIBILITY OF SUCH DAMAGE. 
-***********************************************************************/ - -#ifdef HAVE_CONFIG_H -#include "config.h" -#endif - -#include "main_FIX.h" -#include "stack_alloc.h" -#include "tuning_parameters.h" - -/*****************************/ -/* Internal function headers */ -/*****************************/ - -typedef struct { - opus_int32 Q36_part; - opus_int32 Q48_part; -} inv_D_t; - -/* Factorize square matrix A into LDL form */ -static OPUS_INLINE void silk_LDL_factorize_FIX( - opus_int32 *A, /* I/O Pointer to Symetric Square Matrix */ - opus_int M, /* I Size of Matrix */ - opus_int32 *L_Q16, /* I/O Pointer to Square Upper triangular Matrix */ - inv_D_t *inv_D /* I/O Pointer to vector holding inverted diagonal elements of D */ -); - -/* Solve Lx = b, when L is lower triangular and has ones on the diagonal */ -static OPUS_INLINE void silk_LS_SolveFirst_FIX( - const opus_int32 *L_Q16, /* I Pointer to Lower Triangular Matrix */ - opus_int M, /* I Dim of Matrix equation */ - const opus_int32 *b, /* I b Vector */ - opus_int32 *x_Q16 /* O x Vector */ -); - -/* Solve L^t*x = b, where L is lower triangular with ones on the diagonal */ -static OPUS_INLINE void silk_LS_SolveLast_FIX( - const opus_int32 *L_Q16, /* I Pointer to Lower Triangular Matrix */ - const opus_int M, /* I Dim of Matrix equation */ - const opus_int32 *b, /* I b Vector */ - opus_int32 *x_Q16 /* O x Vector */ -); - -static OPUS_INLINE void silk_LS_divide_Q16_FIX( - opus_int32 T[], /* I/O Numenator vector */ - inv_D_t *inv_D, /* I 1 / D vector */ - opus_int M /* I dimension */ -); - -/* Solves Ax = b, assuming A is symmetric */ -void silk_solve_LDL_FIX( - opus_int32 *A, /* I Pointer to symetric square matrix A */ - opus_int M, /* I Size of matrix */ - const opus_int32 *b, /* I Pointer to b vector */ - opus_int32 *x_Q16 /* O Pointer to x solution vector */ -) -{ - VARDECL( opus_int32, L_Q16 ); - opus_int32 Y[ MAX_MATRIX_SIZE ]; - inv_D_t inv_D[ MAX_MATRIX_SIZE ]; - SAVE_STACK; - - silk_assert( M <= MAX_MATRIX_SIZE ); - ALLOC( L_Q16, M * M, opus_int32 ); - - /*************************************************** - Factorize A by LDL such that A = L*D*L', - where L is lower triangular with ones on diagonal - ****************************************************/ - silk_LDL_factorize_FIX( A, M, L_Q16, inv_D ); - - /**************************************************** - * substitute D*L'*x = Y. 
ie: - L*D*L'*x = b => L*Y = b <=> Y = inv(L)*b - ******************************************************/ - silk_LS_SolveFirst_FIX( L_Q16, M, b, Y ); - - /**************************************************** - D*L'*x = Y <=> L'*x = inv(D)*Y, because D is - diagonal just multiply with 1/d_i - ****************************************************/ - silk_LS_divide_Q16_FIX( Y, inv_D, M ); - - /**************************************************** - x = inv(L') * inv(D) * Y - *****************************************************/ - silk_LS_SolveLast_FIX( L_Q16, M, Y, x_Q16 ); - RESTORE_STACK; -} - -static OPUS_INLINE void silk_LDL_factorize_FIX( - opus_int32 *A, /* I/O Pointer to Symetric Square Matrix */ - opus_int M, /* I Size of Matrix */ - opus_int32 *L_Q16, /* I/O Pointer to Square Upper triangular Matrix */ - inv_D_t *inv_D /* I/O Pointer to vector holding inverted diagonal elements of D */ -) -{ - opus_int i, j, k, status, loop_count; - const opus_int32 *ptr1, *ptr2; - opus_int32 diag_min_value, tmp_32, err; - opus_int32 v_Q0[ MAX_MATRIX_SIZE ], D_Q0[ MAX_MATRIX_SIZE ]; - opus_int32 one_div_diag_Q36, one_div_diag_Q40, one_div_diag_Q48; - - silk_assert( M <= MAX_MATRIX_SIZE ); - - status = 1; - diag_min_value = silk_max_32( silk_SMMUL( silk_ADD_SAT32( A[ 0 ], A[ silk_SMULBB( M, M ) - 1 ] ), SILK_FIX_CONST( FIND_LTP_COND_FAC, 31 ) ), 1 << 9 ); - for( loop_count = 0; loop_count < M && status == 1; loop_count++ ) { - status = 0; - for( j = 0; j < M; j++ ) { - ptr1 = matrix_adr( L_Q16, j, 0, M ); - tmp_32 = 0; - for( i = 0; i < j; i++ ) { - v_Q0[ i ] = silk_SMULWW( D_Q0[ i ], ptr1[ i ] ); /* Q0 */ - tmp_32 = silk_SMLAWW( tmp_32, v_Q0[ i ], ptr1[ i ] ); /* Q0 */ - } - tmp_32 = silk_SUB32( matrix_ptr( A, j, j, M ), tmp_32 ); - - if( tmp_32 < diag_min_value ) { - tmp_32 = silk_SUB32( silk_SMULBB( loop_count + 1, diag_min_value ), tmp_32 ); - /* Matrix not positive semi-definite, or ill conditioned */ - for( i = 0; i < M; i++ ) { - matrix_ptr( A, i, i, M ) = silk_ADD32( matrix_ptr( A, i, i, M ), tmp_32 ); - } - status = 1; - break; - } - D_Q0[ j ] = tmp_32; /* always < max(Correlation) */ - - /* two-step division */ - one_div_diag_Q36 = silk_INVERSE32_varQ( tmp_32, 36 ); /* Q36 */ - one_div_diag_Q40 = silk_LSHIFT( one_div_diag_Q36, 4 ); /* Q40 */ - err = silk_SUB32( (opus_int32)1 << 24, silk_SMULWW( tmp_32, one_div_diag_Q40 ) ); /* Q24 */ - one_div_diag_Q48 = silk_SMULWW( err, one_div_diag_Q40 ); /* Q48 */ - - /* Save 1/Ds */ - inv_D[ j ].Q36_part = one_div_diag_Q36; - inv_D[ j ].Q48_part = one_div_diag_Q48; - - matrix_ptr( L_Q16, j, j, M ) = 65536; /* 1.0 in Q16 */ - ptr1 = matrix_adr( A, j, 0, M ); - ptr2 = matrix_adr( L_Q16, j + 1, 0, M ); - for( i = j + 1; i < M; i++ ) { - tmp_32 = 0; - for( k = 0; k < j; k++ ) { - tmp_32 = silk_SMLAWW( tmp_32, v_Q0[ k ], ptr2[ k ] ); /* Q0 */ - } - tmp_32 = silk_SUB32( ptr1[ i ], tmp_32 ); /* always < max(Correlation) */ - - /* tmp_32 / D_Q0[j] : Divide to Q16 */ - matrix_ptr( L_Q16, i, j, M ) = silk_ADD32( silk_SMMUL( tmp_32, one_div_diag_Q48 ), - silk_RSHIFT( silk_SMULWW( tmp_32, one_div_diag_Q36 ), 4 ) ); - - /* go to next column */ - ptr2 += M; - } - } - } - - silk_assert( status == 0 ); -} - -static OPUS_INLINE void silk_LS_divide_Q16_FIX( - opus_int32 T[], /* I/O Numenator vector */ - inv_D_t *inv_D, /* I 1 / D vector */ - opus_int M /* I dimension */ -) -{ - opus_int i; - opus_int32 tmp_32; - opus_int32 one_div_diag_Q36, one_div_diag_Q48; - - for( i = 0; i < M; i++ ) { - one_div_diag_Q36 = inv_D[ i ].Q36_part; - one_div_diag_Q48 = inv_D[ i ].Q48_part; - 
- tmp_32 = T[ i ]; - T[ i ] = silk_ADD32( silk_SMMUL( tmp_32, one_div_diag_Q48 ), silk_RSHIFT( silk_SMULWW( tmp_32, one_div_diag_Q36 ), 4 ) ); - } -} - -/* Solve Lx = b, when L is lower triangular and has ones on the diagonal */ -static OPUS_INLINE void silk_LS_SolveFirst_FIX( - const opus_int32 *L_Q16, /* I Pointer to Lower Triangular Matrix */ - opus_int M, /* I Dim of Matrix equation */ - const opus_int32 *b, /* I b Vector */ - opus_int32 *x_Q16 /* O x Vector */ -) -{ - opus_int i, j; - const opus_int32 *ptr32; - opus_int32 tmp_32; - - for( i = 0; i < M; i++ ) { - ptr32 = matrix_adr( L_Q16, i, 0, M ); - tmp_32 = 0; - for( j = 0; j < i; j++ ) { - tmp_32 = silk_SMLAWW( tmp_32, ptr32[ j ], x_Q16[ j ] ); - } - x_Q16[ i ] = silk_SUB32( b[ i ], tmp_32 ); - } -} - -/* Solve L^t*x = b, where L is lower triangular with ones on the diagonal */ -static OPUS_INLINE void silk_LS_SolveLast_FIX( - const opus_int32 *L_Q16, /* I Pointer to Lower Triangular Matrix */ - const opus_int M, /* I Dim of Matrix equation */ - const opus_int32 *b, /* I b Vector */ - opus_int32 *x_Q16 /* O x Vector */ -) -{ - opus_int i, j; - const opus_int32 *ptr32; - opus_int32 tmp_32; - - for( i = M - 1; i >= 0; i-- ) { - ptr32 = matrix_adr( L_Q16, 0, i, M ); - tmp_32 = 0; - for( j = M - 1; j > i; j-- ) { - tmp_32 = silk_SMLAWW( tmp_32, ptr32[ silk_SMULBB( j, M ) ], x_Q16[ j ] ); - } - x_Q16[ i ] = silk_SUB32( b[ i ], tmp_32 ); - } -} diff --git a/libs/libopus/silk/fixed/structs_FIX.h b/libs/libopus/silk/fixed/structs_FIX.h index 3294b2512..2774a97b2 100644 --- a/libs/libopus/silk/fixed/structs_FIX.h +++ b/libs/libopus/silk/fixed/structs_FIX.h @@ -47,31 +47,17 @@ typedef struct { opus_int32 Tilt_smth_Q16; } silk_shape_state_FIX; -/********************************/ -/* Prefilter state */ -/********************************/ -typedef struct { - opus_int16 sLTP_shp[ LTP_BUF_LENGTH ]; - opus_int32 sAR_shp[ MAX_SHAPE_LPC_ORDER + 1 ]; - opus_int sLTP_shp_buf_idx; - opus_int32 sLF_AR_shp_Q12; - opus_int32 sLF_MA_shp_Q12; - opus_int32 sHarmHP_Q2; - opus_int32 rand_seed; - opus_int lagPrev; -} silk_prefilter_state_FIX; - /********************************/ /* Encoder state FIX */ /********************************/ typedef struct { silk_encoder_state sCmn; /* Common struct, shared with floating-point code */ silk_shape_state_FIX sShape; /* Shape state */ - silk_prefilter_state_FIX sPrefilt; /* Prefilter State */ /* Buffer for find pitch and noise shape analysis */ silk_DWORD_ALIGN opus_int16 x_buf[ 2 * MAX_FRAME_LENGTH + LA_SHAPE_MAX ];/* Buffer for find pitch and noise shape analysis */ opus_int LTPCorr_Q15; /* Normalized correlation from pitch lag estimator */ + opus_int32 resNrgSmth; } silk_encoder_state_FIX; /************************/ @@ -87,11 +73,8 @@ typedef struct { /* Noise shaping parameters */ /* Testing */ - silk_DWORD_ALIGN opus_int16 AR1_Q13[ MAX_NB_SUBFR * MAX_SHAPE_LPC_ORDER ]; - silk_DWORD_ALIGN opus_int16 AR2_Q13[ MAX_NB_SUBFR * MAX_SHAPE_LPC_ORDER ]; + silk_DWORD_ALIGN opus_int16 AR_Q13[ MAX_NB_SUBFR * MAX_SHAPE_LPC_ORDER ]; opus_int32 LF_shp_Q14[ MAX_NB_SUBFR ]; /* Packs two int16 coefficients per int32 value */ - opus_int GainsPre_Q14[ MAX_NB_SUBFR ]; - opus_int HarmBoost_Q14[ MAX_NB_SUBFR ]; opus_int Tilt_Q14[ MAX_NB_SUBFR ]; opus_int HarmShapeGain_Q14[ MAX_NB_SUBFR ]; opus_int Lambda_Q10; @@ -99,7 +82,6 @@ typedef struct { opus_int coding_quality_Q14; /* measures */ - opus_int sparseness_Q8; opus_int32 predGain_Q16; opus_int LTPredCodGain_Q7; opus_int32 ResNrg[ MAX_NB_SUBFR ]; /* Residual energy per subframe */ diff 
--git a/libs/libopus/silk/fixed/vector_ops_FIX.c b/libs/libopus/silk/fixed/vector_ops_FIX.c index d94980014..dcf84070a 100644 --- a/libs/libopus/silk/fixed/vector_ops_FIX.c +++ b/libs/libopus/silk/fixed/vector_ops_FIX.c @@ -87,7 +87,7 @@ opus_int32 silk_inner_prod_aligned( #endif } -opus_int64 silk_inner_prod16_aligned_64_c( +opus_int64 silk_inner_prod16_c( const opus_int16 *inVec1, /* I input vector 1 */ const opus_int16 *inVec2, /* I input vector 2 */ const opus_int len /* I vector lengths */ diff --git a/libs/libopus/silk/fixed/warped_autocorrelation_FIX.c b/libs/libopus/silk/fixed/warped_autocorrelation_FIX.c index 6ca6c1184..5c79553bc 100644 --- a/libs/libopus/silk/fixed/warped_autocorrelation_FIX.c +++ b/libs/libopus/silk/fixed/warped_autocorrelation_FIX.c @@ -31,17 +31,14 @@ POSSIBILITY OF SUCH DAMAGE. #include "main_FIX.h" -#define QC 10 -#define QS 14 - #if defined(MIPSr1_ASM) #include "mips/warped_autocorrelation_FIX_mipsr1.h" #endif -#ifndef OVERRIDE_silk_warped_autocorrelation_FIX /* Autocorrelations for a warped frequency axis */ -void silk_warped_autocorrelation_FIX( +#ifndef OVERRIDE_silk_warped_autocorrelation_FIX_c +void silk_warped_autocorrelation_FIX_c( opus_int32 *corr, /* O Result [order + 1] */ opus_int *scale, /* O Scaling of the correlation vector */ const opus_int16 *input, /* I Input data to correlate */ @@ -56,7 +53,7 @@ void silk_warped_autocorrelation_FIX( opus_int64 corr_QC[ MAX_SHAPE_LPC_ORDER + 1 ] = { 0 }; /* Order must be even */ - silk_assert( ( order & 1 ) == 0 ); + celt_assert( ( order & 1 ) == 0 ); silk_assert( 2 * QS - QC >= 0 ); /* Loop over samples */ @@ -92,4 +89,4 @@ void silk_warped_autocorrelation_FIX( } silk_assert( corr_QC[ 0 ] >= 0 ); /* If breaking, decrease QC*/ } -#endif /* OVERRIDE_silk_warped_autocorrelation_FIX */ +#endif /* OVERRIDE_silk_warped_autocorrelation_FIX_c */ diff --git a/libs/libopus/silk/fixed/x86/burg_modified_FIX_sse.c b/libs/libopus/silk/fixed/x86/burg_modified_FIX_sse.c deleted file mode 100644 index 3c3583c5f..000000000 --- a/libs/libopus/silk/fixed/x86/burg_modified_FIX_sse.c +++ /dev/null @@ -1,377 +0,0 @@ -/* Copyright (c) 2014, Cisco Systems, INC - Written by XiangMingZhu WeiZhou MinPeng YanWang - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - - Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - - Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER - OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, - EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR - PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF - LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING - NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
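/*
 * Illustrative-only sketch (not libopus code) of the dispatch idiom behind the
 * "_c" suffix and OVERRIDE_ guard introduced in the warped-autocorrelation hunk
 * above: a platform header may map the public name onto a SIMD kernel and define
 * the OVERRIDE_ macro so the generic body is skipped. All names below are invented.
 */
#if defined(EXAMPLE_HAVE_SIMD)                       /* hypothetical platform flag */
void example_kernel_simd(long long *acc, const short *x, int n);
# define example_kernel(acc, x, n)   example_kernel_simd(acc, x, n)
# define OVERRIDE_example_kernel_c
#else
# define example_kernel(acc, x, n)   example_kernel_c(acc, x, n)  /* generic path */
#endif

#ifndef OVERRIDE_example_kernel_c
void example_kernel_c(long long *acc, const short *x, int n)
{
    int i;
    for (i = 0; i < n; i++) {
        *acc += (long long)x[i] * x[i];              /* trivial stand-in workload */
    }
}
#endif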
-*/ - -#ifdef HAVE_CONFIG_H -#include "config.h" -#endif - -#include -#include -#include - -#include "SigProc_FIX.h" -#include "define.h" -#include "tuning_parameters.h" -#include "pitch.h" -#include "celt/x86/x86cpu.h" - -#define MAX_FRAME_SIZE 384 /* subfr_length * nb_subfr = ( 0.005 * 16000 + 16 ) * 4 = 384 */ - -#define QA 25 -#define N_BITS_HEAD_ROOM 2 -#define MIN_RSHIFTS -16 -#define MAX_RSHIFTS (32 - QA) - -/* Compute reflection coefficients from input signal */ -void silk_burg_modified_sse4_1( - opus_int32 *res_nrg, /* O Residual energy */ - opus_int *res_nrg_Q, /* O Residual energy Q value */ - opus_int32 A_Q16[], /* O Prediction coefficients (length order) */ - const opus_int16 x[], /* I Input signal, length: nb_subfr * ( D + subfr_length ) */ - const opus_int32 minInvGain_Q30, /* I Inverse of max prediction gain */ - const opus_int subfr_length, /* I Input signal subframe length (incl. D preceding samples) */ - const opus_int nb_subfr, /* I Number of subframes stacked in x */ - const opus_int D, /* I Order */ - int arch /* I Run-time architecture */ -) -{ - opus_int k, n, s, lz, rshifts, rshifts_extra, reached_max_gain; - opus_int32 C0, num, nrg, rc_Q31, invGain_Q30, Atmp_QA, Atmp1, tmp1, tmp2, x1, x2; - const opus_int16 *x_ptr; - opus_int32 C_first_row[ SILK_MAX_ORDER_LPC ]; - opus_int32 C_last_row[ SILK_MAX_ORDER_LPC ]; - opus_int32 Af_QA[ SILK_MAX_ORDER_LPC ]; - opus_int32 CAf[ SILK_MAX_ORDER_LPC + 1 ]; - opus_int32 CAb[ SILK_MAX_ORDER_LPC + 1 ]; - opus_int32 xcorr[ SILK_MAX_ORDER_LPC ]; - - __m128i FIRST_3210, LAST_3210, ATMP_3210, TMP1_3210, TMP2_3210, T1_3210, T2_3210, PTR_3210, SUBFR_3210, X1_3210, X2_3210; - __m128i CONST1 = _mm_set1_epi32(1); - - silk_assert( subfr_length * nb_subfr <= MAX_FRAME_SIZE ); - - /* Compute autocorrelations, added over subframes */ - silk_sum_sqr_shift( &C0, &rshifts, x, nb_subfr * subfr_length ); - if( rshifts > MAX_RSHIFTS ) { - C0 = silk_LSHIFT32( C0, rshifts - MAX_RSHIFTS ); - silk_assert( C0 > 0 ); - rshifts = MAX_RSHIFTS; - } else { - lz = silk_CLZ32( C0 ) - 1; - rshifts_extra = N_BITS_HEAD_ROOM - lz; - if( rshifts_extra > 0 ) { - rshifts_extra = silk_min( rshifts_extra, MAX_RSHIFTS - rshifts ); - C0 = silk_RSHIFT32( C0, rshifts_extra ); - } else { - rshifts_extra = silk_max( rshifts_extra, MIN_RSHIFTS - rshifts ); - C0 = silk_LSHIFT32( C0, -rshifts_extra ); - } - rshifts += rshifts_extra; - } - CAb[ 0 ] = CAf[ 0 ] = C0 + silk_SMMUL( SILK_FIX_CONST( FIND_LPC_COND_FAC, 32 ), C0 ) + 1; /* Q(-rshifts) */ - silk_memset( C_first_row, 0, SILK_MAX_ORDER_LPC * sizeof( opus_int32 ) ); - if( rshifts > 0 ) { - for( s = 0; s < nb_subfr; s++ ) { - x_ptr = x + s * subfr_length; - for( n = 1; n < D + 1; n++ ) { - C_first_row[ n - 1 ] += (opus_int32)silk_RSHIFT64( - silk_inner_prod16_aligned_64( x_ptr, x_ptr + n, subfr_length - n, arch ), rshifts ); - } - } - } else { - for( s = 0; s < nb_subfr; s++ ) { - int i; - opus_int32 d; - x_ptr = x + s * subfr_length; - celt_pitch_xcorr(x_ptr, x_ptr + 1, xcorr, subfr_length - D, D, arch ); - for( n = 1; n < D + 1; n++ ) { - for ( i = n + subfr_length - D, d = 0; i < subfr_length; i++ ) - d = MAC16_16( d, x_ptr[ i ], x_ptr[ i - n ] ); - xcorr[ n - 1 ] += d; - } - for( n = 1; n < D + 1; n++ ) { - C_first_row[ n - 1 ] += silk_LSHIFT32( xcorr[ n - 1 ], -rshifts ); - } - } - } - silk_memcpy( C_last_row, C_first_row, SILK_MAX_ORDER_LPC * sizeof( opus_int32 ) ); - - /* Initialize */ - CAb[ 0 ] = CAf[ 0 ] = C0 + silk_SMMUL( SILK_FIX_CONST( FIND_LPC_COND_FAC, 32 ), C0 ) + 1; /* Q(-rshifts) */ - - invGain_Q30 = 
(opus_int32)1 << 30; - reached_max_gain = 0; - for( n = 0; n < D; n++ ) { - /* Update first row of correlation matrix (without first element) */ - /* Update last row of correlation matrix (without last element, stored in reversed order) */ - /* Update C * Af */ - /* Update C * flipud(Af) (stored in reversed order) */ - if( rshifts > -2 ) { - for( s = 0; s < nb_subfr; s++ ) { - x_ptr = x + s * subfr_length; - x1 = -silk_LSHIFT32( (opus_int32)x_ptr[ n ], 16 - rshifts ); /* Q(16-rshifts) */ - x2 = -silk_LSHIFT32( (opus_int32)x_ptr[ subfr_length - n - 1 ], 16 - rshifts ); /* Q(16-rshifts) */ - tmp1 = silk_LSHIFT32( (opus_int32)x_ptr[ n ], QA - 16 ); /* Q(QA-16) */ - tmp2 = silk_LSHIFT32( (opus_int32)x_ptr[ subfr_length - n - 1 ], QA - 16 ); /* Q(QA-16) */ - for( k = 0; k < n; k++ ) { - C_first_row[ k ] = silk_SMLAWB( C_first_row[ k ], x1, x_ptr[ n - k - 1 ] ); /* Q( -rshifts ) */ - C_last_row[ k ] = silk_SMLAWB( C_last_row[ k ], x2, x_ptr[ subfr_length - n + k ] ); /* Q( -rshifts ) */ - Atmp_QA = Af_QA[ k ]; - tmp1 = silk_SMLAWB( tmp1, Atmp_QA, x_ptr[ n - k - 1 ] ); /* Q(QA-16) */ - tmp2 = silk_SMLAWB( tmp2, Atmp_QA, x_ptr[ subfr_length - n + k ] ); /* Q(QA-16) */ - } - tmp1 = silk_LSHIFT32( -tmp1, 32 - QA - rshifts ); /* Q(16-rshifts) */ - tmp2 = silk_LSHIFT32( -tmp2, 32 - QA - rshifts ); /* Q(16-rshifts) */ - for( k = 0; k <= n; k++ ) { - CAf[ k ] = silk_SMLAWB( CAf[ k ], tmp1, x_ptr[ n - k ] ); /* Q( -rshift ) */ - CAb[ k ] = silk_SMLAWB( CAb[ k ], tmp2, x_ptr[ subfr_length - n + k - 1 ] ); /* Q( -rshift ) */ - } - } - } else { - for( s = 0; s < nb_subfr; s++ ) { - x_ptr = x + s * subfr_length; - x1 = -silk_LSHIFT32( (opus_int32)x_ptr[ n ], -rshifts ); /* Q( -rshifts ) */ - x2 = -silk_LSHIFT32( (opus_int32)x_ptr[ subfr_length - n - 1 ], -rshifts ); /* Q( -rshifts ) */ - tmp1 = silk_LSHIFT32( (opus_int32)x_ptr[ n ], 17 ); /* Q17 */ - tmp2 = silk_LSHIFT32( (opus_int32)x_ptr[ subfr_length - n - 1 ], 17 ); /* Q17 */ - - X1_3210 = _mm_set1_epi32( x1 ); - X2_3210 = _mm_set1_epi32( x2 ); - TMP1_3210 = _mm_setzero_si128(); - TMP2_3210 = _mm_setzero_si128(); - for( k = 0; k < n - 3; k += 4 ) { - PTR_3210 = OP_CVTEPI16_EPI32_M64( &x_ptr[ n - k - 1 - 3 ] ); - SUBFR_3210 = OP_CVTEPI16_EPI32_M64( &x_ptr[ subfr_length - n + k ] ); - FIRST_3210 = _mm_loadu_si128( (__m128i *)&C_first_row[ k ] ); - PTR_3210 = _mm_shuffle_epi32( PTR_3210, _MM_SHUFFLE( 0, 1, 2, 3 ) ); - LAST_3210 = _mm_loadu_si128( (__m128i *)&C_last_row[ k ] ); - ATMP_3210 = _mm_loadu_si128( (__m128i *)&Af_QA[ k ] ); - - T1_3210 = _mm_mullo_epi32( PTR_3210, X1_3210 ); - T2_3210 = _mm_mullo_epi32( SUBFR_3210, X2_3210 ); - - ATMP_3210 = _mm_srai_epi32( ATMP_3210, 7 ); - ATMP_3210 = _mm_add_epi32( ATMP_3210, CONST1 ); - ATMP_3210 = _mm_srai_epi32( ATMP_3210, 1 ); - - FIRST_3210 = _mm_add_epi32( FIRST_3210, T1_3210 ); - LAST_3210 = _mm_add_epi32( LAST_3210, T2_3210 ); - - PTR_3210 = _mm_mullo_epi32( ATMP_3210, PTR_3210 ); - SUBFR_3210 = _mm_mullo_epi32( ATMP_3210, SUBFR_3210 ); - - _mm_storeu_si128( (__m128i *)&C_first_row[ k ], FIRST_3210 ); - _mm_storeu_si128( (__m128i *)&C_last_row[ k ], LAST_3210 ); - - TMP1_3210 = _mm_add_epi32( TMP1_3210, PTR_3210 ); - TMP2_3210 = _mm_add_epi32( TMP2_3210, SUBFR_3210 ); - } - - TMP1_3210 = _mm_add_epi32( TMP1_3210, _mm_unpackhi_epi64(TMP1_3210, TMP1_3210 ) ); - TMP2_3210 = _mm_add_epi32( TMP2_3210, _mm_unpackhi_epi64(TMP2_3210, TMP2_3210 ) ); - TMP1_3210 = _mm_add_epi32( TMP1_3210, _mm_shufflelo_epi16(TMP1_3210, 0x0E ) ); - TMP2_3210 = _mm_add_epi32( TMP2_3210, _mm_shufflelo_epi16(TMP2_3210, 0x0E ) ); - - 
tmp1 += _mm_cvtsi128_si32( TMP1_3210 ); - tmp2 += _mm_cvtsi128_si32( TMP2_3210 ); - - for( ; k < n; k++ ) { - C_first_row[ k ] = silk_MLA( C_first_row[ k ], x1, x_ptr[ n - k - 1 ] ); /* Q( -rshifts ) */ - C_last_row[ k ] = silk_MLA( C_last_row[ k ], x2, x_ptr[ subfr_length - n + k ] ); /* Q( -rshifts ) */ - Atmp1 = silk_RSHIFT_ROUND( Af_QA[ k ], QA - 17 ); /* Q17 */ - tmp1 = silk_MLA( tmp1, x_ptr[ n - k - 1 ], Atmp1 ); /* Q17 */ - tmp2 = silk_MLA( tmp2, x_ptr[ subfr_length - n + k ], Atmp1 ); /* Q17 */ - } - - tmp1 = -tmp1; /* Q17 */ - tmp2 = -tmp2; /* Q17 */ - - { - __m128i xmm_tmp1, xmm_tmp2; - __m128i xmm_x_ptr_n_k_x2x0, xmm_x_ptr_n_k_x3x1; - __m128i xmm_x_ptr_sub_x2x0, xmm_x_ptr_sub_x3x1; - - xmm_tmp1 = _mm_set1_epi32( tmp1 ); - xmm_tmp2 = _mm_set1_epi32( tmp2 ); - - for( k = 0; k <= n - 3; k += 4 ) { - xmm_x_ptr_n_k_x2x0 = OP_CVTEPI16_EPI32_M64( &x_ptr[ n - k - 3 ] ); - xmm_x_ptr_sub_x2x0 = OP_CVTEPI16_EPI32_M64( &x_ptr[ subfr_length - n + k - 1 ] ); - - xmm_x_ptr_n_k_x2x0 = _mm_shuffle_epi32( xmm_x_ptr_n_k_x2x0, _MM_SHUFFLE( 0, 1, 2, 3 ) ); - - xmm_x_ptr_n_k_x2x0 = _mm_slli_epi32( xmm_x_ptr_n_k_x2x0, -rshifts - 1 ); - xmm_x_ptr_sub_x2x0 = _mm_slli_epi32( xmm_x_ptr_sub_x2x0, -rshifts - 1 ); - - /* equal shift right 4 bytes, xmm_x_ptr_n_k_x3x1 = _mm_srli_si128(xmm_x_ptr_n_k_x2x0, 4)*/ - xmm_x_ptr_n_k_x3x1 = _mm_shuffle_epi32( xmm_x_ptr_n_k_x2x0, _MM_SHUFFLE( 0, 3, 2, 1 ) ); - xmm_x_ptr_sub_x3x1 = _mm_shuffle_epi32( xmm_x_ptr_sub_x2x0, _MM_SHUFFLE( 0, 3, 2, 1 ) ); - - xmm_x_ptr_n_k_x2x0 = _mm_mul_epi32( xmm_x_ptr_n_k_x2x0, xmm_tmp1 ); - xmm_x_ptr_n_k_x3x1 = _mm_mul_epi32( xmm_x_ptr_n_k_x3x1, xmm_tmp1 ); - xmm_x_ptr_sub_x2x0 = _mm_mul_epi32( xmm_x_ptr_sub_x2x0, xmm_tmp2 ); - xmm_x_ptr_sub_x3x1 = _mm_mul_epi32( xmm_x_ptr_sub_x3x1, xmm_tmp2 ); - - xmm_x_ptr_n_k_x2x0 = _mm_srli_epi64( xmm_x_ptr_n_k_x2x0, 16 ); - xmm_x_ptr_n_k_x3x1 = _mm_slli_epi64( xmm_x_ptr_n_k_x3x1, 16 ); - xmm_x_ptr_sub_x2x0 = _mm_srli_epi64( xmm_x_ptr_sub_x2x0, 16 ); - xmm_x_ptr_sub_x3x1 = _mm_slli_epi64( xmm_x_ptr_sub_x3x1, 16 ); - - xmm_x_ptr_n_k_x2x0 = _mm_blend_epi16( xmm_x_ptr_n_k_x2x0, xmm_x_ptr_n_k_x3x1, 0xCC ); - xmm_x_ptr_sub_x2x0 = _mm_blend_epi16( xmm_x_ptr_sub_x2x0, xmm_x_ptr_sub_x3x1, 0xCC ); - - X1_3210 = _mm_loadu_si128( (__m128i *)&CAf[ k ] ); - PTR_3210 = _mm_loadu_si128( (__m128i *)&CAb[ k ] ); - - X1_3210 = _mm_add_epi32( X1_3210, xmm_x_ptr_n_k_x2x0 ); - PTR_3210 = _mm_add_epi32( PTR_3210, xmm_x_ptr_sub_x2x0 ); - - _mm_storeu_si128( (__m128i *)&CAf[ k ], X1_3210 ); - _mm_storeu_si128( (__m128i *)&CAb[ k ], PTR_3210 ); - } - - for( ; k <= n; k++ ) { - CAf[ k ] = silk_SMLAWW( CAf[ k ], tmp1, - silk_LSHIFT32( (opus_int32)x_ptr[ n - k ], -rshifts - 1 ) ); /* Q( -rshift ) */ - CAb[ k ] = silk_SMLAWW( CAb[ k ], tmp2, - silk_LSHIFT32( (opus_int32)x_ptr[ subfr_length - n + k - 1 ], -rshifts - 1 ) ); /* Q( -rshift ) */ - } - } - } - } - - /* Calculate nominator and denominator for the next order reflection (parcor) coefficient */ - tmp1 = C_first_row[ n ]; /* Q( -rshifts ) */ - tmp2 = C_last_row[ n ]; /* Q( -rshifts ) */ - num = 0; /* Q( -rshifts ) */ - nrg = silk_ADD32( CAb[ 0 ], CAf[ 0 ] ); /* Q( 1-rshifts ) */ - for( k = 0; k < n; k++ ) { - Atmp_QA = Af_QA[ k ]; - lz = silk_CLZ32( silk_abs( Atmp_QA ) ) - 1; - lz = silk_min( 32 - QA, lz ); - Atmp1 = silk_LSHIFT32( Atmp_QA, lz ); /* Q( QA + lz ) */ - - tmp1 = silk_ADD_LSHIFT32( tmp1, silk_SMMUL( C_last_row[ n - k - 1 ], Atmp1 ), 32 - QA - lz ); /* Q( -rshifts ) */ - tmp2 = silk_ADD_LSHIFT32( tmp2, silk_SMMUL( C_first_row[ n - k - 1 ], Atmp1 ), 32 - QA - lz ); 
/* Q( -rshifts ) */ - num = silk_ADD_LSHIFT32( num, silk_SMMUL( CAb[ n - k ], Atmp1 ), 32 - QA - lz ); /* Q( -rshifts ) */ - nrg = silk_ADD_LSHIFT32( nrg, silk_SMMUL( silk_ADD32( CAb[ k + 1 ], CAf[ k + 1 ] ), - Atmp1 ), 32 - QA - lz ); /* Q( 1-rshifts ) */ - } - CAf[ n + 1 ] = tmp1; /* Q( -rshifts ) */ - CAb[ n + 1 ] = tmp2; /* Q( -rshifts ) */ - num = silk_ADD32( num, tmp2 ); /* Q( -rshifts ) */ - num = silk_LSHIFT32( -num, 1 ); /* Q( 1-rshifts ) */ - - /* Calculate the next order reflection (parcor) coefficient */ - if( silk_abs( num ) < nrg ) { - rc_Q31 = silk_DIV32_varQ( num, nrg, 31 ); - } else { - rc_Q31 = ( num > 0 ) ? silk_int32_MAX : silk_int32_MIN; - } - - /* Update inverse prediction gain */ - tmp1 = ( (opus_int32)1 << 30 ) - silk_SMMUL( rc_Q31, rc_Q31 ); - tmp1 = silk_LSHIFT( silk_SMMUL( invGain_Q30, tmp1 ), 2 ); - if( tmp1 <= minInvGain_Q30 ) { - /* Max prediction gain exceeded; set reflection coefficient such that max prediction gain is exactly hit */ - tmp2 = ( (opus_int32)1 << 30 ) - silk_DIV32_varQ( minInvGain_Q30, invGain_Q30, 30 ); /* Q30 */ - rc_Q31 = silk_SQRT_APPROX( tmp2 ); /* Q15 */ - if( rc_Q31 > 0 ) { - /* Newton-Raphson iteration */ - rc_Q31 = silk_RSHIFT32( rc_Q31 + silk_DIV32( tmp2, rc_Q31 ), 1 ); /* Q15 */ - rc_Q31 = silk_LSHIFT32( rc_Q31, 16 ); /* Q31 */ - if( num < 0 ) { - /* Ensure adjusted reflection coefficients has the original sign */ - rc_Q31 = -rc_Q31; - } - } - invGain_Q30 = minInvGain_Q30; - reached_max_gain = 1; - } else { - invGain_Q30 = tmp1; - } - - /* Update the AR coefficients */ - for( k = 0; k < (n + 1) >> 1; k++ ) { - tmp1 = Af_QA[ k ]; /* QA */ - tmp2 = Af_QA[ n - k - 1 ]; /* QA */ - Af_QA[ k ] = silk_ADD_LSHIFT32( tmp1, silk_SMMUL( tmp2, rc_Q31 ), 1 ); /* QA */ - Af_QA[ n - k - 1 ] = silk_ADD_LSHIFT32( tmp2, silk_SMMUL( tmp1, rc_Q31 ), 1 ); /* QA */ - } - Af_QA[ n ] = silk_RSHIFT32( rc_Q31, 31 - QA ); /* QA */ - - if( reached_max_gain ) { - /* Reached max prediction gain; set remaining coefficients to zero and exit loop */ - for( k = n + 1; k < D; k++ ) { - Af_QA[ k ] = 0; - } - break; - } - - /* Update C * Af and C * Ab */ - for( k = 0; k <= n + 1; k++ ) { - tmp1 = CAf[ k ]; /* Q( -rshifts ) */ - tmp2 = CAb[ n - k + 1 ]; /* Q( -rshifts ) */ - CAf[ k ] = silk_ADD_LSHIFT32( tmp1, silk_SMMUL( tmp2, rc_Q31 ), 1 ); /* Q( -rshifts ) */ - CAb[ n - k + 1 ] = silk_ADD_LSHIFT32( tmp2, silk_SMMUL( tmp1, rc_Q31 ), 1 ); /* Q( -rshifts ) */ - } - } - - if( reached_max_gain ) { - for( k = 0; k < D; k++ ) { - /* Scale coefficients */ - A_Q16[ k ] = -silk_RSHIFT_ROUND( Af_QA[ k ], QA - 16 ); - } - /* Subtract energy of preceding samples from C0 */ - if( rshifts > 0 ) { - for( s = 0; s < nb_subfr; s++ ) { - x_ptr = x + s * subfr_length; - C0 -= (opus_int32)silk_RSHIFT64( silk_inner_prod16_aligned_64( x_ptr, x_ptr, D, arch ), rshifts ); - } - } else { - for( s = 0; s < nb_subfr; s++ ) { - x_ptr = x + s * subfr_length; - C0 -= silk_LSHIFT32( silk_inner_prod_aligned( x_ptr, x_ptr, D, arch ), -rshifts ); - } - } - /* Approximate residual energy */ - *res_nrg = silk_LSHIFT( silk_SMMUL( invGain_Q30, C0 ), 2 ); - *res_nrg_Q = -rshifts; - } else { - /* Return residual energy */ - nrg = CAf[ 0 ]; /* Q( -rshifts ) */ - tmp1 = (opus_int32)1 << 16; /* Q16 */ - for( k = 0; k < D; k++ ) { - Atmp1 = silk_RSHIFT_ROUND( Af_QA[ k ], QA - 16 ); /* Q16 */ - nrg = silk_SMLAWW( nrg, CAf[ k + 1 ], Atmp1 ); /* Q( -rshifts ) */ - tmp1 = silk_SMLAWW( tmp1, Atmp1, Atmp1 ); /* Q16 */ - A_Q16[ k ] = -Atmp1; - } - *res_nrg = silk_SMLAWW( nrg, silk_SMMUL( SILK_FIX_CONST( 
FIND_LPC_COND_FAC, 32 ), C0 ), -tmp1 );/* Q( -rshifts ) */ - *res_nrg_Q = -rshifts; - } -} diff --git a/libs/libopus/silk/fixed/x86/burg_modified_FIX_sse4_1.c b/libs/libopus/silk/fixed/x86/burg_modified_FIX_sse4_1.c new file mode 100644 index 000000000..e58bf079e --- /dev/null +++ b/libs/libopus/silk/fixed/x86/burg_modified_FIX_sse4_1.c @@ -0,0 +1,400 @@ +/* Copyright (c) 2014-2020, Cisco Systems, INC + Written by XiangMingZhu WeiZhou MinPeng YanWang FrancisQuiers + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + - Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + - Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER + OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifdef HAVE_CONFIG_H +#include "config.h" +#endif + +#include +#include +#include + +#include "SigProc_FIX.h" +#include "define.h" +#include "tuning_parameters.h" +#include "pitch.h" +#include "celt/x86/x86cpu.h" + +#define MAX_FRAME_SIZE 384 /* subfr_length * nb_subfr = ( 0.005 * 16000 + 16 ) * 4 = 384 */ + +#define QA 25 +#define N_BITS_HEAD_ROOM 3 +#define MIN_RSHIFTS -16 +#define MAX_RSHIFTS (32 - QA) + +/* Compute reflection coefficients from input signal */ +void silk_burg_modified_sse4_1( + opus_int32 *res_nrg, /* O Residual energy */ + opus_int *res_nrg_Q, /* O Residual energy Q value */ + opus_int32 A_Q16[], /* O Prediction coefficients (length order) */ + const opus_int16 x[], /* I Input signal, length: nb_subfr * ( D + subfr_length ) */ + const opus_int32 minInvGain_Q30, /* I Inverse of max prediction gain */ + const opus_int subfr_length, /* I Input signal subframe length (incl. 
D preceding samples) */ + const opus_int nb_subfr, /* I Number of subframes stacked in x */ + const opus_int D, /* I Order */ + int arch /* I Run-time architecture */ +) +{ + opus_int k, n, s, lz, rshifts, reached_max_gain; + opus_int32 C0, num, nrg, rc_Q31, invGain_Q30, Atmp_QA, Atmp1, tmp1, tmp2, x1, x2; + const opus_int16 *x_ptr; + opus_int32 C_first_row[ SILK_MAX_ORDER_LPC ]; + opus_int32 C_last_row[ SILK_MAX_ORDER_LPC ]; + opus_int32 Af_QA[ SILK_MAX_ORDER_LPC ]; + opus_int32 CAf[ SILK_MAX_ORDER_LPC + 1 ]; + opus_int32 CAb[ SILK_MAX_ORDER_LPC + 1 ]; + opus_int32 xcorr[ SILK_MAX_ORDER_LPC ]; + opus_int64 C0_64; + + __m128i FIRST_3210, LAST_3210, ATMP_3210, TMP1_3210, TMP2_3210, T1_3210, T2_3210, PTR_3210, SUBFR_3210, X1_3210, X2_3210; + __m128i CONST1 = _mm_set1_epi32(1); + + celt_assert( subfr_length * nb_subfr <= MAX_FRAME_SIZE ); + + /* Compute autocorrelations, added over subframes */ + C0_64 = silk_inner_prod16( x, x, subfr_length*nb_subfr, arch ); + lz = silk_CLZ64(C0_64); + rshifts = 32 + 1 + N_BITS_HEAD_ROOM - lz; + if (rshifts > MAX_RSHIFTS) rshifts = MAX_RSHIFTS; + if (rshifts < MIN_RSHIFTS) rshifts = MIN_RSHIFTS; + + if (rshifts > 0) { + C0 = (opus_int32)silk_RSHIFT64(C0_64, rshifts ); + } else { + C0 = silk_LSHIFT32((opus_int32)C0_64, -rshifts ); + } + + CAb[ 0 ] = CAf[ 0 ] = C0 + silk_SMMUL( SILK_FIX_CONST( FIND_LPC_COND_FAC, 32 ), C0 ) + 1; /* Q(-rshifts) */ + silk_memset( C_first_row, 0, SILK_MAX_ORDER_LPC * sizeof( opus_int32 ) ); + if( rshifts > 0 ) { + for( s = 0; s < nb_subfr; s++ ) { + x_ptr = x + s * subfr_length; + for( n = 1; n < D + 1; n++ ) { + C_first_row[ n - 1 ] += (opus_int32)silk_RSHIFT64( + silk_inner_prod16( x_ptr, x_ptr + n, subfr_length - n, arch ), rshifts ); + } + } + } else { + for( s = 0; s < nb_subfr; s++ ) { + int i; + opus_int32 d; + x_ptr = x + s * subfr_length; + celt_pitch_xcorr(x_ptr, x_ptr + 1, xcorr, subfr_length - D, D, arch ); + for( n = 1; n < D + 1; n++ ) { + for ( i = n + subfr_length - D, d = 0; i < subfr_length; i++ ) + d = MAC16_16( d, x_ptr[ i ], x_ptr[ i - n ] ); + xcorr[ n - 1 ] += d; + } + for( n = 1; n < D + 1; n++ ) { + C_first_row[ n - 1 ] += silk_LSHIFT32( xcorr[ n - 1 ], -rshifts ); + } + } + } + silk_memcpy( C_last_row, C_first_row, SILK_MAX_ORDER_LPC * sizeof( opus_int32 ) ); + + /* Initialize */ + CAb[ 0 ] = CAf[ 0 ] = C0 + silk_SMMUL( SILK_FIX_CONST( FIND_LPC_COND_FAC, 32 ), C0 ) + 1; /* Q(-rshifts) */ + + invGain_Q30 = (opus_int32)1 << 30; + reached_max_gain = 0; + for( n = 0; n < D; n++ ) { + /* Update first row of correlation matrix (without first element) */ + /* Update last row of correlation matrix (without last element, stored in reversed order) */ + /* Update C * Af */ + /* Update C * flipud(Af) (stored in reversed order) */ + if( rshifts > -2 ) { + for( s = 0; s < nb_subfr; s++ ) { + x_ptr = x + s * subfr_length; + x1 = -silk_LSHIFT32( (opus_int32)x_ptr[ n ], 16 - rshifts ); /* Q(16-rshifts) */ + x2 = -silk_LSHIFT32( (opus_int32)x_ptr[ subfr_length - n - 1 ], 16 - rshifts ); /* Q(16-rshifts) */ + tmp1 = silk_LSHIFT32( (opus_int32)x_ptr[ n ], QA - 16 ); /* Q(QA-16) */ + tmp2 = silk_LSHIFT32( (opus_int32)x_ptr[ subfr_length - n - 1 ], QA - 16 ); /* Q(QA-16) */ + for( k = 0; k < n; k++ ) { + C_first_row[ k ] = silk_SMLAWB( C_first_row[ k ], x1, x_ptr[ n - k - 1 ] ); /* Q( -rshifts ) */ + C_last_row[ k ] = silk_SMLAWB( C_last_row[ k ], x2, x_ptr[ subfr_length - n + k ] ); /* Q( -rshifts ) */ + Atmp_QA = Af_QA[ k ]; + tmp1 = silk_SMLAWB( tmp1, Atmp_QA, x_ptr[ n - k - 1 ] ); /* Q(QA-16) */ + tmp2 = silk_SMLAWB( tmp2, 
Atmp_QA, x_ptr[ subfr_length - n + k ] ); /* Q(QA-16) */ + } + tmp1 = silk_LSHIFT32( -tmp1, 32 - QA - rshifts ); /* Q(16-rshifts) */ + tmp2 = silk_LSHIFT32( -tmp2, 32 - QA - rshifts ); /* Q(16-rshifts) */ + for( k = 0; k <= n; k++ ) { + CAf[ k ] = silk_SMLAWB( CAf[ k ], tmp1, x_ptr[ n - k ] ); /* Q( -rshift ) */ + CAb[ k ] = silk_SMLAWB( CAb[ k ], tmp2, x_ptr[ subfr_length - n + k - 1 ] ); /* Q( -rshift ) */ + } + } + } else { + for( s = 0; s < nb_subfr; s++ ) { + x_ptr = x + s * subfr_length; + x1 = -silk_LSHIFT32( (opus_int32)x_ptr[ n ], -rshifts ); /* Q( -rshifts ) */ + x2 = -silk_LSHIFT32( (opus_int32)x_ptr[ subfr_length - n - 1 ], -rshifts ); /* Q( -rshifts ) */ + tmp1 = silk_LSHIFT32( (opus_int32)x_ptr[ n ], 17 ); /* Q17 */ + tmp2 = silk_LSHIFT32( (opus_int32)x_ptr[ subfr_length - n - 1 ], 17 ); /* Q17 */ + + X1_3210 = _mm_set1_epi32( x1 ); + X2_3210 = _mm_set1_epi32( x2 ); + TMP1_3210 = _mm_setzero_si128(); + TMP2_3210 = _mm_setzero_si128(); + for( k = 0; k < n - 3; k += 4 ) { + PTR_3210 = OP_CVTEPI16_EPI32_M64( &x_ptr[ n - k - 1 - 3 ] ); + SUBFR_3210 = OP_CVTEPI16_EPI32_M64( &x_ptr[ subfr_length - n + k ] ); + FIRST_3210 = _mm_loadu_si128( (__m128i *)&C_first_row[ k ] ); + PTR_3210 = _mm_shuffle_epi32( PTR_3210, _MM_SHUFFLE( 0, 1, 2, 3 ) ); + LAST_3210 = _mm_loadu_si128( (__m128i *)&C_last_row[ k ] ); + ATMP_3210 = _mm_loadu_si128( (__m128i *)&Af_QA[ k ] ); + + T1_3210 = _mm_mullo_epi32( PTR_3210, X1_3210 ); + T2_3210 = _mm_mullo_epi32( SUBFR_3210, X2_3210 ); + + ATMP_3210 = _mm_srai_epi32( ATMP_3210, 7 ); + ATMP_3210 = _mm_add_epi32( ATMP_3210, CONST1 ); + ATMP_3210 = _mm_srai_epi32( ATMP_3210, 1 ); + + FIRST_3210 = _mm_add_epi32( FIRST_3210, T1_3210 ); + LAST_3210 = _mm_add_epi32( LAST_3210, T2_3210 ); + + PTR_3210 = _mm_mullo_epi32( ATMP_3210, PTR_3210 ); + SUBFR_3210 = _mm_mullo_epi32( ATMP_3210, SUBFR_3210 ); + + _mm_storeu_si128( (__m128i *)&C_first_row[ k ], FIRST_3210 ); + _mm_storeu_si128( (__m128i *)&C_last_row[ k ], LAST_3210 ); + + TMP1_3210 = _mm_add_epi32( TMP1_3210, PTR_3210 ); + TMP2_3210 = _mm_add_epi32( TMP2_3210, SUBFR_3210 ); + } + + TMP1_3210 = _mm_add_epi32( TMP1_3210, _mm_unpackhi_epi64(TMP1_3210, TMP1_3210 ) ); + TMP2_3210 = _mm_add_epi32( TMP2_3210, _mm_unpackhi_epi64(TMP2_3210, TMP2_3210 ) ); + TMP1_3210 = _mm_add_epi32( TMP1_3210, _mm_shufflelo_epi16(TMP1_3210, 0x0E ) ); + TMP2_3210 = _mm_add_epi32( TMP2_3210, _mm_shufflelo_epi16(TMP2_3210, 0x0E ) ); + + tmp1 += _mm_cvtsi128_si32( TMP1_3210 ); + tmp2 += _mm_cvtsi128_si32( TMP2_3210 ); + + for( ; k < n; k++ ) { + C_first_row[ k ] = silk_MLA( C_first_row[ k ], x1, x_ptr[ n - k - 1 ] ); /* Q( -rshifts ) */ + C_last_row[ k ] = silk_MLA( C_last_row[ k ], x2, x_ptr[ subfr_length - n + k ] ); /* Q( -rshifts ) */ + Atmp1 = silk_RSHIFT_ROUND( Af_QA[ k ], QA - 17 ); /* Q17 */ + /* We sometimes get overflows in the multiplications (even beyond +/- 2^32), + but they cancel each other and the real result seems to always fit in a 32-bit + signed integer. This was determined experimentally, not theoretically (unfortunately). 
*/ + tmp1 = silk_MLA_ovflw( tmp1, x_ptr[ n - k - 1 ], Atmp1 ); /* Q17 */ + tmp2 = silk_MLA_ovflw( tmp2, x_ptr[ subfr_length - n + k ], Atmp1 ); /* Q17 */ + } + + tmp1 = -tmp1; /* Q17 */ + tmp2 = -tmp2; /* Q17 */ + + { + __m128i xmm_tmp1, xmm_tmp2; + __m128i xmm_x_ptr_n_k_x2x0, xmm_x_ptr_n_k_x3x1; + __m128i xmm_x_ptr_sub_x2x0, xmm_x_ptr_sub_x3x1; + + xmm_tmp1 = _mm_set1_epi32( tmp1 ); + xmm_tmp2 = _mm_set1_epi32( tmp2 ); + + for( k = 0; k <= n - 3; k += 4 ) { + xmm_x_ptr_n_k_x2x0 = OP_CVTEPI16_EPI32_M64( &x_ptr[ n - k - 3 ] ); + xmm_x_ptr_sub_x2x0 = OP_CVTEPI16_EPI32_M64( &x_ptr[ subfr_length - n + k - 1 ] ); + + xmm_x_ptr_n_k_x2x0 = _mm_shuffle_epi32( xmm_x_ptr_n_k_x2x0, _MM_SHUFFLE( 0, 1, 2, 3 ) ); + + xmm_x_ptr_n_k_x2x0 = _mm_slli_epi32( xmm_x_ptr_n_k_x2x0, -rshifts - 1 ); + xmm_x_ptr_sub_x2x0 = _mm_slli_epi32( xmm_x_ptr_sub_x2x0, -rshifts - 1 ); + + /* equal shift right 4 bytes, xmm_x_ptr_n_k_x3x1 = _mm_srli_si128(xmm_x_ptr_n_k_x2x0, 4)*/ + xmm_x_ptr_n_k_x3x1 = _mm_shuffle_epi32( xmm_x_ptr_n_k_x2x0, _MM_SHUFFLE( 0, 3, 2, 1 ) ); + xmm_x_ptr_sub_x3x1 = _mm_shuffle_epi32( xmm_x_ptr_sub_x2x0, _MM_SHUFFLE( 0, 3, 2, 1 ) ); + + xmm_x_ptr_n_k_x2x0 = _mm_mul_epi32( xmm_x_ptr_n_k_x2x0, xmm_tmp1 ); + xmm_x_ptr_n_k_x3x1 = _mm_mul_epi32( xmm_x_ptr_n_k_x3x1, xmm_tmp1 ); + xmm_x_ptr_sub_x2x0 = _mm_mul_epi32( xmm_x_ptr_sub_x2x0, xmm_tmp2 ); + xmm_x_ptr_sub_x3x1 = _mm_mul_epi32( xmm_x_ptr_sub_x3x1, xmm_tmp2 ); + + xmm_x_ptr_n_k_x2x0 = _mm_srli_epi64( xmm_x_ptr_n_k_x2x0, 16 ); + xmm_x_ptr_n_k_x3x1 = _mm_slli_epi64( xmm_x_ptr_n_k_x3x1, 16 ); + xmm_x_ptr_sub_x2x0 = _mm_srli_epi64( xmm_x_ptr_sub_x2x0, 16 ); + xmm_x_ptr_sub_x3x1 = _mm_slli_epi64( xmm_x_ptr_sub_x3x1, 16 ); + + xmm_x_ptr_n_k_x2x0 = _mm_blend_epi16( xmm_x_ptr_n_k_x2x0, xmm_x_ptr_n_k_x3x1, 0xCC ); + xmm_x_ptr_sub_x2x0 = _mm_blend_epi16( xmm_x_ptr_sub_x2x0, xmm_x_ptr_sub_x3x1, 0xCC ); + + X1_3210 = _mm_loadu_si128( (__m128i *)&CAf[ k ] ); + PTR_3210 = _mm_loadu_si128( (__m128i *)&CAb[ k ] ); + + X1_3210 = _mm_add_epi32( X1_3210, xmm_x_ptr_n_k_x2x0 ); + PTR_3210 = _mm_add_epi32( PTR_3210, xmm_x_ptr_sub_x2x0 ); + + _mm_storeu_si128( (__m128i *)&CAf[ k ], X1_3210 ); + _mm_storeu_si128( (__m128i *)&CAb[ k ], PTR_3210 ); + } + + for( ; k <= n; k++ ) { + CAf[ k ] = silk_SMLAWW( CAf[ k ], tmp1, + silk_LSHIFT32( (opus_int32)x_ptr[ n - k ], -rshifts - 1 ) ); /* Q( -rshift ) */ + CAb[ k ] = silk_SMLAWW( CAb[ k ], tmp2, + silk_LSHIFT32( (opus_int32)x_ptr[ subfr_length - n + k - 1 ], -rshifts - 1 ) ); /* Q( -rshift ) */ + } + } + } + } + + /* Calculate nominator and denominator for the next order reflection (parcor) coefficient */ + tmp1 = C_first_row[ n ]; /* Q( -rshifts ) */ + tmp2 = C_last_row[ n ]; /* Q( -rshifts ) */ + num = 0; /* Q( -rshifts ) */ + nrg = silk_ADD32( CAb[ 0 ], CAf[ 0 ] ); /* Q( 1-rshifts ) */ + for( k = 0; k < n; k++ ) { + Atmp_QA = Af_QA[ k ]; + lz = silk_CLZ32( silk_abs( Atmp_QA ) ) - 1; + lz = silk_min( 32 - QA, lz ); + Atmp1 = silk_LSHIFT32( Atmp_QA, lz ); /* Q( QA + lz ) */ + + tmp1 = silk_ADD_LSHIFT32( tmp1, silk_SMMUL( C_last_row[ n - k - 1 ], Atmp1 ), 32 - QA - lz ); /* Q( -rshifts ) */ + tmp2 = silk_ADD_LSHIFT32( tmp2, silk_SMMUL( C_first_row[ n - k - 1 ], Atmp1 ), 32 - QA - lz ); /* Q( -rshifts ) */ + num = silk_ADD_LSHIFT32( num, silk_SMMUL( CAb[ n - k ], Atmp1 ), 32 - QA - lz ); /* Q( -rshifts ) */ + nrg = silk_ADD_LSHIFT32( nrg, silk_SMMUL( silk_ADD32( CAb[ k + 1 ], CAf[ k + 1 ] ), + Atmp1 ), 32 - QA - lz ); /* Q( 1-rshifts ) */ + } + CAf[ n + 1 ] = tmp1; /* Q( -rshifts ) */ + CAb[ n + 1 ] = tmp2; /* Q( -rshifts ) */ + num 
= silk_ADD32( num, tmp2 ); /* Q( -rshifts ) */ + num = silk_LSHIFT32( -num, 1 ); /* Q( 1-rshifts ) */ + + /* Calculate the next order reflection (parcor) coefficient */ + if( silk_abs( num ) < nrg ) { + rc_Q31 = silk_DIV32_varQ( num, nrg, 31 ); + } else { + rc_Q31 = ( num > 0 ) ? silk_int32_MAX : silk_int32_MIN; + } + + /* Update inverse prediction gain */ + tmp1 = ( (opus_int32)1 << 30 ) - silk_SMMUL( rc_Q31, rc_Q31 ); + tmp1 = silk_LSHIFT( silk_SMMUL( invGain_Q30, tmp1 ), 2 ); + if( tmp1 <= minInvGain_Q30 ) { + /* Max prediction gain exceeded; set reflection coefficient such that max prediction gain is exactly hit */ + tmp2 = ( (opus_int32)1 << 30 ) - silk_DIV32_varQ( minInvGain_Q30, invGain_Q30, 30 ); /* Q30 */ + rc_Q31 = silk_SQRT_APPROX( tmp2 ); /* Q15 */ + if( rc_Q31 > 0 ) { + /* Newton-Raphson iteration */ + rc_Q31 = silk_RSHIFT32( rc_Q31 + silk_DIV32( tmp2, rc_Q31 ), 1 ); /* Q15 */ + rc_Q31 = silk_LSHIFT32( rc_Q31, 16 ); /* Q31 */ + if( num < 0 ) { + /* Ensure adjusted reflection coefficients has the original sign */ + rc_Q31 = -rc_Q31; + } + } + invGain_Q30 = minInvGain_Q30; + reached_max_gain = 1; + } else { + invGain_Q30 = tmp1; + } + + /* Update the AR coefficients */ + for( k = 0; k < (n + 1) >> 1; k++ ) { + tmp1 = Af_QA[ k ]; /* QA */ + tmp2 = Af_QA[ n - k - 1 ]; /* QA */ + Af_QA[ k ] = silk_ADD_LSHIFT32( tmp1, silk_SMMUL( tmp2, rc_Q31 ), 1 ); /* QA */ + Af_QA[ n - k - 1 ] = silk_ADD_LSHIFT32( tmp2, silk_SMMUL( tmp1, rc_Q31 ), 1 ); /* QA */ + } + Af_QA[ n ] = silk_RSHIFT32( rc_Q31, 31 - QA ); /* QA */ + + if( reached_max_gain ) { + /* Reached max prediction gain; set remaining coefficients to zero and exit loop */ + for( k = n + 1; k < D; k++ ) { + Af_QA[ k ] = 0; + } + break; + } + + /* Update C * Af and C * Ab */ + for( k = 0; k <= n + 1; k++ ) { + tmp1 = CAf[ k ]; /* Q( -rshifts ) */ + tmp2 = CAb[ n - k + 1 ]; /* Q( -rshifts ) */ + CAf[ k ] = silk_ADD_LSHIFT32( tmp1, silk_SMMUL( tmp2, rc_Q31 ), 1 ); /* Q( -rshifts ) */ + CAb[ n - k + 1 ] = silk_ADD_LSHIFT32( tmp2, silk_SMMUL( tmp1, rc_Q31 ), 1 ); /* Q( -rshifts ) */ + } + } + + if( reached_max_gain ) { + for( k = 0; k < D; k++ ) { + /* Scale coefficients */ + A_Q16[ k ] = -silk_RSHIFT_ROUND( Af_QA[ k ], QA - 16 ); + } + /* Subtract energy of preceding samples from C0 */ + if( rshifts > 0 ) { + for( s = 0; s < nb_subfr; s++ ) { + x_ptr = x + s * subfr_length; + C0 -= (opus_int32)silk_RSHIFT64( silk_inner_prod16( x_ptr, x_ptr, D, arch ), rshifts ); + } + } else { + for( s = 0; s < nb_subfr; s++ ) { + x_ptr = x + s * subfr_length; + C0 -= silk_LSHIFT32( silk_inner_prod_aligned( x_ptr, x_ptr, D, arch ), -rshifts ); + } + } + /* Approximate residual energy */ + *res_nrg = silk_LSHIFT( silk_SMMUL( invGain_Q30, C0 ), 2 ); + *res_nrg_Q = -rshifts; + } else { + /* Return residual energy */ + nrg = CAf[ 0 ]; /* Q( -rshifts ) */ + tmp1 = (opus_int32)1 << 16; /* Q16 */ + for( k = 0; k < D; k++ ) { + Atmp1 = silk_RSHIFT_ROUND( Af_QA[ k ], QA - 16 ); /* Q16 */ + nrg = silk_SMLAWW( nrg, CAf[ k + 1 ], Atmp1 ); /* Q( -rshifts ) */ + tmp1 = silk_SMLAWW( tmp1, Atmp1, Atmp1 ); /* Q16 */ + A_Q16[ k ] = -Atmp1; + } + *res_nrg = silk_SMLAWW( nrg, silk_SMMUL( SILK_FIX_CONST( FIND_LPC_COND_FAC, 32 ), C0 ), -tmp1 );/* Q( -rshifts ) */ + *res_nrg_Q = -rshifts; + } + +#ifdef OPUS_CHECK_ASM + { + opus_int32 res_nrg_c = 0; + opus_int res_nrg_Q_c = 0; + opus_int32 A_Q16_c[ MAX_LPC_ORDER ] = {0}; + + silk_burg_modified_c( + &res_nrg_c, + &res_nrg_Q_c, + A_Q16_c, + x, + minInvGain_Q30, + subfr_length, + nb_subfr, + D, + 0 + ); + + silk_assert( *res_nrg == 
res_nrg_c ); + silk_assert( *res_nrg_Q == res_nrg_Q_c ); + silk_assert( !memcmp( A_Q16, A_Q16_c, D * sizeof( *A_Q16 ) ) ); + } +#endif +} diff --git a/libs/libopus/silk/fixed/x86/prefilter_FIX_sse.c b/libs/libopus/silk/fixed/x86/prefilter_FIX_sse.c deleted file mode 100644 index 488a603f5..000000000 --- a/libs/libopus/silk/fixed/x86/prefilter_FIX_sse.c +++ /dev/null @@ -1,160 +0,0 @@ -/* Copyright (c) 2014, Cisco Systems, INC - Written by XiangMingZhu WeiZhou MinPeng YanWang - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - - Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - - Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER - OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, - EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR - PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF - LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING - NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ - -#ifdef HAVE_CONFIG_H -#include "config.h" -#endif - -#include -#include -#include -#include "main.h" -#include "celt/x86/x86cpu.h" - -void silk_warped_LPC_analysis_filter_FIX_sse4_1( - opus_int32 state[], /* I/O State [order + 1] */ - opus_int32 res_Q2[], /* O Residual signal [length] */ - const opus_int16 coef_Q13[], /* I Coefficients [order] */ - const opus_int16 input[], /* I Input signal [length] */ - const opus_int16 lambda_Q16, /* I Warping factor */ - const opus_int length, /* I Length of input signal */ - const opus_int order /* I Filter order (even) */ -) -{ - opus_int n, i; - opus_int32 acc_Q11, tmp1, tmp2; - - /* Order must be even */ - silk_assert( ( order & 1 ) == 0 ); - - if (order == 10) - { - if (0 == lambda_Q16) - { - __m128i coef_Q13_3210, coef_Q13_7654; - __m128i coef_Q13_0123, coef_Q13_4567; - __m128i state_0123, state_4567; - __m128i xmm_product1, xmm_product2; - __m128i xmm_tempa, xmm_tempb; - - register opus_int32 sum; - register opus_int32 state_8, state_9, state_a; - register opus_int64 coef_Q13_8, coef_Q13_9; - - silk_assert( length > 0 ); - - coef_Q13_3210 = OP_CVTEPI16_EPI32_M64( &coef_Q13[ 0 ] ); - coef_Q13_7654 = OP_CVTEPI16_EPI32_M64( &coef_Q13[ 4 ] ); - - coef_Q13_0123 = _mm_shuffle_epi32( coef_Q13_3210, _MM_SHUFFLE( 0, 1, 2, 3 ) ); - coef_Q13_4567 = _mm_shuffle_epi32( coef_Q13_7654, _MM_SHUFFLE( 0, 1, 2, 3 ) ); - - coef_Q13_8 = (opus_int64) coef_Q13[ 8 ]; - coef_Q13_9 = (opus_int64) coef_Q13[ 9 ]; - - state_0123 = _mm_loadu_si128( (__m128i *)(&state[ 0 ] ) ); - state_4567 = _mm_loadu_si128( (__m128i *)(&state[ 4 ] ) ); - - state_0123 = _mm_shuffle_epi32( state_0123, _MM_SHUFFLE( 0, 1, 2, 3 ) ); - state_4567 = _mm_shuffle_epi32( state_4567, _MM_SHUFFLE( 0, 1, 2, 3 ) ); - - state_8 = state[ 8 ]; - state_9 = state[ 9 ]; - state_a 
= 0; - - for( n = 0; n < length; n++ ) - { - xmm_product1 = _mm_mul_epi32( coef_Q13_0123, state_0123 ); /* 64-bit multiply, only 2 pairs */ - xmm_product2 = _mm_mul_epi32( coef_Q13_4567, state_4567 ); - - xmm_tempa = _mm_shuffle_epi32( state_0123, _MM_SHUFFLE( 0, 1, 2, 3 ) ); - xmm_tempb = _mm_shuffle_epi32( state_4567, _MM_SHUFFLE( 0, 1, 2, 3 ) ); - - xmm_product1 = _mm_srli_epi64( xmm_product1, 16 ); /* >> 16, zero extending works */ - xmm_product2 = _mm_srli_epi64( xmm_product2, 16 ); - - xmm_tempa = _mm_mul_epi32( coef_Q13_3210, xmm_tempa ); - xmm_tempb = _mm_mul_epi32( coef_Q13_7654, xmm_tempb ); - - xmm_tempa = _mm_srli_epi64( xmm_tempa, 16 ); - xmm_tempb = _mm_srli_epi64( xmm_tempb, 16 ); - - xmm_tempa = _mm_add_epi32( xmm_tempa, xmm_product1 ); - xmm_tempb = _mm_add_epi32( xmm_tempb, xmm_product2 ); - xmm_tempa = _mm_add_epi32( xmm_tempa, xmm_tempb ); - - sum = (coef_Q13_8 * state_8) >> 16; - sum += (coef_Q13_9 * state_9) >> 16; - - xmm_tempa = _mm_add_epi32( xmm_tempa, _mm_shuffle_epi32( xmm_tempa, _MM_SHUFFLE( 0, 0, 0, 2 ) ) ); - sum += _mm_cvtsi128_si32( xmm_tempa); - res_Q2[ n ] = silk_LSHIFT( (opus_int32)input[ n ], 2 ) - silk_RSHIFT_ROUND( ( 5 + sum ), 9); - - /* move right */ - state_a = state_9; - state_9 = state_8; - state_8 = _mm_cvtsi128_si32( state_4567 ); - state_4567 = _mm_alignr_epi8( state_0123, state_4567, 4 ); - - state_0123 = _mm_alignr_epi8( _mm_cvtsi32_si128( silk_LSHIFT( input[ n ], 14 ) ), state_0123, 4 ); - } - - _mm_storeu_si128( (__m128i *)( &state[ 0 ] ), _mm_shuffle_epi32( state_0123, _MM_SHUFFLE( 0, 1, 2, 3 ) ) ); - _mm_storeu_si128( (__m128i *)( &state[ 4 ] ), _mm_shuffle_epi32( state_4567, _MM_SHUFFLE( 0, 1, 2, 3 ) ) ); - state[ 8 ] = state_8; - state[ 9 ] = state_9; - state[ 10 ] = state_a; - - return; - } - } - - for( n = 0; n < length; n++ ) { - /* Output of lowpass section */ - tmp2 = silk_SMLAWB( state[ 0 ], state[ 1 ], lambda_Q16 ); - state[ 0 ] = silk_LSHIFT( input[ n ], 14 ); - /* Output of allpass section */ - tmp1 = silk_SMLAWB( state[ 1 ], state[ 2 ] - tmp2, lambda_Q16 ); - state[ 1 ] = tmp2; - acc_Q11 = silk_RSHIFT( order, 1 ); - acc_Q11 = silk_SMLAWB( acc_Q11, tmp2, coef_Q13[ 0 ] ); - /* Loop over allpass sections */ - for( i = 2; i < order; i += 2 ) { - /* Output of allpass section */ - tmp2 = silk_SMLAWB( state[ i ], state[ i + 1 ] - tmp1, lambda_Q16 ); - state[ i ] = tmp1; - acc_Q11 = silk_SMLAWB( acc_Q11, tmp1, coef_Q13[ i - 1 ] ); - /* Output of allpass section */ - tmp1 = silk_SMLAWB( state[ i + 1 ], state[ i + 2 ] - tmp2, lambda_Q16 ); - state[ i + 1 ] = tmp2; - acc_Q11 = silk_SMLAWB( acc_Q11, tmp2, coef_Q13[ i ] ); - } - state[ order ] = tmp1; - acc_Q11 = silk_SMLAWB( acc_Q11, tmp1, coef_Q13[ order - 1 ] ); - res_Q2[ n ] = silk_LSHIFT( (opus_int32)input[ n ], 2 ) - silk_RSHIFT_ROUND( acc_Q11, 9 ); - } -} diff --git a/libs/libopus/silk/fixed/x86/vector_ops_FIX_sse.c b/libs/libopus/silk/fixed/x86/vector_ops_FIX_sse.c deleted file mode 100644 index c1e90564d..000000000 --- a/libs/libopus/silk/fixed/x86/vector_ops_FIX_sse.c +++ /dev/null @@ -1,88 +0,0 @@ -/* Copyright (c) 2014, Cisco Systems, INC - Written by XiangMingZhu WeiZhou MinPeng YanWang - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - - Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. 
- - - Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER - OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, - EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR - PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF - LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING - NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ - -#ifdef HAVE_CONFIG_H -#include "config.h" -#endif - -#include -#include -#include -#include "main.h" - -#include "SigProc_FIX.h" -#include "pitch.h" - -opus_int64 silk_inner_prod16_aligned_64_sse4_1( - const opus_int16 *inVec1, /* I input vector 1 */ - const opus_int16 *inVec2, /* I input vector 2 */ - const opus_int len /* I vector lengths */ -) -{ - opus_int i, dataSize8; - opus_int64 sum; - - __m128i xmm_tempa; - __m128i inVec1_76543210, acc1; - __m128i inVec2_76543210, acc2; - - sum = 0; - dataSize8 = len & ~7; - - acc1 = _mm_setzero_si128(); - acc2 = _mm_setzero_si128(); - - for( i = 0; i < dataSize8; i += 8 ) { - inVec1_76543210 = _mm_loadu_si128( (__m128i *)(&inVec1[i + 0] ) ); - inVec2_76543210 = _mm_loadu_si128( (__m128i *)(&inVec2[i + 0] ) ); - - /* only when all 4 operands are -32768 (0x8000), this results in wrap around */ - inVec1_76543210 = _mm_madd_epi16( inVec1_76543210, inVec2_76543210 ); - - xmm_tempa = _mm_cvtepi32_epi64( inVec1_76543210 ); - /* equal shift right 8 bytes */ - inVec1_76543210 = _mm_shuffle_epi32( inVec1_76543210, _MM_SHUFFLE( 0, 0, 3, 2 ) ); - inVec1_76543210 = _mm_cvtepi32_epi64( inVec1_76543210 ); - - acc1 = _mm_add_epi64( acc1, xmm_tempa ); - acc2 = _mm_add_epi64( acc2, inVec1_76543210 ); - } - - acc1 = _mm_add_epi64( acc1, acc2 ); - - /* equal shift right 8 bytes */ - acc2 = _mm_shuffle_epi32( acc1, _MM_SHUFFLE( 0, 0, 3, 2 ) ); - acc1 = _mm_add_epi64( acc1, acc2 ); - - _mm_storel_epi64( (__m128i *)&sum, acc1 ); - - for( ; i < len; i++ ) { - sum = silk_SMLABB( sum, inVec1[ i ], inVec2[ i ] ); - } - - return sum; -} diff --git a/libs/libopus/silk/fixed/x86/vector_ops_FIX_sse4_1.c b/libs/libopus/silk/fixed/x86/vector_ops_FIX_sse4_1.c new file mode 100644 index 000000000..0cfb08d90 --- /dev/null +++ b/libs/libopus/silk/fixed/x86/vector_ops_FIX_sse4_1.c @@ -0,0 +1,92 @@ +/* Copyright (c) 2014, Cisco Systems, INC + Written by XiangMingZhu WeiZhou MinPeng YanWang + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + - Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + - Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. 
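/*
 * Aside on the wrap-around note in the deleted SSE4.1 inner product above:
 * _mm_madd_epi16 adds two adjacent int16*int16 products into one int32 lane, and
 * only when both products are (-32768)*(-32768) = 2^30 does the sum reach 2^31,
 * which no longer fits in a signed 32-bit lane. A standalone scalar illustration
 * (plain C99 types, not libopus code):
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    int32_t p        = (int32_t)-32768 * -32768;     /* 2^30, still fits in int32   */
    int64_t pair_sum = (int64_t)p + p;               /* 2^31, needs a 64-bit result */
    printf("%ld %lld\n", (long)p, (long long)pair_sum);
    return 0;
}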
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER + OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifdef HAVE_CONFIG_H +#include "config.h" +#endif + +#include +#include +#include +#include "main.h" + +#include "SigProc_FIX.h" +#include "pitch.h" + +opus_int64 silk_inner_prod16_sse4_1( + const opus_int16 *inVec1, /* I input vector 1 */ + const opus_int16 *inVec2, /* I input vector 2 */ + const opus_int len /* I vector lengths */ +) +{ + opus_int i, dataSize4; + opus_int64 sum; + + __m128i xmm_prod_20, xmm_prod_31; + __m128i inVec1_3210, acc1; + __m128i inVec2_3210, acc2; + + sum = 0; + dataSize4 = len & ~3; + + acc1 = _mm_setzero_si128(); + acc2 = _mm_setzero_si128(); + + for( i = 0; i < dataSize4; i += 4 ) { + inVec1_3210 = OP_CVTEPI16_EPI32_M64( &inVec1[i + 0] ); + inVec2_3210 = OP_CVTEPI16_EPI32_M64( &inVec2[i + 0] ); + xmm_prod_20 = _mm_mul_epi32( inVec1_3210, inVec2_3210 ); + + inVec1_3210 = _mm_shuffle_epi32( inVec1_3210, _MM_SHUFFLE( 0, 3, 2, 1 ) ); + inVec2_3210 = _mm_shuffle_epi32( inVec2_3210, _MM_SHUFFLE( 0, 3, 2, 1 ) ); + xmm_prod_31 = _mm_mul_epi32( inVec1_3210, inVec2_3210 ); + + acc1 = _mm_add_epi64( acc1, xmm_prod_20 ); + acc2 = _mm_add_epi64( acc2, xmm_prod_31 ); + } + + acc1 = _mm_add_epi64( acc1, acc2 ); + + /* equal shift right 8 bytes */ + acc2 = _mm_shuffle_epi32( acc1, _MM_SHUFFLE( 0, 0, 3, 2 ) ); + acc1 = _mm_add_epi64( acc1, acc2 ); + + _mm_storel_epi64( (__m128i *)&sum, acc1 ); + + for( ; i < len; i++ ) { + sum = silk_SMLABB( sum, inVec1[ i ], inVec2[ i ] ); + } + +#ifdef OPUS_CHECK_ASM + { + opus_int64 sum_c = silk_inner_prod16_c( inVec1, inVec2, len ); + silk_assert( sum == sum_c ); + } +#endif + + return sum; +} diff --git a/libs/libopus/silk/float/LPC_analysis_filter_FLP.c b/libs/libopus/silk/float/LPC_analysis_filter_FLP.c index cae89a0a1..0e1a1fed0 100644 --- a/libs/libopus/silk/float/LPC_analysis_filter_FLP.c +++ b/libs/libopus/silk/float/LPC_analysis_filter_FLP.c @@ -215,7 +215,7 @@ void silk_LPC_analysis_filter_FLP( const opus_int Order /* I LPC order */ ) { - silk_assert( Order <= length ); + celt_assert( Order <= length ); switch( Order ) { case 6: @@ -239,7 +239,7 @@ void silk_LPC_analysis_filter_FLP( break; default: - silk_assert( 0 ); + celt_assert( 0 ); break; } diff --git a/libs/libopus/silk/float/LPC_inv_pred_gain_FLP.c b/libs/libopus/silk/float/LPC_inv_pred_gain_FLP.c index 25178bacd..2be2122d6 100644 --- a/libs/libopus/silk/float/LPC_inv_pred_gain_FLP.c +++ b/libs/libopus/silk/float/LPC_inv_pred_gain_FLP.c @@ -31,8 +31,7 @@ POSSIBILITY OF SUCH DAMAGE. 
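/*
 * The replacement silk_inner_prod16_sse4_1 above cross-checks itself against the
 * scalar reference when OPUS_CHECK_ASM is defined. A minimal standalone sketch of
 * what that reference computes (a 64-bit accumulated int16 dot product); the types
 * are plain C99 stand-ins for the opus_int16/opus_int64 typedefs and the function
 * name is invented.
 */
#include <stdint.h>

int64_t inner_prod16_ref(const int16_t *a, const int16_t *b, int len)
{
    int64_t sum = 0;
    int i;
    for (i = 0; i < len; i++) {
        sum += (int64_t)a[i] * b[i];                 /* widen before accumulating */
    }
    return sum;
}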
#include "SigProc_FIX.h" #include "SigProc_FLP.h" - -#define RC_THRESHOLD 0.9999f +#include "define.h" /* compute inverse of LPC prediction gain, and */ /* test if LPC coefficients are stable (all poles within unit circle) */ @@ -43,34 +42,32 @@ silk_float silk_LPC_inverse_pred_gain_FLP( /* O return inverse prediction ga ) { opus_int k, n; - double invGain, rc, rc_mult1, rc_mult2; - silk_float Atmp[ 2 ][ SILK_MAX_ORDER_LPC ]; - silk_float *Aold, *Anew; + double invGain, rc, rc_mult1, rc_mult2, tmp1, tmp2; + silk_float Atmp[ SILK_MAX_ORDER_LPC ]; - Anew = Atmp[ order & 1 ]; - silk_memcpy( Anew, A, order * sizeof(silk_float) ); + silk_memcpy( Atmp, A, order * sizeof(silk_float) ); invGain = 1.0; for( k = order - 1; k > 0; k-- ) { - rc = -Anew[ k ]; - if( rc > RC_THRESHOLD || rc < -RC_THRESHOLD ) { + rc = -Atmp[ k ]; + rc_mult1 = 1.0f - rc * rc; + invGain *= rc_mult1; + if( invGain * MAX_PREDICTION_POWER_GAIN < 1.0f ) { return 0.0f; } - rc_mult1 = 1.0f - rc * rc; rc_mult2 = 1.0f / rc_mult1; - invGain *= rc_mult1; - /* swap pointers */ - Aold = Anew; - Anew = Atmp[ k & 1 ]; - for( n = 0; n < k; n++ ) { - Anew[ n ] = (silk_float)( ( Aold[ n ] - Aold[ k - n - 1 ] * rc ) * rc_mult2 ); + for( n = 0; n < (k + 1) >> 1; n++ ) { + tmp1 = Atmp[ n ]; + tmp2 = Atmp[ k - n - 1 ]; + Atmp[ n ] = (silk_float)( ( tmp1 - tmp2 * rc ) * rc_mult2 ); + Atmp[ k - n - 1 ] = (silk_float)( ( tmp2 - tmp1 * rc ) * rc_mult2 ); } } - rc = -Anew[ 0 ]; - if( rc > RC_THRESHOLD || rc < -RC_THRESHOLD ) { - return 0.0f; - } + rc = -Atmp[ 0 ]; rc_mult1 = 1.0f - rc * rc; invGain *= rc_mult1; + if( invGain * MAX_PREDICTION_POWER_GAIN < 1.0f ) { + return 0.0f; + } return (silk_float)invGain; } diff --git a/libs/libopus/silk/float/SigProc_FLP.h b/libs/libopus/silk/float/SigProc_FLP.h index f0cb3733b..953de8b09 100644 --- a/libs/libopus/silk/float/SigProc_FLP.h +++ b/libs/libopus/silk/float/SigProc_FLP.h @@ -68,13 +68,6 @@ void silk_k2a_FLP( opus_int32 order /* I prediction order */ ); -/* Solve the normal equations using the Levinson-Durbin recursion */ -silk_float silk_levinsondurbin_FLP( /* O prediction error energy */ - silk_float A[], /* O prediction coefficients [order] */ - const silk_float corr[], /* I input auto-correlations [order + 1] */ - const opus_int order /* I prediction order */ -); - /* compute autocorrelation */ void silk_autocorrelation_FLP( silk_float *results, /* O result (length correlationCount) */ diff --git a/libs/libopus/silk/float/apply_sine_window_FLP.c b/libs/libopus/silk/float/apply_sine_window_FLP.c index 6aae57c0a..e49e71799 100644 --- a/libs/libopus/silk/float/apply_sine_window_FLP.c +++ b/libs/libopus/silk/float/apply_sine_window_FLP.c @@ -45,10 +45,10 @@ void silk_apply_sine_window_FLP( opus_int k; silk_float freq, c, S0, S1; - silk_assert( win_type == 1 || win_type == 2 ); + celt_assert( win_type == 1 || win_type == 2 ); /* Length must be multiple of 4 */ - silk_assert( ( length & 3 ) == 0 ); + celt_assert( ( length & 3 ) == 0 ); freq = PI / ( length + 1 ); diff --git a/libs/libopus/silk/float/burg_modified_FLP.c b/libs/libopus/silk/float/burg_modified_FLP.c index ea5dc25a9..756b76a35 100644 --- a/libs/libopus/silk/float/burg_modified_FLP.c +++ b/libs/libopus/silk/float/burg_modified_FLP.c @@ -52,7 +52,7 @@ silk_float silk_burg_modified_FLP( /* O returns residual energy double CAf[ SILK_MAX_ORDER_LPC + 1 ], CAb[ SILK_MAX_ORDER_LPC + 1 ]; double Af[ SILK_MAX_ORDER_LPC ]; - silk_assert( subfr_length * nb_subfr <= MAX_FRAME_SIZE ); + celt_assert( subfr_length * nb_subfr <= MAX_FRAME_SIZE ); /* 
Compute autocorrelations, added over subframes */ C0 = silk_energy_FLP( x, nb_subfr * subfr_length ); diff --git a/libs/libopus/silk/float/encode_frame_FLP.c b/libs/libopus/silk/float/encode_frame_FLP.c index 2092a4d9e..b029c3f5c 100644 --- a/libs/libopus/silk/float/encode_frame_FLP.c +++ b/libs/libopus/silk/float/encode_frame_FLP.c @@ -29,6 +29,7 @@ POSSIBILITY OF SUCH DAMAGE. #include "config.h" #endif +#include #include "main_FLP.h" #include "tuning_parameters.h" @@ -41,21 +42,28 @@ static OPUS_INLINE void silk_LBRR_encode_FLP( ); void silk_encode_do_VAD_FLP( - silk_encoder_state_FLP *psEnc /* I/O Encoder state FLP */ + silk_encoder_state_FLP *psEnc, /* I/O Encoder state FLP */ + opus_int activity /* I Decision of Opus voice activity detector */ ) { + const opus_int activity_threshold = SILK_FIX_CONST( SPEECH_ACTIVITY_DTX_THRES, 8 ); + /****************************/ /* Voice Activity Detection */ /****************************/ silk_VAD_GetSA_Q8( &psEnc->sCmn, psEnc->sCmn.inputBuf + 1, psEnc->sCmn.arch ); + /* If Opus VAD is inactive and Silk VAD is active: lower Silk VAD to just under the threshold */ + if( activity == VAD_NO_ACTIVITY && psEnc->sCmn.speech_activity_Q8 >= activity_threshold ) { + psEnc->sCmn.speech_activity_Q8 = activity_threshold - 1; + } /**************************************************/ /* Convert speech activity into VAD and DTX flags */ /**************************************************/ - if( psEnc->sCmn.speech_activity_Q8 < SILK_FIX_CONST( SPEECH_ACTIVITY_DTX_THRES, 8 ) ) { + if( psEnc->sCmn.speech_activity_Q8 < activity_threshold ) { psEnc->sCmn.indices.signalType = TYPE_NO_VOICE_ACTIVITY; psEnc->sCmn.noSpeechCounter++; - if( psEnc->sCmn.noSpeechCounter < NB_SPEECH_FRAMES_BEFORE_DTX ) { + if( psEnc->sCmn.noSpeechCounter <= NB_SPEECH_FRAMES_BEFORE_DTX ) { psEnc->sCmn.inDTX = 0; } else if( psEnc->sCmn.noSpeechCounter > MAX_CONSECUTIVE_DTX + NB_SPEECH_FRAMES_BEFORE_DTX ) { psEnc->sCmn.noSpeechCounter = NB_SPEECH_FRAMES_BEFORE_DTX; @@ -85,7 +93,6 @@ opus_int silk_encode_frame_FLP( silk_encoder_control_FLP sEncCtrl; opus_int i, iter, maxIter, found_upper, found_lower, ret = 0; silk_float *x_frame, *res_pitch_frame; - silk_float xfw[ MAX_FRAME_LENGTH ]; silk_float res_pitch[ 2 * MAX_FRAME_LENGTH + LA_PITCH_MAX ]; ec_enc sRangeEnc_copy, sRangeEnc_copy2; silk_nsq_state sNSQ_copy, sNSQ_copy2; @@ -97,6 +104,9 @@ opus_int silk_encode_frame_FLP( opus_int8 LastGainIndex_copy2; opus_int32 pGains_Q16[ MAX_NB_SUBFR ]; opus_uint8 ec_buf_copy[ 1275 ]; + opus_int gain_lock[ MAX_NB_SUBFR ] = {0}; + opus_int16 best_gain_mult[ MAX_NB_SUBFR ]; + opus_int best_sum[ MAX_NB_SUBFR ]; /* This is totally unnecessary but many compilers (including gcc) are too dumb to realise it */ LastGainIndex_copy2 = nBits_lower = nBits_upper = gainMult_lower = gainMult_upper = 0; @@ -139,22 +149,17 @@ opus_int silk_encode_frame_FLP( /***************************************************/ /* Find linear prediction coefficients (LPC + LTP) */ /***************************************************/ - silk_find_pred_coefs_FLP( psEnc, &sEncCtrl, res_pitch, x_frame, condCoding ); + silk_find_pred_coefs_FLP( psEnc, &sEncCtrl, res_pitch_frame, x_frame, condCoding ); /****************************************/ /* Process gains */ /****************************************/ silk_process_gains_FLP( psEnc, &sEncCtrl, condCoding ); - /*****************************************/ - /* Prefiltering for noise shaper */ - /*****************************************/ - silk_prefilter_FLP( psEnc, &sEncCtrl, xfw, x_frame ); - 
/****************************************/ /* Low Bitrate Redundant Encoding */ /****************************************/ - silk_LBRR_encode_FLP( psEnc, &sEncCtrl, xfw, condCoding ); + silk_LBRR_encode_FLP( psEnc, &sEncCtrl, x_frame, condCoding ); /* Loop over quantizer and entroy coding to control bitrate */ maxIter = 6; @@ -188,7 +193,11 @@ opus_int silk_encode_frame_FLP( /*****************************************/ /* Noise shaping quantization */ /*****************************************/ - silk_NSQ_wrapper_FLP( psEnc, &sEncCtrl, &psEnc->sCmn.indices, &psEnc->sCmn.sNSQ, psEnc->sCmn.pulses, xfw ); + silk_NSQ_wrapper_FLP( psEnc, &sEncCtrl, &psEnc->sCmn.indices, &psEnc->sCmn.sNSQ, psEnc->sCmn.pulses, x_frame ); + + if ( iter == maxIter && !found_lower ) { + silk_memcpy( &sRangeEnc_copy2, psRangeEnc, sizeof( ec_enc ) ); + } /****************************************/ /* Encode Parameters */ @@ -203,6 +212,33 @@ opus_int silk_encode_frame_FLP( nBits = ec_tell( psRangeEnc ); + /* If we still bust after the last iteration, do some damage control. */ + if ( iter == maxIter && !found_lower && nBits > maxBits ) { + silk_memcpy( psRangeEnc, &sRangeEnc_copy2, sizeof( ec_enc ) ); + + /* Keep gains the same as the last frame. */ + psEnc->sShape.LastGainIndex = sEncCtrl.lastGainIndexPrev; + for ( i = 0; i < psEnc->sCmn.nb_subfr; i++ ) { + psEnc->sCmn.indices.GainsIndices[ i ] = 4; + } + if (condCoding != CODE_CONDITIONALLY) { + psEnc->sCmn.indices.GainsIndices[ 0 ] = sEncCtrl.lastGainIndexPrev; + } + psEnc->sCmn.ec_prevLagIndex = ec_prevLagIndex_copy; + psEnc->sCmn.ec_prevSignalType = ec_prevSignalType_copy; + /* Clear all pulses. */ + for ( i = 0; i < psEnc->sCmn.frame_length; i++ ) { + psEnc->sCmn.pulses[ i ] = 0; + } + + silk_encode_indices( &psEnc->sCmn, psRangeEnc, psEnc->sCmn.nFramesEncoded, 0, condCoding ); + + silk_encode_pulses( psRangeEnc, psEnc->sCmn.indices.signalType, psEnc->sCmn.indices.quantOffsetType, + psEnc->sCmn.pulses, psEnc->sCmn.frame_length ); + + nBits = ec_tell( psRangeEnc ); + } + if( useCBR == 0 && iter == 0 && nBits <= maxBits ) { break; } @@ -212,7 +248,7 @@ opus_int silk_encode_frame_FLP( if( found_lower && ( gainsID == gainsID_lower || nBits > maxBits ) ) { /* Restore output state from earlier iteration that did meet the bitrate budget */ silk_memcpy( psRangeEnc, &sRangeEnc_copy2, sizeof( ec_enc ) ); - silk_assert( sRangeEnc_copy2.offs <= 1275 ); + celt_assert( sRangeEnc_copy2.offs <= 1275 ); silk_memcpy( psRangeEnc->buf, ec_buf_copy, sRangeEnc_copy2.offs ); silk_memcpy( &psEnc->sCmn.sNSQ, &sNSQ_copy2, sizeof( silk_nsq_state ) ); psEnc->sShape.LastGainIndex = LastGainIndex_copy2; @@ -223,7 +259,9 @@ opus_int silk_encode_frame_FLP( if( nBits > maxBits ) { if( found_lower == 0 && iter >= 2 ) { /* Adjust the quantizer's rate/distortion tradeoff and discard previous "upper" results */ - sEncCtrl.Lambda *= 1.5f; + sEncCtrl.Lambda = silk_max_float(sEncCtrl.Lambda*1.5f, 1.5f); + /* Reducing dithering can help us hit the target. 
*/ + psEnc->sCmn.indices.quantOffsetType = 0; found_upper = 0; gainsID_upper = -1; } else { @@ -240,7 +278,7 @@ opus_int silk_encode_frame_FLP( gainsID_lower = gainsID; /* Copy part of the output state */ silk_memcpy( &sRangeEnc_copy2, psRangeEnc, sizeof( ec_enc ) ); - silk_assert( psRangeEnc->offs <= 1275 ); + celt_assert( psRangeEnc->offs <= 1275 ); silk_memcpy( ec_buf_copy, psRangeEnc->buf, psRangeEnc->offs ); silk_memcpy( &sNSQ_copy2, &psEnc->sCmn.sNSQ, sizeof( silk_nsq_state ) ); LastGainIndex_copy2 = psEnc->sShape.LastGainIndex; @@ -250,15 +288,34 @@ opus_int silk_encode_frame_FLP( break; } + if ( !found_lower && nBits > maxBits ) { + int j; + for ( i = 0; i < psEnc->sCmn.nb_subfr; i++ ) { + int sum=0; + for ( j = i*psEnc->sCmn.subfr_length; j < (i+1)*psEnc->sCmn.subfr_length; j++ ) { + sum += abs( psEnc->sCmn.pulses[j] ); + } + if ( iter == 0 || (sum < best_sum[i] && !gain_lock[i]) ) { + best_sum[i] = sum; + best_gain_mult[i] = gainMult_Q8; + } else { + gain_lock[i] = 1; + } + } + } if( ( found_lower & found_upper ) == 0 ) { /* Adjust gain according to high-rate rate/distortion curve */ - opus_int32 gain_factor_Q16; - gain_factor_Q16 = silk_log2lin( silk_LSHIFT( nBits - maxBits, 7 ) / psEnc->sCmn.frame_length + SILK_FIX_CONST( 16, 7 ) ); - gain_factor_Q16 = silk_min_32( gain_factor_Q16, SILK_FIX_CONST( 2, 16 ) ); if( nBits > maxBits ) { - gain_factor_Q16 = silk_max_32( gain_factor_Q16, SILK_FIX_CONST( 1.3, 16 ) ); + if (gainMult_Q8 < 16384) { + gainMult_Q8 *= 2; + } else { + gainMult_Q8 = 32767; + } + } else { + opus_int32 gain_factor_Q16; + gain_factor_Q16 = silk_log2lin( silk_LSHIFT( nBits - maxBits, 7 ) / psEnc->sCmn.frame_length + SILK_FIX_CONST( 16, 7 ) ); + gainMult_Q8 = silk_SMULWB( gain_factor_Q16, gainMult_Q8 ); } - gainMult_Q8 = silk_SMULWB( gain_factor_Q16, gainMult_Q8 ); } else { /* Adjust gain by interpolating */ gainMult_Q8 = gainMult_lower + ( ( gainMult_upper - gainMult_lower ) * ( maxBits - nBits_lower ) ) / ( nBits_upper - nBits_lower ); @@ -272,7 +329,13 @@ opus_int silk_encode_frame_FLP( } for( i = 0; i < psEnc->sCmn.nb_subfr; i++ ) { - pGains_Q16[ i ] = silk_LSHIFT_SAT32( silk_SMULWB( sEncCtrl.GainsUnq_Q16[ i ], gainMult_Q8 ), 8 ); + opus_int16 tmp; + if ( gain_lock[i] ) { + tmp = best_gain_mult[i]; + } else { + tmp = gainMult_Q8; + } + pGains_Q16[ i ] = silk_LSHIFT_SAT32( silk_SMULWB( sEncCtrl.GainsUnq_Q16[ i ], tmp ), 8 ); } /* Quantize gains */ diff --git a/libs/libopus/silk/float/energy_FLP.c b/libs/libopus/silk/float/energy_FLP.c index 24b8179f9..7bc7173c9 100644 --- a/libs/libopus/silk/float/energy_FLP.c +++ b/libs/libopus/silk/float/energy_FLP.c @@ -37,13 +37,12 @@ double silk_energy_FLP( opus_int dataSize ) { - opus_int i, dataSize4; + opus_int i; double result; /* 4x unrolled loop */ result = 0.0; - dataSize4 = dataSize & 0xFFFC; - for( i = 0; i < dataSize4; i += 4 ) { + for( i = 0; i < dataSize - 3; i += 4 ) { result += data[ i + 0 ] * (double)data[ i + 0 ] + data[ i + 1 ] * (double)data[ i + 1 ] + data[ i + 2 ] * (double)data[ i + 2 ] + diff --git a/libs/libopus/silk/float/find_LPC_FLP.c b/libs/libopus/silk/float/find_LPC_FLP.c index fcfe1c368..fa3ffe7f8 100644 --- a/libs/libopus/silk/float/find_LPC_FLP.c +++ b/libs/libopus/silk/float/find_LPC_FLP.c @@ -73,7 +73,7 @@ void silk_find_LPC_FLP( silk_interpolate( NLSF0_Q15, psEncC->prev_NLSFq_Q15, NLSF_Q15, k, psEncC->predictLPCOrder ); /* Convert to LPC for residual energy evaluation */ - silk_NLSF2A_FLP( a_tmp, NLSF0_Q15, psEncC->predictLPCOrder ); + silk_NLSF2A_FLP( a_tmp, NLSF0_Q15, 
psEncC->predictLPCOrder, psEncC->arch ); /* Calculate residual energy with LSF interpolation */ silk_LPC_analysis_filter_FLP( LPC_res, a_tmp, x, 2 * subfr_length, psEncC->predictLPCOrder ); @@ -99,6 +99,6 @@ void silk_find_LPC_FLP( silk_A2NLSF_FLP( NLSF_Q15, a, psEncC->predictLPCOrder ); } - silk_assert( psEncC->indices.NLSFInterpCoef_Q2 == 4 || + celt_assert( psEncC->indices.NLSFInterpCoef_Q2 == 4 || ( psEncC->useInterpolatedNLSFs && !psEncC->first_frame_after_reset && psEncC->nb_subfr == MAX_NB_SUBFR ) ); } diff --git a/libs/libopus/silk/float/find_LTP_FLP.c b/libs/libopus/silk/float/find_LTP_FLP.c index 722999601..f97064930 100644 --- a/libs/libopus/silk/float/find_LTP_FLP.c +++ b/libs/libopus/silk/float/find_LTP_FLP.c @@ -33,100 +33,32 @@ POSSIBILITY OF SUCH DAMAGE. #include "tuning_parameters.h" void silk_find_LTP_FLP( - silk_float b[ MAX_NB_SUBFR * LTP_ORDER ], /* O LTP coefs */ - silk_float WLTP[ MAX_NB_SUBFR * LTP_ORDER * LTP_ORDER ], /* O Weight for LTP quantization */ - silk_float *LTPredCodGain, /* O LTP coding gain */ - const silk_float r_lpc[], /* I LPC residual */ - const opus_int lag[ MAX_NB_SUBFR ], /* I LTP lags */ - const silk_float Wght[ MAX_NB_SUBFR ], /* I Weights */ + silk_float XX[ MAX_NB_SUBFR * LTP_ORDER * LTP_ORDER ], /* O Weight for LTP quantization */ + silk_float xX[ MAX_NB_SUBFR * LTP_ORDER ], /* O Weight for LTP quantization */ + const silk_float r_ptr[], /* I LPC residual */ + const opus_int lag[ MAX_NB_SUBFR ], /* I LTP lags */ const opus_int subfr_length, /* I Subframe length */ - const opus_int nb_subfr, /* I number of subframes */ - const opus_int mem_offset /* I Number of samples in LTP memory */ + const opus_int nb_subfr /* I number of subframes */ ) { - opus_int i, k; - silk_float *b_ptr, temp, *WLTP_ptr; - silk_float LPC_res_nrg, LPC_LTP_res_nrg; - silk_float d[ MAX_NB_SUBFR ], m, g, delta_b[ LTP_ORDER ]; - silk_float w[ MAX_NB_SUBFR ], nrg[ MAX_NB_SUBFR ], regu; - silk_float Rr[ LTP_ORDER ], rr[ MAX_NB_SUBFR ]; - const silk_float *r_ptr, *lag_ptr; + opus_int k; + silk_float *xX_ptr, *XX_ptr; + const silk_float *lag_ptr; + silk_float xx, temp; - b_ptr = b; - WLTP_ptr = WLTP; - r_ptr = &r_lpc[ mem_offset ]; + xX_ptr = xX; + XX_ptr = XX; for( k = 0; k < nb_subfr; k++ ) { lag_ptr = r_ptr - ( lag[ k ] + LTP_ORDER / 2 ); + silk_corrMatrix_FLP( lag_ptr, subfr_length, LTP_ORDER, XX_ptr ); + silk_corrVector_FLP( lag_ptr, r_ptr, subfr_length, LTP_ORDER, xX_ptr ); + xx = ( silk_float )silk_energy_FLP( r_ptr, subfr_length + LTP_ORDER ); + temp = 1.0f / silk_max( xx, LTP_CORR_INV_MAX * 0.5f * ( XX_ptr[ 0 ] + XX_ptr[ 24 ] ) + 1.0f ); + silk_scale_vector_FLP( XX_ptr, temp, LTP_ORDER * LTP_ORDER ); + silk_scale_vector_FLP( xX_ptr, temp, LTP_ORDER ); - silk_corrMatrix_FLP( lag_ptr, subfr_length, LTP_ORDER, WLTP_ptr ); - silk_corrVector_FLP( lag_ptr, r_ptr, subfr_length, LTP_ORDER, Rr ); - - rr[ k ] = ( silk_float )silk_energy_FLP( r_ptr, subfr_length ); - regu = 1.0f + rr[ k ] + - matrix_ptr( WLTP_ptr, 0, 0, LTP_ORDER ) + - matrix_ptr( WLTP_ptr, LTP_ORDER-1, LTP_ORDER-1, LTP_ORDER ); - regu *= LTP_DAMPING / 3; - silk_regularize_correlations_FLP( WLTP_ptr, &rr[ k ], regu, LTP_ORDER ); - silk_solve_LDL_FLP( WLTP_ptr, LTP_ORDER, Rr, b_ptr ); - - /* Calculate residual energy */ - nrg[ k ] = silk_residual_energy_covar_FLP( b_ptr, WLTP_ptr, Rr, rr[ k ], LTP_ORDER ); - - temp = Wght[ k ] / ( nrg[ k ] * Wght[ k ] + 0.01f * subfr_length ); - silk_scale_vector_FLP( WLTP_ptr, temp, LTP_ORDER * LTP_ORDER ); - w[ k ] = matrix_ptr( WLTP_ptr, LTP_ORDER / 2, LTP_ORDER / 2, 
LTP_ORDER ); - - r_ptr += subfr_length; - b_ptr += LTP_ORDER; - WLTP_ptr += LTP_ORDER * LTP_ORDER; - } - - /* Compute LTP coding gain */ - if( LTPredCodGain != NULL ) { - LPC_LTP_res_nrg = 1e-6f; - LPC_res_nrg = 0.0f; - for( k = 0; k < nb_subfr; k++ ) { - LPC_res_nrg += rr[ k ] * Wght[ k ]; - LPC_LTP_res_nrg += nrg[ k ] * Wght[ k ]; - } - - silk_assert( LPC_LTP_res_nrg > 0 ); - *LTPredCodGain = 3.0f * silk_log2( LPC_res_nrg / LPC_LTP_res_nrg ); - } - - /* Smoothing */ - /* d = sum( B, 1 ); */ - b_ptr = b; - for( k = 0; k < nb_subfr; k++ ) { - d[ k ] = 0; - for( i = 0; i < LTP_ORDER; i++ ) { - d[ k ] += b_ptr[ i ]; - } - b_ptr += LTP_ORDER; - } - /* m = ( w * d' ) / ( sum( w ) + 1e-3 ); */ - temp = 1e-3f; - for( k = 0; k < nb_subfr; k++ ) { - temp += w[ k ]; - } - m = 0; - for( k = 0; k < nb_subfr; k++ ) { - m += d[ k ] * w[ k ]; - } - m = m / temp; - - b_ptr = b; - for( k = 0; k < nb_subfr; k++ ) { - g = LTP_SMOOTHING / ( LTP_SMOOTHING + w[ k ] ) * ( m - d[ k ] ); - temp = 0; - for( i = 0; i < LTP_ORDER; i++ ) { - delta_b[ i ] = silk_max_float( b_ptr[ i ], 0.1f ); - temp += delta_b[ i ]; - } - temp = g / temp; - for( i = 0; i < LTP_ORDER; i++ ) { - b_ptr[ i ] = b_ptr[ i ] + delta_b[ i ] * temp; - } - b_ptr += LTP_ORDER; + r_ptr += subfr_length; + XX_ptr += LTP_ORDER * LTP_ORDER; + xX_ptr += LTP_ORDER; } } diff --git a/libs/libopus/silk/float/find_pitch_lags_FLP.c b/libs/libopus/silk/float/find_pitch_lags_FLP.c index f3b22d25c..dedbcd283 100644 --- a/libs/libopus/silk/float/find_pitch_lags_FLP.c +++ b/libs/libopus/silk/float/find_pitch_lags_FLP.c @@ -56,7 +56,7 @@ void silk_find_pitch_lags_FLP( buf_len = psEnc->sCmn.la_pitch + psEnc->sCmn.frame_length + psEnc->sCmn.ltp_mem_length; /* Safety check */ - silk_assert( buf_len >= psEnc->sCmn.pitch_LPC_win_length ); + celt_assert( buf_len >= psEnc->sCmn.pitch_LPC_win_length ); x_buf = x - psEnc->sCmn.ltp_mem_length; diff --git a/libs/libopus/silk/float/find_pred_coefs_FLP.c b/libs/libopus/silk/float/find_pred_coefs_FLP.c index 1af4fe5f1..dcf7c5202 100644 --- a/libs/libopus/silk/float/find_pred_coefs_FLP.c +++ b/libs/libopus/silk/float/find_pred_coefs_FLP.c @@ -41,8 +41,9 @@ void silk_find_pred_coefs_FLP( ) { opus_int i; - silk_float WLTP[ MAX_NB_SUBFR * LTP_ORDER * LTP_ORDER ]; - silk_float invGains[ MAX_NB_SUBFR ], Wght[ MAX_NB_SUBFR ]; + silk_float XXLTP[ MAX_NB_SUBFR * LTP_ORDER * LTP_ORDER ]; + silk_float xXLTP[ MAX_NB_SUBFR * LTP_ORDER ]; + silk_float invGains[ MAX_NB_SUBFR ]; opus_int16 NLSF_Q15[ MAX_LPC_ORDER ]; const silk_float *x_ptr; silk_float *x_pre_ptr, LPC_in_pre[ MAX_NB_SUBFR * MAX_LPC_ORDER + MAX_FRAME_LENGTH ]; @@ -52,23 +53,20 @@ void silk_find_pred_coefs_FLP( for( i = 0; i < psEnc->sCmn.nb_subfr; i++ ) { silk_assert( psEncCtrl->Gains[ i ] > 0.0f ); invGains[ i ] = 1.0f / psEncCtrl->Gains[ i ]; - Wght[ i ] = invGains[ i ] * invGains[ i ]; } if( psEnc->sCmn.indices.signalType == TYPE_VOICED ) { /**********/ /* VOICED */ /**********/ - silk_assert( psEnc->sCmn.ltp_mem_length - psEnc->sCmn.predictLPCOrder >= psEncCtrl->pitchL[ 0 ] + LTP_ORDER / 2 ); + celt_assert( psEnc->sCmn.ltp_mem_length - psEnc->sCmn.predictLPCOrder >= psEncCtrl->pitchL[ 0 ] + LTP_ORDER / 2 ); /* LTP analysis */ - silk_find_LTP_FLP( psEncCtrl->LTPCoef, WLTP, &psEncCtrl->LTPredCodGain, res_pitch, - psEncCtrl->pitchL, Wght, psEnc->sCmn.subfr_length, psEnc->sCmn.nb_subfr, psEnc->sCmn.ltp_mem_length ); + silk_find_LTP_FLP( XXLTP, xXLTP, res_pitch, psEncCtrl->pitchL, psEnc->sCmn.subfr_length, psEnc->sCmn.nb_subfr ); /* Quantize LTP gain parameters */ 
silk_quant_LTP_gains_FLP( psEncCtrl->LTPCoef, psEnc->sCmn.indices.LTPIndex, &psEnc->sCmn.indices.PERIndex, - &psEnc->sCmn.sum_log_gain_Q7, WLTP, psEnc->sCmn.mu_LTP_Q9, psEnc->sCmn.LTPQuantLowComplexity, psEnc->sCmn.nb_subfr, - psEnc->sCmn.arch ); + &psEnc->sCmn.sum_log_gain_Q7, &psEncCtrl->LTPredCodGain, XXLTP, xXLTP, psEnc->sCmn.subfr_length, psEnc->sCmn.nb_subfr, psEnc->sCmn.arch ); /* Control LTP scaling */ silk_LTP_scale_ctrl_FLP( psEnc, psEncCtrl, condCoding ); diff --git a/libs/libopus/silk/float/inner_product_FLP.c b/libs/libopus/silk/float/inner_product_FLP.c index 029c01291..cdd39d24c 100644 --- a/libs/libopus/silk/float/inner_product_FLP.c +++ b/libs/libopus/silk/float/inner_product_FLP.c @@ -38,13 +38,12 @@ double silk_inner_product_FLP( opus_int dataSize ) { - opus_int i, dataSize4; + opus_int i; double result; /* 4x unrolled loop */ result = 0.0; - dataSize4 = dataSize & 0xFFFC; - for( i = 0; i < dataSize4; i += 4 ) { + for( i = 0; i < dataSize - 3; i += 4 ) { result += data1[ i + 0 ] * (double)data2[ i + 0 ] + data1[ i + 1 ] * (double)data2[ i + 1 ] + data1[ i + 2 ] * (double)data2[ i + 2 ] + diff --git a/libs/libopus/silk/float/k2a_FLP.c b/libs/libopus/silk/float/k2a_FLP.c index 12af4e766..1448008db 100644 --- a/libs/libopus/silk/float/k2a_FLP.c +++ b/libs/libopus/silk/float/k2a_FLP.c @@ -39,15 +39,16 @@ void silk_k2a_FLP( ) { opus_int k, n; - silk_float Atmp[ SILK_MAX_ORDER_LPC ]; + silk_float rck, tmp1, tmp2; for( k = 0; k < order; k++ ) { - for( n = 0; n < k; n++ ) { - Atmp[ n ] = A[ n ]; + rck = rc[ k ]; + for( n = 0; n < (k + 1) >> 1; n++ ) { + tmp1 = A[ n ]; + tmp2 = A[ k - n - 1 ]; + A[ n ] = tmp1 + tmp2 * rck; + A[ k - n - 1 ] = tmp2 + tmp1 * rck; } - for( n = 0; n < k; n++ ) { - A[ n ] += Atmp[ k - n - 1 ] * rc[ k ]; - } - A[ k ] = -rc[ k ]; + A[ k ] = -rck; } } diff --git a/libs/libopus/silk/float/levinsondurbin_FLP.c b/libs/libopus/silk/float/levinsondurbin_FLP.c deleted file mode 100644 index f0ba60698..000000000 --- a/libs/libopus/silk/float/levinsondurbin_FLP.c +++ /dev/null @@ -1,81 +0,0 @@ -/*********************************************************************** -Copyright (c) 2006-2011, Skype Limited. All rights reserved. -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions -are met: -- Redistributions of source code must retain the above copyright notice, -this list of conditions and the following disclaimer. -- Redistributions in binary form must reproduce the above copyright -notice, this list of conditions and the following disclaimer in the -documentation and/or other materials provided with the distribution. -- Neither the name of Internet Society, IETF or IETF Trust, nor the -names of specific contributors, may be used to endorse or promote -products derived from this software without specific prior written -permission. -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE -LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -POSSIBILITY OF SUCH DAMAGE. -***********************************************************************/ - -#ifdef HAVE_CONFIG_H -#include "config.h" -#endif - -#include "SigProc_FLP.h" - -/* Solve the normal equations using the Levinson-Durbin recursion */ -silk_float silk_levinsondurbin_FLP( /* O prediction error energy */ - silk_float A[], /* O prediction coefficients [order] */ - const silk_float corr[], /* I input auto-correlations [order + 1] */ - const opus_int order /* I prediction order */ -) -{ - opus_int i, mHalf, m; - silk_float min_nrg, nrg, t, km, Atmp1, Atmp2; - - min_nrg = 1e-12f * corr[ 0 ] + 1e-9f; - nrg = corr[ 0 ]; - nrg = silk_max_float(min_nrg, nrg); - A[ 0 ] = corr[ 1 ] / nrg; - nrg -= A[ 0 ] * corr[ 1 ]; - nrg = silk_max_float(min_nrg, nrg); - - for( m = 1; m < order; m++ ) - { - t = corr[ m + 1 ]; - for( i = 0; i < m; i++ ) { - t -= A[ i ] * corr[ m - i ]; - } - - /* reflection coefficient */ - km = t / nrg; - - /* residual energy */ - nrg -= km * t; - nrg = silk_max_float(min_nrg, nrg); - - mHalf = m >> 1; - for( i = 0; i < mHalf; i++ ) { - Atmp1 = A[ i ]; - Atmp2 = A[ m - i - 1 ]; - A[ m - i - 1 ] -= km * Atmp1; - A[ i ] -= km * Atmp2; - } - if( m & 1 ) { - A[ mHalf ] -= km * A[ mHalf ]; - } - A[ m ] = km; - } - - /* return the residual energy */ - return nrg; -} - diff --git a/libs/libopus/silk/float/main_FLP.h b/libs/libopus/silk/float/main_FLP.h index e5a75972e..5dc0ccf4a 100644 --- a/libs/libopus/silk/float/main_FLP.h +++ b/libs/libopus/silk/float/main_FLP.h @@ -56,7 +56,8 @@ void silk_HP_variable_cutoff( /* Encoder main function */ void silk_encode_do_VAD_FLP( - silk_encoder_state_FLP *psEnc /* I/O Encoder state FLP */ + silk_encoder_state_FLP *psEnc, /* I/O Encoder state FLP */ + opus_int activity /* I Decision of Opus voice activity detector */ ); /* Encoder main function */ @@ -79,22 +80,11 @@ opus_int silk_init_encoder( opus_int silk_control_encoder( silk_encoder_state_FLP *psEnc, /* I/O Pointer to Silk encoder state FLP */ silk_EncControlStruct *encControl, /* I Control structure */ - const opus_int32 TargetRate_bps, /* I Target max bitrate (bps) */ const opus_int allow_bw_switch, /* I Flag to allow switching audio bandwidth */ const opus_int channelNb, /* I Channel number */ const opus_int force_fs_kHz ); -/****************/ -/* Prefiltering */ -/****************/ -void silk_prefilter_FLP( - silk_encoder_state_FLP *psEnc, /* I/O Encoder state FLP */ - const silk_encoder_control_FLP *psEncCtrl, /* I Encoder control FLP */ - silk_float xw[], /* O Weighted signal */ - const silk_float x[] /* I Speech signal */ -); - /**************************/ /* Noise shaping analysis */ /**************************/ @@ -153,15 +143,12 @@ void silk_find_LPC_FLP( /* LTP analysis */ void silk_find_LTP_FLP( - silk_float b[ MAX_NB_SUBFR * LTP_ORDER ], /* O LTP coefs */ - silk_float WLTP[ MAX_NB_SUBFR * LTP_ORDER * LTP_ORDER ], /* O Weight for LTP quantization */ - silk_float *LTPredCodGain, /* O LTP coding gain */ - const silk_float r_lpc[], /* I LPC residual */ + silk_float XX[ MAX_NB_SUBFR * 
LTP_ORDER * LTP_ORDER ], /* O Weight for LTP quantization */ + silk_float xX[ MAX_NB_SUBFR * LTP_ORDER ], /* O Weight for LTP quantization */ + const silk_float r_ptr[], /* I LPC residual */ const opus_int lag[ MAX_NB_SUBFR ], /* I LTP lags */ - const silk_float Wght[ MAX_NB_SUBFR ], /* I Weights */ const opus_int subfr_length, /* I Subframe length */ - const opus_int nb_subfr, /* I number of subframes */ - const opus_int mem_offset /* I Number of samples in LTP memory */ + const opus_int nb_subfr /* I number of subframes */ ); void silk_LTP_analysis_filter_FLP( @@ -198,14 +185,15 @@ void silk_LPC_analysis_filter_FLP( /* LTP tap quantizer */ void silk_quant_LTP_gains_FLP( - silk_float B[ MAX_NB_SUBFR * LTP_ORDER ], /* I/O (Un-)quantized LTP gains */ + silk_float B[ MAX_NB_SUBFR * LTP_ORDER ], /* O Quantized LTP gains */ opus_int8 cbk_index[ MAX_NB_SUBFR ], /* O Codebook index */ opus_int8 *periodicity_index, /* O Periodicity index */ opus_int32 *sum_log_gain_Q7, /* I/O Cumulative max prediction gain */ - const silk_float W[ MAX_NB_SUBFR * LTP_ORDER * LTP_ORDER ], /* I Error weights */ - const opus_int mu_Q10, /* I Mu value (R/D tradeoff) */ - const opus_int lowComplexity, /* I Flag for low complexity */ - const opus_int nb_subfr, /* I number of subframes */ + silk_float *pred_gain_dB, /* O LTP prediction gain */ + const silk_float XX[ MAX_NB_SUBFR * LTP_ORDER * LTP_ORDER ], /* I Correlation matrix */ + const silk_float xX[ MAX_NB_SUBFR * LTP_ORDER ], /* I Correlation vector */ + const opus_int subfr_len, /* I Number of samples per subframe */ + const opus_int nb_subfr, /* I Number of subframes */ int arch /* I Run-time architecture */ ); @@ -245,22 +233,6 @@ void silk_corrVector_FLP( silk_float *Xt /* O X'*t correlation vector [order] */ ); -/* Add noise to matrix diagonal */ -void silk_regularize_correlations_FLP( - silk_float *XX, /* I/O Correlation matrices */ - silk_float *xx, /* I/O Correlation values */ - const silk_float noise, /* I Noise energy to add */ - const opus_int D /* I Dimension of XX */ -); - -/* Function to solve linear equation Ax = b, where A is an MxM symmetric matrix */ -void silk_solve_LDL_FLP( - silk_float *A, /* I/O Symmetric square matrix, out: reg. */ - const opus_int M, /* I Size of matrix */ - const silk_float *b, /* I Pointer to b vector */ - silk_float *x /* O Pointer to x solution vector */ -); - /* Apply sine window to signal vector. 
*/ /* Window types: */ /* 1 -> sine window from 0 to pi/2 */ @@ -285,7 +257,8 @@ void silk_A2NLSF_FLP( void silk_NLSF2A_FLP( silk_float *pAR, /* O LPC coefficients [ LPC_order ] */ const opus_int16 *NLSF_Q15, /* I NLSF vector [ LPC_order ] */ - const opus_int LPC_order /* I LPC order */ + const opus_int LPC_order, /* I LPC order */ + int arch /* I Run-time architecture */ ); /* Limit, stabilize, and quantize NLSFs */ diff --git a/libs/libopus/silk/float/noise_shape_analysis_FLP.c b/libs/libopus/silk/float/noise_shape_analysis_FLP.c index 65f6ea587..cb3d8a50b 100644 --- a/libs/libopus/silk/float/noise_shape_analysis_FLP.c +++ b/libs/libopus/silk/float/noise_shape_analysis_FLP.c @@ -55,25 +55,21 @@ static OPUS_INLINE silk_float warped_gain( /* Convert warped filter coefficients to monic pseudo-warped coefficients and limit maximum */ /* amplitude of monic warped coefficients by using bandwidth expansion on the true coefficients */ static OPUS_INLINE void warped_true2monic_coefs( - silk_float *coefs_syn, - silk_float *coefs_ana, + silk_float *coefs, silk_float lambda, silk_float limit, opus_int order ) { opus_int i, iter, ind = 0; - silk_float tmp, maxabs, chirp, gain_syn, gain_ana; + silk_float tmp, maxabs, chirp, gain; /* Convert to monic coefficients */ for( i = order - 1; i > 0; i-- ) { - coefs_syn[ i - 1 ] -= lambda * coefs_syn[ i ]; - coefs_ana[ i - 1 ] -= lambda * coefs_ana[ i ]; + coefs[ i - 1 ] -= lambda * coefs[ i ]; } - gain_syn = ( 1.0f - lambda * lambda ) / ( 1.0f + lambda * coefs_syn[ 0 ] ); - gain_ana = ( 1.0f - lambda * lambda ) / ( 1.0f + lambda * coefs_ana[ 0 ] ); + gain = ( 1.0f - lambda * lambda ) / ( 1.0f + lambda * coefs[ 0 ] ); for( i = 0; i < order; i++ ) { - coefs_syn[ i ] *= gain_syn; - coefs_ana[ i ] *= gain_ana; + coefs[ i ] *= gain; } /* Limit */ @@ -81,7 +77,7 @@ static OPUS_INLINE void warped_true2monic_coefs( /* Find maximum absolute value */ maxabs = -1.0f; for( i = 0; i < order; i++ ) { - tmp = silk_max( silk_abs_float( coefs_syn[ i ] ), silk_abs_float( coefs_ana[ i ] ) ); + tmp = silk_abs_float( coefs[ i ] ); if( tmp > maxabs ) { maxabs = tmp; ind = i; @@ -94,36 +90,59 @@ static OPUS_INLINE void warped_true2monic_coefs( /* Convert back to true warped coefficients */ for( i = 1; i < order; i++ ) { - coefs_syn[ i - 1 ] += lambda * coefs_syn[ i ]; - coefs_ana[ i - 1 ] += lambda * coefs_ana[ i ]; + coefs[ i - 1 ] += lambda * coefs[ i ]; } - gain_syn = 1.0f / gain_syn; - gain_ana = 1.0f / gain_ana; + gain = 1.0f / gain; for( i = 0; i < order; i++ ) { - coefs_syn[ i ] *= gain_syn; - coefs_ana[ i ] *= gain_ana; + coefs[ i ] *= gain; } /* Apply bandwidth expansion */ chirp = 0.99f - ( 0.8f + 0.1f * iter ) * ( maxabs - limit ) / ( maxabs * ( ind + 1 ) ); - silk_bwexpander_FLP( coefs_syn, order, chirp ); - silk_bwexpander_FLP( coefs_ana, order, chirp ); + silk_bwexpander_FLP( coefs, order, chirp ); /* Convert to monic warped coefficients */ for( i = order - 1; i > 0; i-- ) { - coefs_syn[ i - 1 ] -= lambda * coefs_syn[ i ]; - coefs_ana[ i - 1 ] -= lambda * coefs_ana[ i ]; + coefs[ i - 1 ] -= lambda * coefs[ i ]; } - gain_syn = ( 1.0f - lambda * lambda ) / ( 1.0f + lambda * coefs_syn[ 0 ] ); - gain_ana = ( 1.0f - lambda * lambda ) / ( 1.0f + lambda * coefs_ana[ 0 ] ); + gain = ( 1.0f - lambda * lambda ) / ( 1.0f + lambda * coefs[ 0 ] ); for( i = 0; i < order; i++ ) { - coefs_syn[ i ] *= gain_syn; - coefs_ana[ i ] *= gain_ana; + coefs[ i ] *= gain; } } silk_assert( 0 ); } +static OPUS_INLINE void limit_coefs( + silk_float *coefs, + silk_float limit, + opus_int order +) { 
+ opus_int i, iter, ind = 0; + silk_float tmp, maxabs, chirp; + + for( iter = 0; iter < 10; iter++ ) { + /* Find maximum absolute value */ + maxabs = -1.0f; + for( i = 0; i < order; i++ ) { + tmp = silk_abs_float( coefs[ i ] ); + if( tmp > maxabs ) { + maxabs = tmp; + ind = i; + } + } + if( maxabs <= limit ) { + /* Coefficients are within range - done */ + return; + } + + /* Apply bandwidth expansion */ + chirp = 0.99f - ( 0.8f + 0.1f * iter ) * ( maxabs - limit ) / ( maxabs * ( ind + 1 ) ); + silk_bwexpander_FLP( coefs, order, chirp ); + } + silk_assert( 0 ); +} + /* Compute noise shaping coefficients and initial gain values */ void silk_noise_shape_analysis_FLP( silk_encoder_state_FLP *psEnc, /* I/O Encoder state FLP */ @@ -133,12 +152,13 @@ void silk_noise_shape_analysis_FLP( ) { silk_shape_state_FLP *psShapeSt = &psEnc->sShape; - opus_int k, nSamples; - silk_float SNR_adj_dB, HarmBoost, HarmShapeGain, Tilt; - silk_float nrg, pre_nrg, log_energy, log_energy_prev, energy_variation; - silk_float delta, BWExp1, BWExp2, gain_mult, gain_add, strength, b, warping; + opus_int k, nSamples, nSegs; + silk_float SNR_adj_dB, HarmShapeGain, Tilt; + silk_float nrg, log_energy, log_energy_prev, energy_variation; + silk_float BWExp, gain_mult, gain_add, strength, b, warping; silk_float x_windowed[ SHAPE_LPC_WIN_MAX ]; silk_float auto_corr[ MAX_SHAPE_LPC_ORDER + 1 ]; + silk_float rc[ MAX_SHAPE_LPC_ORDER + 1 ]; const silk_float *x_ptr, *pitch_res_ptr; /* Point to start of first LPC analysis block */ @@ -176,14 +196,14 @@ void silk_noise_shape_analysis_FLP( if( psEnc->sCmn.indices.signalType == TYPE_VOICED ) { /* Initially set to 0; may be overruled in process_gains(..) */ psEnc->sCmn.indices.quantOffsetType = 0; - psEncCtrl->sparseness = 0.0f; } else { /* Sparseness measure, based on relative fluctuations of energy per 2 milliseconds */ nSamples = 2 * psEnc->sCmn.fs_kHz; energy_variation = 0.0f; log_energy_prev = 0.0f; pitch_res_ptr = pitch_res; - for( k = 0; k < silk_SMULBB( SUB_FRAME_LENGTH_MS, psEnc->sCmn.nb_subfr ) / 2; k++ ) { + nSegs = silk_SMULBB( SUB_FRAME_LENGTH_MS, psEnc->sCmn.nb_subfr ) / 2; + for( k = 0; k < nSegs; k++ ) { nrg = ( silk_float )nSamples + ( silk_float )silk_energy_FLP( pitch_res_ptr, nSamples ); log_energy = silk_log2( nrg ); if( k > 0 ) { @@ -192,17 +212,13 @@ void silk_noise_shape_analysis_FLP( log_energy_prev = log_energy; pitch_res_ptr += nSamples; } - psEncCtrl->sparseness = silk_sigmoid( 0.4f * ( energy_variation - 5.0f ) ); /* Set quantization offset depending on sparseness measure */ - if( psEncCtrl->sparseness > SPARSENESS_THRESHOLD_QNT_OFFSET ) { + if( energy_variation > ENERGY_VARIATION_THRESHOLD_QNT_OFFSET * (nSegs-1) ) { psEnc->sCmn.indices.quantOffsetType = 0; } else { psEnc->sCmn.indices.quantOffsetType = 1; } - - /* Increase coding SNR for sparse signals */ - SNR_adj_dB += SPARSE_SNR_INCR_dB * ( psEncCtrl->sparseness - 0.5f ); } /*******************************/ @@ -210,19 +226,10 @@ void silk_noise_shape_analysis_FLP( /*******************************/ /* More BWE for signals with high prediction gain */ strength = FIND_PITCH_WHITE_NOISE_FRACTION * psEncCtrl->predGain; /* between 0.0 and 1.0 */ - BWExp1 = BWExp2 = BANDWIDTH_EXPANSION / ( 1.0f + strength * strength ); - delta = LOW_RATE_BANDWIDTH_EXPANSION_DELTA * ( 1.0f - 0.75f * psEncCtrl->coding_quality ); - BWExp1 -= delta; - BWExp2 += delta; - /* BWExp1 will be applied after BWExp2, so make it relative */ - BWExp1 /= BWExp2; - - if( psEnc->sCmn.warping_Q16 > 0 ) { - /* Slightly more warping in analysis will 
move quantization noise up in frequency, where it's better masked */ - warping = (silk_float)psEnc->sCmn.warping_Q16 / 65536.0f + 0.01f * psEncCtrl->coding_quality; - } else { - warping = 0.0f; - } + BWExp = BANDWIDTH_EXPANSION / ( 1.0f + strength * strength ); + + /* Slightly more warping in analysis will move quantization noise up in frequency, where it's better masked */ + warping = (silk_float)psEnc->sCmn.warping_Q16 / 65536.0f + 0.01f * psEncCtrl->coding_quality; /********************************************/ /* Compute noise shaping AR coefs and gains */ @@ -252,37 +259,28 @@ void silk_noise_shape_analysis_FLP( } /* Add white noise, as a fraction of energy */ - auto_corr[ 0 ] += auto_corr[ 0 ] * SHAPE_WHITE_NOISE_FRACTION; + auto_corr[ 0 ] += auto_corr[ 0 ] * SHAPE_WHITE_NOISE_FRACTION + 1.0f; /* Convert correlations to prediction coefficients, and compute residual energy */ - nrg = silk_levinsondurbin_FLP( &psEncCtrl->AR2[ k * MAX_SHAPE_LPC_ORDER ], auto_corr, psEnc->sCmn.shapingLPCOrder ); + nrg = silk_schur_FLP( rc, auto_corr, psEnc->sCmn.shapingLPCOrder ); + silk_k2a_FLP( &psEncCtrl->AR[ k * MAX_SHAPE_LPC_ORDER ], rc, psEnc->sCmn.shapingLPCOrder ); psEncCtrl->Gains[ k ] = ( silk_float )sqrt( nrg ); if( psEnc->sCmn.warping_Q16 > 0 ) { /* Adjust gain for warping */ - psEncCtrl->Gains[ k ] *= warped_gain( &psEncCtrl->AR2[ k * MAX_SHAPE_LPC_ORDER ], warping, psEnc->sCmn.shapingLPCOrder ); + psEncCtrl->Gains[ k ] *= warped_gain( &psEncCtrl->AR[ k * MAX_SHAPE_LPC_ORDER ], warping, psEnc->sCmn.shapingLPCOrder ); } /* Bandwidth expansion for synthesis filter shaping */ - silk_bwexpander_FLP( &psEncCtrl->AR2[ k * MAX_SHAPE_LPC_ORDER ], psEnc->sCmn.shapingLPCOrder, BWExp2 ); - - /* Compute noise shaping filter coefficients */ - silk_memcpy( - &psEncCtrl->AR1[ k * MAX_SHAPE_LPC_ORDER ], - &psEncCtrl->AR2[ k * MAX_SHAPE_LPC_ORDER ], - psEnc->sCmn.shapingLPCOrder * sizeof( silk_float ) ); + silk_bwexpander_FLP( &psEncCtrl->AR[ k * MAX_SHAPE_LPC_ORDER ], psEnc->sCmn.shapingLPCOrder, BWExp ); - /* Bandwidth expansion for analysis filter shaping */ - silk_bwexpander_FLP( &psEncCtrl->AR1[ k * MAX_SHAPE_LPC_ORDER ], psEnc->sCmn.shapingLPCOrder, BWExp1 ); - - /* Ratio of prediction gains, in energy domain */ - pre_nrg = silk_LPC_inverse_pred_gain_FLP( &psEncCtrl->AR2[ k * MAX_SHAPE_LPC_ORDER ], psEnc->sCmn.shapingLPCOrder ); - nrg = silk_LPC_inverse_pred_gain_FLP( &psEncCtrl->AR1[ k * MAX_SHAPE_LPC_ORDER ], psEnc->sCmn.shapingLPCOrder ); - psEncCtrl->GainsPre[ k ] = 1.0f - 0.7f * ( 1.0f - pre_nrg / nrg ); - - /* Convert to monic warped prediction coefficients and limit absolute values */ - warped_true2monic_coefs( &psEncCtrl->AR2[ k * MAX_SHAPE_LPC_ORDER ], &psEncCtrl->AR1[ k * MAX_SHAPE_LPC_ORDER ], - warping, 3.999f, psEnc->sCmn.shapingLPCOrder ); + if( psEnc->sCmn.warping_Q16 > 0 ) { + /* Convert to monic warped prediction coefficients and limit absolute values */ + warped_true2monic_coefs( &psEncCtrl->AR[ k * MAX_SHAPE_LPC_ORDER ], warping, 3.999f, psEnc->sCmn.shapingLPCOrder ); + } else { + /* Limit absolute values */ + limit_coefs( &psEncCtrl->AR[ k * MAX_SHAPE_LPC_ORDER ], 3.999f, psEnc->sCmn.shapingLPCOrder ); + } } /*****************/ @@ -296,11 +294,6 @@ void silk_noise_shape_analysis_FLP( psEncCtrl->Gains[ k ] += gain_add; } - gain_mult = 1.0f + INPUT_TILT + psEncCtrl->coding_quality * HIGH_RATE_INPUT_TILT; - for( k = 0; k < psEnc->sCmn.nb_subfr; k++ ) { - psEncCtrl->GainsPre[ k ] *= gain_mult; - } - /************************************************/ /* Control low-frequency shaping and 
noise tilt */ /************************************************/ @@ -331,12 +324,6 @@ void silk_noise_shape_analysis_FLP( /****************************/ /* HARMONIC SHAPING CONTROL */ /****************************/ - /* Control boosting of harmonic frequencies */ - HarmBoost = LOW_RATE_HARMONIC_BOOST * ( 1.0f - psEncCtrl->coding_quality ) * psEnc->LTPCorr; - - /* More harmonic boost for noisy input signals */ - HarmBoost += LOW_INPUT_QUALITY_HARMONIC_BOOST * ( 1.0f - psEncCtrl->input_quality ); - if( USE_HARM_SHAPING && psEnc->sCmn.indices.signalType == TYPE_VOICED ) { /* Harmonic noise shaping */ HarmShapeGain = HARMONIC_SHAPING; @@ -355,8 +342,6 @@ void silk_noise_shape_analysis_FLP( /* Smooth over subframes */ /*************************/ for( k = 0; k < psEnc->sCmn.nb_subfr; k++ ) { - psShapeSt->HarmBoost_smth += SUBFR_SMTH_COEF * ( HarmBoost - psShapeSt->HarmBoost_smth ); - psEncCtrl->HarmBoost[ k ] = psShapeSt->HarmBoost_smth; psShapeSt->HarmShapeGain_smth += SUBFR_SMTH_COEF * ( HarmShapeGain - psShapeSt->HarmShapeGain_smth ); psEncCtrl->HarmShapeGain[ k ] = psShapeSt->HarmShapeGain_smth; psShapeSt->Tilt_smth += SUBFR_SMTH_COEF * ( Tilt - psShapeSt->Tilt_smth ); diff --git a/libs/libopus/silk/float/pitch_analysis_core_FLP.c b/libs/libopus/silk/float/pitch_analysis_core_FLP.c index d0e637a29..f351bc371 100644 --- a/libs/libopus/silk/float/pitch_analysis_core_FLP.c +++ b/libs/libopus/silk/float/pitch_analysis_core_FLP.c @@ -109,11 +109,11 @@ opus_int silk_pitch_analysis_core_FLP( /* O Voicing estimate: 0 voiced, const opus_int8 *Lag_CB_ptr; /* Check for valid sampling frequency */ - silk_assert( Fs_kHz == 8 || Fs_kHz == 12 || Fs_kHz == 16 ); + celt_assert( Fs_kHz == 8 || Fs_kHz == 12 || Fs_kHz == 16 ); /* Check for valid complexity setting */ - silk_assert( complexity >= SILK_PE_MIN_COMPLEX ); - silk_assert( complexity <= SILK_PE_MAX_COMPLEX ); + celt_assert( complexity >= SILK_PE_MIN_COMPLEX ); + celt_assert( complexity <= SILK_PE_MAX_COMPLEX ); silk_assert( search_thres1 >= 0.0f && search_thres1 <= 1.0f ); silk_assert( search_thres2 >= 0.0f && search_thres2 <= 1.0f ); @@ -148,7 +148,7 @@ opus_int silk_pitch_analysis_core_FLP( /* O Voicing estimate: 0 voiced, silk_resampler_down2_3( filt_state, frame_8_FIX, frame_12_FIX, frame_length ); silk_short2float_array( frame_8kHz, frame_8_FIX, frame_length_8kHz ); } else { - silk_assert( Fs_kHz == 8 ); + celt_assert( Fs_kHz == 8 ); silk_float2short_array( frame_8_FIX, frame, frame_length_8kHz ); } @@ -159,7 +159,7 @@ opus_int silk_pitch_analysis_core_FLP( /* O Voicing estimate: 0 voiced, /* Low-pass filter */ for( i = frame_length_4kHz - 1; i > 0; i-- ) { - frame_4kHz[ i ] += frame_4kHz[ i - 1 ]; + frame_4kHz[ i ] = silk_ADD_SAT16( frame_4kHz[ i ], frame_4kHz[ i - 1 ] ); } /****************************************************************************** @@ -169,14 +169,14 @@ opus_int silk_pitch_analysis_core_FLP( /* O Voicing estimate: 0 voiced, target_ptr = &frame_4kHz[ silk_LSHIFT( sf_length_4kHz, 2 ) ]; for( k = 0; k < nb_subfr >> 1; k++ ) { /* Check that we are within range of the array */ - silk_assert( target_ptr >= frame_4kHz ); - silk_assert( target_ptr + sf_length_8kHz <= frame_4kHz + frame_length_4kHz ); + celt_assert( target_ptr >= frame_4kHz ); + celt_assert( target_ptr + sf_length_8kHz <= frame_4kHz + frame_length_4kHz ); basis_ptr = target_ptr - min_lag_4kHz; /* Check that we are within range of the array */ - silk_assert( basis_ptr >= frame_4kHz ); - silk_assert( basis_ptr + sf_length_8kHz <= frame_4kHz + frame_length_4kHz ); + 
celt_assert( basis_ptr >= frame_4kHz ); + celt_assert( basis_ptr + sf_length_8kHz <= frame_4kHz + frame_length_4kHz ); celt_pitch_xcorr( target_ptr, target_ptr-max_lag_4kHz, xcorr, sf_length_8kHz, max_lag_4kHz - min_lag_4kHz + 1, arch ); @@ -215,7 +215,7 @@ opus_int silk_pitch_analysis_core_FLP( /* O Voicing estimate: 0 voiced, /* Sort */ length_d_srch = 4 + 2 * complexity; - silk_assert( 3 * length_d_srch <= PE_D_SRCH_LENGTH ); + celt_assert( 3 * length_d_srch <= PE_D_SRCH_LENGTH ); silk_insertion_sort_decreasing_FLP( &C[ 0 ][ min_lag_4kHz ], d_srch, max_lag_4kHz - min_lag_4kHz + 1, length_d_srch ); /* Escape if correlation is very low already here */ @@ -238,7 +238,7 @@ opus_int silk_pitch_analysis_core_FLP( /* O Voicing estimate: 0 voiced, break; } } - silk_assert( length_d_srch > 0 ); + celt_assert( length_d_srch > 0 ); for( i = min_lag_8kHz - 5; i < max_lag_8kHz + 5; i++ ) { d_comp[ i ] = 0; @@ -471,7 +471,7 @@ opus_int silk_pitch_analysis_core_FLP( /* O Voicing estimate: 0 voiced, *lagIndex = (opus_int16)( lag - min_lag_8kHz ); *contourIndex = (opus_int8)CBimax; } - silk_assert( *lagIndex >= 0 ); + celt_assert( *lagIndex >= 0 ); /* return as voiced */ return 0; } @@ -506,8 +506,8 @@ static void silk_P_Ana_calc_corr_st3( opus_val32 xcorr[ SCRATCH_SIZE ]; const opus_int8 *Lag_range_ptr, *Lag_CB_ptr; - silk_assert( complexity >= SILK_PE_MIN_COMPLEX ); - silk_assert( complexity <= SILK_PE_MAX_COMPLEX ); + celt_assert( complexity >= SILK_PE_MIN_COMPLEX ); + celt_assert( complexity <= SILK_PE_MAX_COMPLEX ); if( nb_subfr == PE_MAX_NB_SUBFR ) { Lag_range_ptr = &silk_Lag_range_stage3[ complexity ][ 0 ][ 0 ]; @@ -515,7 +515,7 @@ static void silk_P_Ana_calc_corr_st3( nb_cbk_search = silk_nb_cbk_searchs_stage3[ complexity ]; cbk_size = PE_NB_CBKS_STAGE3_MAX; } else { - silk_assert( nb_subfr == PE_MAX_NB_SUBFR >> 1); + celt_assert( nb_subfr == PE_MAX_NB_SUBFR >> 1); Lag_range_ptr = &silk_Lag_range_stage3_10_ms[ 0 ][ 0 ]; Lag_CB_ptr = &silk_CB_lags_stage3_10_ms[ 0 ][ 0 ]; nb_cbk_search = PE_NB_CBKS_STAGE3_10MS; @@ -572,8 +572,8 @@ static void silk_P_Ana_calc_energy_st3( silk_float scratch_mem[ SCRATCH_SIZE ]; const opus_int8 *Lag_range_ptr, *Lag_CB_ptr; - silk_assert( complexity >= SILK_PE_MIN_COMPLEX ); - silk_assert( complexity <= SILK_PE_MAX_COMPLEX ); + celt_assert( complexity >= SILK_PE_MIN_COMPLEX ); + celt_assert( complexity <= SILK_PE_MAX_COMPLEX ); if( nb_subfr == PE_MAX_NB_SUBFR ) { Lag_range_ptr = &silk_Lag_range_stage3[ complexity ][ 0 ][ 0 ]; @@ -581,7 +581,7 @@ static void silk_P_Ana_calc_energy_st3( nb_cbk_search = silk_nb_cbk_searchs_stage3[ complexity ]; cbk_size = PE_NB_CBKS_STAGE3_MAX; } else { - silk_assert( nb_subfr == PE_MAX_NB_SUBFR >> 1); + celt_assert( nb_subfr == PE_MAX_NB_SUBFR >> 1); Lag_range_ptr = &silk_Lag_range_stage3_10_ms[ 0 ][ 0 ]; Lag_CB_ptr = &silk_CB_lags_stage3_10_ms[ 0 ][ 0 ]; nb_cbk_search = PE_NB_CBKS_STAGE3_10MS; diff --git a/libs/libopus/silk/float/prefilter_FLP.c b/libs/libopus/silk/float/prefilter_FLP.c deleted file mode 100644 index 8bc32fb41..000000000 --- a/libs/libopus/silk/float/prefilter_FLP.c +++ /dev/null @@ -1,206 +0,0 @@ -/*********************************************************************** -Copyright (c) 2006-2011, Skype Limited. All rights reserved. -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions -are met: -- Redistributions of source code must retain the above copyright notice, -this list of conditions and the following disclaimer. 
-- Redistributions in binary form must reproduce the above copyright -notice, this list of conditions and the following disclaimer in the -documentation and/or other materials provided with the distribution. -- Neither the name of Internet Society, IETF or IETF Trust, nor the -names of specific contributors, may be used to endorse or promote -products derived from this software without specific prior written -permission. -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE -LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -POSSIBILITY OF SUCH DAMAGE. -***********************************************************************/ - -#ifdef HAVE_CONFIG_H -#include "config.h" -#endif - -#include "main_FLP.h" -#include "tuning_parameters.h" - -/* -* Prefilter for finding Quantizer input signal -*/ -static OPUS_INLINE void silk_prefilt_FLP( - silk_prefilter_state_FLP *P, /* I/O state */ - silk_float st_res[], /* I */ - silk_float xw[], /* O */ - silk_float *HarmShapeFIR, /* I */ - silk_float Tilt, /* I */ - silk_float LF_MA_shp, /* I */ - silk_float LF_AR_shp, /* I */ - opus_int lag, /* I */ - opus_int length /* I */ -); - -static void silk_warped_LPC_analysis_filter_FLP( - silk_float state[], /* I/O State [order + 1] */ - silk_float res[], /* O Residual signal [length] */ - const silk_float coef[], /* I Coefficients [order] */ - const silk_float input[], /* I Input signal [length] */ - const silk_float lambda, /* I Warping factor */ - const opus_int length, /* I Length of input signal */ - const opus_int order /* I Filter order (even) */ -) -{ - opus_int n, i; - silk_float acc, tmp1, tmp2; - - /* Order must be even */ - silk_assert( ( order & 1 ) == 0 ); - - for( n = 0; n < length; n++ ) { - /* Output of lowpass section */ - tmp2 = state[ 0 ] + lambda * state[ 1 ]; - state[ 0 ] = input[ n ]; - /* Output of allpass section */ - tmp1 = state[ 1 ] + lambda * ( state[ 2 ] - tmp2 ); - state[ 1 ] = tmp2; - acc = coef[ 0 ] * tmp2; - /* Loop over allpass sections */ - for( i = 2; i < order; i += 2 ) { - /* Output of allpass section */ - tmp2 = state[ i ] + lambda * ( state[ i + 1 ] - tmp1 ); - state[ i ] = tmp1; - acc += coef[ i - 1 ] * tmp1; - /* Output of allpass section */ - tmp1 = state[ i + 1 ] + lambda * ( state[ i + 2 ] - tmp2 ); - state[ i + 1 ] = tmp2; - acc += coef[ i ] * tmp2; - } - state[ order ] = tmp1; - acc += coef[ order - 1 ] * tmp1; - res[ n ] = input[ n ] - acc; - } -} - -/* -* silk_prefilter. 
Main prefilter function -*/ -void silk_prefilter_FLP( - silk_encoder_state_FLP *psEnc, /* I/O Encoder state FLP */ - const silk_encoder_control_FLP *psEncCtrl, /* I Encoder control FLP */ - silk_float xw[], /* O Weighted signal */ - const silk_float x[] /* I Speech signal */ -) -{ - silk_prefilter_state_FLP *P = &psEnc->sPrefilt; - opus_int j, k, lag; - silk_float HarmShapeGain, Tilt, LF_MA_shp, LF_AR_shp; - silk_float B[ 2 ]; - const silk_float *AR1_shp; - const silk_float *px; - silk_float *pxw; - silk_float HarmShapeFIR[ 3 ]; - silk_float st_res[ MAX_SUB_FRAME_LENGTH + MAX_LPC_ORDER ]; - - /* Set up pointers */ - px = x; - pxw = xw; - lag = P->lagPrev; - for( k = 0; k < psEnc->sCmn.nb_subfr; k++ ) { - /* Update Variables that change per sub frame */ - if( psEnc->sCmn.indices.signalType == TYPE_VOICED ) { - lag = psEncCtrl->pitchL[ k ]; - } - - /* Noise shape parameters */ - HarmShapeGain = psEncCtrl->HarmShapeGain[ k ] * ( 1.0f - psEncCtrl->HarmBoost[ k ] ); - HarmShapeFIR[ 0 ] = 0.25f * HarmShapeGain; - HarmShapeFIR[ 1 ] = 32767.0f / 65536.0f * HarmShapeGain; - HarmShapeFIR[ 2 ] = 0.25f * HarmShapeGain; - Tilt = psEncCtrl->Tilt[ k ]; - LF_MA_shp = psEncCtrl->LF_MA_shp[ k ]; - LF_AR_shp = psEncCtrl->LF_AR_shp[ k ]; - AR1_shp = &psEncCtrl->AR1[ k * MAX_SHAPE_LPC_ORDER ]; - - /* Short term FIR filtering */ - silk_warped_LPC_analysis_filter_FLP( P->sAR_shp, st_res, AR1_shp, px, - (silk_float)psEnc->sCmn.warping_Q16 / 65536.0f, psEnc->sCmn.subfr_length, psEnc->sCmn.shapingLPCOrder ); - - /* Reduce (mainly) low frequencies during harmonic emphasis */ - B[ 0 ] = psEncCtrl->GainsPre[ k ]; - B[ 1 ] = -psEncCtrl->GainsPre[ k ] * - ( psEncCtrl->HarmBoost[ k ] * HarmShapeGain + INPUT_TILT + psEncCtrl->coding_quality * HIGH_RATE_INPUT_TILT ); - pxw[ 0 ] = B[ 0 ] * st_res[ 0 ] + B[ 1 ] * P->sHarmHP; - for( j = 1; j < psEnc->sCmn.subfr_length; j++ ) { - pxw[ j ] = B[ 0 ] * st_res[ j ] + B[ 1 ] * st_res[ j - 1 ]; - } - P->sHarmHP = st_res[ psEnc->sCmn.subfr_length - 1 ]; - - silk_prefilt_FLP( P, pxw, pxw, HarmShapeFIR, Tilt, LF_MA_shp, LF_AR_shp, lag, psEnc->sCmn.subfr_length ); - - px += psEnc->sCmn.subfr_length; - pxw += psEnc->sCmn.subfr_length; - } - P->lagPrev = psEncCtrl->pitchL[ psEnc->sCmn.nb_subfr - 1 ]; -} - -/* -* Prefilter for finding Quantizer input signal -*/ -static OPUS_INLINE void silk_prefilt_FLP( - silk_prefilter_state_FLP *P, /* I/O state */ - silk_float st_res[], /* I */ - silk_float xw[], /* O */ - silk_float *HarmShapeFIR, /* I */ - silk_float Tilt, /* I */ - silk_float LF_MA_shp, /* I */ - silk_float LF_AR_shp, /* I */ - opus_int lag, /* I */ - opus_int length /* I */ -) -{ - opus_int i; - opus_int idx, LTP_shp_buf_idx; - silk_float n_Tilt, n_LF, n_LTP; - silk_float sLF_AR_shp, sLF_MA_shp; - silk_float *LTP_shp_buf; - - /* To speed up use temp variables instead of using the struct */ - LTP_shp_buf = P->sLTP_shp; - LTP_shp_buf_idx = P->sLTP_shp_buf_idx; - sLF_AR_shp = P->sLF_AR_shp; - sLF_MA_shp = P->sLF_MA_shp; - - for( i = 0; i < length; i++ ) { - if( lag > 0 ) { - silk_assert( HARM_SHAPE_FIR_TAPS == 3 ); - idx = lag + LTP_shp_buf_idx; - n_LTP = LTP_shp_buf[ ( idx - HARM_SHAPE_FIR_TAPS / 2 - 1) & LTP_MASK ] * HarmShapeFIR[ 0 ]; - n_LTP += LTP_shp_buf[ ( idx - HARM_SHAPE_FIR_TAPS / 2 ) & LTP_MASK ] * HarmShapeFIR[ 1 ]; - n_LTP += LTP_shp_buf[ ( idx - HARM_SHAPE_FIR_TAPS / 2 + 1) & LTP_MASK ] * HarmShapeFIR[ 2 ]; - } else { - n_LTP = 0; - } - - n_Tilt = sLF_AR_shp * Tilt; - n_LF = sLF_AR_shp * LF_AR_shp + sLF_MA_shp * LF_MA_shp; - - sLF_AR_shp = st_res[ i ] - n_Tilt; - 
sLF_MA_shp = sLF_AR_shp - n_LF; - - LTP_shp_buf_idx = ( LTP_shp_buf_idx - 1 ) & LTP_MASK; - LTP_shp_buf[ LTP_shp_buf_idx ] = sLF_MA_shp; - - xw[ i ] = sLF_MA_shp - n_LTP; - } - /* Copy temp variable back to state */ - P->sLF_AR_shp = sLF_AR_shp; - P->sLF_MA_shp = sLF_MA_shp; - P->sLTP_shp_buf_idx = LTP_shp_buf_idx; -} diff --git a/libs/libopus/silk/float/residual_energy_FLP.c b/libs/libopus/silk/float/residual_energy_FLP.c index b2e03a86a..1bd07b33a 100644 --- a/libs/libopus/silk/float/residual_energy_FLP.c +++ b/libs/libopus/silk/float/residual_energy_FLP.c @@ -47,7 +47,7 @@ silk_float silk_residual_energy_covar_FLP( /* O silk_float tmp, nrg = 0.0f, regularization; /* Safety checks */ - silk_assert( D >= 0 ); + celt_assert( D >= 0 ); regularization = REGULARIZATION_FACTOR * ( wXX[ 0 ] + wXX[ D * D - 1 ] ); for( k = 0; k < MAX_ITERATIONS_RESIDUAL_NRG; k++ ) { diff --git a/libs/libopus/silk/float/schur_FLP.c b/libs/libopus/silk/float/schur_FLP.c index ee436f835..8526c748d 100644 --- a/libs/libopus/silk/float/schur_FLP.c +++ b/libs/libopus/silk/float/schur_FLP.c @@ -38,22 +38,23 @@ silk_float silk_schur_FLP( /* O returns residual energy ) { opus_int k, n; - silk_float C[ SILK_MAX_ORDER_LPC + 1 ][ 2 ]; - silk_float Ctmp1, Ctmp2, rc_tmp; + double C[ SILK_MAX_ORDER_LPC + 1 ][ 2 ]; + double Ctmp1, Ctmp2, rc_tmp; - silk_assert( order==6||order==8||order==10||order==12||order==14||order==16 ); + celt_assert( order >= 0 && order <= SILK_MAX_ORDER_LPC ); /* Copy correlations */ - for( k = 0; k < order+1; k++ ) { + k = 0; + do { C[ k ][ 0 ] = C[ k ][ 1 ] = auto_corr[ k ]; - } + } while( ++k <= order ); for( k = 0; k < order; k++ ) { /* Get reflection coefficient */ rc_tmp = -C[ k + 1 ][ 0 ] / silk_max_float( C[ 0 ][ 1 ], 1e-9f ); /* Save the output */ - refl_coef[ k ] = rc_tmp; + refl_coef[ k ] = (silk_float)rc_tmp; /* Update correlations */ for( n = 0; n < order - k; n++ ) { @@ -65,6 +66,5 @@ silk_float silk_schur_FLP( /* O returns residual energy } /* Return residual energy */ - return C[ 0 ][ 1 ]; + return (silk_float)C[ 0 ][ 1 ]; } - diff --git a/libs/libopus/silk/float/solve_LS_FLP.c b/libs/libopus/silk/float/solve_LS_FLP.c deleted file mode 100644 index 7c90d665a..000000000 --- a/libs/libopus/silk/float/solve_LS_FLP.c +++ /dev/null @@ -1,207 +0,0 @@ -/*********************************************************************** -Copyright (c) 2006-2011, Skype Limited. All rights reserved. -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions -are met: -- Redistributions of source code must retain the above copyright notice, -this list of conditions and the following disclaimer. -- Redistributions in binary form must reproduce the above copyright -notice, this list of conditions and the following disclaimer in the -documentation and/or other materials provided with the distribution. -- Neither the name of Internet Society, IETF or IETF Trust, nor the -names of specific contributors, may be used to endorse or promote -products derived from this software without specific prior written -permission. -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE -LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -POSSIBILITY OF SUCH DAMAGE. -***********************************************************************/ - -#ifdef HAVE_CONFIG_H -#include "config.h" -#endif - -#include "main_FLP.h" -#include "tuning_parameters.h" - -/********************************************************************** - * LDL Factorisation. Finds the upper triangular matrix L and the diagonal - * Matrix D (only the diagonal elements returned in a vector)such that - * the symmetric matric A is given by A = L*D*L'. - **********************************************************************/ -static OPUS_INLINE void silk_LDL_FLP( - silk_float *A, /* I/O Pointer to Symetric Square Matrix */ - opus_int M, /* I Size of Matrix */ - silk_float *L, /* I/O Pointer to Square Upper triangular Matrix */ - silk_float *Dinv /* I/O Pointer to vector holding the inverse diagonal elements of D */ -); - -/********************************************************************** - * Function to solve linear equation Ax = b, when A is a MxM lower - * triangular matrix, with ones on the diagonal. - **********************************************************************/ -static OPUS_INLINE void silk_SolveWithLowerTriangularWdiagOnes_FLP( - const silk_float *L, /* I Pointer to Lower Triangular Matrix */ - opus_int M, /* I Dim of Matrix equation */ - const silk_float *b, /* I b Vector */ - silk_float *x /* O x Vector */ -); - -/********************************************************************** - * Function to solve linear equation (A^T)x = b, when A is a MxM lower - * triangular, with ones on the diagonal. (ie then A^T is upper triangular) - **********************************************************************/ -static OPUS_INLINE void silk_SolveWithUpperTriangularFromLowerWdiagOnes_FLP( - const silk_float *L, /* I Pointer to Lower Triangular Matrix */ - opus_int M, /* I Dim of Matrix equation */ - const silk_float *b, /* I b Vector */ - silk_float *x /* O x Vector */ -); - -/********************************************************************** - * Function to solve linear equation Ax = b, when A is a MxM - * symmetric square matrix - using LDL factorisation - **********************************************************************/ -void silk_solve_LDL_FLP( - silk_float *A, /* I/O Symmetric square matrix, out: reg. */ - const opus_int M, /* I Size of matrix */ - const silk_float *b, /* I Pointer to b vector */ - silk_float *x /* O Pointer to x solution vector */ -) -{ - opus_int i; - silk_float L[ MAX_MATRIX_SIZE ][ MAX_MATRIX_SIZE ]; - silk_float T[ MAX_MATRIX_SIZE ]; - silk_float Dinv[ MAX_MATRIX_SIZE ]; /* inverse diagonal elements of D*/ - - silk_assert( M <= MAX_MATRIX_SIZE ); - - /*************************************************** - Factorize A by LDL such that A = L*D*(L^T), - where L is lower triangular with ones on diagonal - ****************************************************/ - silk_LDL_FLP( A, M, &L[ 0 ][ 0 ], Dinv ); - - /**************************************************** - * substitute D*(L^T) = T. 
ie: - L*D*(L^T)*x = b => L*T = b <=> T = inv(L)*b - ******************************************************/ - silk_SolveWithLowerTriangularWdiagOnes_FLP( &L[ 0 ][ 0 ], M, b, T ); - - /**************************************************** - D*(L^T)*x = T <=> (L^T)*x = inv(D)*T, because D is - diagonal just multiply with 1/d_i - ****************************************************/ - for( i = 0; i < M; i++ ) { - T[ i ] = T[ i ] * Dinv[ i ]; - } - /**************************************************** - x = inv(L') * inv(D) * T - *****************************************************/ - silk_SolveWithUpperTriangularFromLowerWdiagOnes_FLP( &L[ 0 ][ 0 ], M, T, x ); -} - -static OPUS_INLINE void silk_SolveWithUpperTriangularFromLowerWdiagOnes_FLP( - const silk_float *L, /* I Pointer to Lower Triangular Matrix */ - opus_int M, /* I Dim of Matrix equation */ - const silk_float *b, /* I b Vector */ - silk_float *x /* O x Vector */ -) -{ - opus_int i, j; - silk_float temp; - const silk_float *ptr1; - - for( i = M - 1; i >= 0; i-- ) { - ptr1 = matrix_adr( L, 0, i, M ); - temp = 0; - for( j = M - 1; j > i ; j-- ) { - temp += ptr1[ j * M ] * x[ j ]; - } - temp = b[ i ] - temp; - x[ i ] = temp; - } -} - -static OPUS_INLINE void silk_SolveWithLowerTriangularWdiagOnes_FLP( - const silk_float *L, /* I Pointer to Lower Triangular Matrix */ - opus_int M, /* I Dim of Matrix equation */ - const silk_float *b, /* I b Vector */ - silk_float *x /* O x Vector */ -) -{ - opus_int i, j; - silk_float temp; - const silk_float *ptr1; - - for( i = 0; i < M; i++ ) { - ptr1 = matrix_adr( L, i, 0, M ); - temp = 0; - for( j = 0; j < i; j++ ) { - temp += ptr1[ j ] * x[ j ]; - } - temp = b[ i ] - temp; - x[ i ] = temp; - } -} - -static OPUS_INLINE void silk_LDL_FLP( - silk_float *A, /* I/O Pointer to Symetric Square Matrix */ - opus_int M, /* I Size of Matrix */ - silk_float *L, /* I/O Pointer to Square Upper triangular Matrix */ - silk_float *Dinv /* I/O Pointer to vector holding the inverse diagonal elements of D */ -) -{ - opus_int i, j, k, loop_count, err = 1; - silk_float *ptr1, *ptr2; - double temp, diag_min_value; - silk_float v[ MAX_MATRIX_SIZE ], D[ MAX_MATRIX_SIZE ]; /* temp arrays*/ - - silk_assert( M <= MAX_MATRIX_SIZE ); - - diag_min_value = FIND_LTP_COND_FAC * 0.5f * ( A[ 0 ] + A[ M * M - 1 ] ); - for( loop_count = 0; loop_count < M && err == 1; loop_count++ ) { - err = 0; - for( j = 0; j < M; j++ ) { - ptr1 = matrix_adr( L, j, 0, M ); - temp = matrix_ptr( A, j, j, M ); /* element in row j column j*/ - for( i = 0; i < j; i++ ) { - v[ i ] = ptr1[ i ] * D[ i ]; - temp -= ptr1[ i ] * v[ i ]; - } - if( temp < diag_min_value ) { - /* Badly conditioned matrix: add white noise and run again */ - temp = ( loop_count + 1 ) * diag_min_value - temp; - for( i = 0; i < M; i++ ) { - matrix_ptr( A, i, i, M ) += ( silk_float )temp; - } - err = 1; - break; - } - D[ j ] = ( silk_float )temp; - Dinv[ j ] = ( silk_float )( 1.0f / temp ); - matrix_ptr( L, j, j, M ) = 1.0f; - - ptr1 = matrix_adr( A, j, 0, M ); - ptr2 = matrix_adr( L, j + 1, 0, M); - for( i = j + 1; i < M; i++ ) { - temp = 0.0; - for( k = 0; k < j; k++ ) { - temp += ptr2[ k ] * v[ k ]; - } - matrix_ptr( L, i, j, M ) = ( silk_float )( ( ptr1[ i ] - temp ) * Dinv[ j ] ); - ptr2 += M; /* go to next column*/ - } - } - } - silk_assert( err == 0 ); -} - diff --git a/libs/libopus/silk/float/sort_FLP.c b/libs/libopus/silk/float/sort_FLP.c index f08d7592c..0e18f3195 100644 --- a/libs/libopus/silk/float/sort_FLP.c +++ b/libs/libopus/silk/float/sort_FLP.c @@ -47,9 +47,9 @@ void 
silk_insertion_sort_decreasing_FLP( opus_int i, j; /* Safety checks */ - silk_assert( K > 0 ); - silk_assert( L > 0 ); - silk_assert( L >= K ); + celt_assert( K > 0 ); + celt_assert( L > 0 ); + celt_assert( L >= K ); /* Write start indices in index vector */ for( i = 0; i < K; i++ ) { diff --git a/libs/libopus/silk/float/structs_FLP.h b/libs/libopus/silk/float/structs_FLP.h index 14d647ced..3150b386e 100644 --- a/libs/libopus/silk/float/structs_FLP.h +++ b/libs/libopus/silk/float/structs_FLP.h @@ -42,32 +42,16 @@ extern "C" /********************************/ typedef struct { opus_int8 LastGainIndex; - silk_float HarmBoost_smth; silk_float HarmShapeGain_smth; silk_float Tilt_smth; } silk_shape_state_FLP; -/********************************/ -/* Prefilter state */ -/********************************/ -typedef struct { - silk_float sLTP_shp[ LTP_BUF_LENGTH ]; - silk_float sAR_shp[ MAX_SHAPE_LPC_ORDER + 1 ]; - opus_int sLTP_shp_buf_idx; - silk_float sLF_AR_shp; - silk_float sLF_MA_shp; - silk_float sHarmHP; - opus_int32 rand_seed; - opus_int lagPrev; -} silk_prefilter_state_FLP; - /********************************/ /* Encoder state FLP */ /********************************/ typedef struct { silk_encoder_state sCmn; /* Common struct, shared with fixed-point code */ silk_shape_state_FLP sShape; /* Noise shaping state */ - silk_prefilter_state_FLP sPrefilt; /* Prefilter State */ /* Buffer for find pitch and noise shape analysis */ silk_float x_buf[ 2 * MAX_FRAME_LENGTH + LA_SHAPE_MAX ];/* Buffer for find pitch and noise shape analysis */ @@ -86,12 +70,9 @@ typedef struct { opus_int pitchL[ MAX_NB_SUBFR ]; /* Noise shaping parameters */ - silk_float AR1[ MAX_NB_SUBFR * MAX_SHAPE_LPC_ORDER ]; - silk_float AR2[ MAX_NB_SUBFR * MAX_SHAPE_LPC_ORDER ]; + silk_float AR[ MAX_NB_SUBFR * MAX_SHAPE_LPC_ORDER ]; silk_float LF_MA_shp[ MAX_NB_SUBFR ]; silk_float LF_AR_shp[ MAX_NB_SUBFR ]; - silk_float GainsPre[ MAX_NB_SUBFR ]; - silk_float HarmBoost[ MAX_NB_SUBFR ]; silk_float Tilt[ MAX_NB_SUBFR ]; silk_float HarmShapeGain[ MAX_NB_SUBFR ]; silk_float Lambda; @@ -99,7 +80,6 @@ typedef struct { silk_float coding_quality; /* Measures */ - silk_float sparseness; silk_float predGain; silk_float LTPredCodGain; silk_float ResNrg[ MAX_NB_SUBFR ]; /* Residual energy per subframe */ diff --git a/libs/libopus/silk/float/warped_autocorrelation_FLP.c b/libs/libopus/silk/float/warped_autocorrelation_FLP.c index 542414f48..09186e73d 100644 --- a/libs/libopus/silk/float/warped_autocorrelation_FLP.c +++ b/libs/libopus/silk/float/warped_autocorrelation_FLP.c @@ -42,11 +42,11 @@ void silk_warped_autocorrelation_FLP( { opus_int n, i; double tmp1, tmp2; - double state[ MAX_SHAPE_LPC_ORDER + 1 ] = { 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 }; - double C[ MAX_SHAPE_LPC_ORDER + 1 ] = { 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 }; + double state[ MAX_SHAPE_LPC_ORDER + 1 ] = { 0 }; + double C[ MAX_SHAPE_LPC_ORDER + 1 ] = { 0 }; /* Order must be even */ - silk_assert( ( order & 1 ) == 0 ); + celt_assert( ( order & 1 ) == 0 ); /* Loop over samples */ for( n = 0; n < length; n++ ) { diff --git a/libs/libopus/silk/float/wrappers_FLP.c b/libs/libopus/silk/float/wrappers_FLP.c index 6666b8efa..ad90b874a 100644 --- a/libs/libopus/silk/float/wrappers_FLP.c +++ b/libs/libopus/silk/float/wrappers_FLP.c @@ -54,13 +54,14 @@ void silk_A2NLSF_FLP( void silk_NLSF2A_FLP( silk_float *pAR, /* O LPC coefficients [ LPC_order ] */ const opus_int16 *NLSF_Q15, /* I NLSF vector [ LPC_order ] */ - const opus_int LPC_order /* I LPC order */ + const opus_int LPC_order, /* I LPC 
order */ + int arch /* I Run-time architecture */ ) { opus_int i; opus_int16 a_fix_Q12[ MAX_LPC_ORDER ]; - silk_NLSF2A( a_fix_Q12, NLSF_Q15, LPC_order ); + silk_NLSF2A( a_fix_Q12, NLSF_Q15, LPC_order, arch ); for( i = 0; i < LPC_order; i++ ) { pAR[ i ] = ( silk_float )a_fix_Q12[ i ] * ( 1.0f / 4096.0f ); @@ -102,14 +103,14 @@ void silk_NSQ_wrapper_FLP( ) { opus_int i, j; - opus_int32 x_Q3[ MAX_FRAME_LENGTH ]; + opus_int16 x16[ MAX_FRAME_LENGTH ]; opus_int32 Gains_Q16[ MAX_NB_SUBFR ]; silk_DWORD_ALIGN opus_int16 PredCoef_Q12[ 2 ][ MAX_LPC_ORDER ]; opus_int16 LTPCoef_Q14[ LTP_ORDER * MAX_NB_SUBFR ]; opus_int LTP_scale_Q14; /* Noise shaping parameters */ - opus_int16 AR2_Q13[ MAX_NB_SUBFR * MAX_SHAPE_LPC_ORDER ]; + opus_int16 AR_Q13[ MAX_NB_SUBFR * MAX_SHAPE_LPC_ORDER ]; opus_int32 LF_shp_Q14[ MAX_NB_SUBFR ]; /* Packs two int16 coefficients per int32 value */ opus_int Lambda_Q10; opus_int Tilt_Q14[ MAX_NB_SUBFR ]; @@ -119,7 +120,7 @@ void silk_NSQ_wrapper_FLP( /* Noise shape parameters */ for( i = 0; i < psEnc->sCmn.nb_subfr; i++ ) { for( j = 0; j < psEnc->sCmn.shapingLPCOrder; j++ ) { - AR2_Q13[ i * MAX_SHAPE_LPC_ORDER + j ] = silk_float2int( psEncCtrl->AR2[ i * MAX_SHAPE_LPC_ORDER + j ] * 8192.0f ); + AR_Q13[ i * MAX_SHAPE_LPC_ORDER + j ] = silk_float2int( psEncCtrl->AR[ i * MAX_SHAPE_LPC_ORDER + j ] * 8192.0f ); } } @@ -155,16 +156,16 @@ void silk_NSQ_wrapper_FLP( /* Convert input to fix */ for( i = 0; i < psEnc->sCmn.frame_length; i++ ) { - x_Q3[ i ] = silk_float2int( 8.0f * x[ i ] ); + x16[ i ] = silk_float2int( x[ i ] ); } /* Call NSQ */ if( psEnc->sCmn.nStatesDelayedDecision > 1 || psEnc->sCmn.warping_Q16 > 0 ) { - silk_NSQ_del_dec( &psEnc->sCmn, psNSQ, psIndices, x_Q3, pulses, PredCoef_Q12[ 0 ], LTPCoef_Q14, - AR2_Q13, HarmShapeGain_Q14, Tilt_Q14, LF_shp_Q14, Gains_Q16, psEncCtrl->pitchL, Lambda_Q10, LTP_scale_Q14, psEnc->sCmn.arch ); + silk_NSQ_del_dec( &psEnc->sCmn, psNSQ, psIndices, x16, pulses, PredCoef_Q12[ 0 ], LTPCoef_Q14, + AR_Q13, HarmShapeGain_Q14, Tilt_Q14, LF_shp_Q14, Gains_Q16, psEncCtrl->pitchL, Lambda_Q10, LTP_scale_Q14, psEnc->sCmn.arch ); } else { - silk_NSQ( &psEnc->sCmn, psNSQ, psIndices, x_Q3, pulses, PredCoef_Q12[ 0 ], LTPCoef_Q14, - AR2_Q13, HarmShapeGain_Q14, Tilt_Q14, LF_shp_Q14, Gains_Q16, psEncCtrl->pitchL, Lambda_Q10, LTP_scale_Q14, psEnc->sCmn.arch ); + silk_NSQ( &psEnc->sCmn, psNSQ, psIndices, x16, pulses, PredCoef_Q12[ 0 ], LTPCoef_Q14, + AR_Q13, HarmShapeGain_Q14, Tilt_Q14, LF_shp_Q14, Gains_Q16, psEncCtrl->pitchL, Lambda_Q10, LTP_scale_Q14, psEnc->sCmn.arch ); } } @@ -172,31 +173,35 @@ void silk_NSQ_wrapper_FLP( /* Floating-point Silk LTP quantiation wrapper */ /***********************************************/ void silk_quant_LTP_gains_FLP( - silk_float B[ MAX_NB_SUBFR * LTP_ORDER ], /* I/O (Un-)quantized LTP gains */ + silk_float B[ MAX_NB_SUBFR * LTP_ORDER ], /* O Quantized LTP gains */ opus_int8 cbk_index[ MAX_NB_SUBFR ], /* O Codebook index */ opus_int8 *periodicity_index, /* O Periodicity index */ opus_int32 *sum_log_gain_Q7, /* I/O Cumulative max prediction gain */ - const silk_float W[ MAX_NB_SUBFR * LTP_ORDER * LTP_ORDER ], /* I Error weights */ - const opus_int mu_Q10, /* I Mu value (R/D tradeoff) */ - const opus_int lowComplexity, /* I Flag for low complexity */ - const opus_int nb_subfr, /* I number of subframes */ + silk_float *pred_gain_dB, /* O LTP prediction gain */ + const silk_float XX[ MAX_NB_SUBFR * LTP_ORDER * LTP_ORDER ], /* I Correlation matrix */ + const silk_float xX[ MAX_NB_SUBFR * LTP_ORDER ], /* I Correlation vector */ + const 
opus_int subfr_len, /* I Number of samples per subframe */ + const opus_int nb_subfr, /* I Number of subframes */ int arch /* I Run-time architecture */ ) { - opus_int i; + opus_int i, pred_gain_dB_Q7; opus_int16 B_Q14[ MAX_NB_SUBFR * LTP_ORDER ]; - opus_int32 W_Q18[ MAX_NB_SUBFR*LTP_ORDER*LTP_ORDER ]; + opus_int32 XX_Q17[ MAX_NB_SUBFR * LTP_ORDER * LTP_ORDER ]; + opus_int32 xX_Q17[ MAX_NB_SUBFR * LTP_ORDER ]; - for( i = 0; i < nb_subfr * LTP_ORDER; i++ ) { - B_Q14[ i ] = (opus_int16)silk_float2int( B[ i ] * 16384.0f ); - } for( i = 0; i < nb_subfr * LTP_ORDER * LTP_ORDER; i++ ) { - W_Q18[ i ] = (opus_int32)silk_float2int( W[ i ] * 262144.0f ); + XX_Q17[ i ] = (opus_int32)silk_float2int( XX[ i ] * 131072.0f ); + } + for( i = 0; i < nb_subfr * LTP_ORDER; i++ ) { + xX_Q17[ i ] = (opus_int32)silk_float2int( xX[ i ] * 131072.0f ); } - silk_quant_LTP_gains( B_Q14, cbk_index, periodicity_index, sum_log_gain_Q7, W_Q18, mu_Q10, lowComplexity, nb_subfr, arch ); + silk_quant_LTP_gains( B_Q14, cbk_index, periodicity_index, sum_log_gain_Q7, &pred_gain_dB_Q7, XX_Q17, xX_Q17, subfr_len, nb_subfr, arch ); for( i = 0; i < nb_subfr * LTP_ORDER; i++ ) { B[ i ] = (silk_float)B_Q14[ i ] * ( 1.0f / 16384.0f ); } + + *pred_gain_dB = (silk_float)pred_gain_dB_Q7 * ( 1.0f / 128.0f ); } diff --git a/libs/libopus/silk/gain_quant.c b/libs/libopus/silk/gain_quant.c index 64ccd0611..ee65245aa 100644 --- a/libs/libopus/silk/gain_quant.c +++ b/libs/libopus/silk/gain_quant.c @@ -76,6 +76,7 @@ void silk_gains_quant( /* Accumulate deltas */ if( ind[ k ] > double_step_size_threshold ) { *prev_ind += silk_LSHIFT( ind[ k ], 1 ) - double_step_size_threshold; + *prev_ind = silk_min_int( *prev_ind, N_LEVELS_QGAIN - 1 ); } else { *prev_ind += ind[ k ]; } diff --git a/libs/libopus/silk/init_decoder.c b/libs/libopus/silk/init_decoder.c index f887c6788..16c03dcd1 100644 --- a/libs/libopus/silk/init_decoder.c +++ b/libs/libopus/silk/init_decoder.c @@ -44,6 +44,7 @@ opus_int silk_init_decoder( /* Used to deactivate LSF interpolation */ psDec->first_frame_after_reset = 1; psDec->prev_gain_Q16 = 65536; + psDec->arch = opus_select_arch(); /* Reset CNG state */ silk_CNG_Reset( psDec ); diff --git a/libs/libopus/silk/interpolate.c b/libs/libopus/silk/interpolate.c index 1bd8ca4d5..833c28ef8 100644 --- a/libs/libopus/silk/interpolate.c +++ b/libs/libopus/silk/interpolate.c @@ -42,8 +42,8 @@ void silk_interpolate( { opus_int i; - silk_assert( ifact_Q2 >= 0 ); - silk_assert( ifact_Q2 <= 4 ); + celt_assert( ifact_Q2 >= 0 ); + celt_assert( ifact_Q2 <= 4 ); for( i = 0; i < d; i++ ) { xi[ i ] = (opus_int16)silk_ADD_RSHIFT( x0[ i ], silk_SMULBB( x1[ i ] - x0[ i ], ifact_Q2 ), 2 ); diff --git a/libs/libopus/silk/lin2log.c b/libs/libopus/silk/lin2log.c index d4fe51532..0d5155aa8 100644 --- a/libs/libopus/silk/lin2log.c +++ b/libs/libopus/silk/lin2log.c @@ -41,6 +41,6 @@ opus_int32 silk_lin2log( silk_CLZ_FRAC( inLin, &lz, &frac_Q7 ); /* Piece-wise parabolic approximation */ - return silk_LSHIFT( 31 - lz, 7 ) + silk_SMLAWB( frac_Q7, silk_MUL( frac_Q7, 128 - frac_Q7 ), 179 ); + return silk_ADD_LSHIFT32( silk_SMLAWB( frac_Q7, silk_MUL( frac_Q7, 128 - frac_Q7 ), 179 ), 31 - lz, 7 ); } diff --git a/libs/libopus/silk/macros.h b/libs/libopus/silk/macros.h index d3ca34752..3c67b6e5d 100644 --- a/libs/libopus/silk/macros.h +++ b/libs/libopus/silk/macros.h @@ -36,14 +36,6 @@ POSSIBILITY OF SUCH DAMAGE. 
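The one-line addition to silk_gains_quant() above clamps the accumulated gain index after a doubled-step delta, so *prev_ind can no longer run past the top quantizer level. A minimal sketch of that idea, using hypothetical stand-in constants rather than the real SILK defines:

/* Sketch only: mirrors the clamp added to silk_gains_quant(); the two
 * constants below are hypothetical stand-ins for the SILK defines. */
#define QGAIN_LEVELS        64   /* stand-in for N_LEVELS_QGAIN */
#define DOUBLE_STEP_THRESH  50   /* stand-in for double_step_size_threshold */

static int accumulate_gain_index(int prev_ind, int delta)
{
    if (delta > DOUBLE_STEP_THRESH) {
        /* Deltas above the threshold are coded with a doubled step size,
           so the accumulated index can overshoot the codebook... */
        prev_ind += (delta << 1) - DOUBLE_STEP_THRESH;
        /* ...hence the new clamp to the highest valid level. */
        if (prev_ind > QGAIN_LEVELS - 1) {
            prev_ind = QGAIN_LEVELS - 1;
        }
    } else {
        prev_ind += delta;
    }
    return prev_ind;
}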
#include "opus_defines.h" #include "arch.h" -#if OPUS_GNUC_PREREQ(3, 0) -#define opus_likely(x) (__builtin_expect(!!(x), 1)) -#define opus_unlikely(x) (__builtin_expect(!!(x), 0)) -#else -#define opus_likely(x) (!!(x)) -#define opus_unlikely(x) (!!(x)) -#endif - /* This is an OPUS_INLINE header file for general platform. */ /* (a32 * (opus_int32)((opus_int16)(b32))) >> 16 output have to be 32bit int */ diff --git a/libs/libopus/silk/main.h b/libs/libopus/silk/main.h index 2f90d68f7..a5f568758 100644 --- a/libs/libopus/silk/main.h +++ b/libs/libopus/silk/main.h @@ -42,6 +42,10 @@ POSSIBILITY OF SUCH DAMAGE. #include "x86/main_sse.h" #endif +#if (defined(OPUS_ARM_ASM) || defined(OPUS_ARM_MAY_HAVE_NEON_INTR)) +#include "arm/NSQ_del_dec_arm.h" +#endif + /* Convert Left/Right stereo signal to adaptive Mid/Side representation */ void silk_stereo_LR_to_MS( stereo_enc_state *state, /* I/O State */ @@ -109,22 +113,22 @@ void silk_stereo_decode_mid_only( /* Encodes signs of excitation */ void silk_encode_signs( - ec_enc *psRangeEnc, /* I/O Compressor data structure */ - const opus_int8 pulses[], /* I pulse signal */ - opus_int length, /* I length of input */ - const opus_int signalType, /* I Signal type */ - const opus_int quantOffsetType, /* I Quantization offset type */ - const opus_int sum_pulses[ MAX_NB_SHELL_BLOCKS ] /* I Sum of absolute pulses per block */ + ec_enc *psRangeEnc, /* I/O Compressor data structure */ + const opus_int8 pulses[], /* I pulse signal */ + opus_int length, /* I length of input */ + const opus_int signalType, /* I Signal type */ + const opus_int quantOffsetType, /* I Quantization offset type */ + const opus_int sum_pulses[ MAX_NB_SHELL_BLOCKS ] /* I Sum of absolute pulses per block */ ); /* Decodes signs of excitation */ void silk_decode_signs( - ec_dec *psRangeDec, /* I/O Compressor data structure */ - opus_int16 pulses[], /* I/O pulse signal */ - opus_int length, /* I length of input */ - const opus_int signalType, /* I Signal type */ - const opus_int quantOffsetType, /* I Quantization offset type */ - const opus_int sum_pulses[ MAX_NB_SHELL_BLOCKS ] /* I Sum of absolute pulses per block */ + ec_dec *psRangeDec, /* I/O Compressor data structure */ + opus_int16 pulses[], /* I/O pulse signal */ + opus_int length, /* I length of input */ + const opus_int signalType, /* I Signal type */ + const opus_int quantOffsetType, /* I Quantization offset type */ + const opus_int sum_pulses[ MAX_NB_SHELL_BLOCKS ] /* I Sum of absolute pulses per block */ ); /* Check encoder control struct */ @@ -205,37 +209,37 @@ void silk_interpolate( /* LTP tap quantizer */ void silk_quant_LTP_gains( - opus_int16 B_Q14[ MAX_NB_SUBFR * LTP_ORDER ], /* I/O (un)quantized LTP gains */ + opus_int16 B_Q14[ MAX_NB_SUBFR * LTP_ORDER ], /* O Quantized LTP gains */ opus_int8 cbk_index[ MAX_NB_SUBFR ], /* O Codebook Index */ opus_int8 *periodicity_index, /* O Periodicity Index */ opus_int32 *sum_gain_dB_Q7, /* I/O Cumulative max prediction gain */ - const opus_int32 W_Q18[ MAX_NB_SUBFR*LTP_ORDER*LTP_ORDER ], /* I Error Weights in Q18 */ - opus_int mu_Q9, /* I Mu value (R/D tradeoff) */ - opus_int lowComplexity, /* I Flag for low complexity */ - const opus_int nb_subfr, /* I number of subframes */ + opus_int *pred_gain_dB_Q7, /* O LTP prediction gain */ + const opus_int32 XX_Q17[ MAX_NB_SUBFR*LTP_ORDER*LTP_ORDER ], /* I Correlation matrix in Q18 */ + const opus_int32 xX_Q17[ MAX_NB_SUBFR*LTP_ORDER ], /* I Correlation vector in Q18 */ + const opus_int subfr_len, /* I Number of samples per subframe */ + const 
opus_int nb_subfr, /* I Number of subframes */ int arch /* I Run-time architecture */ ); /* Entropy constrained matrix-weighted VQ, for a single input data vector */ void silk_VQ_WMat_EC_c( opus_int8 *ind, /* O index of best codebook vector */ - opus_int32 *rate_dist_Q14, /* O best weighted quant error + mu * rate */ + opus_int32 *res_nrg_Q15, /* O best residual energy */ + opus_int32 *rate_dist_Q8, /* O best total bitrate */ opus_int *gain_Q7, /* O sum of absolute LTP coefficients */ - const opus_int16 *in_Q14, /* I input vector to be quantized */ - const opus_int32 *W_Q18, /* I weighting matrix */ + const opus_int32 *XX_Q17, /* I correlation matrix */ + const opus_int32 *xX_Q17, /* I correlation vector */ const opus_int8 *cb_Q7, /* I codebook */ const opus_uint8 *cb_gain_Q7, /* I codebook effective gain */ const opus_uint8 *cl_Q5, /* I code length for each codebook vector */ - const opus_int mu_Q9, /* I tradeoff betw. weighted error and rate */ + const opus_int subfr_len, /* I number of samples per subframe */ const opus_int32 max_gain_Q7, /* I maximum sum of absolute LTP coefficients */ - opus_int L /* I number of vectors in codebook */ + const opus_int L /* I number of vectors in codebook */ ); #if !defined(OVERRIDE_silk_VQ_WMat_EC) -#define silk_VQ_WMat_EC(ind, rate_dist_Q14, gain_Q7, in_Q14, W_Q18, cb_Q7, cb_gain_Q7, cl_Q5, \ - mu_Q9, max_gain_Q7, L, arch) \ - ((void)(arch),silk_VQ_WMat_EC_c(ind, rate_dist_Q14, gain_Q7, in_Q14, W_Q18, cb_Q7, cb_gain_Q7, cl_Q5, \ - mu_Q9, max_gain_Q7, L)) +#define silk_VQ_WMat_EC(ind, res_nrg_Q15, rate_dist_Q8, gain_Q7, XX_Q17, xX_Q17, cb_Q7, cb_gain_Q7, cl_Q5, subfr_len, max_gain_Q7, L, arch) \ + ((void)(arch),silk_VQ_WMat_EC_c(ind, res_nrg_Q15, rate_dist_Q8, gain_Q7, XX_Q17, xX_Q17, cb_Q7, cb_gain_Q7, cl_Q5, subfr_len, max_gain_Q7, L)) #endif /************************************/ @@ -243,53 +247,53 @@ void silk_VQ_WMat_EC_c( /************************************/ void silk_NSQ_c( - const silk_encoder_state *psEncC, /* I/O Encoder State */ - silk_nsq_state *NSQ, /* I/O NSQ state */ - SideInfoIndices *psIndices, /* I/O Quantization Indices */ - const opus_int32 x_Q3[], /* I Prefiltered input signal */ - opus_int8 pulses[], /* O Quantized pulse signal */ - const opus_int16 PredCoef_Q12[ 2 * MAX_LPC_ORDER ], /* I Short term prediction coefs */ - const opus_int16 LTPCoef_Q14[ LTP_ORDER * MAX_NB_SUBFR ], /* I Long term prediction coefs */ - const opus_int16 AR2_Q13[ MAX_NB_SUBFR * MAX_SHAPE_LPC_ORDER ], /* I Noise shaping coefs */ - const opus_int HarmShapeGain_Q14[ MAX_NB_SUBFR ], /* I Long term shaping coefs */ - const opus_int Tilt_Q14[ MAX_NB_SUBFR ], /* I Spectral tilt */ - const opus_int32 LF_shp_Q14[ MAX_NB_SUBFR ], /* I Low frequency shaping coefs */ - const opus_int32 Gains_Q16[ MAX_NB_SUBFR ], /* I Quantization step sizes */ - const opus_int pitchL[ MAX_NB_SUBFR ], /* I Pitch lags */ - const opus_int Lambda_Q10, /* I Rate/distortion tradeoff */ - const opus_int LTP_scale_Q14 /* I LTP state scaling */ + const silk_encoder_state *psEncC, /* I Encoder State */ + silk_nsq_state *NSQ, /* I/O NSQ state */ + SideInfoIndices *psIndices, /* I/O Quantization Indices */ + const opus_int16 x16[], /* I Input */ + opus_int8 pulses[], /* O Quantized pulse signal */ + const opus_int16 PredCoef_Q12[ 2 * MAX_LPC_ORDER ], /* I Short term prediction coefs */ + const opus_int16 LTPCoef_Q14[ LTP_ORDER * MAX_NB_SUBFR ], /* I Long term prediction coefs */ + const opus_int16 AR_Q13[ MAX_NB_SUBFR * MAX_SHAPE_LPC_ORDER ], /* I Noise shaping coefs */ + const opus_int 
HarmShapeGain_Q14[ MAX_NB_SUBFR ], /* I Long term shaping coefs */ + const opus_int Tilt_Q14[ MAX_NB_SUBFR ], /* I Spectral tilt */ + const opus_int32 LF_shp_Q14[ MAX_NB_SUBFR ], /* I Low frequency shaping coefs */ + const opus_int32 Gains_Q16[ MAX_NB_SUBFR ], /* I Quantization step sizes */ + const opus_int pitchL[ MAX_NB_SUBFR ], /* I Pitch lags */ + const opus_int Lambda_Q10, /* I Rate/distortion tradeoff */ + const opus_int LTP_scale_Q14 /* I LTP state scaling */ ); #if !defined(OVERRIDE_silk_NSQ) -#define silk_NSQ(psEncC, NSQ, psIndices, x_Q3, pulses, PredCoef_Q12, LTPCoef_Q14, AR2_Q13, \ +#define silk_NSQ(psEncC, NSQ, psIndices, x16, pulses, PredCoef_Q12, LTPCoef_Q14, AR_Q13, \ HarmShapeGain_Q14, Tilt_Q14, LF_shp_Q14, Gains_Q16, pitchL, Lambda_Q10, LTP_scale_Q14, arch) \ - ((void)(arch),silk_NSQ_c(psEncC, NSQ, psIndices, x_Q3, pulses, PredCoef_Q12, LTPCoef_Q14, AR2_Q13, \ + ((void)(arch),silk_NSQ_c(psEncC, NSQ, psIndices, x16, pulses, PredCoef_Q12, LTPCoef_Q14, AR_Q13, \ HarmShapeGain_Q14, Tilt_Q14, LF_shp_Q14, Gains_Q16, pitchL, Lambda_Q10, LTP_scale_Q14)) #endif /* Noise shaping using delayed decision */ void silk_NSQ_del_dec_c( - const silk_encoder_state *psEncC, /* I/O Encoder State */ - silk_nsq_state *NSQ, /* I/O NSQ state */ - SideInfoIndices *psIndices, /* I/O Quantization Indices */ - const opus_int32 x_Q3[], /* I Prefiltered input signal */ - opus_int8 pulses[], /* O Quantized pulse signal */ - const opus_int16 PredCoef_Q12[ 2 * MAX_LPC_ORDER ], /* I Short term prediction coefs */ - const opus_int16 LTPCoef_Q14[ LTP_ORDER * MAX_NB_SUBFR ], /* I Long term prediction coefs */ - const opus_int16 AR2_Q13[ MAX_NB_SUBFR * MAX_SHAPE_LPC_ORDER ], /* I Noise shaping coefs */ - const opus_int HarmShapeGain_Q14[ MAX_NB_SUBFR ], /* I Long term shaping coefs */ - const opus_int Tilt_Q14[ MAX_NB_SUBFR ], /* I Spectral tilt */ - const opus_int32 LF_shp_Q14[ MAX_NB_SUBFR ], /* I Low frequency shaping coefs */ - const opus_int32 Gains_Q16[ MAX_NB_SUBFR ], /* I Quantization step sizes */ - const opus_int pitchL[ MAX_NB_SUBFR ], /* I Pitch lags */ - const opus_int Lambda_Q10, /* I Rate/distortion tradeoff */ - const opus_int LTP_scale_Q14 /* I LTP state scaling */ + const silk_encoder_state *psEncC, /* I Encoder State */ + silk_nsq_state *NSQ, /* I/O NSQ state */ + SideInfoIndices *psIndices, /* I/O Quantization Indices */ + const opus_int16 x16[], /* I Input */ + opus_int8 pulses[], /* O Quantized pulse signal */ + const opus_int16 PredCoef_Q12[ 2 * MAX_LPC_ORDER ], /* I Short term prediction coefs */ + const opus_int16 LTPCoef_Q14[ LTP_ORDER * MAX_NB_SUBFR ], /* I Long term prediction coefs */ + const opus_int16 AR_Q13[ MAX_NB_SUBFR * MAX_SHAPE_LPC_ORDER ], /* I Noise shaping coefs */ + const opus_int HarmShapeGain_Q14[ MAX_NB_SUBFR ], /* I Long term shaping coefs */ + const opus_int Tilt_Q14[ MAX_NB_SUBFR ], /* I Spectral tilt */ + const opus_int32 LF_shp_Q14[ MAX_NB_SUBFR ], /* I Low frequency shaping coefs */ + const opus_int32 Gains_Q16[ MAX_NB_SUBFR ], /* I Quantization step sizes */ + const opus_int pitchL[ MAX_NB_SUBFR ], /* I Pitch lags */ + const opus_int Lambda_Q10, /* I Rate/distortion tradeoff */ + const opus_int LTP_scale_Q14 /* I LTP state scaling */ ); #if !defined(OVERRIDE_silk_NSQ_del_dec) -#define silk_NSQ_del_dec(psEncC, NSQ, psIndices, x_Q3, pulses, PredCoef_Q12, LTPCoef_Q14, AR2_Q13, \ +#define silk_NSQ_del_dec(psEncC, NSQ, psIndices, x16, pulses, PredCoef_Q12, LTPCoef_Q14, AR_Q13, \ HarmShapeGain_Q14, Tilt_Q14, LF_shp_Q14, Gains_Q16, pitchL, Lambda_Q10, LTP_scale_Q14, 
arch) \ - ((void)(arch),silk_NSQ_del_dec_c(psEncC, NSQ, psIndices, x_Q3, pulses, PredCoef_Q12, LTPCoef_Q14, AR2_Q13, \ + ((void)(arch),silk_NSQ_del_dec_c(psEncC, NSQ, psIndices, x16, pulses, PredCoef_Q12, LTPCoef_Q14, AR_Q13, \ HarmShapeGain_Q14, Tilt_Q14, LF_shp_Q14, Gains_Q16, pitchL, Lambda_Q10, LTP_scale_Q14)) #endif @@ -346,6 +350,7 @@ void silk_NLSF_VQ( opus_int32 err_Q26[], /* O Quantization errors [K] */ const opus_int16 in_Q15[], /* I Input vectors to be quantized [LPC_order] */ const opus_uint8 pCB_Q8[], /* I Codebook vectors [K*LPC_order] */ + const opus_int16 pWght_Q9[], /* I Codebook weights [K*LPC_order] */ const opus_int K, /* I Number of codebook vectors */ const opus_int LPC_order /* I Number of LPCs */ ); diff --git a/libs/libopus/silk/mips/NSQ_del_dec_mipsr1.h b/libs/libopus/silk/mips/NSQ_del_dec_mipsr1.h index ad1cfe2a9..cd70713a8 100644 --- a/libs/libopus/silk/mips/NSQ_del_dec_mipsr1.h +++ b/libs/libopus/silk/mips/NSQ_del_dec_mipsr1.h @@ -61,7 +61,7 @@ static inline void silk_noise_shape_quantizer_del_dec( opus_int predictLPCOrder, /* I Prediction filter order */ opus_int warping_Q16, /* I */ opus_int nStatesDelayedDecision, /* I Number of states in decision tree */ - opus_int *smpl_buf_idx, /* I Index to newest samples in buffers */ + opus_int *smpl_buf_idx, /* I/O Index to newest samples in buffers */ opus_int decisionDelay, /* I */ int arch /* I */ ) @@ -323,8 +323,9 @@ static inline void silk_noise_shape_quantizer_del_dec( psSS[ 1 ].xq_Q14 = xq_Q14; } - *smpl_buf_idx = ( *smpl_buf_idx - 1 ) & DECISION_DELAY_MASK; /* Index to newest samples */ - last_smple_idx = ( *smpl_buf_idx + decisionDelay ) & DECISION_DELAY_MASK; /* Index to decisionDelay old samples */ + *smpl_buf_idx = ( *smpl_buf_idx - 1 ) % DECISION_DELAY; + if( *smpl_buf_idx < 0 ) *smpl_buf_idx += DECISION_DELAY; + last_smple_idx = ( *smpl_buf_idx + decisionDelay ) % DECISION_DELAY; /* Find winner */ RDmin_Q10 = psSampleState[ 0 ][ 0 ].RD_Q10; diff --git a/libs/libopus/silk/mips/sigproc_fix_mipsr1.h b/libs/libopus/silk/mips/sigproc_fix_mipsr1.h index 3b0a69536..51520c0a6 100644 --- a/libs/libopus/silk/mips/sigproc_fix_mipsr1.h +++ b/libs/libopus/silk/mips/sigproc_fix_mipsr1.h @@ -28,11 +28,6 @@ POSSIBILITY OF SUCH DAMAGE. 
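The NSQ_del_dec_mipsr1.h hunk above swaps the power-of-two mask (& DECISION_DELAY_MASK) for a true modulo with an explicit sign fix-up, since C's % operator can return a negative result when its left operand is negative; presumably this is what allows DECISION_DELAY to be something other than a power of two. A small sketch of the wrap logic in isolation, with a hypothetical buffer length:

/* Sketch only: decrement-and-wrap for a circular buffer whose length need
 * not be a power of two. LEN is a hypothetical value, not the real
 * DECISION_DELAY. */
#define LEN 40

static int wrap_decrement(int idx)
{
    idx = (idx - 1) % LEN;   /* e.g. (0 - 1) % LEN == -1 in C */
    if (idx < 0) {
        idx += LEN;          /* bring the index back into [0, LEN-1] */
    }
    return idx;
}

static int delayed_index(int newest_idx, int delay)
{
    return (newest_idx + delay) % LEN;   /* sample `delay` steps older */
}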
#ifndef SILK_SIGPROC_FIX_MIPSR1_H #define SILK_SIGPROC_FIX_MIPSR1_H -#ifdef __cplusplus -extern "C" -{ -#endif - #undef silk_SAT16 static inline short int silk_SAT16(int a) { diff --git a/libs/libopus/silk/process_NLSFs.c b/libs/libopus/silk/process_NLSFs.c index 0ab71f016..d13080954 100644 --- a/libs/libopus/silk/process_NLSFs.c +++ b/libs/libopus/silk/process_NLSFs.c @@ -48,7 +48,7 @@ void silk_process_NLSFs( silk_assert( psEncC->speech_activity_Q8 >= 0 ); silk_assert( psEncC->speech_activity_Q8 <= SILK_FIX_CONST( 1.0, 8 ) ); - silk_assert( psEncC->useInterpolatedNLSFs == 1 || psEncC->indices.NLSFInterpCoef_Q2 == ( 1 << 2 ) ); + celt_assert( psEncC->useInterpolatedNLSFs == 1 || psEncC->indices.NLSFInterpCoef_Q2 == ( 1 << 2 ) ); /***********************/ /* Calculate mu values */ @@ -60,7 +60,7 @@ void silk_process_NLSFs( NLSF_mu_Q20 = silk_ADD_RSHIFT( NLSF_mu_Q20, NLSF_mu_Q20, 1 ); } - silk_assert( NLSF_mu_Q20 > 0 ); + celt_assert( NLSF_mu_Q20 > 0 ); silk_assert( NLSF_mu_Q20 <= SILK_FIX_CONST( 0.005, 20 ) ); /* Calculate NLSF weights */ @@ -89,7 +89,7 @@ void silk_process_NLSFs( NLSF_mu_Q20, psEncC->NLSF_MSVQ_Survivors, psEncC->indices.signalType ); /* Convert quantized NLSFs back to LPC coefficients */ - silk_NLSF2A( PredCoef_Q12[ 1 ], pNLSF_Q15, psEncC->predictLPCOrder ); + silk_NLSF2A( PredCoef_Q12[ 1 ], pNLSF_Q15, psEncC->predictLPCOrder, psEncC->arch ); if( doInterpolate ) { /* Calculate the interpolated, quantized LSF vector for the first half */ @@ -97,11 +97,11 @@ void silk_process_NLSFs( psEncC->indices.NLSFInterpCoef_Q2, psEncC->predictLPCOrder ); /* Convert back to LPC coefficients */ - silk_NLSF2A( PredCoef_Q12[ 0 ], pNLSF0_temp_Q15, psEncC->predictLPCOrder ); + silk_NLSF2A( PredCoef_Q12[ 0 ], pNLSF0_temp_Q15, psEncC->predictLPCOrder, psEncC->arch ); } else { /* Copy LPC coefficients for first half from second half */ - silk_assert( psEncC->predictLPCOrder <= MAX_LPC_ORDER ); + celt_assert( psEncC->predictLPCOrder <= MAX_LPC_ORDER ); silk_memcpy( PredCoef_Q12[ 0 ], PredCoef_Q12[ 1 ], psEncC->predictLPCOrder * sizeof( opus_int16 ) ); } } diff --git a/libs/libopus/silk/quant_LTP_gains.c b/libs/libopus/silk/quant_LTP_gains.c index 513a8c446..d6b8eff8d 100644 --- a/libs/libopus/silk/quant_LTP_gains.c +++ b/libs/libopus/silk/quant_LTP_gains.c @@ -33,14 +33,15 @@ POSSIBILITY OF SUCH DAMAGE. 
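Both the float wrapper earlier in this patch (silk_quant_LTP_gains_FLP) and the fixed-point silk_quant_LTP_gains() below now exchange LTP correlation data in Q17, i.e. the value scaled by 2^17, instead of pre-quantized taps plus a Q18 weight matrix; the returned prediction gain comes back in Q7 and is rescaled by 1/128. A short sketch of that Q-format convention, assuming a plain round-to-nearest conversion in place of silk_float2int():

/* Sketch only: Qn fixed point stores round(x * 2^n); 131072.0f is 2^17,
 * matching the XX_Q17 / xX_Q17 conversions in the FLP wrapper. */
#include <math.h>
#include <stdint.h>

static int32_t float_to_q17(float x)
{
    return (int32_t)lrintf(x * 131072.0f);   /* float -> Q17 */
}

static float q7_to_float(int32_t x_Q7)
{
    return (float)x_Q7 * (1.0f / 128.0f);    /* Q7 -> float, 128 == 2^7 */
}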
#include "tuning_parameters.h" void silk_quant_LTP_gains( - opus_int16 B_Q14[ MAX_NB_SUBFR * LTP_ORDER ], /* I/O (un)quantized LTP gains */ + opus_int16 B_Q14[ MAX_NB_SUBFR * LTP_ORDER ], /* O Quantized LTP gains */ opus_int8 cbk_index[ MAX_NB_SUBFR ], /* O Codebook Index */ opus_int8 *periodicity_index, /* O Periodicity Index */ opus_int32 *sum_log_gain_Q7, /* I/O Cumulative max prediction gain */ - const opus_int32 W_Q18[ MAX_NB_SUBFR*LTP_ORDER*LTP_ORDER ], /* I Error Weights in Q18 */ - opus_int mu_Q9, /* I Mu value (R/D tradeoff) */ - opus_int lowComplexity, /* I Flag for low complexity */ - const opus_int nb_subfr, /* I number of subframes */ + opus_int *pred_gain_dB_Q7, /* O LTP prediction gain */ + const opus_int32 XX_Q17[ MAX_NB_SUBFR*LTP_ORDER*LTP_ORDER ], /* I Correlation matrix in Q18 */ + const opus_int32 xX_Q17[ MAX_NB_SUBFR*LTP_ORDER ], /* I Correlation vector in Q18 */ + const opus_int subfr_len, /* I Number of samples per subframe */ + const opus_int nb_subfr, /* I Number of subframes */ int arch /* I Run-time architecture */ ) { @@ -49,16 +50,16 @@ void silk_quant_LTP_gains( const opus_uint8 *cl_ptr_Q5; const opus_int8 *cbk_ptr_Q7; const opus_uint8 *cbk_gain_ptr_Q7; - const opus_int16 *b_Q14_ptr; - const opus_int32 *W_Q18_ptr; - opus_int32 rate_dist_Q14_subfr, rate_dist_Q14, min_rate_dist_Q14; - opus_int32 sum_log_gain_tmp_Q7, best_sum_log_gain_Q7, max_gain_Q7, gain_Q7; + const opus_int32 *XX_Q17_ptr, *xX_Q17_ptr; + opus_int32 res_nrg_Q15_subfr, res_nrg_Q15, rate_dist_Q7_subfr, rate_dist_Q7, min_rate_dist_Q7; + opus_int32 sum_log_gain_tmp_Q7, best_sum_log_gain_Q7, max_gain_Q7; + opus_int gain_Q7; /***************************************************/ /* iterate over different codebooks with different */ /* rates/distortions, and choose best */ /***************************************************/ - min_rate_dist_Q14 = silk_int32_MAX; + min_rate_dist_Q7 = silk_int32_MAX; best_sum_log_gain_Q7 = 0; for( k = 0; k < 3; k++ ) { /* Safety margin for pitch gain control, to take into account factors @@ -70,53 +71,47 @@ void silk_quant_LTP_gains( cbk_gain_ptr_Q7 = silk_LTP_vq_gain_ptrs_Q7[ k ]; cbk_size = silk_LTP_vq_sizes[ k ]; - /* Set up pointer to first subframe */ - W_Q18_ptr = W_Q18; - b_Q14_ptr = B_Q14; + /* Set up pointers to first subframe */ + XX_Q17_ptr = XX_Q17; + xX_Q17_ptr = xX_Q17; - rate_dist_Q14 = 0; + res_nrg_Q15 = 0; + rate_dist_Q7 = 0; sum_log_gain_tmp_Q7 = *sum_log_gain_Q7; for( j = 0; j < nb_subfr; j++ ) { max_gain_Q7 = silk_log2lin( ( SILK_FIX_CONST( MAX_SUM_LOG_GAIN_DB / 6.0, 7 ) - sum_log_gain_tmp_Q7 ) + SILK_FIX_CONST( 7, 7 ) ) - gain_safety; - silk_VQ_WMat_EC( &temp_idx[ j ], /* O index of best codebook vector */ - &rate_dist_Q14_subfr, /* O best weighted quantization error + mu * rate */ + &res_nrg_Q15_subfr, /* O residual energy */ + &rate_dist_Q7_subfr, /* O best weighted quantization error + mu * rate */ &gain_Q7, /* O sum of absolute LTP coefficients */ - b_Q14_ptr, /* I input vector to be quantized */ - W_Q18_ptr, /* I weighting matrix */ + XX_Q17_ptr, /* I correlation matrix */ + xX_Q17_ptr, /* I correlation vector */ cbk_ptr_Q7, /* I codebook */ cbk_gain_ptr_Q7, /* I codebook effective gains */ cl_ptr_Q5, /* I code length for each codebook vector */ - mu_Q9, /* I tradeoff between weighted error and rate */ + subfr_len, /* I number of samples per subframe */ max_gain_Q7, /* I maximum sum of absolute LTP coefficients */ cbk_size, /* I number of vectors in codebook */ arch /* I Run-time architecture */ ); - rate_dist_Q14 = silk_ADD_POS_SAT32( 
rate_dist_Q14, rate_dist_Q14_subfr ); + res_nrg_Q15 = silk_ADD_POS_SAT32( res_nrg_Q15, res_nrg_Q15_subfr ); + rate_dist_Q7 = silk_ADD_POS_SAT32( rate_dist_Q7, rate_dist_Q7_subfr ); sum_log_gain_tmp_Q7 = silk_max(0, sum_log_gain_tmp_Q7 + silk_lin2log( gain_safety + gain_Q7 ) - SILK_FIX_CONST( 7, 7 )); - b_Q14_ptr += LTP_ORDER; - W_Q18_ptr += LTP_ORDER * LTP_ORDER; + XX_Q17_ptr += LTP_ORDER * LTP_ORDER; + xX_Q17_ptr += LTP_ORDER; } - /* Avoid never finding a codebook */ - rate_dist_Q14 = silk_min( silk_int32_MAX - 1, rate_dist_Q14 ); - - if( rate_dist_Q14 < min_rate_dist_Q14 ) { - min_rate_dist_Q14 = rate_dist_Q14; + if( rate_dist_Q7 <= min_rate_dist_Q7 ) { + min_rate_dist_Q7 = rate_dist_Q7; *periodicity_index = (opus_int8)k; silk_memcpy( cbk_index, temp_idx, nb_subfr * sizeof( opus_int8 ) ); best_sum_log_gain_Q7 = sum_log_gain_tmp_Q7; } - - /* Break early in low-complexity mode if rate distortion is below threshold */ - if( lowComplexity && ( rate_dist_Q14 < silk_LTP_gain_middle_avg_RD_Q14 ) ) { - break; - } } cbk_ptr_Q7 = silk_LTP_vq_ptrs_Q7[ *periodicity_index ]; @@ -125,5 +120,13 @@ void silk_quant_LTP_gains( B_Q14[ j * LTP_ORDER + k ] = silk_LSHIFT( cbk_ptr_Q7[ cbk_index[ j ] * LTP_ORDER + k ], 7 ); } } + + if( nb_subfr == 2 ) { + res_nrg_Q15 = silk_RSHIFT32( res_nrg_Q15, 1 ); + } else { + res_nrg_Q15 = silk_RSHIFT32( res_nrg_Q15, 2 ); + } + *sum_log_gain_Q7 = best_sum_log_gain_Q7; + *pred_gain_dB_Q7 = (opus_int)silk_SMULBB( -3, silk_lin2log( res_nrg_Q15 ) - ( 15 << 7 ) ); } diff --git a/libs/libopus/silk/resampler.c b/libs/libopus/silk/resampler.c index 374fbb372..1f11e5089 100644 --- a/libs/libopus/silk/resampler.c +++ b/libs/libopus/silk/resampler.c @@ -91,14 +91,14 @@ opus_int silk_resampler_init( if( forEnc ) { if( ( Fs_Hz_in != 8000 && Fs_Hz_in != 12000 && Fs_Hz_in != 16000 && Fs_Hz_in != 24000 && Fs_Hz_in != 48000 ) || ( Fs_Hz_out != 8000 && Fs_Hz_out != 12000 && Fs_Hz_out != 16000 ) ) { - silk_assert( 0 ); + celt_assert( 0 ); return -1; } S->inputDelay = delay_matrix_enc[ rateID( Fs_Hz_in ) ][ rateID( Fs_Hz_out ) ]; } else { if( ( Fs_Hz_in != 8000 && Fs_Hz_in != 12000 && Fs_Hz_in != 16000 ) || ( Fs_Hz_out != 8000 && Fs_Hz_out != 12000 && Fs_Hz_out != 16000 && Fs_Hz_out != 24000 && Fs_Hz_out != 48000 ) ) { - silk_assert( 0 ); + celt_assert( 0 ); return -1; } S->inputDelay = delay_matrix_dec[ rateID( Fs_Hz_in ) ][ rateID( Fs_Hz_out ) ]; @@ -151,7 +151,7 @@ opus_int silk_resampler_init( S->Coefs = silk_Resampler_1_6_COEFS; } else { /* None available */ - silk_assert( 0 ); + celt_assert( 0 ); return -1; } } else { @@ -181,9 +181,9 @@ opus_int silk_resampler( opus_int nSamples; /* Need at least 1 ms of input data */ - silk_assert( inLen >= S->Fs_in_kHz ); + celt_assert( inLen >= S->Fs_in_kHz ); /* Delay can't exceed the 1 ms of buffering */ - silk_assert( S->inputDelay <= S->Fs_in_kHz ); + celt_assert( S->inputDelay <= S->Fs_in_kHz ); nSamples = S->Fs_in_kHz - S->inputDelay; diff --git a/libs/libopus/silk/resampler_down2.c b/libs/libopus/silk/resampler_down2.c index cec363464..971d7bfd4 100644 --- a/libs/libopus/silk/resampler_down2.c +++ b/libs/libopus/silk/resampler_down2.c @@ -43,8 +43,8 @@ void silk_resampler_down2( opus_int32 k, len2 = silk_RSHIFT32( inLen, 1 ); opus_int32 in32, out32, Y, X; - silk_assert( silk_resampler_down2_0 > 0 ); - silk_assert( silk_resampler_down2_1 < 0 ); + celt_assert( silk_resampler_down2_0 > 0 ); + celt_assert( silk_resampler_down2_1 < 0 ); /* Internal variables and state are in Q10 format */ for( k = 0; k < len2; k++ ) { diff --git 
a/libs/libopus/silk/resampler_private_down_FIR.c b/libs/libopus/silk/resampler_private_down_FIR.c index 783e42b35..3e8735a35 100644 --- a/libs/libopus/silk/resampler_private_down_FIR.c +++ b/libs/libopus/silk/resampler_private_down_FIR.c @@ -136,7 +136,7 @@ static OPUS_INLINE opus_int16 *silk_resampler_private_down_FIR_INTERPOL( } break; default: - silk_assert( 0 ); + celt_assert( 0 ); } return out; } diff --git a/libs/libopus/silk/sort.c b/libs/libopus/silk/sort.c index 7187c9efb..4fba16f83 100644 --- a/libs/libopus/silk/sort.c +++ b/libs/libopus/silk/sort.c @@ -48,9 +48,9 @@ void silk_insertion_sort_increasing( opus_int i, j; /* Safety checks */ - silk_assert( K > 0 ); - silk_assert( L > 0 ); - silk_assert( L >= K ); + celt_assert( K > 0 ); + celt_assert( L > 0 ); + celt_assert( L >= K ); /* Write start indices in index vector */ for( i = 0; i < K; i++ ) { @@ -96,9 +96,9 @@ void silk_insertion_sort_decreasing_int16( opus_int value; /* Safety checks */ - silk_assert( K > 0 ); - silk_assert( L > 0 ); - silk_assert( L >= K ); + celt_assert( K > 0 ); + celt_assert( L > 0 ); + celt_assert( L >= K ); /* Write start indices in index vector */ for( i = 0; i < K; i++ ) { @@ -141,7 +141,7 @@ void silk_insertion_sort_increasing_all_values_int16( opus_int i, j; /* Safety checks */ - silk_assert( L > 0 ); + celt_assert( L > 0 ); /* Sort vector elements by value, increasing order */ for( i = 1; i < L; i++ ) { diff --git a/libs/libopus/silk/stereo_LR_to_MS.c b/libs/libopus/silk/stereo_LR_to_MS.c index dda0298de..c8226663c 100644 --- a/libs/libopus/silk/stereo_LR_to_MS.c +++ b/libs/libopus/silk/stereo_LR_to_MS.c @@ -109,7 +109,7 @@ void silk_stereo_LR_to_MS( if( total_rate_bps < 1 ) { total_rate_bps = 1; } - min_mid_rate_bps = silk_SMLABB( 2000, fs_kHz, 900 ); + min_mid_rate_bps = silk_SMLABB( 2000, fs_kHz, 600 ); silk_assert( min_mid_rate_bps < 32767 ); /* Default bitrate distribution: 8 parts for Mid and (5+3*frac) parts for Side. 
so: mid_rate = ( 8 / ( 13 + 3 * frac ) ) * total_ rate */ frac_3_Q16 = silk_MUL( 3, frac_Q16 ); diff --git a/libs/libopus/silk/stereo_encode_pred.c b/libs/libopus/silk/stereo_encode_pred.c index e6dd19506..03becb673 100644 --- a/libs/libopus/silk/stereo_encode_pred.c +++ b/libs/libopus/silk/stereo_encode_pred.c @@ -41,11 +41,11 @@ void silk_stereo_encode_pred( /* Entropy coding */ n = 5 * ix[ 0 ][ 2 ] + ix[ 1 ][ 2 ]; - silk_assert( n < 25 ); + celt_assert( n < 25 ); ec_enc_icdf( psRangeEnc, n, silk_stereo_pred_joint_iCDF, 8 ); for( n = 0; n < 2; n++ ) { - silk_assert( ix[ n ][ 0 ] < 3 ); - silk_assert( ix[ n ][ 1 ] < STEREO_QUANT_SUB_STEPS ); + celt_assert( ix[ n ][ 0 ] < 3 ); + celt_assert( ix[ n ][ 1 ] < STEREO_QUANT_SUB_STEPS ); ec_enc_icdf( psRangeEnc, ix[ n ][ 0 ], silk_uniform3_iCDF, 8 ); ec_enc_icdf( psRangeEnc, ix[ n ][ 1 ], silk_uniform5_iCDF, 8 ); } diff --git a/libs/libopus/silk/structs.h b/libs/libopus/silk/structs.h index 827829dc6..3380c757b 100644 --- a/libs/libopus/silk/structs.h +++ b/libs/libopus/silk/structs.h @@ -48,6 +48,7 @@ typedef struct { opus_int32 sLPC_Q14[ MAX_SUB_FRAME_LENGTH + NSQ_LPC_BUF_LENGTH ]; opus_int32 sAR2_Q14[ MAX_SHAPE_LPC_ORDER ]; opus_int32 sLF_AR_shp_Q14; + opus_int32 sDiff_shp_Q14; opus_int lagPrev; opus_int sLTP_buf_idx; opus_int sLTP_shp_buf_idx; @@ -77,6 +78,7 @@ typedef struct { opus_int32 In_LP_State[ 2 ]; /* Low pass filter state */ opus_int32 transition_frame_no; /* Counter which is mapped to a cut-off frequency */ opus_int mode; /* Operating mode, <0: switch down, >0: switch up; 0: do nothing */ + opus_int32 saved_fs_kHz; /* If non-zero, holds the last sampling rate before a bandwidth switching reset. */ } silk_LP_state; /* Structure containing NLSF codebook */ @@ -86,6 +88,7 @@ typedef struct { const opus_int16 quantStepSize_Q16; const opus_int16 invQuantStepSize_Q6; const opus_uint8 *CB1_NLSF_Q8; + const opus_int16 *CB1_Wght_Q9; const opus_uint8 *CB1_iCDF; const opus_uint8 *pred_Q8; const opus_uint8 *ec_sel; @@ -169,8 +172,6 @@ typedef struct { opus_int pitchEstimationComplexity; /* Complexity level for pitch estimator */ opus_int pitchEstimationLPCOrder; /* Whitening filter order for pitch estimator */ opus_int32 pitchEstimationThreshold_Q16; /* Threshold for pitch estimator */ - opus_int LTPQuantLowComplexity; /* Flag for low complexity LTP quantization */ - opus_int mu_LTP_Q9; /* Rate-distortion tradeoff in LTP quantization */ opus_int32 sum_log_gain_Q7; /* Cumulative max prediction gain */ opus_int NLSF_MSVQ_Survivors; /* Number of survivors in NLSF MSVQ */ opus_int first_frame_after_reset; /* Flag for deactivating NLSF interpolation, pitch prediction */ @@ -301,6 +302,7 @@ typedef struct { /* Stuff used for PLC */ opus_int lossCnt; opus_int prevSignalType; + int arch; silk_PLC_struct sPLC; diff --git a/libs/libopus/silk/sum_sqr_shift.c b/libs/libopus/silk/sum_sqr_shift.c index 129df191d..4fd0c3d7d 100644 --- a/libs/libopus/silk/sum_sqr_shift.c +++ b/libs/libopus/silk/sum_sqr_shift.c @@ -41,43 +41,40 @@ void silk_sum_sqr_shift( ) { opus_int i, shft; - opus_int32 nrg_tmp, nrg; + opus_uint32 nrg_tmp; + opus_int32 nrg; - nrg = 0; - shft = 0; - len--; - for( i = 0; i < len; i += 2 ) { - nrg = silk_SMLABB_ovflw( nrg, x[ i ], x[ i ] ); - nrg = silk_SMLABB_ovflw( nrg, x[ i + 1 ], x[ i + 1 ] ); - if( nrg < 0 ) { - /* Scale down */ - nrg = (opus_int32)silk_RSHIFT_uint( (opus_uint32)nrg, 2 ); - shft = 2; - i+=2; - break; - } + /* Do a first run with the maximum shift we could have. 
*/ + shft = 31-silk_CLZ32(len); + /* Let's be conservative with rounding and start with nrg=len. */ + nrg = len; + for( i = 0; i < len - 1; i += 2 ) { + nrg_tmp = silk_SMULBB( x[ i ], x[ i ] ); + nrg_tmp = silk_SMLABB_ovflw( nrg_tmp, x[ i + 1 ], x[ i + 1 ] ); + nrg = (opus_int32)silk_ADD_RSHIFT_uint( nrg, nrg_tmp, shft ); } - for( ; i < len; i += 2 ) { + if( i < len ) { + /* One sample left to process */ + nrg_tmp = silk_SMULBB( x[ i ], x[ i ] ); + nrg = (opus_int32)silk_ADD_RSHIFT_uint( nrg, nrg_tmp, shft ); + } + silk_assert( nrg >= 0 ); + /* Make sure the result will fit in a 32-bit signed integer with two bits + of headroom. */ + shft = silk_max_32(0, shft+3 - silk_CLZ32(nrg)); + nrg = 0; + for( i = 0 ; i < len - 1; i += 2 ) { nrg_tmp = silk_SMULBB( x[ i ], x[ i ] ); nrg_tmp = silk_SMLABB_ovflw( nrg_tmp, x[ i + 1 ], x[ i + 1 ] ); - nrg = (opus_int32)silk_ADD_RSHIFT_uint( nrg, (opus_uint32)nrg_tmp, shft ); - if( nrg < 0 ) { - /* Scale down */ - nrg = (opus_int32)silk_RSHIFT_uint( (opus_uint32)nrg, 2 ); - shft += 2; - } + nrg = (opus_int32)silk_ADD_RSHIFT_uint( nrg, nrg_tmp, shft ); } - if( i == len ) { + if( i < len ) { /* One sample left to process */ nrg_tmp = silk_SMULBB( x[ i ], x[ i ] ); nrg = (opus_int32)silk_ADD_RSHIFT_uint( nrg, nrg_tmp, shft ); } - /* Make sure to have at least one extra leading zero (two leading zeros in total) */ - if( nrg & 0xC0000000 ) { - nrg = silk_RSHIFT_uint( (opus_uint32)nrg, 2 ); - shft += 2; - } + silk_assert( nrg >= 0 ); /* Output arguments */ *shift = shft; diff --git a/libs/libopus/silk/tables.h b/libs/libopus/silk/tables.h index 7fea6fda3..95230c451 100644 --- a/libs/libopus/silk/tables.h +++ b/libs/libopus/silk/tables.h @@ -76,10 +76,8 @@ extern const opus_uint8 silk_NLSF_EXT_iCDF[ 7 ]; extern const opus_uint8 silk_LTP_per_index_iCDF[ 3 ]; /* 3 */ extern const opus_uint8 * const silk_LTP_gain_iCDF_ptrs[ NB_LTP_CBKS ]; /* 3 */ extern const opus_uint8 * const silk_LTP_gain_BITS_Q5_ptrs[ NB_LTP_CBKS ]; /* 3 */ -extern const opus_int16 silk_LTP_gain_middle_avg_RD_Q14; extern const opus_int8 * const silk_LTP_vq_ptrs_Q7[ NB_LTP_CBKS ]; /* 168 */ extern const opus_uint8 * const silk_LTP_vq_gain_ptrs_Q7[NB_LTP_CBKS]; - extern const opus_int8 silk_LTP_vq_sizes[ NB_LTP_CBKS ]; /* 3 */ extern const opus_uint8 silk_LTPscale_iCDF[ 3 ]; /* 4 */ @@ -99,12 +97,6 @@ extern const opus_uint8 silk_NLSF_interpolation_factor_iCDF[ 5 ]; extern const silk_NLSF_CB_struct silk_NLSF_CB_WB; /* 1040 */ extern const silk_NLSF_CB_struct silk_NLSF_CB_NB_MB; /* 728 */ -/* Piece-wise linear mapping from bitrate in kbps to coding quality in dB SNR */ -extern const opus_int32 silk_TargetRate_table_NB[ TARGET_RATE_TAB_SZ ]; /* 32 */ -extern const opus_int32 silk_TargetRate_table_MB[ TARGET_RATE_TAB_SZ ]; /* 32 */ -extern const opus_int32 silk_TargetRate_table_WB[ TARGET_RATE_TAB_SZ ]; /* 32 */ -extern const opus_int16 silk_SNR_table_Q1[ TARGET_RATE_TAB_SZ ]; /* 32 */ - /* Quantization offsets */ extern const opus_int16 silk_Quantization_Offsets_Q10[ 2 ][ 2 ]; /* 8 */ diff --git a/libs/libopus/silk/tables_LTP.c b/libs/libopus/silk/tables_LTP.c index 0e6a0254d..5e12c8643 100644 --- a/libs/libopus/silk/tables_LTP.c +++ b/libs/libopus/silk/tables_LTP.c @@ -51,8 +51,6 @@ static const opus_uint8 silk_LTP_gain_iCDF_2[32] = { 24, 20, 16, 12, 9, 5, 2, 0 }; -const opus_int16 silk_LTP_gain_middle_avg_RD_Q14 = 12304; - static const opus_uint8 silk_LTP_gain_BITS_Q5_0[8] = { 15, 131, 138, 138, 155, 155, 173, 173 }; diff --git a/libs/libopus/silk/tables_NLSF_CB_NB_MB.c 
b/libs/libopus/silk/tables_NLSF_CB_NB_MB.c index 8c59d207a..195d5b95b 100644 --- a/libs/libopus/silk/tables_NLSF_CB_NB_MB.c +++ b/libs/libopus/silk/tables_NLSF_CB_NB_MB.c @@ -74,6 +74,41 @@ static const opus_uint8 silk_NLSF_CB1_NB_MB_Q8[ 320 ] = { 64, 84, 104, 118, 156, 177, 201, 230 }; +static const opus_int16 silk_NLSF_CB1_Wght_Q9[ 320 ] = { + 2897, 2314, 2314, 2314, 2287, 2287, 2314, 2300, 2327, 2287, + 2888, 2580, 2394, 2367, 2314, 2274, 2274, 2274, 2274, 2194, + 2487, 2340, 2340, 2314, 2314, 2314, 2340, 2340, 2367, 2354, + 3216, 2766, 2340, 2340, 2314, 2274, 2221, 2207, 2261, 2194, + 2460, 2474, 2367, 2394, 2394, 2394, 2394, 2367, 2407, 2314, + 3479, 3056, 2127, 2207, 2274, 2274, 2274, 2287, 2314, 2261, + 3282, 3141, 2580, 2394, 2247, 2221, 2207, 2194, 2194, 2114, + 4096, 3845, 2221, 2620, 2620, 2407, 2314, 2394, 2367, 2074, + 3178, 3244, 2367, 2221, 2553, 2434, 2340, 2314, 2167, 2221, + 3338, 3488, 2726, 2194, 2261, 2460, 2354, 2367, 2207, 2101, + 2354, 2420, 2327, 2367, 2394, 2420, 2420, 2420, 2460, 2367, + 3779, 3629, 2434, 2527, 2367, 2274, 2274, 2300, 2207, 2048, + 3254, 3225, 2713, 2846, 2447, 2327, 2300, 2300, 2274, 2127, + 3263, 3300, 2753, 2806, 2447, 2261, 2261, 2247, 2127, 2101, + 2873, 2981, 2633, 2367, 2407, 2354, 2194, 2247, 2247, 2114, + 3225, 3197, 2633, 2580, 2274, 2181, 2247, 2221, 2221, 2141, + 3178, 3310, 2740, 2407, 2274, 2274, 2274, 2287, 2194, 2114, + 3141, 3272, 2460, 2061, 2287, 2500, 2367, 2487, 2434, 2181, + 3507, 3282, 2314, 2700, 2647, 2474, 2367, 2394, 2340, 2127, + 3423, 3535, 3038, 3056, 2300, 1950, 2221, 2274, 2274, 2274, + 3404, 3366, 2087, 2687, 2873, 2354, 2420, 2274, 2474, 2540, + 3760, 3488, 1950, 2660, 2897, 2527, 2394, 2367, 2460, 2261, + 3028, 3272, 2740, 2888, 2740, 2154, 2127, 2287, 2234, 2247, + 3695, 3657, 2025, 1969, 2660, 2700, 2580, 2500, 2327, 2367, + 3207, 3413, 2354, 2074, 2888, 2888, 2340, 2487, 2247, 2167, + 3338, 3366, 2846, 2780, 2327, 2154, 2274, 2287, 2114, 2061, + 2327, 2300, 2181, 2167, 2181, 2367, 2633, 2700, 2700, 2553, + 2407, 2434, 2221, 2261, 2221, 2221, 2340, 2420, 2607, 2700, + 3038, 3244, 2806, 2888, 2474, 2074, 2300, 2314, 2354, 2380, + 2221, 2154, 2127, 2287, 2500, 2793, 2793, 2620, 2580, 2367, + 3676, 3713, 2234, 1838, 2181, 2753, 2726, 2673, 2513, 2207, + 2793, 3160, 2726, 2553, 2846, 2513, 2181, 2394, 2221, 2181 +}; + static const opus_uint8 silk_NLSF_CB1_iCDF_NB_MB[ 64 ] = { 212, 178, 148, 129, 108, 96, 85, 82, 79, 77, 61, 59, 57, 56, 51, 49, @@ -150,6 +185,7 @@ const silk_NLSF_CB_struct silk_NLSF_CB_NB_MB = SILK_FIX_CONST( 0.18, 16 ), SILK_FIX_CONST( 1.0 / 0.18, 6 ), silk_NLSF_CB1_NB_MB_Q8, + silk_NLSF_CB1_Wght_Q9, silk_NLSF_CB1_iCDF_NB_MB, silk_NLSF_PRED_NB_MB_Q8, silk_NLSF_CB2_SELECT_NB_MB, diff --git a/libs/libopus/silk/tables_NLSF_CB_WB.c b/libs/libopus/silk/tables_NLSF_CB_WB.c index 50af87eb2..5cc9f57bf 100644 --- a/libs/libopus/silk/tables_NLSF_CB_WB.c +++ b/libs/libopus/silk/tables_NLSF_CB_WB.c @@ -98,6 +98,41 @@ static const opus_uint8 silk_NLSF_CB1_WB_Q8[ 512 ] = { 110, 119, 129, 141, 175, 198, 218, 237 }; +static const opus_int16 silk_NLSF_CB1_WB_Wght_Q9[ 512 ] = { + 3657, 2925, 2925, 2925, 2925, 2925, 2925, 2925, 2925, 2925, 2925, 2925, 2963, 2963, 2925, 2846, + 3216, 3085, 2972, 3056, 3056, 3010, 3010, 3010, 2963, 2963, 3010, 2972, 2888, 2846, 2846, 2726, + 3920, 4014, 2981, 3207, 3207, 2934, 3056, 2846, 3122, 3244, 2925, 2846, 2620, 2553, 2780, 2925, + 3516, 3197, 3010, 3103, 3019, 2888, 2925, 2925, 2925, 2925, 2888, 2888, 2888, 2888, 2888, 2753, + 5054, 5054, 2934, 3573, 3385, 3056, 3085, 2793, 
3160, 3160, 2972, 2846, 2513, 2540, 2753, 2888, + 4428, 4149, 2700, 2753, 2972, 3010, 2925, 2846, 2981, 3019, 2925, 2925, 2925, 2925, 2888, 2726, + 3620, 3019, 2972, 3056, 3056, 2873, 2806, 3056, 3216, 3047, 2981, 3291, 3291, 2981, 3310, 2991, + 5227, 5014, 2540, 3338, 3526, 3385, 3197, 3094, 3376, 2981, 2700, 2647, 2687, 2793, 2846, 2673, + 5081, 5174, 4615, 4428, 2460, 2897, 3047, 3207, 3169, 2687, 2740, 2888, 2846, 2793, 2846, 2700, + 3122, 2888, 2963, 2925, 2925, 2925, 2925, 2963, 2963, 2963, 2963, 2925, 2925, 2963, 2963, 2963, + 4202, 3207, 2981, 3103, 3010, 2888, 2888, 2925, 2972, 2873, 2916, 3019, 2972, 3010, 3197, 2873, + 3760, 3760, 3244, 3103, 2981, 2888, 2925, 2888, 2972, 2934, 2793, 2793, 2846, 2888, 2888, 2660, + 3854, 4014, 3207, 3122, 3244, 2934, 3047, 2963, 2963, 3085, 2846, 2793, 2793, 2793, 2793, 2580, + 3845, 4080, 3357, 3516, 3094, 2740, 3010, 2934, 3122, 3085, 2846, 2846, 2647, 2647, 2846, 2806, + 5147, 4894, 3225, 3845, 3441, 3169, 2897, 3413, 3451, 2700, 2580, 2673, 2740, 2846, 2806, 2753, + 4109, 3789, 3291, 3160, 2925, 2888, 2888, 2925, 2793, 2740, 2793, 2740, 2793, 2846, 2888, 2806, + 5081, 5054, 3047, 3545, 3244, 3056, 3085, 2944, 3103, 2897, 2740, 2740, 2740, 2846, 2793, 2620, + 4309, 4309, 2860, 2527, 3207, 3376, 3376, 3075, 3075, 3376, 3056, 2846, 2647, 2580, 2726, 2753, + 3056, 2916, 2806, 2888, 2740, 2687, 2897, 3103, 3150, 3150, 3216, 3169, 3056, 3010, 2963, 2846, + 4375, 3882, 2925, 2888, 2846, 2888, 2846, 2846, 2888, 2888, 2888, 2846, 2888, 2925, 2888, 2846, + 2981, 2916, 2916, 2981, 2981, 3056, 3122, 3216, 3150, 3056, 3010, 2972, 2972, 2972, 2925, 2740, + 4229, 4149, 3310, 3347, 2925, 2963, 2888, 2981, 2981, 2846, 2793, 2740, 2846, 2846, 2846, 2793, + 4080, 4014, 3103, 3010, 2925, 2925, 2925, 2888, 2925, 2925, 2846, 2846, 2846, 2793, 2888, 2780, + 4615, 4575, 3169, 3441, 3207, 2981, 2897, 3038, 3122, 2740, 2687, 2687, 2687, 2740, 2793, 2700, + 4149, 4269, 3789, 3657, 2726, 2780, 2888, 2888, 3010, 2972, 2925, 2846, 2687, 2687, 2793, 2888, + 4215, 3554, 2753, 2846, 2846, 2888, 2888, 2888, 2925, 2925, 2888, 2925, 2925, 2925, 2963, 2888, + 5174, 4921, 2261, 3432, 3789, 3479, 3347, 2846, 3310, 3479, 3150, 2897, 2460, 2487, 2753, 2925, + 3451, 3685, 3122, 3197, 3357, 3047, 3207, 3207, 2981, 3216, 3085, 2925, 2925, 2687, 2540, 2434, + 2981, 3010, 2793, 2793, 2740, 2793, 2846, 2972, 3056, 3103, 3150, 3150, 3150, 3103, 3010, 3010, + 2944, 2873, 2687, 2726, 2780, 3010, 3432, 3545, 3357, 3244, 3056, 3010, 2963, 2925, 2888, 2846, + 3019, 2944, 2897, 3010, 3010, 2972, 3019, 3103, 3056, 3056, 3010, 2888, 2846, 2925, 2925, 2888, + 3920, 3967, 3010, 3197, 3357, 3216, 3291, 3291, 3479, 3704, 3441, 2726, 2181, 2460, 2580, 2607 +}; + static const opus_uint8 silk_NLSF_CB1_iCDF_WB[ 64 ] = { 225, 204, 201, 184, 183, 175, 158, 154, 153, 135, 119, 115, 113, 110, 109, 99, @@ -188,6 +223,7 @@ const silk_NLSF_CB_struct silk_NLSF_CB_WB = SILK_FIX_CONST( 0.15, 16 ), SILK_FIX_CONST( 1.0 / 0.15, 6 ), silk_NLSF_CB1_WB_Q8, + silk_NLSF_CB1_WB_Wght_Q9, silk_NLSF_CB1_iCDF_WB, silk_NLSF_PRED_WB_Q8, silk_NLSF_CB2_SELECT_WB, diff --git a/libs/libopus/silk/tables_other.c b/libs/libopus/silk/tables_other.c index 398686bf2..e34d90777 100644 --- a/libs/libopus/silk/tables_other.c +++ b/libs/libopus/silk/tables_other.c @@ -38,20 +38,6 @@ extern "C" { #endif -/* Piece-wise linear mapping from bitrate in kbps to coding quality in dB SNR */ -const opus_int32 silk_TargetRate_table_NB[ TARGET_RATE_TAB_SZ ] = { - 0, 8000, 9400, 11500, 13500, 17500, 25000, MAX_TARGET_RATE_BPS -}; -const opus_int32 
silk_TargetRate_table_MB[ TARGET_RATE_TAB_SZ ] = { - 0, 9000, 12000, 14500, 18500, 24500, 35500, MAX_TARGET_RATE_BPS -}; -const opus_int32 silk_TargetRate_table_WB[ TARGET_RATE_TAB_SZ ] = { - 0, 10500, 14000, 17000, 21500, 28500, 42000, MAX_TARGET_RATE_BPS -}; -const opus_int16 silk_SNR_table_Q1[ TARGET_RATE_TAB_SZ ] = { - 18, 29, 38, 40, 46, 52, 62, 84 -}; - /* Tables for stereo predictor coding */ const opus_int16 silk_stereo_pred_quant_Q13[ STEREO_QUANT_TAB_SIZE ] = { -13732, -10050, -8266, -7526, -6500, -5000, -2950, -820, diff --git a/libs/libopus/silk/tuning_parameters.h b/libs/libopus/silk/tuning_parameters.h index 5b8f40423..d70275fd8 100644 --- a/libs/libopus/silk/tuning_parameters.h +++ b/libs/libopus/silk/tuning_parameters.h @@ -53,19 +53,12 @@ extern "C" /* LPC analysis regularization */ #define FIND_LPC_COND_FAC 1e-5f -/* LTP analysis defines */ -#define FIND_LTP_COND_FAC 1e-5f -#define LTP_DAMPING 0.05f -#define LTP_SMOOTHING 0.1f - -/* LTP quantization settings */ -#define MU_LTP_QUANT_NB 0.03f -#define MU_LTP_QUANT_MB 0.025f -#define MU_LTP_QUANT_WB 0.02f - /* Max cumulative LTP gain */ #define MAX_SUM_LOG_GAIN_DB 250.0f +/* LTP analysis defines */ +#define LTP_CORR_INV_MAX 0.03f + /***********************/ /* High pass filtering */ /***********************/ @@ -103,25 +96,16 @@ extern "C" #define SPARSE_SNR_INCR_dB 2.0f /* threshold for sparseness measure above which to use lower quantization offset during unvoiced */ -#define SPARSENESS_THRESHOLD_QNT_OFFSET 0.75f +#define ENERGY_VARIATION_THRESHOLD_QNT_OFFSET 0.6f /* warping control */ #define WARPING_MULTIPLIER 0.015f /* fraction added to first autocorrelation value */ -#define SHAPE_WHITE_NOISE_FRACTION 5e-5f +#define SHAPE_WHITE_NOISE_FRACTION 3e-5f /* noise shaping filter chirp factor */ -#define BANDWIDTH_EXPANSION 0.95f - -/* difference between chirp factors for analysis and synthesis noise shaping filters at low bitrates */ -#define LOW_RATE_BANDWIDTH_EXPANSION_DELTA 0.01f - -/* extra harmonic boosting (signal shaping) at low bitrates */ -#define LOW_RATE_HARMONIC_BOOST 0.1f - -/* extra harmonic boosting (signal shaping) for noisy input signals */ -#define LOW_INPUT_QUALITY_HARMONIC_BOOST 0.1f +#define BANDWIDTH_EXPANSION 0.94f /* harmonic noise shaping */ #define HARMONIC_SHAPING 0.3f diff --git a/libs/libopus/silk/typedef.h b/libs/libopus/silk/typedef.h index 97b7e709b..793d2c0c1 100644 --- a/libs/libopus/silk/typedef.h +++ b/libs/libopus/silk/typedef.h @@ -67,6 +67,9 @@ __attribute__((noreturn)) static OPUS_INLINE void _silk_fatal(const char *str, const char *file, int line) { fprintf (stderr, "Fatal (internal) error in %s, line %d: %s\n", file, line, str); +#if defined(_MSC_VER) + _set_abort_behavior( 0, _WRITE_ABORT_MSG); +#endif abort(); } # define silk_assert(COND) {if (!(COND)) {silk_fatal("assertion failed: " #COND);}} diff --git a/libs/libopus/silk/x86/NSQ_del_dec_sse.c b/libs/libopus/silk/x86/NSQ_del_dec_sse.c deleted file mode 100644 index 21d4a8bc1..000000000 --- a/libs/libopus/silk/x86/NSQ_del_dec_sse.c +++ /dev/null @@ -1,857 +0,0 @@ -/* Copyright (c) 2014, Cisco Systems, INC - Written by XiangMingZhu WeiZhou MinPeng YanWang - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - - Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. 
- - - Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER - OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, - EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR - PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF - LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING - NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ - -#ifdef HAVE_CONFIG_H -#include "config.h" -#endif - -#include -#include -#include -#include "main.h" -#include "celt/x86/x86cpu.h" - -#include "stack_alloc.h" - -typedef struct { - opus_int32 sLPC_Q14[ MAX_SUB_FRAME_LENGTH + NSQ_LPC_BUF_LENGTH ]; - opus_int32 RandState[ DECISION_DELAY ]; - opus_int32 Q_Q10[ DECISION_DELAY ]; - opus_int32 Xq_Q14[ DECISION_DELAY ]; - opus_int32 Pred_Q15[ DECISION_DELAY ]; - opus_int32 Shape_Q14[ DECISION_DELAY ]; - opus_int32 sAR2_Q14[ MAX_SHAPE_LPC_ORDER ]; - opus_int32 LF_AR_Q14; - opus_int32 Seed; - opus_int32 SeedInit; - opus_int32 RD_Q10; -} NSQ_del_dec_struct; - -typedef struct { - opus_int32 Q_Q10; - opus_int32 RD_Q10; - opus_int32 xq_Q14; - opus_int32 LF_AR_Q14; - opus_int32 sLTP_shp_Q14; - opus_int32 LPC_exc_Q14; -} NSQ_sample_struct; - -typedef NSQ_sample_struct NSQ_sample_pair[ 2 ]; - -static OPUS_INLINE void silk_nsq_del_dec_scale_states_sse4_1( - const silk_encoder_state *psEncC, /* I Encoder State */ - silk_nsq_state *NSQ, /* I/O NSQ state */ - NSQ_del_dec_struct psDelDec[], /* I/O Delayed decision states */ - const opus_int32 x_Q3[], /* I Input in Q3 */ - opus_int32 x_sc_Q10[], /* O Input scaled with 1/Gain in Q10 */ - const opus_int16 sLTP[], /* I Re-whitened LTP state in Q0 */ - opus_int32 sLTP_Q15[], /* O LTP state matching scaled input */ - opus_int subfr, /* I Subframe number */ - opus_int nStatesDelayedDecision, /* I Number of del dec states */ - const opus_int LTP_scale_Q14, /* I LTP state scaling */ - const opus_int32 Gains_Q16[ MAX_NB_SUBFR ], /* I */ - const opus_int pitchL[ MAX_NB_SUBFR ], /* I Pitch lag */ - const opus_int signal_type, /* I Signal type */ - const opus_int decisionDelay /* I Decision delay */ -); - -/******************************************/ -/* Noise shape quantizer for one subframe */ -/******************************************/ -static OPUS_INLINE void silk_noise_shape_quantizer_del_dec_sse4_1( - silk_nsq_state *NSQ, /* I/O NSQ state */ - NSQ_del_dec_struct psDelDec[], /* I/O Delayed decision states */ - opus_int signalType, /* I Signal type */ - const opus_int32 x_Q10[], /* I */ - opus_int8 pulses[], /* O */ - opus_int16 xq[], /* O */ - opus_int32 sLTP_Q15[], /* I/O LTP filter state */ - opus_int32 delayedGain_Q10[], /* I/O Gain delay buffer */ - const opus_int16 a_Q12[], /* I Short term prediction coefs */ - const opus_int16 b_Q14[], /* I Long term prediction coefs */ - const opus_int16 AR_shp_Q13[], /* I Noise shaping coefs */ - opus_int lag, /* I Pitch lag */ - opus_int32 HarmShapeFIRPacked_Q14, /* I */ - opus_int Tilt_Q14, 
/* I Spectral tilt */ - opus_int32 LF_shp_Q14, /* I */ - opus_int32 Gain_Q16, /* I */ - opus_int Lambda_Q10, /* I */ - opus_int offset_Q10, /* I */ - opus_int length, /* I Input length */ - opus_int subfr, /* I Subframe number */ - opus_int shapingLPCOrder, /* I Shaping LPC filter order */ - opus_int predictLPCOrder, /* I Prediction filter order */ - opus_int warping_Q16, /* I */ - opus_int nStatesDelayedDecision, /* I Number of states in decision tree */ - opus_int *smpl_buf_idx, /* I Index to newest samples in buffers */ - opus_int decisionDelay /* I */ -); - -void silk_NSQ_del_dec_sse4_1( - const silk_encoder_state *psEncC, /* I/O Encoder State */ - silk_nsq_state *NSQ, /* I/O NSQ state */ - SideInfoIndices *psIndices, /* I/O Quantization Indices */ - const opus_int32 x_Q3[], /* I Prefiltered input signal */ - opus_int8 pulses[], /* O Quantized pulse signal */ - const opus_int16 PredCoef_Q12[ 2 * MAX_LPC_ORDER ], /* I Short term prediction coefs */ - const opus_int16 LTPCoef_Q14[ LTP_ORDER * MAX_NB_SUBFR ], /* I Long term prediction coefs */ - const opus_int16 AR2_Q13[ MAX_NB_SUBFR * MAX_SHAPE_LPC_ORDER ], /* I Noise shaping coefs */ - const opus_int HarmShapeGain_Q14[ MAX_NB_SUBFR ], /* I Long term shaping coefs */ - const opus_int Tilt_Q14[ MAX_NB_SUBFR ], /* I Spectral tilt */ - const opus_int32 LF_shp_Q14[ MAX_NB_SUBFR ], /* I Low frequency shaping coefs */ - const opus_int32 Gains_Q16[ MAX_NB_SUBFR ], /* I Quantization step sizes */ - const opus_int pitchL[ MAX_NB_SUBFR ], /* I Pitch lags */ - const opus_int Lambda_Q10, /* I Rate/distortion tradeoff */ - const opus_int LTP_scale_Q14 /* I LTP state scaling */ -) -{ - opus_int i, k, lag, start_idx, LSF_interpolation_flag, Winner_ind, subfr; - opus_int last_smple_idx, smpl_buf_idx, decisionDelay; - const opus_int16 *A_Q12, *B_Q14, *AR_shp_Q13; - opus_int16 *pxq; - VARDECL( opus_int32, sLTP_Q15 ); - VARDECL( opus_int16, sLTP ); - opus_int32 HarmShapeFIRPacked_Q14; - opus_int offset_Q10; - opus_int32 RDmin_Q10, Gain_Q10; - VARDECL( opus_int32, x_sc_Q10 ); - VARDECL( opus_int32, delayedGain_Q10 ); - VARDECL( NSQ_del_dec_struct, psDelDec ); - NSQ_del_dec_struct *psDD; - SAVE_STACK; - - /* Set unvoiced lag to the previous one, overwrite later for voiced */ - lag = NSQ->lagPrev; - - silk_assert( NSQ->prev_gain_Q16 != 0 ); - - /* Initialize delayed decision states */ - ALLOC( psDelDec, psEncC->nStatesDelayedDecision, NSQ_del_dec_struct ); - silk_memset( psDelDec, 0, psEncC->nStatesDelayedDecision * sizeof( NSQ_del_dec_struct ) ); - for( k = 0; k < psEncC->nStatesDelayedDecision; k++ ) { - psDD = &psDelDec[ k ]; - psDD->Seed = ( k + psIndices->Seed ) & 3; - psDD->SeedInit = psDD->Seed; - psDD->RD_Q10 = 0; - psDD->LF_AR_Q14 = NSQ->sLF_AR_shp_Q14; - psDD->Shape_Q14[ 0 ] = NSQ->sLTP_shp_Q14[ psEncC->ltp_mem_length - 1 ]; - silk_memcpy( psDD->sLPC_Q14, NSQ->sLPC_Q14, NSQ_LPC_BUF_LENGTH * sizeof( opus_int32 ) ); - silk_memcpy( psDD->sAR2_Q14, NSQ->sAR2_Q14, sizeof( NSQ->sAR2_Q14 ) ); - } - - offset_Q10 = silk_Quantization_Offsets_Q10[ psIndices->signalType >> 1 ][ psIndices->quantOffsetType ]; - smpl_buf_idx = 0; /* index of oldest samples */ - - decisionDelay = silk_min_int( DECISION_DELAY, psEncC->subfr_length ); - - /* For voiced frames limit the decision delay to lower than the pitch lag */ - if( psIndices->signalType == TYPE_VOICED ) { - for( k = 0; k < psEncC->nb_subfr; k++ ) { - decisionDelay = silk_min_int( decisionDelay, pitchL[ k ] - LTP_ORDER / 2 - 1 ); - } - } else { - if( lag > 0 ) { - decisionDelay = silk_min_int( decisionDelay, lag 
- LTP_ORDER / 2 - 1 ); - } - } - - if( psIndices->NLSFInterpCoef_Q2 == 4 ) { - LSF_interpolation_flag = 0; - } else { - LSF_interpolation_flag = 1; - } - - ALLOC( sLTP_Q15, - psEncC->ltp_mem_length + psEncC->frame_length, opus_int32 ); - ALLOC( sLTP, psEncC->ltp_mem_length + psEncC->frame_length, opus_int16 ); - ALLOC( x_sc_Q10, psEncC->subfr_length, opus_int32 ); - ALLOC( delayedGain_Q10, DECISION_DELAY, opus_int32 ); - /* Set up pointers to start of sub frame */ - pxq = &NSQ->xq[ psEncC->ltp_mem_length ]; - NSQ->sLTP_shp_buf_idx = psEncC->ltp_mem_length; - NSQ->sLTP_buf_idx = psEncC->ltp_mem_length; - subfr = 0; - for( k = 0; k < psEncC->nb_subfr; k++ ) { - A_Q12 = &PredCoef_Q12[ ( ( k >> 1 ) | ( 1 - LSF_interpolation_flag ) ) * MAX_LPC_ORDER ]; - B_Q14 = <PCoef_Q14[ k * LTP_ORDER ]; - AR_shp_Q13 = &AR2_Q13[ k * MAX_SHAPE_LPC_ORDER ]; - - /* Noise shape parameters */ - silk_assert( HarmShapeGain_Q14[ k ] >= 0 ); - HarmShapeFIRPacked_Q14 = silk_RSHIFT( HarmShapeGain_Q14[ k ], 2 ); - HarmShapeFIRPacked_Q14 |= silk_LSHIFT( (opus_int32)silk_RSHIFT( HarmShapeGain_Q14[ k ], 1 ), 16 ); - - NSQ->rewhite_flag = 0; - if( psIndices->signalType == TYPE_VOICED ) { - /* Voiced */ - lag = pitchL[ k ]; - - /* Re-whitening */ - if( ( k & ( 3 - silk_LSHIFT( LSF_interpolation_flag, 1 ) ) ) == 0 ) { - if( k == 2 ) { - /* RESET DELAYED DECISIONS */ - /* Find winner */ - RDmin_Q10 = psDelDec[ 0 ].RD_Q10; - Winner_ind = 0; - for( i = 1; i < psEncC->nStatesDelayedDecision; i++ ) { - if( psDelDec[ i ].RD_Q10 < RDmin_Q10 ) { - RDmin_Q10 = psDelDec[ i ].RD_Q10; - Winner_ind = i; - } - } - for( i = 0; i < psEncC->nStatesDelayedDecision; i++ ) { - if( i != Winner_ind ) { - psDelDec[ i ].RD_Q10 += ( silk_int32_MAX >> 4 ); - silk_assert( psDelDec[ i ].RD_Q10 >= 0 ); - } - } - - /* Copy final part of signals from winner state to output and long-term filter states */ - psDD = &psDelDec[ Winner_ind ]; - last_smple_idx = smpl_buf_idx + decisionDelay; - for( i = 0; i < decisionDelay; i++ ) { - last_smple_idx = ( last_smple_idx - 1 ) & DECISION_DELAY_MASK; - pulses[ i - decisionDelay ] = (opus_int8)silk_RSHIFT_ROUND( psDD->Q_Q10[ last_smple_idx ], 10 ); - pxq[ i - decisionDelay ] = (opus_int16)silk_SAT16( silk_RSHIFT_ROUND( - silk_SMULWW( psDD->Xq_Q14[ last_smple_idx ], Gains_Q16[ 1 ] ), 14 ) ); - NSQ->sLTP_shp_Q14[ NSQ->sLTP_shp_buf_idx - decisionDelay + i ] = psDD->Shape_Q14[ last_smple_idx ]; - } - - subfr = 0; - } - - /* Rewhiten with new A coefs */ - start_idx = psEncC->ltp_mem_length - lag - psEncC->predictLPCOrder - LTP_ORDER / 2; - silk_assert( start_idx > 0 ); - - silk_LPC_analysis_filter( &sLTP[ start_idx ], &NSQ->xq[ start_idx + k * psEncC->subfr_length ], - A_Q12, psEncC->ltp_mem_length - start_idx, psEncC->predictLPCOrder, psEncC->arch ); - - NSQ->sLTP_buf_idx = psEncC->ltp_mem_length; - NSQ->rewhite_flag = 1; - } - } - - silk_nsq_del_dec_scale_states_sse4_1( psEncC, NSQ, psDelDec, x_Q3, x_sc_Q10, sLTP, sLTP_Q15, k, - psEncC->nStatesDelayedDecision, LTP_scale_Q14, Gains_Q16, pitchL, psIndices->signalType, decisionDelay ); - - silk_noise_shape_quantizer_del_dec_sse4_1( NSQ, psDelDec, psIndices->signalType, x_sc_Q10, pulses, pxq, sLTP_Q15, - delayedGain_Q10, A_Q12, B_Q14, AR_shp_Q13, lag, HarmShapeFIRPacked_Q14, Tilt_Q14[ k ], LF_shp_Q14[ k ], - Gains_Q16[ k ], Lambda_Q10, offset_Q10, psEncC->subfr_length, subfr++, psEncC->shapingLPCOrder, - psEncC->predictLPCOrder, psEncC->warping_Q16, psEncC->nStatesDelayedDecision, &smpl_buf_idx, decisionDelay ); - - x_Q3 += psEncC->subfr_length; - pulses += 
psEncC->subfr_length; - pxq += psEncC->subfr_length; - } - - /* Find winner */ - RDmin_Q10 = psDelDec[ 0 ].RD_Q10; - Winner_ind = 0; - for( k = 1; k < psEncC->nStatesDelayedDecision; k++ ) { - if( psDelDec[ k ].RD_Q10 < RDmin_Q10 ) { - RDmin_Q10 = psDelDec[ k ].RD_Q10; - Winner_ind = k; - } - } - - /* Copy final part of signals from winner state to output and long-term filter states */ - psDD = &psDelDec[ Winner_ind ]; - psIndices->Seed = psDD->SeedInit; - last_smple_idx = smpl_buf_idx + decisionDelay; - Gain_Q10 = silk_RSHIFT32( Gains_Q16[ psEncC->nb_subfr - 1 ], 6 ); - for( i = 0; i < decisionDelay; i++ ) { - last_smple_idx = ( last_smple_idx - 1 ) & DECISION_DELAY_MASK; - pulses[ i - decisionDelay ] = (opus_int8)silk_RSHIFT_ROUND( psDD->Q_Q10[ last_smple_idx ], 10 ); - pxq[ i - decisionDelay ] = (opus_int16)silk_SAT16( silk_RSHIFT_ROUND( - silk_SMULWW( psDD->Xq_Q14[ last_smple_idx ], Gain_Q10 ), 8 ) ); - NSQ->sLTP_shp_Q14[ NSQ->sLTP_shp_buf_idx - decisionDelay + i ] = psDD->Shape_Q14[ last_smple_idx ]; - } - silk_memcpy( NSQ->sLPC_Q14, &psDD->sLPC_Q14[ psEncC->subfr_length ], NSQ_LPC_BUF_LENGTH * sizeof( opus_int32 ) ); - silk_memcpy( NSQ->sAR2_Q14, psDD->sAR2_Q14, sizeof( psDD->sAR2_Q14 ) ); - - /* Update states */ - NSQ->sLF_AR_shp_Q14 = psDD->LF_AR_Q14; - NSQ->lagPrev = pitchL[ psEncC->nb_subfr - 1 ]; - - /* Save quantized speech signal */ - /* DEBUG_STORE_DATA( enc.pcm, &NSQ->xq[psEncC->ltp_mem_length], psEncC->frame_length * sizeof( opus_int16 ) ) */ - silk_memmove( NSQ->xq, &NSQ->xq[ psEncC->frame_length ], psEncC->ltp_mem_length * sizeof( opus_int16 ) ); - silk_memmove( NSQ->sLTP_shp_Q14, &NSQ->sLTP_shp_Q14[ psEncC->frame_length ], psEncC->ltp_mem_length * sizeof( opus_int32 ) ); - RESTORE_STACK; -} - -/******************************************/ -/* Noise shape quantizer for one subframe */ -/******************************************/ -static OPUS_INLINE void silk_noise_shape_quantizer_del_dec_sse4_1( - silk_nsq_state *NSQ, /* I/O NSQ state */ - NSQ_del_dec_struct psDelDec[], /* I/O Delayed decision states */ - opus_int signalType, /* I Signal type */ - const opus_int32 x_Q10[], /* I */ - opus_int8 pulses[], /* O */ - opus_int16 xq[], /* O */ - opus_int32 sLTP_Q15[], /* I/O LTP filter state */ - opus_int32 delayedGain_Q10[], /* I/O Gain delay buffer */ - const opus_int16 a_Q12[], /* I Short term prediction coefs */ - const opus_int16 b_Q14[], /* I Long term prediction coefs */ - const opus_int16 AR_shp_Q13[], /* I Noise shaping coefs */ - opus_int lag, /* I Pitch lag */ - opus_int32 HarmShapeFIRPacked_Q14, /* I */ - opus_int Tilt_Q14, /* I Spectral tilt */ - opus_int32 LF_shp_Q14, /* I */ - opus_int32 Gain_Q16, /* I */ - opus_int Lambda_Q10, /* I */ - opus_int offset_Q10, /* I */ - opus_int length, /* I Input length */ - opus_int subfr, /* I Subframe number */ - opus_int shapingLPCOrder, /* I Shaping LPC filter order */ - opus_int predictLPCOrder, /* I Prediction filter order */ - opus_int warping_Q16, /* I */ - opus_int nStatesDelayedDecision, /* I Number of states in decision tree */ - opus_int *smpl_buf_idx, /* I Index to newest samples in buffers */ - opus_int decisionDelay /* I */ -) -{ - opus_int i, j, k, Winner_ind, RDmin_ind, RDmax_ind, last_smple_idx; - opus_int32 Winner_rand_state; - opus_int32 LTP_pred_Q14, LPC_pred_Q14, n_AR_Q14, n_LTP_Q14; - opus_int32 n_LF_Q14, r_Q10, rr_Q10, rd1_Q10, rd2_Q10, RDmin_Q10, RDmax_Q10; - opus_int32 q1_Q0, q1_Q10, q2_Q10, exc_Q14, LPC_exc_Q14, xq_Q14, Gain_Q10; - opus_int32 tmp1, tmp2, sLF_AR_shp_Q14; - opus_int32 *pred_lag_ptr, 
*shp_lag_ptr, *psLPC_Q14; - VARDECL( NSQ_sample_pair, psSampleState ); - NSQ_del_dec_struct *psDD; - NSQ_sample_struct *psSS; - - __m128i a_Q12_0123, a_Q12_4567, a_Q12_89AB, a_Q12_CDEF; - __m128i b_Q12_0123, b_sr_Q12_0123; - SAVE_STACK; - - silk_assert( nStatesDelayedDecision > 0 ); - ALLOC( psSampleState, nStatesDelayedDecision, NSQ_sample_pair ); - - shp_lag_ptr = &NSQ->sLTP_shp_Q14[ NSQ->sLTP_shp_buf_idx - lag + HARM_SHAPE_FIR_TAPS / 2 ]; - pred_lag_ptr = &sLTP_Q15[ NSQ->sLTP_buf_idx - lag + LTP_ORDER / 2 ]; - Gain_Q10 = silk_RSHIFT( Gain_Q16, 6 ); - - a_Q12_0123 = OP_CVTEPI16_EPI32_M64( a_Q12 ); - a_Q12_4567 = OP_CVTEPI16_EPI32_M64( a_Q12 + 4 ); - - if( opus_likely( predictLPCOrder == 16 ) ) { - a_Q12_89AB = OP_CVTEPI16_EPI32_M64( a_Q12 + 8 ); - a_Q12_CDEF = OP_CVTEPI16_EPI32_M64( a_Q12 + 12 ); - } - - if( signalType == TYPE_VOICED ){ - b_Q12_0123 = OP_CVTEPI16_EPI32_M64( b_Q14 ); - b_sr_Q12_0123 = _mm_shuffle_epi32( b_Q12_0123, _MM_SHUFFLE( 0, 3, 2, 1 ) ); /* equal shift right 4 bytes */ - } - for( i = 0; i < length; i++ ) { - /* Perform common calculations used in all states */ - - /* Long-term prediction */ - if( signalType == TYPE_VOICED ) { - /* Unrolled loop */ - /* Avoids introducing a bias because silk_SMLAWB() always rounds to -inf */ - LTP_pred_Q14 = 2; - { - __m128i tmpa, tmpb, pred_lag_ptr_tmp; - pred_lag_ptr_tmp = _mm_loadu_si128( (__m128i *)(&pred_lag_ptr[ -3 ] ) ); - pred_lag_ptr_tmp = _mm_shuffle_epi32( pred_lag_ptr_tmp, 0x1B ); - tmpa = _mm_mul_epi32( pred_lag_ptr_tmp, b_Q12_0123 ); - tmpa = _mm_srli_si128( tmpa, 2 ); - - pred_lag_ptr_tmp = _mm_shuffle_epi32( pred_lag_ptr_tmp, _MM_SHUFFLE( 0, 3, 2, 1 ) );/* equal shift right 4 bytes */ - pred_lag_ptr_tmp = _mm_mul_epi32( pred_lag_ptr_tmp, b_sr_Q12_0123 ); - pred_lag_ptr_tmp = _mm_srli_si128( pred_lag_ptr_tmp, 2 ); - pred_lag_ptr_tmp = _mm_add_epi32( pred_lag_ptr_tmp, tmpa ); - - tmpb = _mm_shuffle_epi32( pred_lag_ptr_tmp, _MM_SHUFFLE( 0, 0, 3, 2 ) );/* equal shift right 8 bytes */ - pred_lag_ptr_tmp = _mm_add_epi32( pred_lag_ptr_tmp, tmpb ); - LTP_pred_Q14 += _mm_cvtsi128_si32( pred_lag_ptr_tmp ); - - LTP_pred_Q14 = silk_SMLAWB( LTP_pred_Q14, pred_lag_ptr[ -4 ], b_Q14[ 4 ] ); - LTP_pred_Q14 = silk_LSHIFT( LTP_pred_Q14, 1 ); /* Q13 -> Q14 */ - pred_lag_ptr++; - } - } else { - LTP_pred_Q14 = 0; - } - - /* Long-term shaping */ - if( lag > 0 ) { - /* Symmetric, packed FIR coefficients */ - n_LTP_Q14 = silk_SMULWB( silk_ADD32( shp_lag_ptr[ 0 ], shp_lag_ptr[ -2 ] ), HarmShapeFIRPacked_Q14 ); - n_LTP_Q14 = silk_SMLAWT( n_LTP_Q14, shp_lag_ptr[ -1 ], HarmShapeFIRPacked_Q14 ); - n_LTP_Q14 = silk_SUB_LSHIFT32( LTP_pred_Q14, n_LTP_Q14, 2 ); /* Q12 -> Q14 */ - shp_lag_ptr++; - } else { - n_LTP_Q14 = 0; - } - { - __m128i tmpa, tmpb, psLPC_Q14_tmp, a_Q12_tmp; - - for( k = 0; k < nStatesDelayedDecision; k++ ) { - /* Delayed decision state */ - psDD = &psDelDec[ k ]; - - /* Sample state */ - psSS = psSampleState[ k ]; - - /* Generate dither */ - psDD->Seed = silk_RAND( psDD->Seed ); - - /* Pointer used in short term prediction and shaping */ - psLPC_Q14 = &psDD->sLPC_Q14[ NSQ_LPC_BUF_LENGTH - 1 + i ]; - /* Short-term prediction */ - silk_assert( predictLPCOrder == 10 || predictLPCOrder == 16 ); - /* Avoids introducing a bias because silk_SMLAWB() always rounds to -inf */ - LPC_pred_Q14 = silk_RSHIFT( predictLPCOrder, 1 ); - - tmpb = _mm_setzero_si128(); - - /* step 1 */ - psLPC_Q14_tmp = _mm_loadu_si128( (__m128i *)(&psLPC_Q14[ -3 ] ) ); /* -3, -2 , -1, 0 */ - psLPC_Q14_tmp = _mm_shuffle_epi32( psLPC_Q14_tmp, 0x1B ); /* 0, -1, -2, -3 
*/ - tmpa = _mm_mul_epi32( psLPC_Q14_tmp, a_Q12_0123 ); /* 0, -1, -2, -3 * 0123 -> 0*0, 2*-2 */ - - tmpa = _mm_srli_epi64( tmpa, 16 ); - tmpb = _mm_add_epi32( tmpb, tmpa ); - - psLPC_Q14_tmp = _mm_shuffle_epi32( psLPC_Q14_tmp, _MM_SHUFFLE( 0, 3, 2, 1 ) ); /* equal shift right 4 bytes */ - a_Q12_tmp = _mm_shuffle_epi32( a_Q12_0123, _MM_SHUFFLE(0, 3, 2, 1 ) ); /* equal shift right 4 bytes */ - psLPC_Q14_tmp = _mm_mul_epi32( psLPC_Q14_tmp, a_Q12_tmp ); /* 1*-1, 3*-3 */ - psLPC_Q14_tmp = _mm_srli_epi64( psLPC_Q14_tmp, 16 ); - tmpb = _mm_add_epi32( tmpb, psLPC_Q14_tmp ); - - /* step 2 */ - psLPC_Q14_tmp = _mm_loadu_si128( (__m128i *)(&psLPC_Q14[ -7 ] ) ); - psLPC_Q14_tmp = _mm_shuffle_epi32( psLPC_Q14_tmp, 0x1B ); - tmpa = _mm_mul_epi32( psLPC_Q14_tmp, a_Q12_4567 ); - tmpa = _mm_srli_epi64( tmpa, 16 ); - tmpb = _mm_add_epi32( tmpb, tmpa ); - - psLPC_Q14_tmp = _mm_shuffle_epi32( psLPC_Q14_tmp, _MM_SHUFFLE( 0, 3, 2, 1 ) ); /* equal shift right 4 bytes */ - a_Q12_tmp = _mm_shuffle_epi32( a_Q12_4567, _MM_SHUFFLE(0, 3, 2, 1 ) ); /* equal shift right 4 bytes */ - psLPC_Q14_tmp = _mm_mul_epi32( psLPC_Q14_tmp, a_Q12_tmp ); - psLPC_Q14_tmp = _mm_srli_epi64( psLPC_Q14_tmp, 16 ); - tmpb = _mm_add_epi32( tmpb, psLPC_Q14_tmp ); - - if ( opus_likely( predictLPCOrder == 16 ) ) - { - /* step 3 */ - psLPC_Q14_tmp = _mm_loadu_si128( (__m128i *)(&psLPC_Q14[ -11 ] ) ); - psLPC_Q14_tmp = _mm_shuffle_epi32( psLPC_Q14_tmp, 0x1B ); - tmpa = _mm_mul_epi32( psLPC_Q14_tmp, a_Q12_89AB ); - tmpa = _mm_srli_epi64( tmpa, 16 ); - tmpb = _mm_add_epi32( tmpb, tmpa ); - - psLPC_Q14_tmp = _mm_shuffle_epi32( psLPC_Q14_tmp, _MM_SHUFFLE( 0, 3, 2, 1 ) ); /* equal shift right 4 bytes */ - a_Q12_tmp = _mm_shuffle_epi32( a_Q12_89AB, _MM_SHUFFLE(0, 3, 2, 1 ) );/* equal shift right 4 bytes */ - psLPC_Q14_tmp = _mm_mul_epi32( psLPC_Q14_tmp, a_Q12_tmp ); - psLPC_Q14_tmp = _mm_srli_epi64( psLPC_Q14_tmp, 16 ); - tmpb = _mm_add_epi32( tmpb, psLPC_Q14_tmp ); - - /* setp 4 */ - psLPC_Q14_tmp = _mm_loadu_si128( (__m128i *)(&psLPC_Q14[ -15 ] ) ); - psLPC_Q14_tmp = _mm_shuffle_epi32( psLPC_Q14_tmp, 0x1B ); - tmpa = _mm_mul_epi32( psLPC_Q14_tmp, a_Q12_CDEF ); - tmpa = _mm_srli_epi64( tmpa, 16 ); - tmpb = _mm_add_epi32( tmpb, tmpa ); - - psLPC_Q14_tmp = _mm_shuffle_epi32( psLPC_Q14_tmp, _MM_SHUFFLE( 0, 3, 2, 1 ) ); /* equal shift right 4 bytes */ - a_Q12_tmp = _mm_shuffle_epi32( a_Q12_CDEF, _MM_SHUFFLE(0, 3, 2, 1 ) ); /* equal shift right 4 bytes */ - psLPC_Q14_tmp = _mm_mul_epi32( psLPC_Q14_tmp, a_Q12_tmp ); - psLPC_Q14_tmp = _mm_srli_epi64( psLPC_Q14_tmp, 16 ); - tmpb = _mm_add_epi32( tmpb, psLPC_Q14_tmp ); - - /* add at last */ - /* equal shift right 8 bytes*/ - tmpa = _mm_shuffle_epi32( tmpb, _MM_SHUFFLE( 0, 0, 3, 2 ) ); - tmpb = _mm_add_epi32( tmpb, tmpa ); - LPC_pred_Q14 += _mm_cvtsi128_si32( tmpb ); - } - else - { - /* add at last */ - tmpa = _mm_shuffle_epi32( tmpb, _MM_SHUFFLE( 0, 0, 3, 2 ) ); /* equal shift right 8 bytes*/ - tmpb = _mm_add_epi32( tmpb, tmpa ); - LPC_pred_Q14 += _mm_cvtsi128_si32( tmpb ); - - LPC_pred_Q14 = silk_SMLAWB( LPC_pred_Q14, psLPC_Q14[ -8 ], a_Q12[ 8 ] ); - LPC_pred_Q14 = silk_SMLAWB( LPC_pred_Q14, psLPC_Q14[ -9 ], a_Q12[ 9 ] ); - } - - LPC_pred_Q14 = silk_LSHIFT( LPC_pred_Q14, 4 ); /* Q10 -> Q14 */ - - /* Noise shape feedback */ - silk_assert( ( shapingLPCOrder & 1 ) == 0 ); /* check that order is even */ - /* Output of lowpass section */ - tmp2 = silk_SMLAWB( psLPC_Q14[ 0 ], psDD->sAR2_Q14[ 0 ], warping_Q16 ); - /* Output of allpass section */ - tmp1 = silk_SMLAWB( psDD->sAR2_Q14[ 0 ], psDD->sAR2_Q14[ 1 ] - tmp2, 
warping_Q16 ); - psDD->sAR2_Q14[ 0 ] = tmp2; - n_AR_Q14 = silk_RSHIFT( shapingLPCOrder, 1 ); - n_AR_Q14 = silk_SMLAWB( n_AR_Q14, tmp2, AR_shp_Q13[ 0 ] ); - /* Loop over allpass sections */ - for( j = 2; j < shapingLPCOrder; j += 2 ) { - /* Output of allpass section */ - tmp2 = silk_SMLAWB( psDD->sAR2_Q14[ j - 1 ], psDD->sAR2_Q14[ j + 0 ] - tmp1, warping_Q16 ); - psDD->sAR2_Q14[ j - 1 ] = tmp1; - n_AR_Q14 = silk_SMLAWB( n_AR_Q14, tmp1, AR_shp_Q13[ j - 1 ] ); - /* Output of allpass section */ - tmp1 = silk_SMLAWB( psDD->sAR2_Q14[ j + 0 ], psDD->sAR2_Q14[ j + 1 ] - tmp2, warping_Q16 ); - psDD->sAR2_Q14[ j + 0 ] = tmp2; - n_AR_Q14 = silk_SMLAWB( n_AR_Q14, tmp2, AR_shp_Q13[ j ] ); - } - psDD->sAR2_Q14[ shapingLPCOrder - 1 ] = tmp1; - n_AR_Q14 = silk_SMLAWB( n_AR_Q14, tmp1, AR_shp_Q13[ shapingLPCOrder - 1 ] ); - - n_AR_Q14 = silk_LSHIFT( n_AR_Q14, 1 ); /* Q11 -> Q12 */ - n_AR_Q14 = silk_SMLAWB( n_AR_Q14, psDD->LF_AR_Q14, Tilt_Q14 ); /* Q12 */ - n_AR_Q14 = silk_LSHIFT( n_AR_Q14, 2 ); /* Q12 -> Q14 */ - - n_LF_Q14 = silk_SMULWB( psDD->Shape_Q14[ *smpl_buf_idx ], LF_shp_Q14 ); /* Q12 */ - n_LF_Q14 = silk_SMLAWT( n_LF_Q14, psDD->LF_AR_Q14, LF_shp_Q14 ); /* Q12 */ - n_LF_Q14 = silk_LSHIFT( n_LF_Q14, 2 ); /* Q12 -> Q14 */ - - /* Input minus prediction plus noise feedback */ - /* r = x[ i ] - LTP_pred - LPC_pred + n_AR + n_Tilt + n_LF + n_LTP */ - tmp1 = silk_ADD32( n_AR_Q14, n_LF_Q14 ); /* Q14 */ - tmp2 = silk_ADD32( n_LTP_Q14, LPC_pred_Q14 ); /* Q13 */ - tmp1 = silk_SUB32( tmp2, tmp1 ); /* Q13 */ - tmp1 = silk_RSHIFT_ROUND( tmp1, 4 ); /* Q10 */ - - r_Q10 = silk_SUB32( x_Q10[ i ], tmp1 ); /* residual error Q10 */ - - /* Flip sign depending on dither */ - if ( psDD->Seed < 0 ) { - r_Q10 = -r_Q10; - } - r_Q10 = silk_LIMIT_32( r_Q10, -(31 << 10), 30 << 10 ); - - /* Find two quantization level candidates and measure their rate-distortion */ - q1_Q10 = silk_SUB32( r_Q10, offset_Q10 ); - q1_Q0 = silk_RSHIFT( q1_Q10, 10 ); - if( q1_Q0 > 0 ) { - q1_Q10 = silk_SUB32( silk_LSHIFT( q1_Q0, 10 ), QUANT_LEVEL_ADJUST_Q10 ); - q1_Q10 = silk_ADD32( q1_Q10, offset_Q10 ); - q2_Q10 = silk_ADD32( q1_Q10, 1024 ); - rd1_Q10 = silk_SMULBB( q1_Q10, Lambda_Q10 ); - rd2_Q10 = silk_SMULBB( q2_Q10, Lambda_Q10 ); - } else if( q1_Q0 == 0 ) { - q1_Q10 = offset_Q10; - q2_Q10 = silk_ADD32( q1_Q10, 1024 - QUANT_LEVEL_ADJUST_Q10 ); - rd1_Q10 = silk_SMULBB( q1_Q10, Lambda_Q10 ); - rd2_Q10 = silk_SMULBB( q2_Q10, Lambda_Q10 ); - } else if( q1_Q0 == -1 ) { - q2_Q10 = offset_Q10; - q1_Q10 = silk_SUB32( q2_Q10, 1024 - QUANT_LEVEL_ADJUST_Q10 ); - rd1_Q10 = silk_SMULBB( -q1_Q10, Lambda_Q10 ); - rd2_Q10 = silk_SMULBB( q2_Q10, Lambda_Q10 ); - } else { /* q1_Q0 < -1 */ - q1_Q10 = silk_ADD32( silk_LSHIFT( q1_Q0, 10 ), QUANT_LEVEL_ADJUST_Q10 ); - q1_Q10 = silk_ADD32( q1_Q10, offset_Q10 ); - q2_Q10 = silk_ADD32( q1_Q10, 1024 ); - rd1_Q10 = silk_SMULBB( -q1_Q10, Lambda_Q10 ); - rd2_Q10 = silk_SMULBB( -q2_Q10, Lambda_Q10 ); - } - rr_Q10 = silk_SUB32( r_Q10, q1_Q10 ); - rd1_Q10 = silk_RSHIFT( silk_SMLABB( rd1_Q10, rr_Q10, rr_Q10 ), 10 ); - rr_Q10 = silk_SUB32( r_Q10, q2_Q10 ); - rd2_Q10 = silk_RSHIFT( silk_SMLABB( rd2_Q10, rr_Q10, rr_Q10 ), 10 ); - - if( rd1_Q10 < rd2_Q10 ) { - psSS[ 0 ].RD_Q10 = silk_ADD32( psDD->RD_Q10, rd1_Q10 ); - psSS[ 1 ].RD_Q10 = silk_ADD32( psDD->RD_Q10, rd2_Q10 ); - psSS[ 0 ].Q_Q10 = q1_Q10; - psSS[ 1 ].Q_Q10 = q2_Q10; - } else { - psSS[ 0 ].RD_Q10 = silk_ADD32( psDD->RD_Q10, rd2_Q10 ); - psSS[ 1 ].RD_Q10 = silk_ADD32( psDD->RD_Q10, rd1_Q10 ); - psSS[ 0 ].Q_Q10 = q2_Q10; - psSS[ 1 ].Q_Q10 = q1_Q10; - } - - /* Update states for 
best quantization */ - - /* Quantized excitation */ - exc_Q14 = silk_LSHIFT32( psSS[ 0 ].Q_Q10, 4 ); - if ( psDD->Seed < 0 ) { - exc_Q14 = -exc_Q14; - } - - /* Add predictions */ - LPC_exc_Q14 = silk_ADD32( exc_Q14, LTP_pred_Q14 ); - xq_Q14 = silk_ADD32( LPC_exc_Q14, LPC_pred_Q14 ); - - /* Update states */ - sLF_AR_shp_Q14 = silk_SUB32( xq_Q14, n_AR_Q14 ); - psSS[ 0 ].sLTP_shp_Q14 = silk_SUB32( sLF_AR_shp_Q14, n_LF_Q14 ); - psSS[ 0 ].LF_AR_Q14 = sLF_AR_shp_Q14; - psSS[ 0 ].LPC_exc_Q14 = LPC_exc_Q14; - psSS[ 0 ].xq_Q14 = xq_Q14; - - /* Update states for second best quantization */ - - /* Quantized excitation */ - exc_Q14 = silk_LSHIFT32( psSS[ 1 ].Q_Q10, 4 ); - if ( psDD->Seed < 0 ) { - exc_Q14 = -exc_Q14; - } - - - /* Add predictions */ - LPC_exc_Q14 = silk_ADD32( exc_Q14, LTP_pred_Q14 ); - xq_Q14 = silk_ADD32( LPC_exc_Q14, LPC_pred_Q14 ); - - /* Update states */ - sLF_AR_shp_Q14 = silk_SUB32( xq_Q14, n_AR_Q14 ); - psSS[ 1 ].sLTP_shp_Q14 = silk_SUB32( sLF_AR_shp_Q14, n_LF_Q14 ); - psSS[ 1 ].LF_AR_Q14 = sLF_AR_shp_Q14; - psSS[ 1 ].LPC_exc_Q14 = LPC_exc_Q14; - psSS[ 1 ].xq_Q14 = xq_Q14; - } - } - *smpl_buf_idx = ( *smpl_buf_idx - 1 ) & DECISION_DELAY_MASK; /* Index to newest samples */ - last_smple_idx = ( *smpl_buf_idx + decisionDelay ) & DECISION_DELAY_MASK; /* Index to decisionDelay old samples */ - - /* Find winner */ - RDmin_Q10 = psSampleState[ 0 ][ 0 ].RD_Q10; - Winner_ind = 0; - for( k = 1; k < nStatesDelayedDecision; k++ ) { - if( psSampleState[ k ][ 0 ].RD_Q10 < RDmin_Q10 ) { - RDmin_Q10 = psSampleState[ k ][ 0 ].RD_Q10; - Winner_ind = k; - } - } - - /* Increase RD values of expired states */ - Winner_rand_state = psDelDec[ Winner_ind ].RandState[ last_smple_idx ]; - for( k = 0; k < nStatesDelayedDecision; k++ ) { - if( psDelDec[ k ].RandState[ last_smple_idx ] != Winner_rand_state ) { - psSampleState[ k ][ 0 ].RD_Q10 = silk_ADD32( psSampleState[ k ][ 0 ].RD_Q10, silk_int32_MAX >> 4 ); - psSampleState[ k ][ 1 ].RD_Q10 = silk_ADD32( psSampleState[ k ][ 1 ].RD_Q10, silk_int32_MAX >> 4 ); - silk_assert( psSampleState[ k ][ 0 ].RD_Q10 >= 0 ); - } - } - - /* Find worst in first set and best in second set */ - RDmax_Q10 = psSampleState[ 0 ][ 0 ].RD_Q10; - RDmin_Q10 = psSampleState[ 0 ][ 1 ].RD_Q10; - RDmax_ind = 0; - RDmin_ind = 0; - for( k = 1; k < nStatesDelayedDecision; k++ ) { - /* find worst in first set */ - if( psSampleState[ k ][ 0 ].RD_Q10 > RDmax_Q10 ) { - RDmax_Q10 = psSampleState[ k ][ 0 ].RD_Q10; - RDmax_ind = k; - } - /* find best in second set */ - if( psSampleState[ k ][ 1 ].RD_Q10 < RDmin_Q10 ) { - RDmin_Q10 = psSampleState[ k ][ 1 ].RD_Q10; - RDmin_ind = k; - } - } - - /* Replace a state if best from second set outperforms worst in first set */ - if( RDmin_Q10 < RDmax_Q10 ) { - silk_memcpy( ( (opus_int32 *)&psDelDec[ RDmax_ind ] ) + i, - ( (opus_int32 *)&psDelDec[ RDmin_ind ] ) + i, sizeof( NSQ_del_dec_struct ) - i * sizeof( opus_int32) ); - silk_memcpy( &psSampleState[ RDmax_ind ][ 0 ], &psSampleState[ RDmin_ind ][ 1 ], sizeof( NSQ_sample_struct ) ); - } - - /* Write samples from winner to output and long-term filter states */ - psDD = &psDelDec[ Winner_ind ]; - if( subfr > 0 || i >= decisionDelay ) { - pulses[ i - decisionDelay ] = (opus_int8)silk_RSHIFT_ROUND( psDD->Q_Q10[ last_smple_idx ], 10 ); - xq[ i - decisionDelay ] = (opus_int16)silk_SAT16( silk_RSHIFT_ROUND( - silk_SMULWW( psDD->Xq_Q14[ last_smple_idx ], delayedGain_Q10[ last_smple_idx ] ), 8 ) ); - NSQ->sLTP_shp_Q14[ NSQ->sLTP_shp_buf_idx - decisionDelay ] = psDD->Shape_Q14[ last_smple_idx ]; - sLTP_Q15[ 
NSQ->sLTP_buf_idx - decisionDelay ] = psDD->Pred_Q15[ last_smple_idx ]; - } - NSQ->sLTP_shp_buf_idx++; - NSQ->sLTP_buf_idx++; - - /* Update states */ - for( k = 0; k < nStatesDelayedDecision; k++ ) { - psDD = &psDelDec[ k ]; - psSS = &psSampleState[ k ][ 0 ]; - psDD->LF_AR_Q14 = psSS->LF_AR_Q14; - psDD->sLPC_Q14[ NSQ_LPC_BUF_LENGTH + i ] = psSS->xq_Q14; - psDD->Xq_Q14[ *smpl_buf_idx ] = psSS->xq_Q14; - psDD->Q_Q10[ *smpl_buf_idx ] = psSS->Q_Q10; - psDD->Pred_Q15[ *smpl_buf_idx ] = silk_LSHIFT32( psSS->LPC_exc_Q14, 1 ); - psDD->Shape_Q14[ *smpl_buf_idx ] = psSS->sLTP_shp_Q14; - psDD->Seed = silk_ADD32_ovflw( psDD->Seed, silk_RSHIFT_ROUND( psSS->Q_Q10, 10 ) ); - psDD->RandState[ *smpl_buf_idx ] = psDD->Seed; - psDD->RD_Q10 = psSS->RD_Q10; - } - delayedGain_Q10[ *smpl_buf_idx ] = Gain_Q10; - } - /* Update LPC states */ - for( k = 0; k < nStatesDelayedDecision; k++ ) { - psDD = &psDelDec[ k ]; - silk_memcpy( psDD->sLPC_Q14, &psDD->sLPC_Q14[ length ], NSQ_LPC_BUF_LENGTH * sizeof( opus_int32 ) ); - } - RESTORE_STACK; -} - -static OPUS_INLINE void silk_nsq_del_dec_scale_states_sse4_1( - const silk_encoder_state *psEncC, /* I Encoder State */ - silk_nsq_state *NSQ, /* I/O NSQ state */ - NSQ_del_dec_struct psDelDec[], /* I/O Delayed decision states */ - const opus_int32 x_Q3[], /* I Input in Q3 */ - opus_int32 x_sc_Q10[], /* O Input scaled with 1/Gain in Q10 */ - const opus_int16 sLTP[], /* I Re-whitened LTP state in Q0 */ - opus_int32 sLTP_Q15[], /* O LTP state matching scaled input */ - opus_int subfr, /* I Subframe number */ - opus_int nStatesDelayedDecision, /* I Number of del dec states */ - const opus_int LTP_scale_Q14, /* I LTP state scaling */ - const opus_int32 Gains_Q16[ MAX_NB_SUBFR ], /* I */ - const opus_int pitchL[ MAX_NB_SUBFR ], /* I Pitch lag */ - const opus_int signal_type, /* I Signal type */ - const opus_int decisionDelay /* I Decision delay */ -) -{ - opus_int i, k, lag; - opus_int32 gain_adj_Q16, inv_gain_Q31, inv_gain_Q23; - NSQ_del_dec_struct *psDD; - __m128i xmm_inv_gain_Q23, xmm_x_Q3_x2x0, xmm_x_Q3_x3x1; - - lag = pitchL[ subfr ]; - inv_gain_Q31 = silk_INVERSE32_varQ( silk_max( Gains_Q16[ subfr ], 1 ), 47 ); - - silk_assert( inv_gain_Q31 != 0 ); - - /* Calculate gain adjustment factor */ - if( Gains_Q16[ subfr ] != NSQ->prev_gain_Q16 ) { - gain_adj_Q16 = silk_DIV32_varQ( NSQ->prev_gain_Q16, Gains_Q16[ subfr ], 16 ); - } else { - gain_adj_Q16 = (opus_int32)1 << 16; - } - - /* Scale input */ - inv_gain_Q23 = silk_RSHIFT_ROUND( inv_gain_Q31, 8 ); - - /* prepare inv_gain_Q23 in packed 4 32-bits */ - xmm_inv_gain_Q23 = _mm_set1_epi32(inv_gain_Q23); - - for( i = 0; i < psEncC->subfr_length - 3; i += 4 ) { - xmm_x_Q3_x2x0 = _mm_loadu_si128( (__m128i *)(&(x_Q3[ i ] ) ) ); - /* equal shift right 4 bytes*/ - xmm_x_Q3_x3x1 = _mm_shuffle_epi32( xmm_x_Q3_x2x0, _MM_SHUFFLE( 0, 3, 2, 1 ) ); - - xmm_x_Q3_x2x0 = _mm_mul_epi32( xmm_x_Q3_x2x0, xmm_inv_gain_Q23 ); - xmm_x_Q3_x3x1 = _mm_mul_epi32( xmm_x_Q3_x3x1, xmm_inv_gain_Q23 ); - - xmm_x_Q3_x2x0 = _mm_srli_epi64( xmm_x_Q3_x2x0, 16 ); - xmm_x_Q3_x3x1 = _mm_slli_epi64( xmm_x_Q3_x3x1, 16 ); - - xmm_x_Q3_x2x0 = _mm_blend_epi16( xmm_x_Q3_x2x0, xmm_x_Q3_x3x1, 0xCC ); - - _mm_storeu_si128( (__m128i *)(&(x_sc_Q10[ i ])), xmm_x_Q3_x2x0 ); - } - - for( ; i < psEncC->subfr_length; i++ ) { - x_sc_Q10[ i ] = silk_SMULWW( x_Q3[ i ], inv_gain_Q23 ); - } - - /* Save inverse gain */ - NSQ->prev_gain_Q16 = Gains_Q16[ subfr ]; - - /* After rewhitening the LTP state is un-scaled, so scale with inv_gain_Q16 */ - if( NSQ->rewhite_flag ) { - if( subfr == 0 ) { - 
/* Do LTP downscaling */ - inv_gain_Q31 = silk_LSHIFT( silk_SMULWB( inv_gain_Q31, LTP_scale_Q14 ), 2 ); - } - for( i = NSQ->sLTP_buf_idx - lag - LTP_ORDER / 2; i < NSQ->sLTP_buf_idx; i++ ) { - silk_assert( i < MAX_FRAME_LENGTH ); - sLTP_Q15[ i ] = silk_SMULWB( inv_gain_Q31, sLTP[ i ] ); - } - } - - /* Adjust for changing gain */ - if( gain_adj_Q16 != (opus_int32)1 << 16 ) { - /* Scale long-term shaping state */ - { - __m128i xmm_gain_adj_Q16, xmm_sLTP_shp_Q14_x2x0, xmm_sLTP_shp_Q14_x3x1; - - /* prepare gain_adj_Q16 in packed 4 32-bits */ - xmm_gain_adj_Q16 = _mm_set1_epi32( gain_adj_Q16 ); - - for( i = NSQ->sLTP_shp_buf_idx - psEncC->ltp_mem_length; i < NSQ->sLTP_shp_buf_idx - 3; i += 4 ) - { - xmm_sLTP_shp_Q14_x2x0 = _mm_loadu_si128( (__m128i *)(&(NSQ->sLTP_shp_Q14[ i ] ) ) ); - /* equal shift right 4 bytes*/ - xmm_sLTP_shp_Q14_x3x1 = _mm_shuffle_epi32( xmm_sLTP_shp_Q14_x2x0, _MM_SHUFFLE( 0, 3, 2, 1 ) ); - - xmm_sLTP_shp_Q14_x2x0 = _mm_mul_epi32( xmm_sLTP_shp_Q14_x2x0, xmm_gain_adj_Q16 ); - xmm_sLTP_shp_Q14_x3x1 = _mm_mul_epi32( xmm_sLTP_shp_Q14_x3x1, xmm_gain_adj_Q16 ); - - xmm_sLTP_shp_Q14_x2x0 = _mm_srli_epi64( xmm_sLTP_shp_Q14_x2x0, 16 ); - xmm_sLTP_shp_Q14_x3x1 = _mm_slli_epi64( xmm_sLTP_shp_Q14_x3x1, 16 ); - - xmm_sLTP_shp_Q14_x2x0 = _mm_blend_epi16( xmm_sLTP_shp_Q14_x2x0, xmm_sLTP_shp_Q14_x3x1, 0xCC ); - - _mm_storeu_si128( (__m128i *)(&(NSQ->sLTP_shp_Q14[ i ] ) ), xmm_sLTP_shp_Q14_x2x0 ); - } - - for( ; i < NSQ->sLTP_shp_buf_idx; i++ ) { - NSQ->sLTP_shp_Q14[ i ] = silk_SMULWW( gain_adj_Q16, NSQ->sLTP_shp_Q14[ i ] ); - } - - /* Scale long-term prediction state */ - if( signal_type == TYPE_VOICED && NSQ->rewhite_flag == 0 ) { - for( i = NSQ->sLTP_buf_idx - lag - LTP_ORDER / 2; i < NSQ->sLTP_buf_idx - decisionDelay; i++ ) { - sLTP_Q15[ i ] = silk_SMULWW( gain_adj_Q16, sLTP_Q15[ i ] ); - } - } - - for( k = 0; k < nStatesDelayedDecision; k++ ) { - psDD = &psDelDec[ k ]; - - /* Scale scalar states */ - psDD->LF_AR_Q14 = silk_SMULWW( gain_adj_Q16, psDD->LF_AR_Q14 ); - - /* Scale short-term prediction and shaping states */ - for( i = 0; i < NSQ_LPC_BUF_LENGTH; i++ ) { - psDD->sLPC_Q14[ i ] = silk_SMULWW( gain_adj_Q16, psDD->sLPC_Q14[ i ] ); - } - for( i = 0; i < MAX_SHAPE_LPC_ORDER; i++ ) { - psDD->sAR2_Q14[ i ] = silk_SMULWW( gain_adj_Q16, psDD->sAR2_Q14[ i ] ); - } - for( i = 0; i < DECISION_DELAY; i++ ) { - psDD->Pred_Q15[ i ] = silk_SMULWW( gain_adj_Q16, psDD->Pred_Q15[ i ] ); - psDD->Shape_Q14[ i ] = silk_SMULWW( gain_adj_Q16, psDD->Shape_Q14[ i ] ); - } - } - } - } -} diff --git a/libs/libopus/silk/x86/NSQ_del_dec_sse4_1.c b/libs/libopus/silk/x86/NSQ_del_dec_sse4_1.c new file mode 100644 index 000000000..42735c528 --- /dev/null +++ b/libs/libopus/silk/x86/NSQ_del_dec_sse4_1.c @@ -0,0 +1,914 @@ +/* Copyright (c) 2014-2020, Cisco Systems, INC + Written by XiangMingZhu WeiZhou MinPeng YanWang FrancisQuiers + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + - Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + - Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. 
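Note: the gain-adjustment loops in silk_nsq_del_dec_scale_states_sse4_1() above are a 4-wide form of silk_SMULWW(), i.e. ((opus_int64)a * b) >> 16 truncated to 32 bits. A condensed restatement of that idiom, with illustrative names only (the patch itself inlines the intrinsics; opus_int32 comes from opus_types.h):

#include <smmintrin.h>  /* SSE4.1: _mm_mul_epi32, _mm_blend_epi16 */

/* Multiply four signed 32-bit lanes by a broadcast gain and keep bits 47..16
   of each 64-bit product, as the sLTP_shp_Q14 and input scaling loops do. */
static __m128i smulww4_sketch( __m128i v, opus_int32 gain )
{
    __m128i g    = _mm_set1_epi32( gain );
    __m128i even = _mm_mul_epi32( v, g );                                                 /* 64-bit products of lanes 0 and 2 */
    __m128i odd  = _mm_mul_epi32( _mm_shuffle_epi32( v, _MM_SHUFFLE( 0, 3, 2, 1 ) ), g ); /* lanes 1 and 3 */
    even = _mm_srli_epi64( even, 16 );                  /* wanted 32 bits land in lanes 0 and 2 */
    odd  = _mm_slli_epi64( odd, 16 );                   /* wanted 32 bits land in lanes 1 and 3 */
    return _mm_blend_epi16( even, odd, 0xCC );          /* interleave back into lane order 0..3 */
}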
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER + OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifdef HAVE_CONFIG_H +#include "config.h" +#endif + +#include +#include +#include +#include "main.h" +#include "celt/x86/x86cpu.h" + +#include "stack_alloc.h" + +typedef struct { + opus_int32 sLPC_Q14[ MAX_SUB_FRAME_LENGTH + NSQ_LPC_BUF_LENGTH ]; + opus_int32 RandState[ DECISION_DELAY ]; + opus_int32 Q_Q10[ DECISION_DELAY ]; + opus_int32 Xq_Q14[ DECISION_DELAY ]; + opus_int32 Pred_Q15[ DECISION_DELAY ]; + opus_int32 Shape_Q14[ DECISION_DELAY ]; + opus_int32 sAR2_Q14[ MAX_SHAPE_LPC_ORDER ]; + opus_int32 LF_AR_Q14; + opus_int32 Diff_Q14; + opus_int32 Seed; + opus_int32 SeedInit; + opus_int32 RD_Q10; +} NSQ_del_dec_struct; + +typedef struct { + opus_int32 Q_Q10; + opus_int32 RD_Q10; + opus_int32 xq_Q14; + opus_int32 LF_AR_Q14; + opus_int32 Diff_Q14; + opus_int32 sLTP_shp_Q14; + opus_int32 LPC_exc_Q14; +} NSQ_sample_struct; + +typedef NSQ_sample_struct NSQ_sample_pair[ 2 ]; + +static OPUS_INLINE void silk_nsq_del_dec_scale_states_sse4_1( + const silk_encoder_state *psEncC, /* I Encoder State */ + silk_nsq_state *NSQ, /* I/O NSQ state */ + NSQ_del_dec_struct psDelDec[], /* I/O Delayed decision states */ + const opus_int16 x16[], /* I Input */ + opus_int32 x_sc_Q10[], /* O Input scaled with 1/Gain in Q10 */ + const opus_int16 sLTP[], /* I Re-whitened LTP state in Q0 */ + opus_int32 sLTP_Q15[], /* O LTP state matching scaled input */ + opus_int subfr, /* I Subframe number */ + opus_int nStatesDelayedDecision, /* I Number of del dec states */ + const opus_int LTP_scale_Q14, /* I LTP state scaling */ + const opus_int32 Gains_Q16[ MAX_NB_SUBFR ], /* I */ + const opus_int pitchL[ MAX_NB_SUBFR ], /* I Pitch lag */ + const opus_int signal_type, /* I Signal type */ + const opus_int decisionDelay /* I Decision delay */ +); + +/******************************************/ +/* Noise shape quantizer for one subframe */ +/******************************************/ +static OPUS_INLINE void silk_noise_shape_quantizer_del_dec_sse4_1( + silk_nsq_state *NSQ, /* I/O NSQ state */ + NSQ_del_dec_struct psDelDec[], /* I/O Delayed decision states */ + opus_int signalType, /* I Signal type */ + const opus_int32 x_Q10[], /* I */ + opus_int8 pulses[], /* O */ + opus_int16 xq[], /* O */ + opus_int32 sLTP_Q15[], /* I/O LTP filter state */ + opus_int32 delayedGain_Q10[], /* I/O Gain delay buffer */ + const opus_int16 a_Q12[], /* I Short term prediction coefs */ + const opus_int16 b_Q14[], /* I Long term prediction coefs */ + const opus_int16 AR_shp_Q13[], /* I Noise shaping coefs */ + opus_int lag, /* I Pitch lag */ + opus_int32 HarmShapeFIRPacked_Q14, /* I */ + opus_int Tilt_Q14, /* I Spectral tilt */ + opus_int32 LF_shp_Q14, /* I */ + opus_int32 Gain_Q16, /* I */ + opus_int Lambda_Q10, /* I */ + opus_int offset_Q10, /* I */ + opus_int length, /* I 
Input length */ + opus_int subfr, /* I Subframe number */ + opus_int shapingLPCOrder, /* I Shaping LPC filter order */ + opus_int predictLPCOrder, /* I Prediction filter order */ + opus_int warping_Q16, /* I */ + opus_int nStatesDelayedDecision, /* I Number of states in decision tree */ + opus_int *smpl_buf_idx, /* I/O Index to newest samples in buffers */ + opus_int decisionDelay /* I */ +); + +void silk_NSQ_del_dec_sse4_1( + const silk_encoder_state *psEncC, /* I Encoder State */ + silk_nsq_state *NSQ, /* I/O NSQ state */ + SideInfoIndices *psIndices, /* I/O Quantization Indices */ + const opus_int16 x16[], /* I Input */ + opus_int8 pulses[], /* O Quantized pulse signal */ + const opus_int16 PredCoef_Q12[ 2 * MAX_LPC_ORDER ], /* I Short term prediction coefs */ + const opus_int16 LTPCoef_Q14[ LTP_ORDER * MAX_NB_SUBFR ], /* I Long term prediction coefs */ + const opus_int16 AR_Q13[ MAX_NB_SUBFR * MAX_SHAPE_LPC_ORDER ], /* I Noise shaping coefs */ + const opus_int HarmShapeGain_Q14[ MAX_NB_SUBFR ], /* I Long term shaping coefs */ + const opus_int Tilt_Q14[ MAX_NB_SUBFR ], /* I Spectral tilt */ + const opus_int32 LF_shp_Q14[ MAX_NB_SUBFR ], /* I Low frequency shaping coefs */ + const opus_int32 Gains_Q16[ MAX_NB_SUBFR ], /* I Quantization step sizes */ + const opus_int pitchL[ MAX_NB_SUBFR ], /* I Pitch lags */ + const opus_int Lambda_Q10, /* I Rate/distortion tradeoff */ + const opus_int LTP_scale_Q14 /* I LTP state scaling */ +) +{ + opus_int i, k, lag, start_idx, LSF_interpolation_flag, Winner_ind, subfr; + opus_int last_smple_idx, smpl_buf_idx, decisionDelay; + const opus_int16 *A_Q12, *B_Q14, *AR_shp_Q13; + opus_int16 *pxq; + VARDECL( opus_int32, sLTP_Q15 ); + VARDECL( opus_int16, sLTP ); + opus_int32 HarmShapeFIRPacked_Q14; + opus_int offset_Q10; + opus_int32 RDmin_Q10, Gain_Q10; + VARDECL( opus_int32, x_sc_Q10 ); + VARDECL( opus_int32, delayedGain_Q10 ); + VARDECL( NSQ_del_dec_struct, psDelDec ); + NSQ_del_dec_struct *psDD; +#ifdef OPUS_CHECK_ASM + silk_nsq_state NSQ_c; + SideInfoIndices psIndices_c; + opus_int8 pulses_c[ MAX_FRAME_LENGTH ]; + const opus_int8 *const pulses_a = pulses; +#endif + SAVE_STACK; + +#ifdef OPUS_CHECK_ASM + ( void )pulses_a; + silk_memcpy( &NSQ_c, NSQ, sizeof( NSQ_c ) ); + silk_memcpy( &psIndices_c, psIndices, sizeof( psIndices_c ) ); + silk_assert( psEncC->nb_subfr * psEncC->subfr_length <= MAX_FRAME_LENGTH ); + silk_memcpy( pulses_c, pulses, psEncC->nb_subfr * psEncC->subfr_length * sizeof( pulses[0] ) ); + silk_NSQ_del_dec_c( + psEncC, + &NSQ_c, + &psIndices_c, + x16, + pulses_c, + PredCoef_Q12, + LTPCoef_Q14, + AR_Q13, + HarmShapeGain_Q14, + Tilt_Q14, + LF_shp_Q14, + Gains_Q16, + pitchL, + Lambda_Q10, + LTP_scale_Q14 + ); +#endif + + /* Set unvoiced lag to the previous one, overwrite later for voiced */ + lag = NSQ->lagPrev; + + silk_assert( NSQ->prev_gain_Q16 != 0 ); + + /* Initialize delayed decision states */ + ALLOC( psDelDec, psEncC->nStatesDelayedDecision, NSQ_del_dec_struct ); + silk_memset( psDelDec, 0, psEncC->nStatesDelayedDecision * sizeof( NSQ_del_dec_struct ) ); + for( k = 0; k < psEncC->nStatesDelayedDecision; k++ ) { + psDD = &psDelDec[ k ]; + psDD->Seed = ( k + psIndices->Seed ) & 3; + psDD->SeedInit = psDD->Seed; + psDD->RD_Q10 = 0; + psDD->LF_AR_Q14 = NSQ->sLF_AR_shp_Q14; + psDD->Diff_Q14 = NSQ->sDiff_shp_Q14; + psDD->Shape_Q14[ 0 ] = NSQ->sLTP_shp_Q14[ psEncC->ltp_mem_length - 1 ]; + silk_memcpy( psDD->sLPC_Q14, NSQ->sLPC_Q14, NSQ_LPC_BUF_LENGTH * sizeof( opus_int32 ) ); + silk_memcpy( psDD->sAR2_Q14, NSQ->sAR2_Q14, sizeof( 
NSQ->sAR2_Q14 ) ); + } + + offset_Q10 = silk_Quantization_Offsets_Q10[ psIndices->signalType >> 1 ][ psIndices->quantOffsetType ]; + smpl_buf_idx = 0; /* index of oldest samples */ + + decisionDelay = silk_min_int( DECISION_DELAY, psEncC->subfr_length ); + + /* For voiced frames limit the decision delay to lower than the pitch lag */ + if( psIndices->signalType == TYPE_VOICED ) { + for( k = 0; k < psEncC->nb_subfr; k++ ) { + decisionDelay = silk_min_int( decisionDelay, pitchL[ k ] - LTP_ORDER / 2 - 1 ); + } + } else { + if( lag > 0 ) { + decisionDelay = silk_min_int( decisionDelay, lag - LTP_ORDER / 2 - 1 ); + } + } + + if( psIndices->NLSFInterpCoef_Q2 == 4 ) { + LSF_interpolation_flag = 0; + } else { + LSF_interpolation_flag = 1; + } + + ALLOC( sLTP_Q15, psEncC->ltp_mem_length + psEncC->frame_length, opus_int32 ); + ALLOC( sLTP, psEncC->ltp_mem_length + psEncC->frame_length, opus_int16 ); + ALLOC( x_sc_Q10, psEncC->subfr_length, opus_int32 ); + ALLOC( delayedGain_Q10, DECISION_DELAY, opus_int32 ); + /* Set up pointers to start of sub frame */ + pxq = &NSQ->xq[ psEncC->ltp_mem_length ]; + NSQ->sLTP_shp_buf_idx = psEncC->ltp_mem_length; + NSQ->sLTP_buf_idx = psEncC->ltp_mem_length; + subfr = 0; + for( k = 0; k < psEncC->nb_subfr; k++ ) { + A_Q12 = &PredCoef_Q12[ ( ( k >> 1 ) | ( 1 - LSF_interpolation_flag ) ) * MAX_LPC_ORDER ]; + B_Q14 = <PCoef_Q14[ k * LTP_ORDER ]; + AR_shp_Q13 = &AR_Q13[ k * MAX_SHAPE_LPC_ORDER ]; + + /* Noise shape parameters */ + silk_assert( HarmShapeGain_Q14[ k ] >= 0 ); + HarmShapeFIRPacked_Q14 = silk_RSHIFT( HarmShapeGain_Q14[ k ], 2 ); + HarmShapeFIRPacked_Q14 |= silk_LSHIFT( (opus_int32)silk_RSHIFT( HarmShapeGain_Q14[ k ], 1 ), 16 ); + + NSQ->rewhite_flag = 0; + if( psIndices->signalType == TYPE_VOICED ) { + /* Voiced */ + lag = pitchL[ k ]; + + /* Re-whitening */ + if( ( k & ( 3 - silk_LSHIFT( LSF_interpolation_flag, 1 ) ) ) == 0 ) { + if( k == 2 ) { + /* RESET DELAYED DECISIONS */ + /* Find winner */ + RDmin_Q10 = psDelDec[ 0 ].RD_Q10; + Winner_ind = 0; + for( i = 1; i < psEncC->nStatesDelayedDecision; i++ ) { + if( psDelDec[ i ].RD_Q10 < RDmin_Q10 ) { + RDmin_Q10 = psDelDec[ i ].RD_Q10; + Winner_ind = i; + } + } + for( i = 0; i < psEncC->nStatesDelayedDecision; i++ ) { + if( i != Winner_ind ) { + psDelDec[ i ].RD_Q10 += ( silk_int32_MAX >> 4 ); + silk_assert( psDelDec[ i ].RD_Q10 >= 0 ); + } + } + + /* Copy final part of signals from winner state to output and long-term filter states */ + psDD = &psDelDec[ Winner_ind ]; + last_smple_idx = smpl_buf_idx + decisionDelay; + for( i = 0; i < decisionDelay; i++ ) { + last_smple_idx = ( last_smple_idx - 1 ) % DECISION_DELAY; + if( last_smple_idx < 0 ) last_smple_idx += DECISION_DELAY; + pulses[ i - decisionDelay ] = (opus_int8)silk_RSHIFT_ROUND( psDD->Q_Q10[ last_smple_idx ], 10 ); + pxq[ i - decisionDelay ] = (opus_int16)silk_SAT16( silk_RSHIFT_ROUND( + silk_SMULWW( psDD->Xq_Q14[ last_smple_idx ], Gains_Q16[ 1 ] ), 14 ) ); + NSQ->sLTP_shp_Q14[ NSQ->sLTP_shp_buf_idx - decisionDelay + i ] = psDD->Shape_Q14[ last_smple_idx ]; + } + + subfr = 0; + } + + /* Rewhiten with new A coefs */ + start_idx = psEncC->ltp_mem_length - lag - psEncC->predictLPCOrder - LTP_ORDER / 2; + celt_assert( start_idx > 0 ); + + silk_LPC_analysis_filter( &sLTP[ start_idx ], &NSQ->xq[ start_idx + k * psEncC->subfr_length ], + A_Q12, psEncC->ltp_mem_length - start_idx, psEncC->predictLPCOrder, psEncC->arch ); + + NSQ->sLTP_buf_idx = psEncC->ltp_mem_length; + NSQ->rewhite_flag = 1; + } + } + + silk_nsq_del_dec_scale_states_sse4_1( psEncC, NSQ, 
psDelDec, x16, x_sc_Q10, sLTP, sLTP_Q15, k, + psEncC->nStatesDelayedDecision, LTP_scale_Q14, Gains_Q16, pitchL, psIndices->signalType, decisionDelay ); + + silk_noise_shape_quantizer_del_dec_sse4_1( NSQ, psDelDec, psIndices->signalType, x_sc_Q10, pulses, pxq, sLTP_Q15, + delayedGain_Q10, A_Q12, B_Q14, AR_shp_Q13, lag, HarmShapeFIRPacked_Q14, Tilt_Q14[ k ], LF_shp_Q14[ k ], + Gains_Q16[ k ], Lambda_Q10, offset_Q10, psEncC->subfr_length, subfr++, psEncC->shapingLPCOrder, + psEncC->predictLPCOrder, psEncC->warping_Q16, psEncC->nStatesDelayedDecision, &smpl_buf_idx, decisionDelay ); + + x16 += psEncC->subfr_length; + pulses += psEncC->subfr_length; + pxq += psEncC->subfr_length; + } + + /* Find winner */ + RDmin_Q10 = psDelDec[ 0 ].RD_Q10; + Winner_ind = 0; + for( k = 1; k < psEncC->nStatesDelayedDecision; k++ ) { + if( psDelDec[ k ].RD_Q10 < RDmin_Q10 ) { + RDmin_Q10 = psDelDec[ k ].RD_Q10; + Winner_ind = k; + } + } + + /* Copy final part of signals from winner state to output and long-term filter states */ + psDD = &psDelDec[ Winner_ind ]; + psIndices->Seed = psDD->SeedInit; + last_smple_idx = smpl_buf_idx + decisionDelay; + Gain_Q10 = silk_RSHIFT32( Gains_Q16[ psEncC->nb_subfr - 1 ], 6 ); + for( i = 0; i < decisionDelay; i++ ) { + last_smple_idx = ( last_smple_idx - 1 ) % DECISION_DELAY; + if( last_smple_idx < 0 ) last_smple_idx += DECISION_DELAY; + + pulses[ i - decisionDelay ] = (opus_int8)silk_RSHIFT_ROUND( psDD->Q_Q10[ last_smple_idx ], 10 ); + pxq[ i - decisionDelay ] = (opus_int16)silk_SAT16( silk_RSHIFT_ROUND( + silk_SMULWW( psDD->Xq_Q14[ last_smple_idx ], Gain_Q10 ), 8 ) ); + NSQ->sLTP_shp_Q14[ NSQ->sLTP_shp_buf_idx - decisionDelay + i ] = psDD->Shape_Q14[ last_smple_idx ]; + } + silk_memcpy( NSQ->sLPC_Q14, &psDD->sLPC_Q14[ psEncC->subfr_length ], NSQ_LPC_BUF_LENGTH * sizeof( opus_int32 ) ); + silk_memcpy( NSQ->sAR2_Q14, psDD->sAR2_Q14, sizeof( psDD->sAR2_Q14 ) ); + + /* Update states */ + NSQ->sLF_AR_shp_Q14 = psDD->LF_AR_Q14; + NSQ->sDiff_shp_Q14 = psDD->Diff_Q14; + NSQ->lagPrev = pitchL[ psEncC->nb_subfr - 1 ]; + + /* Save quantized speech signal */ + silk_memmove( NSQ->xq, &NSQ->xq[ psEncC->frame_length ], psEncC->ltp_mem_length * sizeof( opus_int16 ) ); + silk_memmove( NSQ->sLTP_shp_Q14, &NSQ->sLTP_shp_Q14[ psEncC->frame_length ], psEncC->ltp_mem_length * sizeof( opus_int32 ) ); + +#ifdef OPUS_CHECK_ASM + silk_assert( !memcmp( &NSQ_c, NSQ, sizeof( NSQ_c ) ) ); + silk_assert( !memcmp( &psIndices_c, psIndices, sizeof( psIndices_c ) ) ); + silk_assert( !memcmp( pulses_c, pulses_a, psEncC->nb_subfr * psEncC->subfr_length * sizeof( pulses[0] ) ) ); +#endif + + RESTORE_STACK; +} + +/******************************************/ +/* Noise shape quantizer for one subframe */ +/******************************************/ +static OPUS_INLINE void silk_noise_shape_quantizer_del_dec_sse4_1( + silk_nsq_state *NSQ, /* I/O NSQ state */ + NSQ_del_dec_struct psDelDec[], /* I/O Delayed decision states */ + opus_int signalType, /* I Signal type */ + const opus_int32 x_Q10[], /* I */ + opus_int8 pulses[], /* O */ + opus_int16 xq[], /* O */ + opus_int32 sLTP_Q15[], /* I/O LTP filter state */ + opus_int32 delayedGain_Q10[], /* I/O Gain delay buffer */ + const opus_int16 a_Q12[], /* I Short term prediction coefs */ + const opus_int16 b_Q14[], /* I Long term prediction coefs */ + const opus_int16 AR_shp_Q13[], /* I Noise shaping coefs */ + opus_int lag, /* I Pitch lag */ + opus_int32 HarmShapeFIRPacked_Q14, /* I */ + opus_int Tilt_Q14, /* I Spectral tilt */ + opus_int32 LF_shp_Q14, /* I */ + opus_int32 
Gain_Q16, /* I */ + opus_int Lambda_Q10, /* I */ + opus_int offset_Q10, /* I */ + opus_int length, /* I Input length */ + opus_int subfr, /* I Subframe number */ + opus_int shapingLPCOrder, /* I Shaping LPC filter order */ + opus_int predictLPCOrder, /* I Prediction filter order */ + opus_int warping_Q16, /* I */ + opus_int nStatesDelayedDecision, /* I Number of states in decision tree */ + opus_int *smpl_buf_idx, /* I/O Index to newest samples in buffers */ + opus_int decisionDelay /* I */ +) +{ + opus_int i, j, k, Winner_ind, RDmin_ind, RDmax_ind, last_smple_idx; + opus_int32 Winner_rand_state; + opus_int32 LTP_pred_Q14, LPC_pred_Q14, n_AR_Q14, n_LTP_Q14; + opus_int32 n_LF_Q14, r_Q10, rr_Q10, rd1_Q10, rd2_Q10, RDmin_Q10, RDmax_Q10; + opus_int32 q1_Q0, q1_Q10, q2_Q10, exc_Q14, LPC_exc_Q14, xq_Q14, Gain_Q10; + opus_int32 tmp1, tmp2, sLF_AR_shp_Q14; + opus_int32 *pred_lag_ptr, *shp_lag_ptr, *psLPC_Q14; + + VARDECL( NSQ_sample_pair, psSampleState ); + NSQ_del_dec_struct *psDD; + NSQ_sample_struct *psSS; + + __m128i a_Q12_0123, a_Q12_4567, a_Q12_89AB, a_Q12_CDEF; + __m128i b_Q12_0123, b_sr_Q12_0123; + SAVE_STACK; + + celt_assert( nStatesDelayedDecision > 0 ); + ALLOC( psSampleState, nStatesDelayedDecision, NSQ_sample_pair ); + + int rdo_offset = (Lambda_Q10 >> 1) - 512; + + shp_lag_ptr = &NSQ->sLTP_shp_Q14[ NSQ->sLTP_shp_buf_idx - lag + HARM_SHAPE_FIR_TAPS / 2 ]; + pred_lag_ptr = &sLTP_Q15[ NSQ->sLTP_buf_idx - lag + LTP_ORDER / 2 ]; + Gain_Q10 = silk_RSHIFT( Gain_Q16, 6 ); + + a_Q12_0123 = OP_CVTEPI16_EPI32_M64( a_Q12 ); + a_Q12_4567 = OP_CVTEPI16_EPI32_M64( a_Q12 + 4 ); + + if( opus_likely( predictLPCOrder == 16 ) ) { + a_Q12_89AB = OP_CVTEPI16_EPI32_M64( a_Q12 + 8 ); + a_Q12_CDEF = OP_CVTEPI16_EPI32_M64( a_Q12 + 12 ); + } + + if( signalType == TYPE_VOICED ){ + b_Q12_0123 = OP_CVTEPI16_EPI32_M64( b_Q14 ); + b_sr_Q12_0123 = _mm_shuffle_epi32( b_Q12_0123, _MM_SHUFFLE( 0, 3, 2, 1 ) ); /* equal shift right 4 bytes */ + } + for( i = 0; i < length; i++ ) { + /* Perform common calculations used in all states */ + + /* Long-term prediction */ + if( signalType == TYPE_VOICED ) { + /* Unrolled loop */ + /* Avoids introducing a bias because silk_SMLAWB() always rounds to -inf */ + LTP_pred_Q14 = 2; + { + __m128i tmpa, tmpb, pred_lag_ptr_tmp; + pred_lag_ptr_tmp = _mm_loadu_si128( (__m128i *)(&pred_lag_ptr[ -3 ] ) ); + pred_lag_ptr_tmp = _mm_shuffle_epi32( pred_lag_ptr_tmp, 0x1B ); + tmpa = _mm_mul_epi32( pred_lag_ptr_tmp, b_Q12_0123 ); + tmpa = _mm_srli_si128( tmpa, 2 ); + + pred_lag_ptr_tmp = _mm_shuffle_epi32( pred_lag_ptr_tmp, _MM_SHUFFLE( 0, 3, 2, 1 ) );/* equal shift right 4 bytes */ + pred_lag_ptr_tmp = _mm_mul_epi32( pred_lag_ptr_tmp, b_sr_Q12_0123 ); + pred_lag_ptr_tmp = _mm_srli_si128( pred_lag_ptr_tmp, 2 ); + pred_lag_ptr_tmp = _mm_add_epi32( pred_lag_ptr_tmp, tmpa ); + + tmpb = _mm_shuffle_epi32( pred_lag_ptr_tmp, _MM_SHUFFLE( 0, 0, 3, 2 ) );/* equal shift right 8 bytes */ + pred_lag_ptr_tmp = _mm_add_epi32( pred_lag_ptr_tmp, tmpb ); + LTP_pred_Q14 += _mm_cvtsi128_si32( pred_lag_ptr_tmp ); + + LTP_pred_Q14 = silk_SMLAWB( LTP_pred_Q14, pred_lag_ptr[ -4 ], b_Q14[ 4 ] ); + LTP_pred_Q14 = silk_LSHIFT( LTP_pred_Q14, 1 ); /* Q13 -> Q14 */ + pred_lag_ptr++; + } + } else { + LTP_pred_Q14 = 0; + } + + /* Long-term shaping */ + if( lag > 0 ) { + /* Symmetric, packed FIR coefficients */ + n_LTP_Q14 = silk_SMULWB( silk_ADD_SAT32( shp_lag_ptr[ 0 ], shp_lag_ptr[ -2 ] ), HarmShapeFIRPacked_Q14 ); + n_LTP_Q14 = silk_SMLAWT( n_LTP_Q14, shp_lag_ptr[ -1 ], HarmShapeFIRPacked_Q14 ); + n_LTP_Q14 = 
silk_SUB_LSHIFT32( LTP_pred_Q14, n_LTP_Q14, 2 ); /* Q12 -> Q14 */ + shp_lag_ptr++; + } else { + n_LTP_Q14 = 0; + } + { + __m128i tmpa, tmpb, psLPC_Q14_tmp, a_Q12_tmp; + + for( k = 0; k < nStatesDelayedDecision; k++ ) { + /* Delayed decision state */ + psDD = &psDelDec[ k ]; + + /* Sample state */ + psSS = psSampleState[ k ]; + + /* Generate dither */ + psDD->Seed = silk_RAND( psDD->Seed ); + + /* Pointer used in short term prediction and shaping */ + psLPC_Q14 = &psDD->sLPC_Q14[ NSQ_LPC_BUF_LENGTH - 1 + i ]; + /* Short-term prediction */ + silk_assert( predictLPCOrder == 10 || predictLPCOrder == 16 ); + /* Avoids introducing a bias because silk_SMLAWB() always rounds to -inf */ + LPC_pred_Q14 = silk_RSHIFT( predictLPCOrder, 1 ); + + tmpb = _mm_setzero_si128(); + + /* step 1 */ + psLPC_Q14_tmp = _mm_loadu_si128( (__m128i *)(&psLPC_Q14[ -3 ] ) ); /* -3, -2 , -1, 0 */ + psLPC_Q14_tmp = _mm_shuffle_epi32( psLPC_Q14_tmp, 0x1B ); /* 0, -1, -2, -3 */ + tmpa = _mm_mul_epi32( psLPC_Q14_tmp, a_Q12_0123 ); /* 0, -1, -2, -3 * 0123 -> 0*0, 2*-2 */ + + tmpa = _mm_srli_epi64( tmpa, 16 ); + tmpb = _mm_add_epi32( tmpb, tmpa ); + + psLPC_Q14_tmp = _mm_shuffle_epi32( psLPC_Q14_tmp, _MM_SHUFFLE( 0, 3, 2, 1 ) ); /* equal shift right 4 bytes */ + a_Q12_tmp = _mm_shuffle_epi32( a_Q12_0123, _MM_SHUFFLE(0, 3, 2, 1 ) ); /* equal shift right 4 bytes */ + psLPC_Q14_tmp = _mm_mul_epi32( psLPC_Q14_tmp, a_Q12_tmp ); /* 1*-1, 3*-3 */ + psLPC_Q14_tmp = _mm_srli_epi64( psLPC_Q14_tmp, 16 ); + tmpb = _mm_add_epi32( tmpb, psLPC_Q14_tmp ); + + /* step 2 */ + psLPC_Q14_tmp = _mm_loadu_si128( (__m128i *)(&psLPC_Q14[ -7 ] ) ); + psLPC_Q14_tmp = _mm_shuffle_epi32( psLPC_Q14_tmp, 0x1B ); + tmpa = _mm_mul_epi32( psLPC_Q14_tmp, a_Q12_4567 ); + tmpa = _mm_srli_epi64( tmpa, 16 ); + tmpb = _mm_add_epi32( tmpb, tmpa ); + + psLPC_Q14_tmp = _mm_shuffle_epi32( psLPC_Q14_tmp, _MM_SHUFFLE( 0, 3, 2, 1 ) ); /* equal shift right 4 bytes */ + a_Q12_tmp = _mm_shuffle_epi32( a_Q12_4567, _MM_SHUFFLE(0, 3, 2, 1 ) ); /* equal shift right 4 bytes */ + psLPC_Q14_tmp = _mm_mul_epi32( psLPC_Q14_tmp, a_Q12_tmp ); + psLPC_Q14_tmp = _mm_srli_epi64( psLPC_Q14_tmp, 16 ); + tmpb = _mm_add_epi32( tmpb, psLPC_Q14_tmp ); + + if ( opus_likely( predictLPCOrder == 16 ) ) + { + /* step 3 */ + psLPC_Q14_tmp = _mm_loadu_si128( (__m128i *)(&psLPC_Q14[ -11 ] ) ); + psLPC_Q14_tmp = _mm_shuffle_epi32( psLPC_Q14_tmp, 0x1B ); + tmpa = _mm_mul_epi32( psLPC_Q14_tmp, a_Q12_89AB ); + tmpa = _mm_srli_epi64( tmpa, 16 ); + tmpb = _mm_add_epi32( tmpb, tmpa ); + + psLPC_Q14_tmp = _mm_shuffle_epi32( psLPC_Q14_tmp, _MM_SHUFFLE( 0, 3, 2, 1 ) ); /* equal shift right 4 bytes */ + a_Q12_tmp = _mm_shuffle_epi32( a_Q12_89AB, _MM_SHUFFLE(0, 3, 2, 1 ) );/* equal shift right 4 bytes */ + psLPC_Q14_tmp = _mm_mul_epi32( psLPC_Q14_tmp, a_Q12_tmp ); + psLPC_Q14_tmp = _mm_srli_epi64( psLPC_Q14_tmp, 16 ); + tmpb = _mm_add_epi32( tmpb, psLPC_Q14_tmp ); + + /* step 4 */ + psLPC_Q14_tmp = _mm_loadu_si128( (__m128i *)(&psLPC_Q14[ -15 ] ) ); + psLPC_Q14_tmp = _mm_shuffle_epi32( psLPC_Q14_tmp, 0x1B ); + tmpa = _mm_mul_epi32( psLPC_Q14_tmp, a_Q12_CDEF ); + tmpa = _mm_srli_epi64( tmpa, 16 ); + tmpb = _mm_add_epi32( tmpb, tmpa ); + + psLPC_Q14_tmp = _mm_shuffle_epi32( psLPC_Q14_tmp, _MM_SHUFFLE( 0, 3, 2, 1 ) ); /* equal shift right 4 bytes */ + a_Q12_tmp = _mm_shuffle_epi32( a_Q12_CDEF, _MM_SHUFFLE(0, 3, 2, 1 ) ); /* equal shift right 4 bytes */ + psLPC_Q14_tmp = _mm_mul_epi32( psLPC_Q14_tmp, a_Q12_tmp ); + psLPC_Q14_tmp = _mm_srli_epi64( psLPC_Q14_tmp, 16 ); + tmpb = _mm_add_epi32( tmpb, psLPC_Q14_tmp ); + + 
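Note: the "step 1..4" blocks above lean on _mm_mul_epi32(), which only multiplies the even 32-bit lanes, so the _MM_SHUFFLE( 0, 3, 2, 1 ) rotation (the "equal shift right 4 bytes" in the comments) is what routes the odd lanes through the same instruction. A hypothetical 4-tap slice of that pattern, with illustrative names (x is assumed to be already lane-reversed by the 0x1B shuffle, c holds four a_Q12-style coefficients widened to 32 bits):

#include <smmintrin.h>  /* SSE4.1 */

/* Accumulate four terms of sum_j( ( state[ -j ] * coef[ j ] ) >> 16 ). */
static opus_int32 lpc_dot4_sketch( __m128i x, __m128i c )
{
    __m128i p02 = _mm_srli_epi64( _mm_mul_epi32( x, c ), 16 );          /* terms 0 and 2, in lanes 0 and 2 */
    __m128i x13 = _mm_shuffle_epi32( x, _MM_SHUFFLE( 0, 3, 2, 1 ) );
    __m128i c13 = _mm_shuffle_epi32( c, _MM_SHUFFLE( 0, 3, 2, 1 ) );
    __m128i p13 = _mm_srli_epi64( _mm_mul_epi32( x13, c13 ), 16 );      /* terms 1 and 3, also in lanes 0 and 2 */
    __m128i acc = _mm_add_epi32( p02, p13 );
    acc = _mm_add_epi32( acc, _mm_shuffle_epi32( acc, _MM_SHUFFLE( 0, 0, 3, 2 ) ) ); /* fold lane 2 into lane 0 */
    return _mm_cvtsi128_si32( acc );    /* lanes 1 and 3 only hold shift residue and are dropped */
}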
/* add at last */ + /* equal shift right 8 bytes*/ + tmpa = _mm_shuffle_epi32( tmpb, _MM_SHUFFLE( 0, 0, 3, 2 ) ); + tmpb = _mm_add_epi32( tmpb, tmpa ); + LPC_pred_Q14 += _mm_cvtsi128_si32( tmpb ); + } + else + { + /* add at last */ + tmpa = _mm_shuffle_epi32( tmpb, _MM_SHUFFLE( 0, 0, 3, 2 ) ); /* equal shift right 8 bytes*/ + tmpb = _mm_add_epi32( tmpb, tmpa ); + LPC_pred_Q14 += _mm_cvtsi128_si32( tmpb ); + + LPC_pred_Q14 = silk_SMLAWB( LPC_pred_Q14, psLPC_Q14[ -8 ], a_Q12[ 8 ] ); + LPC_pred_Q14 = silk_SMLAWB( LPC_pred_Q14, psLPC_Q14[ -9 ], a_Q12[ 9 ] ); + } + + LPC_pred_Q14 = silk_LSHIFT( LPC_pred_Q14, 4 ); /* Q10 -> Q14 */ + + /* Noise shape feedback */ + celt_assert( ( shapingLPCOrder & 1 ) == 0 ); /* check that order is even */ + /* Output of lowpass section */ + tmp2 = silk_SMLAWB( psDD->Diff_Q14, psDD->sAR2_Q14[ 0 ], warping_Q16 ); + /* Output of allpass section */ + tmp1 = silk_SMLAWB( psDD->sAR2_Q14[ 0 ], psDD->sAR2_Q14[ 1 ] - tmp2, warping_Q16 ); + psDD->sAR2_Q14[ 0 ] = tmp2; + n_AR_Q14 = silk_RSHIFT( shapingLPCOrder, 1 ); + n_AR_Q14 = silk_SMLAWB( n_AR_Q14, tmp2, AR_shp_Q13[ 0 ] ); + /* Loop over allpass sections */ + for( j = 2; j < shapingLPCOrder; j += 2 ) { + /* Output of allpass section */ + tmp2 = silk_SMLAWB( psDD->sAR2_Q14[ j - 1 ], psDD->sAR2_Q14[ j + 0 ] - tmp1, warping_Q16 ); + psDD->sAR2_Q14[ j - 1 ] = tmp1; + n_AR_Q14 = silk_SMLAWB( n_AR_Q14, tmp1, AR_shp_Q13[ j - 1 ] ); + /* Output of allpass section */ + tmp1 = silk_SMLAWB( psDD->sAR2_Q14[ j + 0 ], psDD->sAR2_Q14[ j + 1 ] - tmp2, warping_Q16 ); + psDD->sAR2_Q14[ j + 0 ] = tmp2; + n_AR_Q14 = silk_SMLAWB( n_AR_Q14, tmp2, AR_shp_Q13[ j ] ); + } + psDD->sAR2_Q14[ shapingLPCOrder - 1 ] = tmp1; + n_AR_Q14 = silk_SMLAWB( n_AR_Q14, tmp1, AR_shp_Q13[ shapingLPCOrder - 1 ] ); + + n_AR_Q14 = silk_LSHIFT( n_AR_Q14, 1 ); /* Q11 -> Q12 */ + n_AR_Q14 = silk_SMLAWB( n_AR_Q14, psDD->LF_AR_Q14, Tilt_Q14 ); /* Q12 */ + n_AR_Q14 = silk_LSHIFT( n_AR_Q14, 2 ); /* Q12 -> Q14 */ + + n_LF_Q14 = silk_SMULWB( psDD->Shape_Q14[ *smpl_buf_idx ], LF_shp_Q14 ); /* Q12 */ + n_LF_Q14 = silk_SMLAWT( n_LF_Q14, psDD->LF_AR_Q14, LF_shp_Q14 ); /* Q12 */ + n_LF_Q14 = silk_LSHIFT( n_LF_Q14, 2 ); /* Q12 -> Q14 */ + + /* Input minus prediction plus noise feedback */ + /* r = x[ i ] - LTP_pred - LPC_pred + n_AR + n_Tilt + n_LF + n_LTP */ + tmp1 = silk_ADD_SAT32( n_AR_Q14, n_LF_Q14 ); /* Q14 */ + tmp2 = silk_ADD32( n_LTP_Q14, LPC_pred_Q14 ); /* Q13 */ + tmp1 = silk_SUB_SAT32( tmp2, tmp1 ); /* Q13 */ + tmp1 = silk_RSHIFT_ROUND( tmp1, 4 ); /* Q10 */ + + r_Q10 = silk_SUB32( x_Q10[ i ], tmp1 ); /* residual error Q10 */ + + /* Flip sign depending on dither */ + if ( psDD->Seed < 0 ) { + r_Q10 = -r_Q10; + } + r_Q10 = silk_LIMIT_32( r_Q10, -(31 << 10), 30 << 10 ); + + /* Find two quantization level candidates and measure their rate-distortion */ + q1_Q10 = silk_SUB32( r_Q10, offset_Q10 ); + q1_Q0 = silk_RSHIFT( q1_Q10, 10 ); + if (Lambda_Q10 > 2048) { + /* For aggressive RDO, the bias becomes more than one pulse. 
*/ + if (q1_Q10 > rdo_offset) { + q1_Q0 = silk_RSHIFT( q1_Q10 - rdo_offset, 10 ); + } else if (q1_Q10 < -rdo_offset) { + q1_Q0 = silk_RSHIFT( q1_Q10 + rdo_offset, 10 ); + } else if (q1_Q10 < 0) { + q1_Q0 = -1; + } else { + q1_Q0 = 0; + } + } + if( q1_Q0 > 0 ) { + q1_Q10 = silk_SUB32( silk_LSHIFT( q1_Q0, 10 ), QUANT_LEVEL_ADJUST_Q10 ); + q1_Q10 = silk_ADD32( q1_Q10, offset_Q10 ); + q2_Q10 = silk_ADD32( q1_Q10, 1024 ); + rd1_Q10 = silk_SMULBB( q1_Q10, Lambda_Q10 ); + rd2_Q10 = silk_SMULBB( q2_Q10, Lambda_Q10 ); + } else if( q1_Q0 == 0 ) { + q1_Q10 = offset_Q10; + q2_Q10 = silk_ADD32( q1_Q10, 1024 - QUANT_LEVEL_ADJUST_Q10 ); + rd1_Q10 = silk_SMULBB( q1_Q10, Lambda_Q10 ); + rd2_Q10 = silk_SMULBB( q2_Q10, Lambda_Q10 ); + } else if( q1_Q0 == -1 ) { + q2_Q10 = offset_Q10; + q1_Q10 = silk_SUB32( q2_Q10, 1024 - QUANT_LEVEL_ADJUST_Q10 ); + rd1_Q10 = silk_SMULBB( -q1_Q10, Lambda_Q10 ); + rd2_Q10 = silk_SMULBB( q2_Q10, Lambda_Q10 ); + } else { /* q1_Q0 < -1 */ + q1_Q10 = silk_ADD32( silk_LSHIFT( q1_Q0, 10 ), QUANT_LEVEL_ADJUST_Q10 ); + q1_Q10 = silk_ADD32( q1_Q10, offset_Q10 ); + q2_Q10 = silk_ADD32( q1_Q10, 1024 ); + rd1_Q10 = silk_SMULBB( -q1_Q10, Lambda_Q10 ); + rd2_Q10 = silk_SMULBB( -q2_Q10, Lambda_Q10 ); + } + rr_Q10 = silk_SUB32( r_Q10, q1_Q10 ); + rd1_Q10 = silk_RSHIFT( silk_SMLABB( rd1_Q10, rr_Q10, rr_Q10 ), 10 ); + rr_Q10 = silk_SUB32( r_Q10, q2_Q10 ); + rd2_Q10 = silk_RSHIFT( silk_SMLABB( rd2_Q10, rr_Q10, rr_Q10 ), 10 ); + + if( rd1_Q10 < rd2_Q10 ) { + psSS[ 0 ].RD_Q10 = silk_ADD32( psDD->RD_Q10, rd1_Q10 ); + psSS[ 1 ].RD_Q10 = silk_ADD32( psDD->RD_Q10, rd2_Q10 ); + psSS[ 0 ].Q_Q10 = q1_Q10; + psSS[ 1 ].Q_Q10 = q2_Q10; + } else { + psSS[ 0 ].RD_Q10 = silk_ADD32( psDD->RD_Q10, rd2_Q10 ); + psSS[ 1 ].RD_Q10 = silk_ADD32( psDD->RD_Q10, rd1_Q10 ); + psSS[ 0 ].Q_Q10 = q2_Q10; + psSS[ 1 ].Q_Q10 = q1_Q10; + } + + /* Update states for best quantization */ + + /* Quantized excitation */ + exc_Q14 = silk_LSHIFT32( psSS[ 0 ].Q_Q10, 4 ); + if ( psDD->Seed < 0 ) { + exc_Q14 = -exc_Q14; + } + + /* Add predictions */ + LPC_exc_Q14 = silk_ADD32( exc_Q14, LTP_pred_Q14 ); + xq_Q14 = silk_ADD32( LPC_exc_Q14, LPC_pred_Q14 ); + + /* Update states */ + psSS[ 0 ].Diff_Q14 = silk_SUB_LSHIFT32( xq_Q14, x_Q10[ i ], 4 ); + sLF_AR_shp_Q14 = silk_SUB32( psSS[ 0 ].Diff_Q14, n_AR_Q14 ); + psSS[ 0 ].sLTP_shp_Q14 = silk_SUB_SAT32( sLF_AR_shp_Q14, n_LF_Q14 ); + psSS[ 0 ].LF_AR_Q14 = sLF_AR_shp_Q14; + psSS[ 0 ].LPC_exc_Q14 = LPC_exc_Q14; + psSS[ 0 ].xq_Q14 = xq_Q14; + + /* Update states for second best quantization */ + + /* Quantized excitation */ + exc_Q14 = silk_LSHIFT32( psSS[ 1 ].Q_Q10, 4 ); + if ( psDD->Seed < 0 ) { + exc_Q14 = -exc_Q14; + } + + /* Add predictions */ + LPC_exc_Q14 = silk_ADD32( exc_Q14, LTP_pred_Q14 ); + xq_Q14 = silk_ADD32( LPC_exc_Q14, LPC_pred_Q14 ); + + /* Update states */ + psSS[ 1 ].Diff_Q14 = silk_SUB_LSHIFT32( xq_Q14, x_Q10[ i ], 4 ); + sLF_AR_shp_Q14 = silk_SUB32( psSS[ 1 ].Diff_Q14, n_AR_Q14 ); + psSS[ 1 ].sLTP_shp_Q14 = silk_SUB_SAT32( sLF_AR_shp_Q14, n_LF_Q14 ); + psSS[ 1 ].LF_AR_Q14 = sLF_AR_shp_Q14; + psSS[ 1 ].LPC_exc_Q14 = LPC_exc_Q14; + psSS[ 1 ].xq_Q14 = xq_Q14; + } + } + *smpl_buf_idx = ( *smpl_buf_idx - 1 ) % DECISION_DELAY; + if( *smpl_buf_idx < 0 ) *smpl_buf_idx += DECISION_DELAY; + last_smple_idx = ( *smpl_buf_idx + decisionDelay ) % DECISION_DELAY; + + /* Find winner */ + RDmin_Q10 = psSampleState[ 0 ][ 0 ].RD_Q10; + Winner_ind = 0; + for( k = 1; k < nStatesDelayedDecision; k++ ) { + if( psSampleState[ k ][ 0 ].RD_Q10 < RDmin_Q10 ) { + RDmin_Q10 = psSampleState[ k ][ 0 ].RD_Q10; 
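Note: the Lambda_Q10 > 2048 branch above is new relative to the code being deleted; with rdo_offset = ( Lambda_Q10 >> 1 ) - 512 it widens the range of residuals that collapse onto the two smallest pulse levels, biasing toward fewer pulses when the per-pulse rate cost is high. Restated as a stand-alone helper (name is illustrative, silk_RSHIFT as in the surrounding SILK code):

/* Bias the Q10 residual toward zero before taking the integer pulse level. */
static opus_int biased_pulse_level_sketch( opus_int32 q1_Q10, opus_int32 rdo_offset )
{
    if( q1_Q10 > rdo_offset ) {
        return silk_RSHIFT( q1_Q10 - rdo_offset, 10 );
    } else if( q1_Q10 < -rdo_offset ) {
        return silk_RSHIFT( q1_Q10 + rdo_offset, 10 );
    } else if( q1_Q10 < 0 ) {
        return -1;
    }
    return 0;
}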
+ Winner_ind = k; + } + } + + /* Increase RD values of expired states */ + Winner_rand_state = psDelDec[ Winner_ind ].RandState[ last_smple_idx ]; + for( k = 0; k < nStatesDelayedDecision; k++ ) { + if( psDelDec[ k ].RandState[ last_smple_idx ] != Winner_rand_state ) { + psSampleState[ k ][ 0 ].RD_Q10 = silk_ADD32( psSampleState[ k ][ 0 ].RD_Q10, silk_int32_MAX >> 4 ); + psSampleState[ k ][ 1 ].RD_Q10 = silk_ADD32( psSampleState[ k ][ 1 ].RD_Q10, silk_int32_MAX >> 4 ); + silk_assert( psSampleState[ k ][ 0 ].RD_Q10 >= 0 ); + } + } + + /* Find worst in first set and best in second set */ + RDmax_Q10 = psSampleState[ 0 ][ 0 ].RD_Q10; + RDmin_Q10 = psSampleState[ 0 ][ 1 ].RD_Q10; + RDmax_ind = 0; + RDmin_ind = 0; + for( k = 1; k < nStatesDelayedDecision; k++ ) { + /* find worst in first set */ + if( psSampleState[ k ][ 0 ].RD_Q10 > RDmax_Q10 ) { + RDmax_Q10 = psSampleState[ k ][ 0 ].RD_Q10; + RDmax_ind = k; + } + /* find best in second set */ + if( psSampleState[ k ][ 1 ].RD_Q10 < RDmin_Q10 ) { + RDmin_Q10 = psSampleState[ k ][ 1 ].RD_Q10; + RDmin_ind = k; + } + } + + /* Replace a state if best from second set outperforms worst in first set */ + if( RDmin_Q10 < RDmax_Q10 ) { + silk_memcpy( ( (opus_int32 *)&psDelDec[ RDmax_ind ] ) + i, + ( (opus_int32 *)&psDelDec[ RDmin_ind ] ) + i, sizeof( NSQ_del_dec_struct ) - i * sizeof( opus_int32) ); + silk_memcpy( &psSampleState[ RDmax_ind ][ 0 ], &psSampleState[ RDmin_ind ][ 1 ], sizeof( NSQ_sample_struct ) ); + } + + /* Write samples from winner to output and long-term filter states */ + psDD = &psDelDec[ Winner_ind ]; + if( subfr > 0 || i >= decisionDelay ) { + pulses[ i - decisionDelay ] = (opus_int8)silk_RSHIFT_ROUND( psDD->Q_Q10[ last_smple_idx ], 10 ); + xq[ i - decisionDelay ] = (opus_int16)silk_SAT16( silk_RSHIFT_ROUND( + silk_SMULWW( psDD->Xq_Q14[ last_smple_idx ], delayedGain_Q10[ last_smple_idx ] ), 8 ) ); + NSQ->sLTP_shp_Q14[ NSQ->sLTP_shp_buf_idx - decisionDelay ] = psDD->Shape_Q14[ last_smple_idx ]; + sLTP_Q15[ NSQ->sLTP_buf_idx - decisionDelay ] = psDD->Pred_Q15[ last_smple_idx ]; + } + NSQ->sLTP_shp_buf_idx++; + NSQ->sLTP_buf_idx++; + + /* Update states */ + for( k = 0; k < nStatesDelayedDecision; k++ ) { + psDD = &psDelDec[ k ]; + psSS = &psSampleState[ k ][ 0 ]; + psDD->LF_AR_Q14 = psSS->LF_AR_Q14; + psDD->Diff_Q14 = psSS->Diff_Q14; + psDD->sLPC_Q14[ NSQ_LPC_BUF_LENGTH + i ] = psSS->xq_Q14; + psDD->Xq_Q14[ *smpl_buf_idx ] = psSS->xq_Q14; + psDD->Q_Q10[ *smpl_buf_idx ] = psSS->Q_Q10; + psDD->Pred_Q15[ *smpl_buf_idx ] = silk_LSHIFT32( psSS->LPC_exc_Q14, 1 ); + psDD->Shape_Q14[ *smpl_buf_idx ] = psSS->sLTP_shp_Q14; + psDD->Seed = silk_ADD32_ovflw( psDD->Seed, silk_RSHIFT_ROUND( psSS->Q_Q10, 10 ) ); + psDD->RandState[ *smpl_buf_idx ] = psDD->Seed; + psDD->RD_Q10 = psSS->RD_Q10; + } + delayedGain_Q10[ *smpl_buf_idx ] = Gain_Q10; + } + /* Update LPC states */ + for( k = 0; k < nStatesDelayedDecision; k++ ) { + psDD = &psDelDec[ k ]; + silk_memcpy( psDD->sLPC_Q14, &psDD->sLPC_Q14[ length ], NSQ_LPC_BUF_LENGTH * sizeof( opus_int32 ) ); + } + RESTORE_STACK; +} + +static OPUS_INLINE void silk_nsq_del_dec_scale_states_sse4_1( + const silk_encoder_state *psEncC, /* I Encoder State */ + silk_nsq_state *NSQ, /* I/O NSQ state */ + NSQ_del_dec_struct psDelDec[], /* I/O Delayed decision states */ + const opus_int16 x16[], /* I Input */ + opus_int32 x_sc_Q10[], /* O Input scaled with 1/Gain in Q10 */ + const opus_int16 sLTP[], /* I Re-whitened LTP state in Q0 */ + opus_int32 sLTP_Q15[], /* O LTP state matching scaled input */ + opus_int subfr, /* I 
Subframe number */ + opus_int nStatesDelayedDecision, /* I Number of del dec states */ + const opus_int LTP_scale_Q14, /* I LTP state scaling */ + const opus_int32 Gains_Q16[ MAX_NB_SUBFR ], /* I */ + const opus_int pitchL[ MAX_NB_SUBFR ], /* I Pitch lag */ + const opus_int signal_type, /* I Signal type */ + const opus_int decisionDelay /* I Decision delay */ +) +{ + opus_int i, k, lag; + opus_int32 gain_adj_Q16, inv_gain_Q31, inv_gain_Q26; + NSQ_del_dec_struct *psDD; + __m128i xmm_inv_gain_Q26, xmm_x16_x2x0, xmm_x16_x3x1; + + lag = pitchL[ subfr ]; + inv_gain_Q31 = silk_INVERSE32_varQ( silk_max( Gains_Q16[ subfr ], 1 ), 47 ); + silk_assert( inv_gain_Q31 != 0 ); + + /* Scale input */ + inv_gain_Q26 = silk_RSHIFT_ROUND( inv_gain_Q31, 5 ); + + /* prepare inv_gain_Q26 in packed 4 32-bits */ + xmm_inv_gain_Q26 = _mm_set1_epi32(inv_gain_Q26); + + for( i = 0; i < psEncC->subfr_length - 3; i += 4 ) { + xmm_x16_x2x0 = OP_CVTEPI16_EPI32_M64( &(x16[ i ] ) ); + + /* equal shift right 4 bytes*/ + xmm_x16_x3x1 = _mm_shuffle_epi32( xmm_x16_x2x0, _MM_SHUFFLE( 0, 3, 2, 1 ) ); + + xmm_x16_x2x0 = _mm_mul_epi32( xmm_x16_x2x0, xmm_inv_gain_Q26 ); + xmm_x16_x3x1 = _mm_mul_epi32( xmm_x16_x3x1, xmm_inv_gain_Q26 ); + + xmm_x16_x2x0 = _mm_srli_epi64( xmm_x16_x2x0, 16 ); + xmm_x16_x3x1 = _mm_slli_epi64( xmm_x16_x3x1, 16 ); + + xmm_x16_x2x0 = _mm_blend_epi16( xmm_x16_x2x0, xmm_x16_x3x1, 0xCC ); + + _mm_storeu_si128( (__m128i *)(&(x_sc_Q10[ i ] ) ), xmm_x16_x2x0 ); + } + + for( ; i < psEncC->subfr_length; i++ ) { + x_sc_Q10[ i ] = silk_SMULWW( x16[ i ], inv_gain_Q26 ); + } + + /* After rewhitening the LTP state is un-scaled, so scale with inv_gain_Q16 */ + if( NSQ->rewhite_flag ) { + if( subfr == 0 ) { + /* Do LTP downscaling */ + inv_gain_Q31 = silk_LSHIFT( silk_SMULWB( inv_gain_Q31, LTP_scale_Q14 ), 2 ); + } + for( i = NSQ->sLTP_buf_idx - lag - LTP_ORDER / 2; i < NSQ->sLTP_buf_idx; i++ ) { + silk_assert( i < MAX_FRAME_LENGTH ); + sLTP_Q15[ i ] = silk_SMULWB( inv_gain_Q31, sLTP[ i ] ); + } + } + + /* Adjust for changing gain */ + if( Gains_Q16[ subfr ] != NSQ->prev_gain_Q16 ) { + gain_adj_Q16 = silk_DIV32_varQ( NSQ->prev_gain_Q16, Gains_Q16[ subfr ], 16 ); + + /* Scale long-term shaping state */ + { + __m128i xmm_gain_adj_Q16, xmm_sLTP_shp_Q14_x2x0, xmm_sLTP_shp_Q14_x3x1; + + /* prepare gain_adj_Q16 in packed 4 32-bits */ + xmm_gain_adj_Q16 = _mm_set1_epi32( gain_adj_Q16 ); + + for( i = NSQ->sLTP_shp_buf_idx - psEncC->ltp_mem_length; i < NSQ->sLTP_shp_buf_idx - 3; i += 4 ) + { + xmm_sLTP_shp_Q14_x2x0 = _mm_loadu_si128( (__m128i *)(&(NSQ->sLTP_shp_Q14[ i ] ) ) ); + /* equal shift right 4 bytes*/ + xmm_sLTP_shp_Q14_x3x1 = _mm_shuffle_epi32( xmm_sLTP_shp_Q14_x2x0, _MM_SHUFFLE( 0, 3, 2, 1 ) ); + + xmm_sLTP_shp_Q14_x2x0 = _mm_mul_epi32( xmm_sLTP_shp_Q14_x2x0, xmm_gain_adj_Q16 ); + xmm_sLTP_shp_Q14_x3x1 = _mm_mul_epi32( xmm_sLTP_shp_Q14_x3x1, xmm_gain_adj_Q16 ); + + xmm_sLTP_shp_Q14_x2x0 = _mm_srli_epi64( xmm_sLTP_shp_Q14_x2x0, 16 ); + xmm_sLTP_shp_Q14_x3x1 = _mm_slli_epi64( xmm_sLTP_shp_Q14_x3x1, 16 ); + + xmm_sLTP_shp_Q14_x2x0 = _mm_blend_epi16( xmm_sLTP_shp_Q14_x2x0, xmm_sLTP_shp_Q14_x3x1, 0xCC ); + + _mm_storeu_si128( (__m128i *)(&(NSQ->sLTP_shp_Q14[ i ] ) ), xmm_sLTP_shp_Q14_x2x0 ); + } + + for( ; i < NSQ->sLTP_shp_buf_idx; i++ ) { + NSQ->sLTP_shp_Q14[ i ] = silk_SMULWW( gain_adj_Q16, NSQ->sLTP_shp_Q14[ i ] ); + } + + /* Scale long-term prediction state */ + if( signal_type == TYPE_VOICED && NSQ->rewhite_flag == 0 ) { + for( i = NSQ->sLTP_buf_idx - lag - LTP_ORDER / 2; i < NSQ->sLTP_buf_idx - decisionDelay; i++ ) { + 
sLTP_Q15[ i ] = silk_SMULWW( gain_adj_Q16, sLTP_Q15[ i ] ); + } + } + + for( k = 0; k < nStatesDelayedDecision; k++ ) { + psDD = &psDelDec[ k ]; + + /* Scale scalar states */ + psDD->LF_AR_Q14 = silk_SMULWW( gain_adj_Q16, psDD->LF_AR_Q14 ); + psDD->Diff_Q14 = silk_SMULWW( gain_adj_Q16, psDD->Diff_Q14 ); + + /* Scale short-term prediction and shaping states */ + for( i = 0; i < NSQ_LPC_BUF_LENGTH; i++ ) { + psDD->sLPC_Q14[ i ] = silk_SMULWW( gain_adj_Q16, psDD->sLPC_Q14[ i ] ); + } + for( i = 0; i < MAX_SHAPE_LPC_ORDER; i++ ) { + psDD->sAR2_Q14[ i ] = silk_SMULWW( gain_adj_Q16, psDD->sAR2_Q14[ i ] ); + } + for( i = 0; i < DECISION_DELAY; i++ ) { + psDD->Pred_Q15[ i ] = silk_SMULWW( gain_adj_Q16, psDD->Pred_Q15[ i ] ); + psDD->Shape_Q14[ i ] = silk_SMULWW( gain_adj_Q16, psDD->Shape_Q14[ i ] ); + } + } + } + + /* Save inverse gain */ + NSQ->prev_gain_Q16 = Gains_Q16[ subfr ]; + } +} diff --git a/libs/libopus/silk/x86/NSQ_sse.c b/libs/libopus/silk/x86/NSQ_sse.c deleted file mode 100644 index bb3c5f195..000000000 --- a/libs/libopus/silk/x86/NSQ_sse.c +++ /dev/null @@ -1,720 +0,0 @@ -/* Copyright (c) 2014, Cisco Systems, INC - Written by XiangMingZhu WeiZhou MinPeng YanWang - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - - Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - - Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER - OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, - EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR - PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF - LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING - NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-*/ - -#ifdef HAVE_CONFIG_H -#include "config.h" -#endif - -#include <xmmintrin.h> -#include <emmintrin.h> -#include <smmintrin.h> -#include "main.h" -#include "celt/x86/x86cpu.h" -#include "stack_alloc.h" - -static OPUS_INLINE void silk_nsq_scale_states_sse4_1( - const silk_encoder_state *psEncC, /* I Encoder State */ - silk_nsq_state *NSQ, /* I/O NSQ state */ - const opus_int32 x_Q3[], /* I input in Q3 */ - opus_int32 x_sc_Q10[], /* O input scaled with 1/Gain */ - const opus_int16 sLTP[], /* I re-whitened LTP state in Q0 */ - opus_int32 sLTP_Q15[], /* O LTP state matching scaled input */ - opus_int subfr, /* I subframe number */ - const opus_int LTP_scale_Q14, /* I */ - const opus_int32 Gains_Q16[ MAX_NB_SUBFR ], /* I */ - const opus_int pitchL[ MAX_NB_SUBFR ], /* I Pitch lag */ - const opus_int signal_type /* I Signal type */ -); - -static OPUS_INLINE void silk_noise_shape_quantizer_10_16_sse4_1( - silk_nsq_state *NSQ, /* I/O NSQ state */ - opus_int signalType, /* I Signal type */ - const opus_int32 x_sc_Q10[], /* I */ - opus_int8 pulses[], /* O */ - opus_int16 xq[], /* O */ - opus_int32 sLTP_Q15[], /* I/O LTP state */ - const opus_int16 a_Q12[], /* I Short term prediction coefs */ - const opus_int16 b_Q14[], /* I Long term prediction coefs */ - const opus_int16 AR_shp_Q13[], /* I Noise shaping AR coefs */ - opus_int lag, /* I Pitch lag */ - opus_int32 HarmShapeFIRPacked_Q14, /* I */ - opus_int Tilt_Q14, /* I Spectral tilt */ - opus_int32 LF_shp_Q14, /* I */ - opus_int32 Gain_Q16, /* I */ - opus_int offset_Q10, /* I */ - opus_int length, /* I Input length */ - opus_int32 table[][4] /* I */ -); - -void silk_NSQ_sse4_1( - const silk_encoder_state *psEncC, /* I/O Encoder State */ - silk_nsq_state *NSQ, /* I/O NSQ state */ - SideInfoIndices *psIndices, /* I/O Quantization Indices */ - const opus_int32 x_Q3[], /* I Prefiltered input signal */ - opus_int8 pulses[], /* O Quantized pulse signal */ - const opus_int16 PredCoef_Q12[ 2 * MAX_LPC_ORDER ], /* I Short term prediction coefs */ - const opus_int16 LTPCoef_Q14[ LTP_ORDER * MAX_NB_SUBFR ], /* I Long term prediction coefs */ - const opus_int16 AR2_Q13[ MAX_NB_SUBFR * MAX_SHAPE_LPC_ORDER ], /* I Noise shaping coefs */ - const opus_int HarmShapeGain_Q14[ MAX_NB_SUBFR ], /* I Long term shaping coefs */ - const opus_int Tilt_Q14[ MAX_NB_SUBFR ], /* I Spectral tilt */ - const opus_int32 LF_shp_Q14[ MAX_NB_SUBFR ], /* I Low frequency shaping coefs */ - const opus_int32 Gains_Q16[ MAX_NB_SUBFR ], /* I Quantization step sizes */ - const opus_int pitchL[ MAX_NB_SUBFR ], /* I Pitch lags */ - const opus_int Lambda_Q10, /* I Rate/distortion tradeoff */ - const opus_int LTP_scale_Q14 /* I LTP state scaling */ -) -{ - opus_int k, lag, start_idx, LSF_interpolation_flag; - const opus_int16 *A_Q12, *B_Q14, *AR_shp_Q13; - opus_int16 *pxq; - VARDECL( opus_int32, sLTP_Q15 ); - VARDECL( opus_int16, sLTP ); - opus_int32 HarmShapeFIRPacked_Q14; - opus_int offset_Q10; - VARDECL( opus_int32, x_sc_Q10 ); - - opus_int32 table[ 64 ][ 4 ]; - opus_int32 tmp1; - opus_int32 q1_Q10, q2_Q10, rd1_Q20, rd2_Q20; - - SAVE_STACK; - - NSQ->rand_seed = psIndices->Seed; - - /* Set unvoiced lag to the previous one, overwrite later for voiced */ - lag = NSQ->lagPrev; - - silk_assert( NSQ->prev_gain_Q16 != 0 ); - - offset_Q10 = silk_Quantization_Offsets_Q10[ psIndices->signalType >> 1 ][ psIndices->quantOffsetType ]; - - /* 0 */ - q1_Q10 = offset_Q10; - q2_Q10 = offset_Q10 + ( 1024 - QUANT_LEVEL_ADJUST_Q10 ); - rd1_Q20 = q1_Q10 * Lambda_Q10; - rd2_Q20 = q2_Q10 * Lambda_Q10; - - table[ 32 ][ 0 ] = q1_Q10; - table[ 32 ][ 1 ] = 
q2_Q10; - table[ 32 ][ 2 ] = 2 * (q1_Q10 - q2_Q10); - table[ 32 ][ 3 ] = (rd1_Q20 - rd2_Q20) + (q1_Q10 * q1_Q10 - q2_Q10 * q2_Q10); - - /* -1 */ - q1_Q10 = offset_Q10 - ( 1024 - QUANT_LEVEL_ADJUST_Q10 ); - q2_Q10 = offset_Q10; - rd1_Q20 = - q1_Q10 * Lambda_Q10; - rd2_Q20 = q2_Q10 * Lambda_Q10; - - table[ 31 ][ 0 ] = q1_Q10; - table[ 31 ][ 1 ] = q2_Q10; - table[ 31 ][ 2 ] = 2 * (q1_Q10 - q2_Q10); - table[ 31 ][ 3 ] = (rd1_Q20 - rd2_Q20) + (q1_Q10 * q1_Q10 - q2_Q10 * q2_Q10); - - /* > 0 */ - for (k = 1; k <= 31; k++) - { - tmp1 = offset_Q10 + silk_LSHIFT( k, 10 ); - - q1_Q10 = tmp1 - QUANT_LEVEL_ADJUST_Q10; - q2_Q10 = tmp1 - QUANT_LEVEL_ADJUST_Q10 + 1024; - rd1_Q20 = q1_Q10 * Lambda_Q10; - rd2_Q20 = q2_Q10 * Lambda_Q10; - - table[ 32 + k ][ 0 ] = q1_Q10; - table[ 32 + k ][ 1 ] = q2_Q10; - table[ 32 + k ][ 2 ] = 2 * (q1_Q10 - q2_Q10); - table[ 32 + k ][ 3 ] = (rd1_Q20 - rd2_Q20) + (q1_Q10 * q1_Q10 - q2_Q10 * q2_Q10); - } - - /* < -1 */ - for (k = -32; k <= -2; k++) - { - tmp1 = offset_Q10 + silk_LSHIFT( k, 10 ); - - q1_Q10 = tmp1 + QUANT_LEVEL_ADJUST_Q10; - q2_Q10 = tmp1 + QUANT_LEVEL_ADJUST_Q10 + 1024; - rd1_Q20 = - q1_Q10 * Lambda_Q10; - rd2_Q20 = - q2_Q10 * Lambda_Q10; - - table[ 32 + k ][ 0 ] = q1_Q10; - table[ 32 + k ][ 1 ] = q2_Q10; - table[ 32 + k ][ 2 ] = 2 * (q1_Q10 - q2_Q10); - table[ 32 + k ][ 3 ] = (rd1_Q20 - rd2_Q20) + (q1_Q10 * q1_Q10 - q2_Q10 * q2_Q10); - } - - if( psIndices->NLSFInterpCoef_Q2 == 4 ) { - LSF_interpolation_flag = 0; - } else { - LSF_interpolation_flag = 1; - } - - ALLOC( sLTP_Q15, - psEncC->ltp_mem_length + psEncC->frame_length, opus_int32 ); - ALLOC( sLTP, psEncC->ltp_mem_length + psEncC->frame_length, opus_int16 ); - ALLOC( x_sc_Q10, psEncC->subfr_length, opus_int32 ); - /* Set up pointers to start of sub frame */ - NSQ->sLTP_shp_buf_idx = psEncC->ltp_mem_length; - NSQ->sLTP_buf_idx = psEncC->ltp_mem_length; - pxq = &NSQ->xq[ psEncC->ltp_mem_length ]; - for( k = 0; k < psEncC->nb_subfr; k++ ) { - A_Q12 = &PredCoef_Q12[ (( k >> 1 ) | ( 1 - LSF_interpolation_flag )) * MAX_LPC_ORDER ]; - B_Q14 = &LTPCoef_Q14[ k * LTP_ORDER ]; - AR_shp_Q13 = &AR2_Q13[ k * MAX_SHAPE_LPC_ORDER ]; - - /* Noise shape parameters */ - silk_assert( HarmShapeGain_Q14[ k ] >= 0 ); - HarmShapeFIRPacked_Q14 = silk_RSHIFT( HarmShapeGain_Q14[ k ], 2 ); - HarmShapeFIRPacked_Q14 |= silk_LSHIFT( (opus_int32)silk_RSHIFT( HarmShapeGain_Q14[ k ], 1 ), 16 ); - - NSQ->rewhite_flag = 0; - if( psIndices->signalType == TYPE_VOICED ) { - /* Voiced */ - lag = pitchL[ k ]; - - /* Re-whitening */ - if( ( k & ( 3 - silk_LSHIFT( LSF_interpolation_flag, 1 ) ) ) == 0 ) { - /* Rewhiten with new A coefs */ - start_idx = psEncC->ltp_mem_length - lag - psEncC->predictLPCOrder - LTP_ORDER / 2; - silk_assert( start_idx > 0 ); - - silk_LPC_analysis_filter( &sLTP[ start_idx ], &NSQ->xq[ start_idx + k * psEncC->subfr_length ], - A_Q12, psEncC->ltp_mem_length - start_idx, psEncC->predictLPCOrder, psEncC->arch ); - - NSQ->rewhite_flag = 1; - NSQ->sLTP_buf_idx = psEncC->ltp_mem_length; - } - } - - silk_nsq_scale_states_sse4_1( psEncC, NSQ, x_Q3, x_sc_Q10, sLTP, sLTP_Q15, k, LTP_scale_Q14, Gains_Q16, pitchL, psIndices->signalType ); - - if ( opus_likely( ( 10 == psEncC->shapingLPCOrder ) && ( 16 == psEncC->predictLPCOrder) ) ) - { - silk_noise_shape_quantizer_10_16_sse4_1( NSQ, psIndices->signalType, x_sc_Q10, pulses, pxq, sLTP_Q15, A_Q12, B_Q14, - AR_shp_Q13, lag, HarmShapeFIRPacked_Q14, Tilt_Q14[ k ], LF_shp_Q14[ k ], Gains_Q16[ k ], - offset_Q10, psEncC->subfr_length, &(table[32]) ); - } - else - { - 
silk_noise_shape_quantizer( NSQ, psIndices->signalType, x_sc_Q10, pulses, pxq, sLTP_Q15, A_Q12, B_Q14, - AR_shp_Q13, lag, HarmShapeFIRPacked_Q14, Tilt_Q14[ k ], LF_shp_Q14[ k ], Gains_Q16[ k ], Lambda_Q10, - offset_Q10, psEncC->subfr_length, psEncC->shapingLPCOrder, psEncC->predictLPCOrder, psEncC->arch ); - } - - x_Q3 += psEncC->subfr_length; - pulses += psEncC->subfr_length; - pxq += psEncC->subfr_length; - } - - /* Update lagPrev for next frame */ - NSQ->lagPrev = pitchL[ psEncC->nb_subfr - 1 ]; - - /* Save quantized speech and noise shaping signals */ - /* DEBUG_STORE_DATA( enc.pcm, &NSQ->xq[ psEncC->ltp_mem_length ], psEncC->frame_length * sizeof( opus_int16 ) ) */ - silk_memmove( NSQ->xq, &NSQ->xq[ psEncC->frame_length ], psEncC->ltp_mem_length * sizeof( opus_int16 ) ); - silk_memmove( NSQ->sLTP_shp_Q14, &NSQ->sLTP_shp_Q14[ psEncC->frame_length ], psEncC->ltp_mem_length * sizeof( opus_int32 ) ); - RESTORE_STACK; -} - -/***********************************/ -/* silk_noise_shape_quantizer_10_16 */ -/***********************************/ -static OPUS_INLINE void silk_noise_shape_quantizer_10_16_sse4_1( - silk_nsq_state *NSQ, /* I/O NSQ state */ - opus_int signalType, /* I Signal type */ - const opus_int32 x_sc_Q10[], /* I */ - opus_int8 pulses[], /* O */ - opus_int16 xq[], /* O */ - opus_int32 sLTP_Q15[], /* I/O LTP state */ - const opus_int16 a_Q12[], /* I Short term prediction coefs */ - const opus_int16 b_Q14[], /* I Long term prediction coefs */ - const opus_int16 AR_shp_Q13[], /* I Noise shaping AR coefs */ - opus_int lag, /* I Pitch lag */ - opus_int32 HarmShapeFIRPacked_Q14, /* I */ - opus_int Tilt_Q14, /* I Spectral tilt */ - opus_int32 LF_shp_Q14, /* I */ - opus_int32 Gain_Q16, /* I */ - opus_int offset_Q10, /* I */ - opus_int length, /* I Input length */ - opus_int32 table[][4] /* I */ -) -{ - opus_int i; - opus_int32 LTP_pred_Q13, LPC_pred_Q10, n_AR_Q12, n_LTP_Q13; - opus_int32 n_LF_Q12, r_Q10, q1_Q0, q1_Q10, q2_Q10; - opus_int32 exc_Q14, LPC_exc_Q14, xq_Q14, Gain_Q10; - opus_int32 tmp1, tmp2, sLF_AR_shp_Q14; - opus_int32 *psLPC_Q14, *shp_lag_ptr, *pred_lag_ptr; - - __m128i xmm_tempa, xmm_tempb; - - __m128i xmm_one; - - __m128i psLPC_Q14_hi_01234567, psLPC_Q14_hi_89ABCDEF; - __m128i psLPC_Q14_lo_01234567, psLPC_Q14_lo_89ABCDEF; - __m128i a_Q12_01234567, a_Q12_89ABCDEF; - - __m128i sAR2_Q14_hi_76543210, sAR2_Q14_lo_76543210; - __m128i AR_shp_Q13_76543210; - - shp_lag_ptr = &NSQ->sLTP_shp_Q14[ NSQ->sLTP_shp_buf_idx - lag + HARM_SHAPE_FIR_TAPS / 2 ]; - pred_lag_ptr = &sLTP_Q15[ NSQ->sLTP_buf_idx - lag + LTP_ORDER / 2 ]; - Gain_Q10 = silk_RSHIFT( Gain_Q16, 6 ); - - /* Set up short term AR state */ - psLPC_Q14 = &NSQ->sLPC_Q14[ NSQ_LPC_BUF_LENGTH - 1 ]; - - sLF_AR_shp_Q14 = NSQ->sLF_AR_shp_Q14; - xq_Q14 = psLPC_Q14[ 0 ]; - LTP_pred_Q13 = 0; - - /* load a_Q12 */ - xmm_one = _mm_set_epi8( 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14 ); - - /* load a_Q12[0] - a_Q12[7] */ - a_Q12_01234567 = _mm_loadu_si128( (__m128i *)(&a_Q12[ 0 ] ) ); - /* load a_Q12[ 8 ] - a_Q12[ 15 ] */ - a_Q12_89ABCDEF = _mm_loadu_si128( (__m128i *)(&a_Q12[ 8 ] ) ); - - a_Q12_01234567 = _mm_shuffle_epi8( a_Q12_01234567, xmm_one ); - a_Q12_89ABCDEF = _mm_shuffle_epi8( a_Q12_89ABCDEF, xmm_one ); - - /* load AR_shp_Q13 */ - AR_shp_Q13_76543210 = _mm_loadu_si128( (__m128i *)(&AR_shp_Q13[0] ) ); - - /* load psLPC_Q14 */ - xmm_one = _mm_set_epi8(15, 14, 11, 10, 7, 6, 3, 2, 13, 12, 9, 8, 5, 4, 1, 0 ); - - xmm_tempa = _mm_loadu_si128( (__m128i *)(&psLPC_Q14[-16]) ); - xmm_tempb = _mm_loadu_si128( (__m128i 
*)(&psLPC_Q14[-12]) ); - - xmm_tempa = _mm_shuffle_epi8( xmm_tempa, xmm_one ); - xmm_tempb = _mm_shuffle_epi8( xmm_tempb, xmm_one ); - - psLPC_Q14_hi_89ABCDEF = _mm_unpackhi_epi64( xmm_tempa, xmm_tempb ); - psLPC_Q14_lo_89ABCDEF = _mm_unpacklo_epi64( xmm_tempa, xmm_tempb ); - - xmm_tempa = _mm_loadu_si128( (__m128i *)(&psLPC_Q14[ -8 ]) ); - xmm_tempb = _mm_loadu_si128( (__m128i *)(&psLPC_Q14[ -4 ]) ); - - xmm_tempa = _mm_shuffle_epi8( xmm_tempa, xmm_one ); - xmm_tempb = _mm_shuffle_epi8( xmm_tempb, xmm_one ); - - psLPC_Q14_hi_01234567 = _mm_unpackhi_epi64( xmm_tempa, xmm_tempb ); - psLPC_Q14_lo_01234567 = _mm_unpacklo_epi64( xmm_tempa, xmm_tempb ); - - /* load sAR2_Q14 */ - xmm_tempa = _mm_loadu_si128( (__m128i *)(&(NSQ->sAR2_Q14[ 0 ]) ) ); - xmm_tempb = _mm_loadu_si128( (__m128i *)(&(NSQ->sAR2_Q14[ 4 ]) ) ); - - xmm_tempa = _mm_shuffle_epi8( xmm_tempa, xmm_one ); - xmm_tempb = _mm_shuffle_epi8( xmm_tempb, xmm_one ); - - sAR2_Q14_hi_76543210 = _mm_unpackhi_epi64( xmm_tempa, xmm_tempb ); - sAR2_Q14_lo_76543210 = _mm_unpacklo_epi64( xmm_tempa, xmm_tempb ); - - /* prepare 1 in 8 * 16bit */ - xmm_one = _mm_set1_epi16(1); - - for( i = 0; i < length; i++ ) - { - /* Short-term prediction */ - __m128i xmm_hi_07, xmm_hi_8F, xmm_lo_07, xmm_lo_8F; - - /* Avoids introducing a bias because silk_SMLAWB() always rounds to -inf */ - LPC_pred_Q10 = 8; /* silk_RSHIFT( predictLPCOrder, 1 ); */ - - /* shift psLPC_Q14 */ - psLPC_Q14_hi_89ABCDEF = _mm_alignr_epi8( psLPC_Q14_hi_01234567, psLPC_Q14_hi_89ABCDEF, 2 ); - psLPC_Q14_lo_89ABCDEF = _mm_alignr_epi8( psLPC_Q14_lo_01234567, psLPC_Q14_lo_89ABCDEF, 2 ); - - psLPC_Q14_hi_01234567 = _mm_srli_si128( psLPC_Q14_hi_01234567, 2 ); - psLPC_Q14_lo_01234567 = _mm_srli_si128( psLPC_Q14_lo_01234567, 2 ); - - psLPC_Q14_hi_01234567 = _mm_insert_epi16( psLPC_Q14_hi_01234567, (xq_Q14 >> 16), 7 ); - psLPC_Q14_lo_01234567 = _mm_insert_epi16( psLPC_Q14_lo_01234567, (xq_Q14), 7 ); - - /* high part, use pmaddwd, results in 4 32-bit */ - xmm_hi_07 = _mm_madd_epi16( psLPC_Q14_hi_01234567, a_Q12_01234567 ); - xmm_hi_8F = _mm_madd_epi16( psLPC_Q14_hi_89ABCDEF, a_Q12_89ABCDEF ); - - /* low part, use pmulhw, results in 8 16-bit, note we need simulate unsigned * signed, _mm_srai_epi16(psLPC_Q14_lo_01234567, 15) */ - xmm_tempa = _mm_cmpgt_epi16( _mm_setzero_si128(), psLPC_Q14_lo_01234567 ); - xmm_tempb = _mm_cmpgt_epi16( _mm_setzero_si128(), psLPC_Q14_lo_89ABCDEF ); - - xmm_tempa = _mm_and_si128( xmm_tempa, a_Q12_01234567 ); - xmm_tempb = _mm_and_si128( xmm_tempb, a_Q12_89ABCDEF ); - - xmm_lo_07 = _mm_mulhi_epi16( psLPC_Q14_lo_01234567, a_Q12_01234567 ); - xmm_lo_8F = _mm_mulhi_epi16( psLPC_Q14_lo_89ABCDEF, a_Q12_89ABCDEF ); - - xmm_lo_07 = _mm_add_epi16( xmm_lo_07, xmm_tempa ); - xmm_lo_8F = _mm_add_epi16( xmm_lo_8F, xmm_tempb ); - - xmm_lo_07 = _mm_madd_epi16( xmm_lo_07, xmm_one ); - xmm_lo_8F = _mm_madd_epi16( xmm_lo_8F, xmm_one ); - - /* accumulate */ - xmm_hi_07 = _mm_add_epi32( xmm_hi_07, xmm_hi_8F ); - xmm_lo_07 = _mm_add_epi32( xmm_lo_07, xmm_lo_8F ); - - xmm_hi_07 = _mm_add_epi32( xmm_hi_07, xmm_lo_07 ); - - xmm_hi_07 = _mm_add_epi32( xmm_hi_07, _mm_unpackhi_epi64(xmm_hi_07, xmm_hi_07 ) ); - xmm_hi_07 = _mm_add_epi32( xmm_hi_07, _mm_shufflelo_epi16(xmm_hi_07, 0x0E ) ); - - LPC_pred_Q10 += _mm_cvtsi128_si32( xmm_hi_07 ); - - /* Long-term prediction */ - if ( opus_likely( signalType == TYPE_VOICED ) ) { - /* Unrolled loop */ - /* Avoids introducing a bias because silk_SMLAWB() always rounds to -inf */ - LTP_pred_Q13 = 2; - { - __m128i b_Q14_3210, b_Q14_0123, pred_lag_ptr_0123; - 
- b_Q14_3210 = OP_CVTEPI16_EPI32_M64( b_Q14 ); - b_Q14_0123 = _mm_shuffle_epi32( b_Q14_3210, 0x1B ); - - /* loaded: [0] [-1] [-2] [-3] */ - pred_lag_ptr_0123 = _mm_loadu_si128( (__m128i *)(&pred_lag_ptr[ -3 ] ) ); - /* shuffle to [-3] [-2] [-1] [0] and to new xmm */ - xmm_tempa = _mm_shuffle_epi32( pred_lag_ptr_0123, 0x1B ); - /*64-bit multiply, a[2] * b[-2], a[0] * b[0] */ - xmm_tempa = _mm_mul_epi32( xmm_tempa, b_Q14_3210 ); - /* right shift 2 bytes (16 bits), zero extended */ - xmm_tempa = _mm_srli_si128( xmm_tempa, 2 ); - - /* a[1] * b[-1], a[3] * b[-3] */ - pred_lag_ptr_0123 = _mm_mul_epi32( pred_lag_ptr_0123, b_Q14_0123 ); - pred_lag_ptr_0123 = _mm_srli_si128( pred_lag_ptr_0123, 2 ); - - pred_lag_ptr_0123 = _mm_add_epi32( pred_lag_ptr_0123, xmm_tempa ); - /* equal shift right 8 bytes*/ - xmm_tempa = _mm_shuffle_epi32( pred_lag_ptr_0123, _MM_SHUFFLE( 0, 0, 3, 2 ) ); - xmm_tempa = _mm_add_epi32( xmm_tempa, pred_lag_ptr_0123 ); - - LTP_pred_Q13 += _mm_cvtsi128_si32( xmm_tempa ); - - LTP_pred_Q13 = silk_SMLAWB( LTP_pred_Q13, pred_lag_ptr[ -4 ], b_Q14[ 4 ] ); - pred_lag_ptr++; - } - } - - /* Noise shape feedback */ - NSQ->sAR2_Q14[ 9 ] = NSQ->sAR2_Q14[ 8 ]; - NSQ->sAR2_Q14[ 8 ] = _mm_cvtsi128_si32( _mm_srli_si128(_mm_unpackhi_epi16( sAR2_Q14_lo_76543210, sAR2_Q14_hi_76543210 ), 12 ) ); - - sAR2_Q14_hi_76543210 = _mm_slli_si128( sAR2_Q14_hi_76543210, 2 ); - sAR2_Q14_lo_76543210 = _mm_slli_si128( sAR2_Q14_lo_76543210, 2 ); - - sAR2_Q14_hi_76543210 = _mm_insert_epi16( sAR2_Q14_hi_76543210, (xq_Q14 >> 16), 0 ); - sAR2_Q14_lo_76543210 = _mm_insert_epi16( sAR2_Q14_lo_76543210, (xq_Q14), 0 ); - - /* high part, use pmaddwd, results in 4 32-bit */ - xmm_hi_07 = _mm_madd_epi16( sAR2_Q14_hi_76543210, AR_shp_Q13_76543210 ); - - /* low part, use pmulhw, results in 8 16-bit, note we need simulate unsigned * signed,_mm_srai_epi16(sAR2_Q14_lo_76543210, 15) */ - xmm_tempa = _mm_cmpgt_epi16( _mm_setzero_si128(), sAR2_Q14_lo_76543210 ); - xmm_tempa = _mm_and_si128( xmm_tempa, AR_shp_Q13_76543210 ); - - xmm_lo_07 = _mm_mulhi_epi16( sAR2_Q14_lo_76543210, AR_shp_Q13_76543210 ); - xmm_lo_07 = _mm_add_epi16( xmm_lo_07, xmm_tempa ); - - xmm_lo_07 = _mm_madd_epi16( xmm_lo_07, xmm_one ); - - /* accumulate */ - xmm_hi_07 = _mm_add_epi32( xmm_hi_07, xmm_lo_07 ); - - xmm_hi_07 = _mm_add_epi32( xmm_hi_07, _mm_unpackhi_epi64(xmm_hi_07, xmm_hi_07 ) ); - xmm_hi_07 = _mm_add_epi32( xmm_hi_07, _mm_shufflelo_epi16(xmm_hi_07, 0x0E ) ); - - n_AR_Q12 = 5 + _mm_cvtsi128_si32( xmm_hi_07 ); - - n_AR_Q12 = silk_SMLAWB( n_AR_Q12, NSQ->sAR2_Q14[ 8 ], AR_shp_Q13[ 8 ] ); - n_AR_Q12 = silk_SMLAWB( n_AR_Q12, NSQ->sAR2_Q14[ 9 ], AR_shp_Q13[ 9 ] ); - - n_AR_Q12 = silk_LSHIFT32( n_AR_Q12, 1 ); /* Q11 -> Q12 */ - n_AR_Q12 = silk_SMLAWB( n_AR_Q12, sLF_AR_shp_Q14, Tilt_Q14 ); - - n_LF_Q12 = silk_SMULWB( NSQ->sLTP_shp_Q14[ NSQ->sLTP_shp_buf_idx - 1 ], LF_shp_Q14 ); - n_LF_Q12 = silk_SMLAWT( n_LF_Q12, sLF_AR_shp_Q14, LF_shp_Q14 ); - - silk_assert( lag > 0 || signalType != TYPE_VOICED ); - - /* Combine prediction and noise shaping signals */ - tmp1 = silk_SUB32( silk_LSHIFT32( LPC_pred_Q10, 2 ), n_AR_Q12 ); /* Q12 */ - tmp1 = silk_SUB32( tmp1, n_LF_Q12 ); /* Q12 */ - if( lag > 0 ) { - /* Symmetric, packed FIR coefficients */ - n_LTP_Q13 = silk_SMULWB( silk_ADD32( shp_lag_ptr[ 0 ], shp_lag_ptr[ -2 ] ), HarmShapeFIRPacked_Q14 ); - n_LTP_Q13 = silk_SMLAWT( n_LTP_Q13, shp_lag_ptr[ -1 ], HarmShapeFIRPacked_Q14 ); - n_LTP_Q13 = silk_LSHIFT( n_LTP_Q13, 1 ); - shp_lag_ptr++; - - tmp2 = silk_SUB32( LTP_pred_Q13, n_LTP_Q13 ); /* Q13 */ - tmp1 = 
silk_ADD_LSHIFT32( tmp2, tmp1, 1 ); /* Q13 */ - tmp1 = silk_RSHIFT_ROUND( tmp1, 3 ); /* Q10 */ - } else { - tmp1 = silk_RSHIFT_ROUND( tmp1, 2 ); /* Q10 */ - } - - r_Q10 = silk_SUB32( x_sc_Q10[ i ], tmp1 ); /* residual error Q10 */ - - /* Generate dither */ - NSQ->rand_seed = silk_RAND( NSQ->rand_seed ); - - /* Flip sign depending on dither */ - tmp2 = -r_Q10; - if ( NSQ->rand_seed < 0 ) r_Q10 = tmp2; - - r_Q10 = silk_LIMIT_32( r_Q10, -(31 << 10), 30 << 10 ); - - /* Find two quantization level candidates and measure their rate-distortion */ - q1_Q10 = silk_SUB32( r_Q10, offset_Q10 ); - q1_Q0 = silk_RSHIFT( q1_Q10, 10 ); - - q1_Q10 = table[q1_Q0][0]; - q2_Q10 = table[q1_Q0][1]; - - if (r_Q10 * table[q1_Q0][2] - table[q1_Q0][3] < 0) - { - q1_Q10 = q2_Q10; - } - - pulses[ i ] = (opus_int8)silk_RSHIFT_ROUND( q1_Q10, 10 ); - - /* Excitation */ - exc_Q14 = silk_LSHIFT( q1_Q10, 4 ); - - tmp2 = -exc_Q14; - if ( NSQ->rand_seed < 0 ) exc_Q14 = tmp2; - - /* Add predictions */ - LPC_exc_Q14 = silk_ADD_LSHIFT32( exc_Q14, LTP_pred_Q13, 1 ); - xq_Q14 = silk_ADD_LSHIFT32( LPC_exc_Q14, LPC_pred_Q10, 4 ); - - /* Update states */ - psLPC_Q14++; - *psLPC_Q14 = xq_Q14; - sLF_AR_shp_Q14 = silk_SUB_LSHIFT32( xq_Q14, n_AR_Q12, 2 ); - - NSQ->sLTP_shp_Q14[ NSQ->sLTP_shp_buf_idx ] = silk_SUB_LSHIFT32( sLF_AR_shp_Q14, n_LF_Q12, 2 ); - sLTP_Q15[ NSQ->sLTP_buf_idx ] = silk_LSHIFT( LPC_exc_Q14, 1 ); - NSQ->sLTP_shp_buf_idx++; - NSQ->sLTP_buf_idx++; - - /* Make dither dependent on quantized signal */ - NSQ->rand_seed = silk_ADD32_ovflw( NSQ->rand_seed, pulses[ i ] ); - } - - NSQ->sLF_AR_shp_Q14 = sLF_AR_shp_Q14; - - /* Scale XQ back to normal level before saving */ - psLPC_Q14 = &NSQ->sLPC_Q14[ NSQ_LPC_BUF_LENGTH ]; - - /* write back sAR2_Q14 */ - xmm_tempa = _mm_unpackhi_epi16( sAR2_Q14_lo_76543210, sAR2_Q14_hi_76543210 ); - xmm_tempb = _mm_unpacklo_epi16( sAR2_Q14_lo_76543210, sAR2_Q14_hi_76543210 ); - _mm_storeu_si128( (__m128i *)(&NSQ->sAR2_Q14[ 4 ]), xmm_tempa ); - _mm_storeu_si128( (__m128i *)(&NSQ->sAR2_Q14[ 0 ]), xmm_tempb ); - - /* xq[ i ] = (opus_int16)silk_SAT16( silk_RSHIFT_ROUND( silk_SMULWW( psLPC_Q14[ i ], Gain_Q10 ), 8 ) ); */ - { - __m128i xmm_Gain_Q10; - __m128i xmm_xq_Q14_3210, xmm_xq_Q14_x3x1, xmm_xq_Q14_7654, xmm_xq_Q14_x7x5; - - /* prepare (1 << 7) in packed 4 32-bits */ - xmm_tempa = _mm_set1_epi32( (1 << 7) ); - - /* prepare Gain_Q10 in packed 4 32-bits */ - xmm_Gain_Q10 = _mm_set1_epi32( Gain_Q10 ); - - /* process xq */ - for (i = 0; i < length - 7; i += 8) - { - xmm_xq_Q14_3210 = _mm_loadu_si128( (__m128i *)(&(psLPC_Q14[ i + 0 ] ) ) ); - xmm_xq_Q14_7654 = _mm_loadu_si128( (__m128i *)(&(psLPC_Q14[ i + 4 ] ) ) ); - - /* equal shift right 4 bytes*/ - xmm_xq_Q14_x3x1 = _mm_shuffle_epi32( xmm_xq_Q14_3210, _MM_SHUFFLE( 0, 3, 2, 1 ) ); - /* equal shift right 4 bytes*/ - xmm_xq_Q14_x7x5 = _mm_shuffle_epi32( xmm_xq_Q14_7654, _MM_SHUFFLE( 0, 3, 2, 1 ) ); - - xmm_xq_Q14_3210 = _mm_mul_epi32( xmm_xq_Q14_3210, xmm_Gain_Q10 ); - xmm_xq_Q14_x3x1 = _mm_mul_epi32( xmm_xq_Q14_x3x1, xmm_Gain_Q10 ); - xmm_xq_Q14_7654 = _mm_mul_epi32( xmm_xq_Q14_7654, xmm_Gain_Q10 ); - xmm_xq_Q14_x7x5 = _mm_mul_epi32( xmm_xq_Q14_x7x5, xmm_Gain_Q10 ); - - xmm_xq_Q14_3210 = _mm_srli_epi64( xmm_xq_Q14_3210, 16 ); - xmm_xq_Q14_x3x1 = _mm_slli_epi64( xmm_xq_Q14_x3x1, 16 ); - xmm_xq_Q14_7654 = _mm_srli_epi64( xmm_xq_Q14_7654, 16 ); - xmm_xq_Q14_x7x5 = _mm_slli_epi64( xmm_xq_Q14_x7x5, 16 ); - - xmm_xq_Q14_3210 = _mm_blend_epi16( xmm_xq_Q14_3210, xmm_xq_Q14_x3x1, 0xCC ); - xmm_xq_Q14_7654 = _mm_blend_epi16( xmm_xq_Q14_7654, xmm_xq_Q14_x7x5, 
0xCC ); - - /* silk_RSHIFT_ROUND(xq, 8) */ - xmm_xq_Q14_3210 = _mm_add_epi32( xmm_xq_Q14_3210, xmm_tempa ); - xmm_xq_Q14_7654 = _mm_add_epi32( xmm_xq_Q14_7654, xmm_tempa ); - - xmm_xq_Q14_3210 = _mm_srai_epi32( xmm_xq_Q14_3210, 8 ); - xmm_xq_Q14_7654 = _mm_srai_epi32( xmm_xq_Q14_7654, 8 ); - - /* silk_SAT16 */ - xmm_xq_Q14_3210 = _mm_packs_epi32( xmm_xq_Q14_3210, xmm_xq_Q14_7654 ); - - /* save to xq */ - _mm_storeu_si128( (__m128i *)(&xq[ i ] ), xmm_xq_Q14_3210 ); - } - } - for ( ; i < length; i++) - { - xq[i] = (opus_int16)silk_SAT16( silk_RSHIFT_ROUND( silk_SMULWW( psLPC_Q14[ i ], Gain_Q10 ), 8 ) ); - } - - /* Update LPC synth buffer */ - silk_memcpy( NSQ->sLPC_Q14, &NSQ->sLPC_Q14[ length ], NSQ_LPC_BUF_LENGTH * sizeof( opus_int32 ) ); -} - -static OPUS_INLINE void silk_nsq_scale_states_sse4_1( - const silk_encoder_state *psEncC, /* I Encoder State */ - silk_nsq_state *NSQ, /* I/O NSQ state */ - const opus_int32 x_Q3[], /* I input in Q3 */ - opus_int32 x_sc_Q10[], /* O input scaled with 1/Gain */ - const opus_int16 sLTP[], /* I re-whitened LTP state in Q0 */ - opus_int32 sLTP_Q15[], /* O LTP state matching scaled input */ - opus_int subfr, /* I subframe number */ - const opus_int LTP_scale_Q14, /* I */ - const opus_int32 Gains_Q16[ MAX_NB_SUBFR ], /* I */ - const opus_int pitchL[ MAX_NB_SUBFR ], /* I Pitch lag */ - const opus_int signal_type /* I Signal type */ -) -{ - opus_int i, lag; - opus_int32 gain_adj_Q16, inv_gain_Q31, inv_gain_Q23; - __m128i xmm_inv_gain_Q23, xmm_x_Q3_x2x0, xmm_x_Q3_x3x1; - - lag = pitchL[ subfr ]; - inv_gain_Q31 = silk_INVERSE32_varQ( silk_max( Gains_Q16[ subfr ], 1 ), 47 ); - silk_assert( inv_gain_Q31 != 0 ); - - /* Calculate gain adjustment factor */ - if( Gains_Q16[ subfr ] != NSQ->prev_gain_Q16 ) { - gain_adj_Q16 = silk_DIV32_varQ( NSQ->prev_gain_Q16, Gains_Q16[ subfr ], 16 ); - } else { - gain_adj_Q16 = (opus_int32)1 << 16; - } - - /* Scale input */ - inv_gain_Q23 = silk_RSHIFT_ROUND( inv_gain_Q31, 8 ); - - /* prepare inv_gain_Q23 in packed 4 32-bits */ - xmm_inv_gain_Q23 = _mm_set1_epi32(inv_gain_Q23); - - for( i = 0; i < psEncC->subfr_length - 3; i += 4 ) { - xmm_x_Q3_x2x0 = _mm_loadu_si128( (__m128i *)(&(x_Q3[ i ] ) ) ); - - /* equal shift right 4 bytes*/ - xmm_x_Q3_x3x1 = _mm_shuffle_epi32( xmm_x_Q3_x2x0, _MM_SHUFFLE( 0, 3, 2, 1 ) ); - - xmm_x_Q3_x2x0 = _mm_mul_epi32( xmm_x_Q3_x2x0, xmm_inv_gain_Q23 ); - xmm_x_Q3_x3x1 = _mm_mul_epi32( xmm_x_Q3_x3x1, xmm_inv_gain_Q23 ); - - xmm_x_Q3_x2x0 = _mm_srli_epi64( xmm_x_Q3_x2x0, 16 ); - xmm_x_Q3_x3x1 = _mm_slli_epi64( xmm_x_Q3_x3x1, 16 ); - - xmm_x_Q3_x2x0 = _mm_blend_epi16( xmm_x_Q3_x2x0, xmm_x_Q3_x3x1, 0xCC ); - - _mm_storeu_si128( (__m128i *)(&(x_sc_Q10[ i ] ) ), xmm_x_Q3_x2x0 ); - } - - for( ; i < psEncC->subfr_length; i++ ) { - x_sc_Q10[ i ] = silk_SMULWW( x_Q3[ i ], inv_gain_Q23 ); - } - - /* Save inverse gain */ - NSQ->prev_gain_Q16 = Gains_Q16[ subfr ]; - - /* After rewhitening the LTP state is un-scaled, so scale with inv_gain_Q16 */ - if( NSQ->rewhite_flag ) { - if( subfr == 0 ) { - /* Do LTP downscaling */ - inv_gain_Q31 = silk_LSHIFT( silk_SMULWB( inv_gain_Q31, LTP_scale_Q14 ), 2 ); - } - for( i = NSQ->sLTP_buf_idx - lag - LTP_ORDER / 2; i < NSQ->sLTP_buf_idx; i++ ) { - silk_assert( i < MAX_FRAME_LENGTH ); - sLTP_Q15[ i ] = silk_SMULWB( inv_gain_Q31, sLTP[ i ] ); - } - } - - /* Adjust for changing gain */ - if( gain_adj_Q16 != (opus_int32)1 << 16 ) { - /* Scale long-term shaping state */ - __m128i xmm_gain_adj_Q16, xmm_sLTP_shp_Q14_x2x0, xmm_sLTP_shp_Q14_x3x1; - - /* prepare gain_adj_Q16 in packed 4 
32-bits */ - xmm_gain_adj_Q16 = _mm_set1_epi32(gain_adj_Q16); - - for( i = NSQ->sLTP_shp_buf_idx - psEncC->ltp_mem_length; i < NSQ->sLTP_shp_buf_idx - 3; i += 4 ) - { - xmm_sLTP_shp_Q14_x2x0 = _mm_loadu_si128( (__m128i *)(&(NSQ->sLTP_shp_Q14[ i ] ) ) ); - /* equal shift right 4 bytes*/ - xmm_sLTP_shp_Q14_x3x1 = _mm_shuffle_epi32( xmm_sLTP_shp_Q14_x2x0, _MM_SHUFFLE( 0, 3, 2, 1 ) ); - - xmm_sLTP_shp_Q14_x2x0 = _mm_mul_epi32( xmm_sLTP_shp_Q14_x2x0, xmm_gain_adj_Q16 ); - xmm_sLTP_shp_Q14_x3x1 = _mm_mul_epi32( xmm_sLTP_shp_Q14_x3x1, xmm_gain_adj_Q16 ); - - xmm_sLTP_shp_Q14_x2x0 = _mm_srli_epi64( xmm_sLTP_shp_Q14_x2x0, 16 ); - xmm_sLTP_shp_Q14_x3x1 = _mm_slli_epi64( xmm_sLTP_shp_Q14_x3x1, 16 ); - - xmm_sLTP_shp_Q14_x2x0 = _mm_blend_epi16( xmm_sLTP_shp_Q14_x2x0, xmm_sLTP_shp_Q14_x3x1, 0xCC ); - - _mm_storeu_si128( (__m128i *)(&(NSQ->sLTP_shp_Q14[ i ] ) ), xmm_sLTP_shp_Q14_x2x0 ); - } - - for( ; i < NSQ->sLTP_shp_buf_idx; i++ ) { - NSQ->sLTP_shp_Q14[ i ] = silk_SMULWW( gain_adj_Q16, NSQ->sLTP_shp_Q14[ i ] ); - } - - /* Scale long-term prediction state */ - if( signal_type == TYPE_VOICED && NSQ->rewhite_flag == 0 ) { - for( i = NSQ->sLTP_buf_idx - lag - LTP_ORDER / 2; i < NSQ->sLTP_buf_idx; i++ ) { - sLTP_Q15[ i ] = silk_SMULWW( gain_adj_Q16, sLTP_Q15[ i ] ); - } - } - - NSQ->sLF_AR_shp_Q14 = silk_SMULWW( gain_adj_Q16, NSQ->sLF_AR_shp_Q14 ); - - /* Scale short-term prediction and shaping states */ - for( i = 0; i < NSQ_LPC_BUF_LENGTH; i++ ) { - NSQ->sLPC_Q14[ i ] = silk_SMULWW( gain_adj_Q16, NSQ->sLPC_Q14[ i ] ); - } - for( i = 0; i < MAX_SHAPE_LPC_ORDER; i++ ) { - NSQ->sAR2_Q14[ i ] = silk_SMULWW( gain_adj_Q16, NSQ->sAR2_Q14[ i ] ); - } - } -} diff --git a/libs/libopus/silk/x86/NSQ_sse4_1.c b/libs/libopus/silk/x86/NSQ_sse4_1.c new file mode 100644 index 000000000..a2a74659b --- /dev/null +++ b/libs/libopus/silk/x86/NSQ_sse4_1.c @@ -0,0 +1,772 @@ +/* Copyright (c) 2014-2020, Cisco Systems, INC + Written by XiangMingZhu WeiZhou MinPeng YanWang FrancisQuiers + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + - Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + - Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER + OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#ifdef HAVE_CONFIG_H +#include "config.h" +#endif + +#include <xmmintrin.h> +#include <emmintrin.h> +#include <smmintrin.h> +#include "main.h" +#include "celt/x86/x86cpu.h" +#include "stack_alloc.h" + +static OPUS_INLINE void silk_nsq_scale_states_sse4_1( + const silk_encoder_state *psEncC, /* I Encoder State */ + silk_nsq_state *NSQ, /* I/O NSQ state */ + const opus_int16 x16[], /* I input */ + opus_int32 x_sc_Q10[], /* O input scaled with 1/Gain */ + const opus_int16 sLTP[], /* I re-whitened LTP state in Q0 */ + opus_int32 sLTP_Q15[], /* O LTP state matching scaled input */ + opus_int subfr, /* I subframe number */ + const opus_int LTP_scale_Q14, /* I */ + const opus_int32 Gains_Q16[ MAX_NB_SUBFR ], /* I */ + const opus_int pitchL[ MAX_NB_SUBFR ], /* I Pitch lag */ + const opus_int signal_type /* I Signal type */ +); + +static OPUS_INLINE void silk_noise_shape_quantizer_10_16_sse4_1( + silk_nsq_state *NSQ, /* I/O NSQ state */ + opus_int signalType, /* I Signal type */ + const opus_int32 x_sc_Q10[], /* I */ + opus_int8 pulses[], /* O */ + opus_int16 xq[], /* O */ + opus_int32 sLTP_Q15[], /* I/O LTP state */ + const opus_int16 a_Q12[], /* I Short term prediction coefs */ + const opus_int16 b_Q14[], /* I Long term prediction coefs */ + const opus_int16 AR_shp_Q13[], /* I Noise shaping AR coefs */ + opus_int lag, /* I Pitch lag */ + opus_int32 HarmShapeFIRPacked_Q14, /* I */ + opus_int Tilt_Q14, /* I Spectral tilt */ + opus_int32 LF_shp_Q14, /* I */ + opus_int32 Gain_Q16, /* I */ + opus_int Lambda_Q10, /* I */ + opus_int offset_Q10, /* I */ + opus_int length, /* I Input length */ + opus_int32 table[][4] /* I */ +); + +void silk_NSQ_sse4_1( + const silk_encoder_state *psEncC, /* I Encoder State */ + silk_nsq_state *NSQ, /* I/O NSQ state */ + SideInfoIndices *psIndices, /* I/O Quantization Indices */ + const opus_int16 x16[], /* I Input */ + opus_int8 pulses[], /* O Quantized pulse signal */ + const opus_int16 PredCoef_Q12[ 2 * MAX_LPC_ORDER ], /* I Short term prediction coefs */ + const opus_int16 LTPCoef_Q14[ LTP_ORDER * MAX_NB_SUBFR ], /* I Long term prediction coefs */ + const opus_int16 AR_Q13[ MAX_NB_SUBFR * MAX_SHAPE_LPC_ORDER ], /* I Noise shaping coefs */ + const opus_int HarmShapeGain_Q14[ MAX_NB_SUBFR ], /* I Long term shaping coefs */ + const opus_int Tilt_Q14[ MAX_NB_SUBFR ], /* I Spectral tilt */ + const opus_int32 LF_shp_Q14[ MAX_NB_SUBFR ], /* I Low frequency shaping coefs */ + const opus_int32 Gains_Q16[ MAX_NB_SUBFR ], /* I Quantization step sizes */ + const opus_int pitchL[ MAX_NB_SUBFR ], /* I Pitch lags */ + const opus_int Lambda_Q10, /* I Rate/distortion tradeoff */ + const opus_int LTP_scale_Q14 /* I LTP state scaling */ +) +{ + opus_int k, lag, start_idx, LSF_interpolation_flag; + const opus_int16 *A_Q12, *B_Q14, *AR_shp_Q13; + opus_int16 *pxq; + VARDECL( opus_int32, sLTP_Q15 ); + VARDECL( opus_int16, sLTP ); + opus_int32 HarmShapeFIRPacked_Q14; + opus_int offset_Q10; + VARDECL( opus_int32, x_sc_Q10 ); + + opus_int32 table[ 64 ][ 4 ]; + opus_int32 tmp1; + opus_int32 q1_Q10, q2_Q10, rd1_Q20, rd2_Q20; + +#ifdef OPUS_CHECK_ASM + silk_nsq_state NSQ_c; + SideInfoIndices psIndices_c; + opus_int8 pulses_c[ MAX_FRAME_LENGTH ]; + const opus_int8 *const pulses_a = pulses; +#endif + + SAVE_STACK; + +#ifdef OPUS_CHECK_ASM + ( void )pulses_a; + silk_memcpy( &NSQ_c, NSQ, sizeof( NSQ_c ) ); + silk_memcpy( &psIndices_c, psIndices, sizeof( psIndices_c ) ); + silk_assert( psEncC->nb_subfr * psEncC->subfr_length <= MAX_FRAME_LENGTH ); + silk_memcpy( pulses_c, pulses, psEncC->nb_subfr * psEncC->subfr_length * sizeof( 
pulses[0] ) ); + + silk_NSQ_c( + psEncC, + &NSQ_c, + &psIndices_c, + x16, + pulses_c, + PredCoef_Q12, + LTPCoef_Q14, + AR_Q13, + HarmShapeGain_Q14, + Tilt_Q14, + LF_shp_Q14, + Gains_Q16, + pitchL, + Lambda_Q10, + LTP_scale_Q14 + ); +#endif + + NSQ->rand_seed = psIndices->Seed; + + /* Set unvoiced lag to the previous one, overwrite later for voiced */ + lag = NSQ->lagPrev; + + silk_assert( NSQ->prev_gain_Q16 != 0 ); + + offset_Q10 = silk_Quantization_Offsets_Q10[ psIndices->signalType >> 1 ][ psIndices->quantOffsetType ]; + + /* 0 */ + q1_Q10 = offset_Q10; + q2_Q10 = offset_Q10 + ( 1024 - QUANT_LEVEL_ADJUST_Q10 ); + rd1_Q20 = q1_Q10 * Lambda_Q10; + rd2_Q20 = q2_Q10 * Lambda_Q10; + + table[ 32 ][ 0 ] = q1_Q10; + table[ 32 ][ 1 ] = q2_Q10; + table[ 32 ][ 2 ] = 2 * (q1_Q10 - q2_Q10); + table[ 32 ][ 3 ] = (rd1_Q20 - rd2_Q20) + (q1_Q10 * q1_Q10 - q2_Q10 * q2_Q10); + + /* -1 */ + q1_Q10 = offset_Q10 - ( 1024 - QUANT_LEVEL_ADJUST_Q10 ); + q2_Q10 = offset_Q10; + rd1_Q20 = - q1_Q10 * Lambda_Q10; + rd2_Q20 = q2_Q10 * Lambda_Q10; + + table[ 31 ][ 0 ] = q1_Q10; + table[ 31 ][ 1 ] = q2_Q10; + table[ 31 ][ 2 ] = 2 * (q1_Q10 - q2_Q10); + table[ 31 ][ 3 ] = (rd1_Q20 - rd2_Q20) + (q1_Q10 * q1_Q10 - q2_Q10 * q2_Q10); + + /* > 0 */ + for (k = 1; k <= 31; k++) + { + tmp1 = offset_Q10 + silk_LSHIFT( k, 10 ); + + q1_Q10 = tmp1 - QUANT_LEVEL_ADJUST_Q10; + q2_Q10 = tmp1 - QUANT_LEVEL_ADJUST_Q10 + 1024; + rd1_Q20 = q1_Q10 * Lambda_Q10; + rd2_Q20 = q2_Q10 * Lambda_Q10; + + table[ 32 + k ][ 0 ] = q1_Q10; + table[ 32 + k ][ 1 ] = q2_Q10; + table[ 32 + k ][ 2 ] = 2 * (q1_Q10 - q2_Q10); + table[ 32 + k ][ 3 ] = (rd1_Q20 - rd2_Q20) + (q1_Q10 * q1_Q10 - q2_Q10 * q2_Q10); + } + + /* < -1 */ + for (k = -32; k <= -2; k++) + { + tmp1 = offset_Q10 + silk_LSHIFT( k, 10 ); + + q1_Q10 = tmp1 + QUANT_LEVEL_ADJUST_Q10; + q2_Q10 = tmp1 + QUANT_LEVEL_ADJUST_Q10 + 1024; + rd1_Q20 = - q1_Q10 * Lambda_Q10; + rd2_Q20 = - q2_Q10 * Lambda_Q10; + + table[ 32 + k ][ 0 ] = q1_Q10; + table[ 32 + k ][ 1 ] = q2_Q10; + table[ 32 + k ][ 2 ] = 2 * (q1_Q10 - q2_Q10); + table[ 32 + k ][ 3 ] = (rd1_Q20 - rd2_Q20) + (q1_Q10 * q1_Q10 - q2_Q10 * q2_Q10); + } + + if( psIndices->NLSFInterpCoef_Q2 == 4 ) { + LSF_interpolation_flag = 0; + } else { + LSF_interpolation_flag = 1; + } + + ALLOC( sLTP_Q15, psEncC->ltp_mem_length + psEncC->frame_length, opus_int32 ); + ALLOC( sLTP, psEncC->ltp_mem_length + psEncC->frame_length, opus_int16 ); + ALLOC( x_sc_Q10, psEncC->subfr_length, opus_int32 ); + /* Set up pointers to start of sub frame */ + NSQ->sLTP_shp_buf_idx = psEncC->ltp_mem_length; + NSQ->sLTP_buf_idx = psEncC->ltp_mem_length; + pxq = &NSQ->xq[ psEncC->ltp_mem_length ]; + for( k = 0; k < psEncC->nb_subfr; k++ ) { + A_Q12 = &PredCoef_Q12[ (( k >> 1 ) | ( 1 - LSF_interpolation_flag )) * MAX_LPC_ORDER ]; + B_Q14 = &LTPCoef_Q14[ k * LTP_ORDER ]; + AR_shp_Q13 = &AR_Q13[ k * MAX_SHAPE_LPC_ORDER ]; + + /* Noise shape parameters */ + silk_assert( HarmShapeGain_Q14[ k ] >= 0 ); + HarmShapeFIRPacked_Q14 = silk_RSHIFT( HarmShapeGain_Q14[ k ], 2 ); + HarmShapeFIRPacked_Q14 |= silk_LSHIFT( (opus_int32)silk_RSHIFT( HarmShapeGain_Q14[ k ], 1 ), 16 ); + + NSQ->rewhite_flag = 0; + if( psIndices->signalType == TYPE_VOICED ) { + /* Voiced */ + lag = pitchL[ k ]; + + /* Re-whitening */ + if( ( k & ( 3 - silk_LSHIFT( LSF_interpolation_flag, 1 ) ) ) == 0 ) { + /* Rewhiten with new A coefs */ + start_idx = psEncC->ltp_mem_length - lag - psEncC->predictLPCOrder - LTP_ORDER / 2; + celt_assert( start_idx > 0 ); + + silk_LPC_analysis_filter( &sLTP[ start_idx ], &NSQ->xq[ start_idx + k 
* psEncC->subfr_length ], + A_Q12, psEncC->ltp_mem_length - start_idx, psEncC->predictLPCOrder, psEncC->arch ); + + NSQ->rewhite_flag = 1; + NSQ->sLTP_buf_idx = psEncC->ltp_mem_length; + } + } + + silk_nsq_scale_states_sse4_1( psEncC, NSQ, x16, x_sc_Q10, sLTP, sLTP_Q15, k, LTP_scale_Q14, Gains_Q16, pitchL, psIndices->signalType ); + + if ( opus_likely( ( 10 == psEncC->shapingLPCOrder ) && ( 16 == psEncC->predictLPCOrder) ) ) + { + silk_noise_shape_quantizer_10_16_sse4_1( NSQ, psIndices->signalType, x_sc_Q10, pulses, pxq, sLTP_Q15, A_Q12, B_Q14, + AR_shp_Q13, lag, HarmShapeFIRPacked_Q14, Tilt_Q14[ k ], LF_shp_Q14[ k ], Gains_Q16[ k ], Lambda_Q10, + offset_Q10, psEncC->subfr_length, &(table[32]) ); + } + else + { + silk_noise_shape_quantizer( NSQ, psIndices->signalType, x_sc_Q10, pulses, pxq, sLTP_Q15, A_Q12, B_Q14, + AR_shp_Q13, lag, HarmShapeFIRPacked_Q14, Tilt_Q14[ k ], LF_shp_Q14[ k ], Gains_Q16[ k ], Lambda_Q10, + offset_Q10, psEncC->subfr_length, psEncC->shapingLPCOrder, psEncC->predictLPCOrder, psEncC->arch ); + } + + x16 += psEncC->subfr_length; + pulses += psEncC->subfr_length; + pxq += psEncC->subfr_length; + } + + /* Update lagPrev for next frame */ + NSQ->lagPrev = pitchL[ psEncC->nb_subfr - 1 ]; + + /* Save quantized speech and noise shaping signals */ + silk_memmove( NSQ->xq, &NSQ->xq[ psEncC->frame_length ], psEncC->ltp_mem_length * sizeof( opus_int16 ) ); + silk_memmove( NSQ->sLTP_shp_Q14, &NSQ->sLTP_shp_Q14[ psEncC->frame_length ], psEncC->ltp_mem_length * sizeof( opus_int32 ) ); + +#ifdef OPUS_CHECK_ASM + silk_assert( !memcmp( &NSQ_c, NSQ, sizeof( NSQ_c ) ) ); + silk_assert( !memcmp( &psIndices_c, psIndices, sizeof( psIndices_c ) ) ); + silk_assert( !memcmp( pulses_c, pulses_a, psEncC->nb_subfr * psEncC->subfr_length * sizeof( pulses[0] ) ) ); +#endif + + RESTORE_STACK; +} + +/************************************/ +/* silk_noise_shape_quantizer_10_16 */ +/************************************/ +static OPUS_INLINE void silk_noise_shape_quantizer_10_16_sse4_1( + silk_nsq_state *NSQ, /* I/O NSQ state */ + opus_int signalType, /* I Signal type */ + const opus_int32 x_sc_Q10[], /* I */ + opus_int8 pulses[], /* O */ + opus_int16 xq[], /* O */ + opus_int32 sLTP_Q15[], /* I/O LTP state */ + const opus_int16 a_Q12[], /* I Short term prediction coefs */ + const opus_int16 b_Q14[], /* I Long term prediction coefs */ + const opus_int16 AR_shp_Q13[], /* I Noise shaping AR coefs */ + opus_int lag, /* I Pitch lag */ + opus_int32 HarmShapeFIRPacked_Q14, /* I */ + opus_int Tilt_Q14, /* I Spectral tilt */ + opus_int32 LF_shp_Q14, /* I */ + opus_int32 Gain_Q16, /* I */ + opus_int Lambda_Q10, /* I */ + opus_int offset_Q10, /* I */ + opus_int length, /* I Input length */ + opus_int32 table[][4] /* I */ +) +{ + opus_int i; + opus_int32 LTP_pred_Q13, LPC_pred_Q10, n_AR_Q12, n_LTP_Q13; + opus_int32 n_LF_Q12, r_Q10, q1_Q0, q1_Q10, q2_Q10; + opus_int32 exc_Q14, LPC_exc_Q14, xq_Q14, Gain_Q10, sDiff_shp_Q14; + opus_int32 tmp1, tmp2, sLF_AR_shp_Q14; + opus_int32 *psLPC_Q14, *shp_lag_ptr, *pred_lag_ptr; + + __m128i xmm_tempa, xmm_tempb; + + __m128i xmm_one; + + __m128i psLPC_Q14_hi_01234567, psLPC_Q14_hi_89ABCDEF; + __m128i psLPC_Q14_lo_01234567, psLPC_Q14_lo_89ABCDEF; + __m128i a_Q12_01234567, a_Q12_89ABCDEF; + + __m128i sAR2_Q14_hi_76543210, sAR2_Q14_lo_76543210; + __m128i AR_shp_Q13_76543210; + + int rdo_offset = (Lambda_Q10 >> 1) - 512; + + shp_lag_ptr = &NSQ->sLTP_shp_Q14[ NSQ->sLTP_shp_buf_idx - lag + HARM_SHAPE_FIR_TAPS / 2 ]; + pred_lag_ptr = &sLTP_Q15[ NSQ->sLTP_buf_idx - lag + LTP_ORDER / 2 ]; 
+ Gain_Q10 = silk_RSHIFT( Gain_Q16, 6 ); + + /* Set up short term AR state */ + psLPC_Q14 = &NSQ->sLPC_Q14[ NSQ_LPC_BUF_LENGTH - 1 ]; + + sLF_AR_shp_Q14 = NSQ->sLF_AR_shp_Q14; + xq_Q14 = psLPC_Q14[ 0 ]; + sDiff_shp_Q14 = NSQ->sDiff_shp_Q14; + LTP_pred_Q13 = 0; + + /* load a_Q12 */ + xmm_one = _mm_set_epi8( 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14 ); + + /* load a_Q12[0] - a_Q12[7] */ + a_Q12_01234567 = _mm_loadu_si128( (__m128i *)(&a_Q12[ 0 ] ) ); + /* load a_Q12[ 8 ] - a_Q12[ 15 ] */ + a_Q12_89ABCDEF = _mm_loadu_si128( (__m128i *)(&a_Q12[ 8 ] ) ); + + a_Q12_01234567 = _mm_shuffle_epi8( a_Q12_01234567, xmm_one ); + a_Q12_89ABCDEF = _mm_shuffle_epi8( a_Q12_89ABCDEF, xmm_one ); + + /* load AR_shp_Q13 */ + AR_shp_Q13_76543210 = _mm_loadu_si128( (__m128i *)(&AR_shp_Q13[0] ) ); + + /* load psLPC_Q14 */ + xmm_one = _mm_set_epi8(15, 14, 11, 10, 7, 6, 3, 2, 13, 12, 9, 8, 5, 4, 1, 0 ); + + xmm_tempa = _mm_loadu_si128( (__m128i *)(&psLPC_Q14[-16]) ); + xmm_tempb = _mm_loadu_si128( (__m128i *)(&psLPC_Q14[-12]) ); + + xmm_tempa = _mm_shuffle_epi8( xmm_tempa, xmm_one ); + xmm_tempb = _mm_shuffle_epi8( xmm_tempb, xmm_one ); + + psLPC_Q14_hi_89ABCDEF = _mm_unpackhi_epi64( xmm_tempa, xmm_tempb ); + psLPC_Q14_lo_89ABCDEF = _mm_unpacklo_epi64( xmm_tempa, xmm_tempb ); + + xmm_tempa = _mm_loadu_si128( (__m128i *)(&psLPC_Q14[ -8 ]) ); + xmm_tempb = _mm_loadu_si128( (__m128i *)(&psLPC_Q14[ -4 ]) ); + + xmm_tempa = _mm_shuffle_epi8( xmm_tempa, xmm_one ); + xmm_tempb = _mm_shuffle_epi8( xmm_tempb, xmm_one ); + + psLPC_Q14_hi_01234567 = _mm_unpackhi_epi64( xmm_tempa, xmm_tempb ); + psLPC_Q14_lo_01234567 = _mm_unpacklo_epi64( xmm_tempa, xmm_tempb ); + + /* load sAR2_Q14 */ + xmm_tempa = _mm_loadu_si128( (__m128i *)(&(NSQ->sAR2_Q14[ 0 ]) ) ); + xmm_tempb = _mm_loadu_si128( (__m128i *)(&(NSQ->sAR2_Q14[ 4 ]) ) ); + + xmm_tempa = _mm_shuffle_epi8( xmm_tempa, xmm_one ); + xmm_tempb = _mm_shuffle_epi8( xmm_tempb, xmm_one ); + + sAR2_Q14_hi_76543210 = _mm_unpackhi_epi64( xmm_tempa, xmm_tempb ); + sAR2_Q14_lo_76543210 = _mm_unpacklo_epi64( xmm_tempa, xmm_tempb ); + + /* prepare 1 in 8 * 16bit */ + xmm_one = _mm_set1_epi16(1); + + for( i = 0; i < length; i++ ) + { + /* Short-term prediction */ + __m128i xmm_hi_07, xmm_hi_8F, xmm_lo_07, xmm_lo_8F; + + /* Avoids introducing a bias because silk_SMLAWB() always rounds to -inf */ + LPC_pred_Q10 = 8; /* silk_RSHIFT( predictLPCOrder, 1 ); */ + + /* shift psLPC_Q14 */ + psLPC_Q14_hi_89ABCDEF = _mm_alignr_epi8( psLPC_Q14_hi_01234567, psLPC_Q14_hi_89ABCDEF, 2 ); + psLPC_Q14_lo_89ABCDEF = _mm_alignr_epi8( psLPC_Q14_lo_01234567, psLPC_Q14_lo_89ABCDEF, 2 ); + + psLPC_Q14_hi_01234567 = _mm_srli_si128( psLPC_Q14_hi_01234567, 2 ); + psLPC_Q14_lo_01234567 = _mm_srli_si128( psLPC_Q14_lo_01234567, 2 ); + + psLPC_Q14_hi_01234567 = _mm_insert_epi16( psLPC_Q14_hi_01234567, (xq_Q14 >> 16), 7 ); + psLPC_Q14_lo_01234567 = _mm_insert_epi16( psLPC_Q14_lo_01234567, (xq_Q14), 7 ); + + /* high part, use pmaddwd, results in 4 32-bit */ + xmm_hi_07 = _mm_madd_epi16( psLPC_Q14_hi_01234567, a_Q12_01234567 ); + xmm_hi_8F = _mm_madd_epi16( psLPC_Q14_hi_89ABCDEF, a_Q12_89ABCDEF ); + + /* low part, use pmulhw, results in 8 16-bit, note we need simulate unsigned * signed, _mm_srai_epi16(psLPC_Q14_lo_01234567, 15) */ + xmm_tempa = _mm_cmpgt_epi16( _mm_setzero_si128(), psLPC_Q14_lo_01234567 ); + xmm_tempb = _mm_cmpgt_epi16( _mm_setzero_si128(), psLPC_Q14_lo_89ABCDEF ); + + xmm_tempa = _mm_and_si128( xmm_tempa, a_Q12_01234567 ); + xmm_tempb = _mm_and_si128( xmm_tempb, a_Q12_89ABCDEF ); + + xmm_lo_07 = 
_mm_mulhi_epi16( psLPC_Q14_lo_01234567, a_Q12_01234567 ); + xmm_lo_8F = _mm_mulhi_epi16( psLPC_Q14_lo_89ABCDEF, a_Q12_89ABCDEF ); + + xmm_lo_07 = _mm_add_epi16( xmm_lo_07, xmm_tempa ); + xmm_lo_8F = _mm_add_epi16( xmm_lo_8F, xmm_tempb ); + + xmm_lo_07 = _mm_madd_epi16( xmm_lo_07, xmm_one ); + xmm_lo_8F = _mm_madd_epi16( xmm_lo_8F, xmm_one ); + + /* accumulate */ + xmm_hi_07 = _mm_add_epi32( xmm_hi_07, xmm_hi_8F ); + xmm_lo_07 = _mm_add_epi32( xmm_lo_07, xmm_lo_8F ); + + xmm_hi_07 = _mm_add_epi32( xmm_hi_07, xmm_lo_07 ); + + xmm_hi_07 = _mm_add_epi32( xmm_hi_07, _mm_unpackhi_epi64(xmm_hi_07, xmm_hi_07 ) ); + xmm_hi_07 = _mm_add_epi32( xmm_hi_07, _mm_shufflelo_epi16(xmm_hi_07, 0x0E ) ); + + LPC_pred_Q10 += _mm_cvtsi128_si32( xmm_hi_07 ); + + /* Long-term prediction */ + if ( opus_likely( signalType == TYPE_VOICED ) ) { + /* Unrolled loop */ + /* Avoids introducing a bias because silk_SMLAWB() always rounds to -inf */ + LTP_pred_Q13 = 2; + { + __m128i b_Q14_3210, b_Q14_0123, pred_lag_ptr_0123; + + b_Q14_3210 = OP_CVTEPI16_EPI32_M64( b_Q14 ); + b_Q14_0123 = _mm_shuffle_epi32( b_Q14_3210, 0x1B ); + + /* loaded: [0] [-1] [-2] [-3] */ + pred_lag_ptr_0123 = _mm_loadu_si128( (__m128i *)(&pred_lag_ptr[ -3 ] ) ); + /* shuffle to [-3] [-2] [-1] [0] and to new xmm */ + xmm_tempa = _mm_shuffle_epi32( pred_lag_ptr_0123, 0x1B ); + /*64-bit multiply, a[2] * b[-2], a[0] * b[0] */ + xmm_tempa = _mm_mul_epi32( xmm_tempa, b_Q14_3210 ); + /* right shift 2 bytes (16 bits), zero extended */ + xmm_tempa = _mm_srli_si128( xmm_tempa, 2 ); + + /* a[1] * b[-1], a[3] * b[-3] */ + pred_lag_ptr_0123 = _mm_mul_epi32( pred_lag_ptr_0123, b_Q14_0123 ); + pred_lag_ptr_0123 = _mm_srli_si128( pred_lag_ptr_0123, 2 ); + + pred_lag_ptr_0123 = _mm_add_epi32( pred_lag_ptr_0123, xmm_tempa ); + /* equal shift right 8 bytes*/ + xmm_tempa = _mm_shuffle_epi32( pred_lag_ptr_0123, _MM_SHUFFLE( 0, 0, 3, 2 ) ); + xmm_tempa = _mm_add_epi32( xmm_tempa, pred_lag_ptr_0123 ); + + LTP_pred_Q13 += _mm_cvtsi128_si32( xmm_tempa ); + + LTP_pred_Q13 = silk_SMLAWB( LTP_pred_Q13, pred_lag_ptr[ -4 ], b_Q14[ 4 ] ); + pred_lag_ptr++; + } + } + + /* Noise shape feedback */ + NSQ->sAR2_Q14[ 9 ] = NSQ->sAR2_Q14[ 8 ]; + NSQ->sAR2_Q14[ 8 ] = _mm_cvtsi128_si32( _mm_srli_si128(_mm_unpackhi_epi16( sAR2_Q14_lo_76543210, sAR2_Q14_hi_76543210 ), 12 ) ); + + sAR2_Q14_hi_76543210 = _mm_slli_si128( sAR2_Q14_hi_76543210, 2 ); + sAR2_Q14_lo_76543210 = _mm_slli_si128( sAR2_Q14_lo_76543210, 2 ); + + sAR2_Q14_hi_76543210 = _mm_insert_epi16( sAR2_Q14_hi_76543210, (sDiff_shp_Q14 >> 16), 0 ); + sAR2_Q14_lo_76543210 = _mm_insert_epi16( sAR2_Q14_lo_76543210, (sDiff_shp_Q14), 0 ); + + /* high part, use pmaddwd, results in 4 32-bit */ + xmm_hi_07 = _mm_madd_epi16( sAR2_Q14_hi_76543210, AR_shp_Q13_76543210 ); + + /* low part, use pmulhw, results in 8 16-bit, note we need simulate unsigned * signed,_mm_srai_epi16(sAR2_Q14_lo_76543210, 15) */ + xmm_tempa = _mm_cmpgt_epi16( _mm_setzero_si128(), sAR2_Q14_lo_76543210 ); + xmm_tempa = _mm_and_si128( xmm_tempa, AR_shp_Q13_76543210 ); + + xmm_lo_07 = _mm_mulhi_epi16( sAR2_Q14_lo_76543210, AR_shp_Q13_76543210 ); + xmm_lo_07 = _mm_add_epi16( xmm_lo_07, xmm_tempa ); + + xmm_lo_07 = _mm_madd_epi16( xmm_lo_07, xmm_one ); + + /* accumulate */ + xmm_hi_07 = _mm_add_epi32( xmm_hi_07, xmm_lo_07 ); + + xmm_hi_07 = _mm_add_epi32( xmm_hi_07, _mm_unpackhi_epi64(xmm_hi_07, xmm_hi_07 ) ); + xmm_hi_07 = _mm_add_epi32( xmm_hi_07, _mm_shufflelo_epi16(xmm_hi_07, 0x0E ) ); + + n_AR_Q12 = 5 + _mm_cvtsi128_si32( xmm_hi_07 ); + + n_AR_Q12 = silk_SMLAWB( n_AR_Q12, 
NSQ->sAR2_Q14[ 8 ], AR_shp_Q13[ 8 ] ); + n_AR_Q12 = silk_SMLAWB( n_AR_Q12, NSQ->sAR2_Q14[ 9 ], AR_shp_Q13[ 9 ] ); + + n_AR_Q12 = silk_LSHIFT32( n_AR_Q12, 1 ); /* Q11 -> Q12 */ + n_AR_Q12 = silk_SMLAWB( n_AR_Q12, sLF_AR_shp_Q14, Tilt_Q14 ); + + n_LF_Q12 = silk_SMULWB( NSQ->sLTP_shp_Q14[ NSQ->sLTP_shp_buf_idx - 1 ], LF_shp_Q14 ); + n_LF_Q12 = silk_SMLAWT( n_LF_Q12, sLF_AR_shp_Q14, LF_shp_Q14 ); + + celt_assert( lag > 0 || signalType != TYPE_VOICED ); + + /* Combine prediction and noise shaping signals */ + tmp1 = silk_SUB32( silk_LSHIFT32( LPC_pred_Q10, 2 ), n_AR_Q12 ); /* Q12 */ + tmp1 = silk_SUB32( tmp1, n_LF_Q12 ); /* Q12 */ + if( lag > 0 ) { + /* Symmetric, packed FIR coefficients */ + n_LTP_Q13 = silk_SMULWB( silk_ADD_SAT32( shp_lag_ptr[ 0 ], shp_lag_ptr[ -2 ] ), HarmShapeFIRPacked_Q14 ); + n_LTP_Q13 = silk_SMLAWT( n_LTP_Q13, shp_lag_ptr[ -1 ], HarmShapeFIRPacked_Q14 ); + n_LTP_Q13 = silk_LSHIFT( n_LTP_Q13, 1 ); + shp_lag_ptr++; + + tmp2 = silk_SUB32( LTP_pred_Q13, n_LTP_Q13 ); /* Q13 */ + tmp1 = silk_ADD_LSHIFT32( tmp2, tmp1, 1 ); /* Q13 */ + tmp1 = silk_RSHIFT_ROUND( tmp1, 3 ); /* Q10 */ + } else { + tmp1 = silk_RSHIFT_ROUND( tmp1, 2 ); /* Q10 */ + } + + r_Q10 = silk_SUB32( x_sc_Q10[ i ], tmp1 ); /* residual error Q10 */ + + /* Generate dither */ + NSQ->rand_seed = silk_RAND( NSQ->rand_seed ); + + /* Flip sign depending on dither */ + tmp2 = -r_Q10; + if ( NSQ->rand_seed < 0 ) r_Q10 = tmp2; + + r_Q10 = silk_LIMIT_32( r_Q10, -(31 << 10), 30 << 10 ); + + /* Find two quantization level candidates and measure their rate-distortion */ + q1_Q10 = silk_SUB32( r_Q10, offset_Q10 ); + q1_Q0 = silk_RSHIFT( q1_Q10, 10 ); + if (Lambda_Q10 > 2048) { + /* For aggressive RDO, the bias becomes more than one pulse. */ + if (q1_Q10 > rdo_offset) { + q1_Q0 = silk_RSHIFT( q1_Q10 - rdo_offset, 10 ); + } else if (q1_Q10 < -rdo_offset) { + q1_Q0 = silk_RSHIFT( q1_Q10 + rdo_offset, 10 ); + } else if (q1_Q10 < 0) { + q1_Q0 = -1; + } else { + q1_Q0 = 0; + } + } + + q1_Q10 = table[q1_Q0][0]; + q2_Q10 = table[q1_Q0][1]; + + if (r_Q10 * table[q1_Q0][2] - table[q1_Q0][3] < 0) + { + q1_Q10 = q2_Q10; + } + + pulses[ i ] = (opus_int8)silk_RSHIFT_ROUND( q1_Q10, 10 ); + + /* Excitation */ + exc_Q14 = silk_LSHIFT( q1_Q10, 4 ); + + tmp2 = -exc_Q14; + if ( NSQ->rand_seed < 0 ) exc_Q14 = tmp2; + + /* Add predictions */ + LPC_exc_Q14 = silk_ADD_LSHIFT32( exc_Q14, LTP_pred_Q13, 1 ); + xq_Q14 = silk_ADD_LSHIFT32( LPC_exc_Q14, LPC_pred_Q10, 4 ); + + /* Update states */ + psLPC_Q14++; + *psLPC_Q14 = xq_Q14; + NSQ->sDiff_shp_Q14 = silk_SUB_LSHIFT32( xq_Q14, x_sc_Q10[ i ], 4 ); + sLF_AR_shp_Q14 = silk_SUB_LSHIFT32( NSQ->sDiff_shp_Q14, n_AR_Q12, 2 ); + + NSQ->sLTP_shp_Q14[ NSQ->sLTP_shp_buf_idx ] = silk_SUB_LSHIFT32( sLF_AR_shp_Q14, n_LF_Q12, 2 ); + sLTP_Q15[ NSQ->sLTP_buf_idx ] = silk_LSHIFT( LPC_exc_Q14, 1 ); + NSQ->sLTP_shp_buf_idx++; + NSQ->sLTP_buf_idx++; + + /* Make dither dependent on quantized signal */ + NSQ->rand_seed = silk_ADD32_ovflw( NSQ->rand_seed, pulses[ i ] ); + } + + NSQ->sLF_AR_shp_Q14 = sLF_AR_shp_Q14; + + /* Scale XQ back to normal level before saving */ + psLPC_Q14 = &NSQ->sLPC_Q14[ NSQ_LPC_BUF_LENGTH ]; + + /* write back sAR2_Q14 */ + xmm_tempa = _mm_unpackhi_epi16( sAR2_Q14_lo_76543210, sAR2_Q14_hi_76543210 ); + xmm_tempb = _mm_unpacklo_epi16( sAR2_Q14_lo_76543210, sAR2_Q14_hi_76543210 ); + _mm_storeu_si128( (__m128i *)(&NSQ->sAR2_Q14[ 4 ]), xmm_tempa ); + _mm_storeu_si128( (__m128i *)(&NSQ->sAR2_Q14[ 0 ]), xmm_tempb ); + + /* xq[ i ] = (opus_int16)silk_SAT16( silk_RSHIFT_ROUND( silk_SMULWW( psLPC_Q14[ i ], 
Gain_Q10 ), 8 ) ); */ + { + __m128i xmm_Gain_Q10; + __m128i xmm_xq_Q14_3210, xmm_xq_Q14_x3x1, xmm_xq_Q14_7654, xmm_xq_Q14_x7x5; + + /* prepare (1 << 7) in packed 4 32-bits */ + xmm_tempa = _mm_set1_epi32( (1 << 7) ); + + /* prepare Gain_Q10 in packed 4 32-bits */ + xmm_Gain_Q10 = _mm_set1_epi32( Gain_Q10 ); + + /* process xq */ + for (i = 0; i < length - 7; i += 8) + { + xmm_xq_Q14_3210 = _mm_loadu_si128( (__m128i *)(&(psLPC_Q14[ i + 0 ] ) ) ); + xmm_xq_Q14_7654 = _mm_loadu_si128( (__m128i *)(&(psLPC_Q14[ i + 4 ] ) ) ); + + /* equal shift right 4 bytes*/ + xmm_xq_Q14_x3x1 = _mm_shuffle_epi32( xmm_xq_Q14_3210, _MM_SHUFFLE( 0, 3, 2, 1 ) ); + /* equal shift right 4 bytes*/ + xmm_xq_Q14_x7x5 = _mm_shuffle_epi32( xmm_xq_Q14_7654, _MM_SHUFFLE( 0, 3, 2, 1 ) ); + + xmm_xq_Q14_3210 = _mm_mul_epi32( xmm_xq_Q14_3210, xmm_Gain_Q10 ); + xmm_xq_Q14_x3x1 = _mm_mul_epi32( xmm_xq_Q14_x3x1, xmm_Gain_Q10 ); + xmm_xq_Q14_7654 = _mm_mul_epi32( xmm_xq_Q14_7654, xmm_Gain_Q10 ); + xmm_xq_Q14_x7x5 = _mm_mul_epi32( xmm_xq_Q14_x7x5, xmm_Gain_Q10 ); + + xmm_xq_Q14_3210 = _mm_srli_epi64( xmm_xq_Q14_3210, 16 ); + xmm_xq_Q14_x3x1 = _mm_slli_epi64( xmm_xq_Q14_x3x1, 16 ); + xmm_xq_Q14_7654 = _mm_srli_epi64( xmm_xq_Q14_7654, 16 ); + xmm_xq_Q14_x7x5 = _mm_slli_epi64( xmm_xq_Q14_x7x5, 16 ); + + xmm_xq_Q14_3210 = _mm_blend_epi16( xmm_xq_Q14_3210, xmm_xq_Q14_x3x1, 0xCC ); + xmm_xq_Q14_7654 = _mm_blend_epi16( xmm_xq_Q14_7654, xmm_xq_Q14_x7x5, 0xCC ); + + /* silk_RSHIFT_ROUND(xq, 8) */ + xmm_xq_Q14_3210 = _mm_add_epi32( xmm_xq_Q14_3210, xmm_tempa ); + xmm_xq_Q14_7654 = _mm_add_epi32( xmm_xq_Q14_7654, xmm_tempa ); + + xmm_xq_Q14_3210 = _mm_srai_epi32( xmm_xq_Q14_3210, 8 ); + xmm_xq_Q14_7654 = _mm_srai_epi32( xmm_xq_Q14_7654, 8 ); + + /* silk_SAT16 */ + xmm_xq_Q14_3210 = _mm_packs_epi32( xmm_xq_Q14_3210, xmm_xq_Q14_7654 ); + + /* save to xq */ + _mm_storeu_si128( (__m128i *)(&xq[ i ] ), xmm_xq_Q14_3210 ); + } + } + for ( ; i < length; i++) + { + xq[i] = (opus_int16)silk_SAT16( silk_RSHIFT_ROUND( silk_SMULWW( psLPC_Q14[ i ], Gain_Q10 ), 8 ) ); + } + + /* Update LPC synth buffer */ + silk_memcpy( NSQ->sLPC_Q14, &NSQ->sLPC_Q14[ length ], NSQ_LPC_BUF_LENGTH * sizeof( opus_int32 ) ); +} + +static OPUS_INLINE void silk_nsq_scale_states_sse4_1( + const silk_encoder_state *psEncC, /* I Encoder State */ + silk_nsq_state *NSQ, /* I/O NSQ state */ + const opus_int16 x16[], /* I input */ + opus_int32 x_sc_Q10[], /* O input scaled with 1/Gain */ + const opus_int16 sLTP[], /* I re-whitened LTP state in Q0 */ + opus_int32 sLTP_Q15[], /* O LTP state matching scaled input */ + opus_int subfr, /* I subframe number */ + const opus_int LTP_scale_Q14, /* I */ + const opus_int32 Gains_Q16[ MAX_NB_SUBFR ], /* I */ + const opus_int pitchL[ MAX_NB_SUBFR ], /* I Pitch lag */ + const opus_int signal_type /* I Signal type */ +) +{ + opus_int i, lag; + opus_int32 gain_adj_Q16, inv_gain_Q31, inv_gain_Q26; + __m128i xmm_inv_gain_Q26, xmm_x16_x2x0, xmm_x16_x3x1; + + lag = pitchL[ subfr ]; + inv_gain_Q31 = silk_INVERSE32_varQ( silk_max( Gains_Q16[ subfr ], 1 ), 47 ); + silk_assert( inv_gain_Q31 != 0 ); + + /* Scale input */ + inv_gain_Q26 = silk_RSHIFT_ROUND( inv_gain_Q31, 5 ); + + /* prepare inv_gain_Q26 in packed 4 32-bits */ + xmm_inv_gain_Q26 = _mm_set1_epi32(inv_gain_Q26); + + for( i = 0; i < psEncC->subfr_length - 3; i += 4 ) { + xmm_x16_x2x0 = OP_CVTEPI16_EPI32_M64( &(x16[ i ] ) ); + + /* equal shift right 4 bytes*/ + xmm_x16_x3x1 = _mm_shuffle_epi32( xmm_x16_x2x0, _MM_SHUFFLE( 0, 3, 2, 1 ) ); + + xmm_x16_x2x0 = _mm_mul_epi32( xmm_x16_x2x0, 
xmm_inv_gain_Q26 ); + xmm_x16_x3x1 = _mm_mul_epi32( xmm_x16_x3x1, xmm_inv_gain_Q26 ); + + xmm_x16_x2x0 = _mm_srli_epi64( xmm_x16_x2x0, 16 ); + xmm_x16_x3x1 = _mm_slli_epi64( xmm_x16_x3x1, 16 ); + + xmm_x16_x2x0 = _mm_blend_epi16( xmm_x16_x2x0, xmm_x16_x3x1, 0xCC ); + + _mm_storeu_si128( (__m128i *)(&(x_sc_Q10[ i ] ) ), xmm_x16_x2x0 ); + } + + for( ; i < psEncC->subfr_length; i++ ) { + x_sc_Q10[ i ] = silk_SMULWW( x16[ i ], inv_gain_Q26 ); + } + + /* After rewhitening the LTP state is un-scaled, so scale with inv_gain_Q16 */ + if( NSQ->rewhite_flag ) { + if( subfr == 0 ) { + /* Do LTP downscaling */ + inv_gain_Q31 = silk_LSHIFT( silk_SMULWB( inv_gain_Q31, LTP_scale_Q14 ), 2 ); + } + for( i = NSQ->sLTP_buf_idx - lag - LTP_ORDER / 2; i < NSQ->sLTP_buf_idx; i++ ) { + silk_assert( i < MAX_FRAME_LENGTH ); + sLTP_Q15[ i ] = silk_SMULWB( inv_gain_Q31, sLTP[ i ] ); + } + } + + /* Adjust for changing gain */ + if( Gains_Q16[ subfr ] != NSQ->prev_gain_Q16 ) { + gain_adj_Q16 = silk_DIV32_varQ( NSQ->prev_gain_Q16, Gains_Q16[ subfr ], 16 ); + + /* Scale long-term shaping state */ + __m128i xmm_gain_adj_Q16, xmm_sLTP_shp_Q14_x2x0, xmm_sLTP_shp_Q14_x3x1; + + /* prepare gain_adj_Q16 in packed 4 32-bits */ + xmm_gain_adj_Q16 = _mm_set1_epi32(gain_adj_Q16); + + for( i = NSQ->sLTP_shp_buf_idx - psEncC->ltp_mem_length; i < NSQ->sLTP_shp_buf_idx - 3; i += 4 ) + { + xmm_sLTP_shp_Q14_x2x0 = _mm_loadu_si128( (__m128i *)(&(NSQ->sLTP_shp_Q14[ i ] ) ) ); + /* equal shift right 4 bytes*/ + xmm_sLTP_shp_Q14_x3x1 = _mm_shuffle_epi32( xmm_sLTP_shp_Q14_x2x0, _MM_SHUFFLE( 0, 3, 2, 1 ) ); + + xmm_sLTP_shp_Q14_x2x0 = _mm_mul_epi32( xmm_sLTP_shp_Q14_x2x0, xmm_gain_adj_Q16 ); + xmm_sLTP_shp_Q14_x3x1 = _mm_mul_epi32( xmm_sLTP_shp_Q14_x3x1, xmm_gain_adj_Q16 ); + + xmm_sLTP_shp_Q14_x2x0 = _mm_srli_epi64( xmm_sLTP_shp_Q14_x2x0, 16 ); + xmm_sLTP_shp_Q14_x3x1 = _mm_slli_epi64( xmm_sLTP_shp_Q14_x3x1, 16 ); + + xmm_sLTP_shp_Q14_x2x0 = _mm_blend_epi16( xmm_sLTP_shp_Q14_x2x0, xmm_sLTP_shp_Q14_x3x1, 0xCC ); + + _mm_storeu_si128( (__m128i *)(&(NSQ->sLTP_shp_Q14[ i ] ) ), xmm_sLTP_shp_Q14_x2x0 ); + } + + for( ; i < NSQ->sLTP_shp_buf_idx; i++ ) { + NSQ->sLTP_shp_Q14[ i ] = silk_SMULWW( gain_adj_Q16, NSQ->sLTP_shp_Q14[ i ] ); + } + + /* Scale long-term prediction state */ + if( signal_type == TYPE_VOICED && NSQ->rewhite_flag == 0 ) { + for( i = NSQ->sLTP_buf_idx - lag - LTP_ORDER / 2; i < NSQ->sLTP_buf_idx; i++ ) { + sLTP_Q15[ i ] = silk_SMULWW( gain_adj_Q16, sLTP_Q15[ i ] ); + } + } + + NSQ->sLF_AR_shp_Q14 = silk_SMULWW( gain_adj_Q16, NSQ->sLF_AR_shp_Q14 ); + NSQ->sDiff_shp_Q14 = silk_SMULWW( gain_adj_Q16, NSQ->sDiff_shp_Q14 ); + + /* Scale short-term prediction and shaping states */ + for( i = 0; i < NSQ_LPC_BUF_LENGTH; i++ ) { + NSQ->sLPC_Q14[ i ] = silk_SMULWW( gain_adj_Q16, NSQ->sLPC_Q14[ i ] ); + } + for( i = 0; i < MAX_SHAPE_LPC_ORDER; i++ ) { + NSQ->sAR2_Q14[ i ] = silk_SMULWW( gain_adj_Q16, NSQ->sAR2_Q14[ i ] ); + } + + /* Save inverse gain */ + NSQ->prev_gain_Q16 = Gains_Q16[ subfr ]; + } +} diff --git a/libs/libopus/silk/x86/SigProc_FIX_sse.h b/libs/libopus/silk/x86/SigProc_FIX_sse.h index 61efa8da4..e49d5d4ec 100644 --- a/libs/libopus/silk/x86/SigProc_FIX_sse.h +++ b/libs/libopus/silk/x86/SigProc_FIX_sse.h @@ -67,7 +67,7 @@ extern void (*const SILK_BURG_MODIFIED_IMPL[OPUS_ARCHMASK + 1])( #endif -opus_int64 silk_inner_prod16_aligned_64_sse4_1( +opus_int64 silk_inner_prod16_sse4_1( const opus_int16 *inVec1, const opus_int16 *inVec2, const opus_int len @@ -76,18 +76,18 @@ opus_int64 silk_inner_prod16_aligned_64_sse4_1( #if 
defined(OPUS_X86_PRESUME_SSE4_1) -#define silk_inner_prod16_aligned_64(inVec1, inVec2, len, arch) \ - ((void)(arch),silk_inner_prod16_aligned_64_sse4_1(inVec1, inVec2, len)) +#define silk_inner_prod16(inVec1, inVec2, len, arch) \ + ((void)(arch),silk_inner_prod16_sse4_1(inVec1, inVec2, len)) #else -extern opus_int64 (*const SILK_INNER_PROD16_ALIGNED_64_IMPL[OPUS_ARCHMASK + 1])( +extern opus_int64 (*const SILK_INNER_PROD16_IMPL[OPUS_ARCHMASK + 1])( const opus_int16 *inVec1, const opus_int16 *inVec2, const opus_int len); -# define silk_inner_prod16_aligned_64(inVec1, inVec2, len, arch) \ - ((*SILK_INNER_PROD16_ALIGNED_64_IMPL[(arch) & OPUS_ARCHMASK])(inVec1, inVec2, len)) +# define silk_inner_prod16(inVec1, inVec2, len, arch) \ + ((*SILK_INNER_PROD16_IMPL[(arch) & OPUS_ARCHMASK])(inVec1, inVec2, len)) #endif #endif diff --git a/libs/libopus/silk/x86/VAD_sse.c b/libs/libopus/silk/x86/VAD_sse.c deleted file mode 100644 index 4e90f4410..000000000 --- a/libs/libopus/silk/x86/VAD_sse.c +++ /dev/null @@ -1,277 +0,0 @@ -/* Copyright (c) 2014, Cisco Systems, INC - Written by XiangMingZhu WeiZhou MinPeng YanWang - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - - Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - - Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER - OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, - EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR - PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF - LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING - NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-*/ - -#ifdef HAVE_CONFIG_H -#include "config.h" -#endif - -#include -#include -#include - -#include "main.h" -#include "stack_alloc.h" - -/* Weighting factors for tilt measure */ -static const opus_int32 tiltWeights[ VAD_N_BANDS ] = { 30000, 6000, -12000, -12000 }; - -/***************************************/ -/* Get the speech activity level in Q8 */ -/***************************************/ -opus_int silk_VAD_GetSA_Q8_sse4_1( /* O Return value, 0 if success */ - silk_encoder_state *psEncC, /* I/O Encoder state */ - const opus_int16 pIn[] /* I PCM input */ -) -{ - opus_int SA_Q15, pSNR_dB_Q7, input_tilt; - opus_int decimated_framelength1, decimated_framelength2; - opus_int decimated_framelength; - opus_int dec_subframe_length, dec_subframe_offset, SNR_Q7, i, b, s; - opus_int32 sumSquared, smooth_coef_Q16; - opus_int16 HPstateTmp; - VARDECL( opus_int16, X ); - opus_int32 Xnrg[ VAD_N_BANDS ]; - opus_int32 NrgToNoiseRatio_Q8[ VAD_N_BANDS ]; - opus_int32 speech_nrg, x_tmp; - opus_int X_offset[ VAD_N_BANDS ]; - opus_int ret = 0; - silk_VAD_state *psSilk_VAD = &psEncC->sVAD; - - SAVE_STACK; - - /* Safety checks */ - silk_assert( VAD_N_BANDS == 4 ); - silk_assert( MAX_FRAME_LENGTH >= psEncC->frame_length ); - silk_assert( psEncC->frame_length <= 512 ); - silk_assert( psEncC->frame_length == 8 * silk_RSHIFT( psEncC->frame_length, 3 ) ); - - /***********************/ - /* Filter and Decimate */ - /***********************/ - decimated_framelength1 = silk_RSHIFT( psEncC->frame_length, 1 ); - decimated_framelength2 = silk_RSHIFT( psEncC->frame_length, 2 ); - decimated_framelength = silk_RSHIFT( psEncC->frame_length, 3 ); - /* Decimate into 4 bands: - 0 L 3L L 3L 5L - - -- - -- -- - 8 8 2 4 4 - - [0-1 kHz| temp. |1-2 kHz| 2-4 kHz | 4-8 kHz | - - They're arranged to allow the minimal ( frame_length / 4 ) extra - scratch space during the downsampling process */ - X_offset[ 0 ] = 0; - X_offset[ 1 ] = decimated_framelength + decimated_framelength2; - X_offset[ 2 ] = X_offset[ 1 ] + decimated_framelength; - X_offset[ 3 ] = X_offset[ 2 ] + decimated_framelength2; - ALLOC( X, X_offset[ 3 ] + decimated_framelength1, opus_int16 ); - - /* 0-8 kHz to 0-4 kHz and 4-8 kHz */ - silk_ana_filt_bank_1( pIn, &psSilk_VAD->AnaState[ 0 ], - X, &X[ X_offset[ 3 ] ], psEncC->frame_length ); - - /* 0-4 kHz to 0-2 kHz and 2-4 kHz */ - silk_ana_filt_bank_1( X, &psSilk_VAD->AnaState1[ 0 ], - X, &X[ X_offset[ 2 ] ], decimated_framelength1 ); - - /* 0-2 kHz to 0-1 kHz and 1-2 kHz */ - silk_ana_filt_bank_1( X, &psSilk_VAD->AnaState2[ 0 ], - X, &X[ X_offset[ 1 ] ], decimated_framelength2 ); - - /*********************************************/ - /* HP filter on lowest band (differentiator) */ - /*********************************************/ - X[ decimated_framelength - 1 ] = silk_RSHIFT( X[ decimated_framelength - 1 ], 1 ); - HPstateTmp = X[ decimated_framelength - 1 ]; - for( i = decimated_framelength - 1; i > 0; i-- ) { - X[ i - 1 ] = silk_RSHIFT( X[ i - 1 ], 1 ); - X[ i ] -= X[ i - 1 ]; - } - X[ 0 ] -= psSilk_VAD->HPstate; - psSilk_VAD->HPstate = HPstateTmp; - - /*************************************/ - /* Calculate the energy in each band */ - /*************************************/ - for( b = 0; b < VAD_N_BANDS; b++ ) { - /* Find the decimated framelength in the non-uniformly divided bands */ - decimated_framelength = silk_RSHIFT( psEncC->frame_length, silk_min_int( VAD_N_BANDS - b, VAD_N_BANDS - 1 ) ); - - /* Split length into subframe lengths */ - dec_subframe_length = silk_RSHIFT( decimated_framelength, 
VAD_INTERNAL_SUBFRAMES_LOG2 ); - dec_subframe_offset = 0; - - /* Compute energy per sub-frame */ - /* initialize with summed energy of last subframe */ - Xnrg[ b ] = psSilk_VAD->XnrgSubfr[ b ]; - for( s = 0; s < VAD_INTERNAL_SUBFRAMES; s++ ) { - __m128i xmm_X, xmm_acc; - sumSquared = 0; - - xmm_acc = _mm_setzero_si128(); - - for( i = 0; i < dec_subframe_length - 7; i += 8 ) - { - xmm_X = _mm_loadu_si128( (__m128i *)&(X[ X_offset[ b ] + i + dec_subframe_offset ] ) ); - xmm_X = _mm_srai_epi16( xmm_X, 3 ); - xmm_X = _mm_madd_epi16( xmm_X, xmm_X ); - xmm_acc = _mm_add_epi32( xmm_acc, xmm_X ); - } - - xmm_acc = _mm_add_epi32( xmm_acc, _mm_unpackhi_epi64( xmm_acc, xmm_acc ) ); - xmm_acc = _mm_add_epi32( xmm_acc, _mm_shufflelo_epi16( xmm_acc, 0x0E ) ); - - sumSquared += _mm_cvtsi128_si32( xmm_acc ); - - for( ; i < dec_subframe_length; i++ ) { - /* The energy will be less than dec_subframe_length * ( silk_int16_MIN / 8 ) ^ 2. */ - /* Therefore we can accumulate with no risk of overflow (unless dec_subframe_length > 128) */ - x_tmp = silk_RSHIFT( - X[ X_offset[ b ] + i + dec_subframe_offset ], 3 ); - sumSquared = silk_SMLABB( sumSquared, x_tmp, x_tmp ); - - /* Safety check */ - silk_assert( sumSquared >= 0 ); - } - - /* Add/saturate summed energy of current subframe */ - if( s < VAD_INTERNAL_SUBFRAMES - 1 ) { - Xnrg[ b ] = silk_ADD_POS_SAT32( Xnrg[ b ], sumSquared ); - } else { - /* Look-ahead subframe */ - Xnrg[ b ] = silk_ADD_POS_SAT32( Xnrg[ b ], silk_RSHIFT( sumSquared, 1 ) ); - } - - dec_subframe_offset += dec_subframe_length; - } - psSilk_VAD->XnrgSubfr[ b ] = sumSquared; - } - - /********************/ - /* Noise estimation */ - /********************/ - silk_VAD_GetNoiseLevels( &Xnrg[ 0 ], psSilk_VAD ); - - /***********************************************/ - /* Signal-plus-noise to noise ratio estimation */ - /***********************************************/ - sumSquared = 0; - input_tilt = 0; - for( b = 0; b < VAD_N_BANDS; b++ ) { - speech_nrg = Xnrg[ b ] - psSilk_VAD->NL[ b ]; - if( speech_nrg > 0 ) { - /* Divide, with sufficient resolution */ - if( ( Xnrg[ b ] & 0xFF800000 ) == 0 ) { - NrgToNoiseRatio_Q8[ b ] = silk_DIV32( silk_LSHIFT( Xnrg[ b ], 8 ), psSilk_VAD->NL[ b ] + 1 ); - } else { - NrgToNoiseRatio_Q8[ b ] = silk_DIV32( Xnrg[ b ], silk_RSHIFT( psSilk_VAD->NL[ b ], 8 ) + 1 ); - } - - /* Convert to log domain */ - SNR_Q7 = silk_lin2log( NrgToNoiseRatio_Q8[ b ] ) - 8 * 128; - - /* Sum-of-squares */ - sumSquared = silk_SMLABB( sumSquared, SNR_Q7, SNR_Q7 ); /* Q14 */ - - /* Tilt measure */ - if( speech_nrg < ( (opus_int32)1 << 20 ) ) { - /* Scale down SNR value for small subband speech energies */ - SNR_Q7 = silk_SMULWB( silk_LSHIFT( silk_SQRT_APPROX( speech_nrg ), 6 ), SNR_Q7 ); - } - input_tilt = silk_SMLAWB( input_tilt, tiltWeights[ b ], SNR_Q7 ); - } else { - NrgToNoiseRatio_Q8[ b ] = 256; - } - } - - /* Mean-of-squares */ - sumSquared = silk_DIV32_16( sumSquared, VAD_N_BANDS ); /* Q14 */ - - /* Root-mean-square approximation, scale to dBs, and write to output pointer */ - pSNR_dB_Q7 = (opus_int16)( 3 * silk_SQRT_APPROX( sumSquared ) ); /* Q7 */ - - /*********************************/ - /* Speech Probability Estimation */ - /*********************************/ - SA_Q15 = silk_sigm_Q15( silk_SMULWB( VAD_SNR_FACTOR_Q16, pSNR_dB_Q7 ) - VAD_NEGATIVE_OFFSET_Q5 ); - - /**************************/ - /* Frequency Tilt Measure */ - /**************************/ - psEncC->input_tilt_Q15 = silk_LSHIFT( silk_sigm_Q15( input_tilt ) - 16384, 1 ); - - 
/**************************************************/ - /* Scale the sigmoid output based on power levels */ - /**************************************************/ - speech_nrg = 0; - for( b = 0; b < VAD_N_BANDS; b++ ) { - /* Accumulate signal-without-noise energies, higher frequency bands have more weight */ - speech_nrg += ( b + 1 ) * silk_RSHIFT( Xnrg[ b ] - psSilk_VAD->NL[ b ], 4 ); - } - - /* Power scaling */ - if( speech_nrg <= 0 ) { - SA_Q15 = silk_RSHIFT( SA_Q15, 1 ); - } else if( speech_nrg < 32768 ) { - if( psEncC->frame_length == 10 * psEncC->fs_kHz ) { - speech_nrg = silk_LSHIFT_SAT32( speech_nrg, 16 ); - } else { - speech_nrg = silk_LSHIFT_SAT32( speech_nrg, 15 ); - } - - /* square-root */ - speech_nrg = silk_SQRT_APPROX( speech_nrg ); - SA_Q15 = silk_SMULWB( 32768 + speech_nrg, SA_Q15 ); - } - - /* Copy the resulting speech activity in Q8 */ - psEncC->speech_activity_Q8 = silk_min_int( silk_RSHIFT( SA_Q15, 7 ), silk_uint8_MAX ); - - /***********************************/ - /* Energy Level and SNR estimation */ - /***********************************/ - /* Smoothing coefficient */ - smooth_coef_Q16 = silk_SMULWB( VAD_SNR_SMOOTH_COEF_Q18, silk_SMULWB( (opus_int32)SA_Q15, SA_Q15 ) ); - - if( psEncC->frame_length == 10 * psEncC->fs_kHz ) { - smooth_coef_Q16 >>= 1; - } - - for( b = 0; b < VAD_N_BANDS; b++ ) { - /* compute smoothed energy-to-noise ratio per band */ - psSilk_VAD->NrgRatioSmth_Q8[ b ] = silk_SMLAWB( psSilk_VAD->NrgRatioSmth_Q8[ b ], - NrgToNoiseRatio_Q8[ b ] - psSilk_VAD->NrgRatioSmth_Q8[ b ], smooth_coef_Q16 ); - - /* signal to noise ratio in dB per band */ - SNR_Q7 = 3 * ( silk_lin2log( psSilk_VAD->NrgRatioSmth_Q8[b] ) - 8 * 128 ); - /* quality = sigmoid( 0.25 * ( SNR_dB - 16 ) ); */ - psEncC->input_quality_bands_Q15[ b ] = silk_sigm_Q15( silk_RSHIFT( SNR_Q7 - 16 * 128, 4 ) ); - } - - RESTORE_STACK; - return( ret ); -} diff --git a/libs/libopus/silk/x86/VAD_sse4_1.c b/libs/libopus/silk/x86/VAD_sse4_1.c new file mode 100644 index 000000000..e7eaf9714 --- /dev/null +++ b/libs/libopus/silk/x86/VAD_sse4_1.c @@ -0,0 +1,289 @@ +/* Copyright (c) 2014-2020, Cisco Systems, INC + Written by XiangMingZhu WeiZhou MinPeng YanWang FrancisQuiers + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + - Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + - Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER + OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#ifdef HAVE_CONFIG_H +#include "config.h" +#endif + +#include +#include +#include + +#include "main.h" +#include "stack_alloc.h" + +/* Weighting factors for tilt measure */ +static const opus_int32 tiltWeights[ VAD_N_BANDS ] = { 30000, 6000, -12000, -12000 }; + +/***************************************/ +/* Get the speech activity level in Q8 */ +/***************************************/ +opus_int silk_VAD_GetSA_Q8_sse4_1( /* O Return value, 0 if success */ + silk_encoder_state *psEncC, /* I/O Encoder state */ + const opus_int16 pIn[] /* I PCM input */ +) +{ + opus_int SA_Q15, pSNR_dB_Q7, input_tilt; + opus_int decimated_framelength1, decimated_framelength2; + opus_int decimated_framelength; + opus_int dec_subframe_length, dec_subframe_offset, SNR_Q7, i, b, s; + opus_int32 sumSquared, smooth_coef_Q16; + opus_int16 HPstateTmp; + VARDECL( opus_int16, X ); + opus_int32 Xnrg[ VAD_N_BANDS ]; + opus_int32 NrgToNoiseRatio_Q8[ VAD_N_BANDS ]; + opus_int32 speech_nrg, x_tmp; + opus_int X_offset[ VAD_N_BANDS ]; + opus_int ret = 0; + silk_VAD_state *psSilk_VAD = &psEncC->sVAD; + + SAVE_STACK; + +#ifdef OPUS_CHECK_ASM + silk_encoder_state psEncC_c; + opus_int ret_c; + + silk_memcpy( &psEncC_c, psEncC, sizeof( psEncC_c ) ); + ret_c = silk_VAD_GetSA_Q8_c( &psEncC_c, pIn ); +#endif + + /* Safety checks */ + silk_assert( VAD_N_BANDS == 4 ); + celt_assert( MAX_FRAME_LENGTH >= psEncC->frame_length ); + celt_assert( psEncC->frame_length <= 512 ); + celt_assert( psEncC->frame_length == 8 * silk_RSHIFT( psEncC->frame_length, 3 ) ); + + /***********************/ + /* Filter and Decimate */ + /***********************/ + decimated_framelength1 = silk_RSHIFT( psEncC->frame_length, 1 ); + decimated_framelength2 = silk_RSHIFT( psEncC->frame_length, 2 ); + decimated_framelength = silk_RSHIFT( psEncC->frame_length, 3 ); + /* Decimate into 4 bands: + 0 L 3L L 3L 5L + - -- - -- -- + 8 8 2 4 4 + + [0-1 kHz| temp. 
|1-2 kHz| 2-4 kHz | 4-8 kHz | + + They're arranged to allow the minimal ( frame_length / 4 ) extra + scratch space during the downsampling process */ + X_offset[ 0 ] = 0; + X_offset[ 1 ] = decimated_framelength + decimated_framelength2; + X_offset[ 2 ] = X_offset[ 1 ] + decimated_framelength; + X_offset[ 3 ] = X_offset[ 2 ] + decimated_framelength2; + ALLOC( X, X_offset[ 3 ] + decimated_framelength1, opus_int16 ); + + /* 0-8 kHz to 0-4 kHz and 4-8 kHz */ + silk_ana_filt_bank_1( pIn, &psSilk_VAD->AnaState[ 0 ], + X, &X[ X_offset[ 3 ] ], psEncC->frame_length ); + + /* 0-4 kHz to 0-2 kHz and 2-4 kHz */ + silk_ana_filt_bank_1( X, &psSilk_VAD->AnaState1[ 0 ], + X, &X[ X_offset[ 2 ] ], decimated_framelength1 ); + + /* 0-2 kHz to 0-1 kHz and 1-2 kHz */ + silk_ana_filt_bank_1( X, &psSilk_VAD->AnaState2[ 0 ], + X, &X[ X_offset[ 1 ] ], decimated_framelength2 ); + + /*********************************************/ + /* HP filter on lowest band (differentiator) */ + /*********************************************/ + X[ decimated_framelength - 1 ] = silk_RSHIFT( X[ decimated_framelength - 1 ], 1 ); + HPstateTmp = X[ decimated_framelength - 1 ]; + for( i = decimated_framelength - 1; i > 0; i-- ) { + X[ i - 1 ] = silk_RSHIFT( X[ i - 1 ], 1 ); + X[ i ] -= X[ i - 1 ]; + } + X[ 0 ] -= psSilk_VAD->HPstate; + psSilk_VAD->HPstate = HPstateTmp; + + /*************************************/ + /* Calculate the energy in each band */ + /*************************************/ + for( b = 0; b < VAD_N_BANDS; b++ ) { + /* Find the decimated framelength in the non-uniformly divided bands */ + decimated_framelength = silk_RSHIFT( psEncC->frame_length, silk_min_int( VAD_N_BANDS - b, VAD_N_BANDS - 1 ) ); + + /* Split length into subframe lengths */ + dec_subframe_length = silk_RSHIFT( decimated_framelength, VAD_INTERNAL_SUBFRAMES_LOG2 ); + dec_subframe_offset = 0; + + /* Compute energy per sub-frame */ + /* initialize with summed energy of last subframe */ + Xnrg[ b ] = psSilk_VAD->XnrgSubfr[ b ]; + for( s = 0; s < VAD_INTERNAL_SUBFRAMES; s++ ) { + __m128i xmm_X, xmm_acc; + sumSquared = 0; + + xmm_acc = _mm_setzero_si128(); + + for( i = 0; i < dec_subframe_length - 7; i += 8 ) + { + xmm_X = _mm_loadu_si128( (__m128i *)&(X[ X_offset[ b ] + i + dec_subframe_offset ] ) ); + xmm_X = _mm_srai_epi16( xmm_X, 3 ); + xmm_X = _mm_madd_epi16( xmm_X, xmm_X ); + xmm_acc = _mm_add_epi32( xmm_acc, xmm_X ); + } + + xmm_acc = _mm_add_epi32( xmm_acc, _mm_unpackhi_epi64( xmm_acc, xmm_acc ) ); + xmm_acc = _mm_add_epi32( xmm_acc, _mm_shufflelo_epi16( xmm_acc, 0x0E ) ); + + sumSquared += _mm_cvtsi128_si32( xmm_acc ); + + for( ; i < dec_subframe_length; i++ ) { + /* The energy will be less than dec_subframe_length * ( silk_int16_MIN / 8 ) ^ 2. 
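(Not part of the patch — illustrative only, hypothetical name.) The vectorised inner loop above squares and accumulates eight down-scaled samples at a time with _mm_madd_epi16, and the two horizontal adds then fold the four 32-bit partial sums into one. A scalar reference for what one sub-frame accumulates, under the same ">> 3" pre-scaling:

#include <stdint.h>

static int32_t band_energy_ref(const int16_t *x, int n)
{
    int32_t acc = 0;
    for (int i = 0; i < n; i++) {
        int32_t t = x[i] >> 3;   /* silk_RSHIFT( X[...], 3 ) */
        acc += t * t;            /* silk_SMLABB( acc, t, t ) */
    }
    return acc;
}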
*/ + /* Therefore we can accumulate with no risk of overflow (unless dec_subframe_length > 128) */ + x_tmp = silk_RSHIFT( + X[ X_offset[ b ] + i + dec_subframe_offset ], 3 ); + sumSquared = silk_SMLABB( sumSquared, x_tmp, x_tmp ); + + /* Safety check */ + silk_assert( sumSquared >= 0 ); + } + + /* Add/saturate summed energy of current subframe */ + if( s < VAD_INTERNAL_SUBFRAMES - 1 ) { + Xnrg[ b ] = silk_ADD_POS_SAT32( Xnrg[ b ], sumSquared ); + } else { + /* Look-ahead subframe */ + Xnrg[ b ] = silk_ADD_POS_SAT32( Xnrg[ b ], silk_RSHIFT( sumSquared, 1 ) ); + } + + dec_subframe_offset += dec_subframe_length; + } + psSilk_VAD->XnrgSubfr[ b ] = sumSquared; + } + + /********************/ + /* Noise estimation */ + /********************/ + silk_VAD_GetNoiseLevels( &Xnrg[ 0 ], psSilk_VAD ); + + /***********************************************/ + /* Signal-plus-noise to noise ratio estimation */ + /***********************************************/ + sumSquared = 0; + input_tilt = 0; + for( b = 0; b < VAD_N_BANDS; b++ ) { + speech_nrg = Xnrg[ b ] - psSilk_VAD->NL[ b ]; + if( speech_nrg > 0 ) { + /* Divide, with sufficient resolution */ + if( ( Xnrg[ b ] & 0xFF800000 ) == 0 ) { + NrgToNoiseRatio_Q8[ b ] = silk_DIV32( silk_LSHIFT( Xnrg[ b ], 8 ), psSilk_VAD->NL[ b ] + 1 ); + } else { + NrgToNoiseRatio_Q8[ b ] = silk_DIV32( Xnrg[ b ], silk_RSHIFT( psSilk_VAD->NL[ b ], 8 ) + 1 ); + } + + /* Convert to log domain */ + SNR_Q7 = silk_lin2log( NrgToNoiseRatio_Q8[ b ] ) - 8 * 128; + + /* Sum-of-squares */ + sumSquared = silk_SMLABB( sumSquared, SNR_Q7, SNR_Q7 ); /* Q14 */ + + /* Tilt measure */ + if( speech_nrg < ( (opus_int32)1 << 20 ) ) { + /* Scale down SNR value for small subband speech energies */ + SNR_Q7 = silk_SMULWB( silk_LSHIFT( silk_SQRT_APPROX( speech_nrg ), 6 ), SNR_Q7 ); + } + input_tilt = silk_SMLAWB( input_tilt, tiltWeights[ b ], SNR_Q7 ); + } else { + NrgToNoiseRatio_Q8[ b ] = 256; + } + } + + /* Mean-of-squares */ + sumSquared = silk_DIV32_16( sumSquared, VAD_N_BANDS ); /* Q14 */ + + /* Root-mean-square approximation, scale to dBs, and write to output pointer */ + pSNR_dB_Q7 = (opus_int16)( 3 * silk_SQRT_APPROX( sumSquared ) ); /* Q7 */ + + /*********************************/ + /* Speech Probability Estimation */ + /*********************************/ + SA_Q15 = silk_sigm_Q15( silk_SMULWB( VAD_SNR_FACTOR_Q16, pSNR_dB_Q7 ) - VAD_NEGATIVE_OFFSET_Q5 ); + + /**************************/ + /* Frequency Tilt Measure */ + /**************************/ + psEncC->input_tilt_Q15 = silk_LSHIFT( silk_sigm_Q15( input_tilt ) - 16384, 1 ); + + /**************************************************/ + /* Scale the sigmoid output based on power levels */ + /**************************************************/ + speech_nrg = 0; + for( b = 0; b < VAD_N_BANDS; b++ ) { + /* Accumulate signal-without-noise energies, higher frequency bands have more weight */ + speech_nrg += ( b + 1 ) * silk_RSHIFT( Xnrg[ b ] - psSilk_VAD->NL[ b ], 4 ); + } + + if( psEncC->frame_length == 20 * psEncC->fs_kHz ) { + speech_nrg = silk_RSHIFT32( speech_nrg, 1 ); + } + /* Power scaling */ + if( speech_nrg <= 0 ) { + SA_Q15 = silk_RSHIFT( SA_Q15, 1 ); + } else if( speech_nrg < 16384 ) { + speech_nrg = silk_LSHIFT32( speech_nrg, 16 ); + + /* square-root */ + speech_nrg = silk_SQRT_APPROX( speech_nrg ); + SA_Q15 = silk_SMULWB( 32768 + speech_nrg, SA_Q15 ); + } + + /* Copy the resulting speech activity in Q8 */ + psEncC->speech_activity_Q8 = silk_min_int( silk_RSHIFT( SA_Q15, 7 ), silk_uint8_MAX ); + + /***********************************/ + 
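(Not part of the patch — a hedged floating-point view with an illustrative name.) The power scaling above attenuates the speech-activity estimate when the signal-minus-noise energy is small: at or below zero the estimate is halved, and below 16384 it is scaled by a factor between 0.5 and 1 that grows with the square root of the energy (sqrt(16384) = 128, so the factor reaches 1.0 exactly at the threshold).

#include <math.h>

static float scale_activity_ref(float sa, float speech_nrg)
{
    if (speech_nrg <= 0.0f)
        return 0.5f * sa;                              /* silk_RSHIFT( SA_Q15, 1 ) */
    if (speech_nrg < 16384.0f)
        return sa * (0.5f + sqrtf(speech_nrg) / 256.0f); /* SMULWB( 32768 + sqrt, SA ) */
    return sa;
}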
/* Energy Level and SNR estimation */ + /***********************************/ + /* Smoothing coefficient */ + smooth_coef_Q16 = silk_SMULWB( VAD_SNR_SMOOTH_COEF_Q18, silk_SMULWB( (opus_int32)SA_Q15, SA_Q15 ) ); + + if( psEncC->frame_length == 10 * psEncC->fs_kHz ) { + smooth_coef_Q16 >>= 1; + } + + for( b = 0; b < VAD_N_BANDS; b++ ) { + /* compute smoothed energy-to-noise ratio per band */ + psSilk_VAD->NrgRatioSmth_Q8[ b ] = silk_SMLAWB( psSilk_VAD->NrgRatioSmth_Q8[ b ], + NrgToNoiseRatio_Q8[ b ] - psSilk_VAD->NrgRatioSmth_Q8[ b ], smooth_coef_Q16 ); + + /* signal to noise ratio in dB per band */ + SNR_Q7 = 3 * ( silk_lin2log( psSilk_VAD->NrgRatioSmth_Q8[b] ) - 8 * 128 ); + /* quality = sigmoid( 0.25 * ( SNR_dB - 16 ) ); */ + psEncC->input_quality_bands_Q15[ b ] = silk_sigm_Q15( silk_RSHIFT( SNR_Q7 - 16 * 128, 4 ) ); + } + +#ifdef OPUS_CHECK_ASM + silk_assert( ret == ret_c ); + silk_assert( !memcmp( &psEncC_c, psEncC, sizeof( psEncC_c ) ) ); +#endif + + RESTORE_STACK; + return( ret ); +} diff --git a/libs/libopus/silk/x86/VQ_WMat_EC_sse.c b/libs/libopus/silk/x86/VQ_WMat_EC_sse.c deleted file mode 100644 index 74d6c6d0e..000000000 --- a/libs/libopus/silk/x86/VQ_WMat_EC_sse.c +++ /dev/null @@ -1,142 +0,0 @@ -/* Copyright (c) 2014, Cisco Systems, INC - Written by XiangMingZhu WeiZhou MinPeng YanWang - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - - Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - - Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER - OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, - EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR - PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF - LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING - NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ - -#ifdef HAVE_CONFIG_H -#include "config.h" -#endif - -#include -#include -#include -#include "main.h" -#include "celt/x86/x86cpu.h" - -/* Entropy constrained matrix-weighted VQ, hard-coded to 5-element vectors, for a single input data vector */ -void silk_VQ_WMat_EC_sse4_1( - opus_int8 *ind, /* O index of best codebook vector */ - opus_int32 *rate_dist_Q14, /* O best weighted quant error + mu * rate */ - opus_int *gain_Q7, /* O sum of absolute LTP coefficients */ - const opus_int16 *in_Q14, /* I input vector to be quantized */ - const opus_int32 *W_Q18, /* I weighting matrix */ - const opus_int8 *cb_Q7, /* I codebook */ - const opus_uint8 *cb_gain_Q7, /* I codebook effective gain */ - const opus_uint8 *cl_Q5, /* I code length for each codebook vector */ - const opus_int mu_Q9, /* I tradeoff betw. 
weighted error and rate */ - const opus_int32 max_gain_Q7, /* I maximum sum of absolute LTP coefficients */ - opus_int L /* I number of vectors in codebook */ -) -{ - opus_int k, gain_tmp_Q7; - const opus_int8 *cb_row_Q7; - opus_int16 diff_Q14[ 5 ]; - opus_int32 sum1_Q14, sum2_Q16; - - __m128i C_tmp1, C_tmp2, C_tmp3, C_tmp4, C_tmp5; - /* Loop over codebook */ - *rate_dist_Q14 = silk_int32_MAX; - cb_row_Q7 = cb_Q7; - for( k = 0; k < L; k++ ) { - gain_tmp_Q7 = cb_gain_Q7[k]; - - diff_Q14[ 0 ] = in_Q14[ 0 ] - silk_LSHIFT( cb_row_Q7[ 0 ], 7 ); - - C_tmp1 = OP_CVTEPI16_EPI32_M64( &in_Q14[ 1 ] ); - C_tmp2 = OP_CVTEPI8_EPI32_M32( &cb_row_Q7[ 1 ] ); - C_tmp2 = _mm_slli_epi32( C_tmp2, 7 ); - C_tmp1 = _mm_sub_epi32( C_tmp1, C_tmp2 ); - - diff_Q14[ 1 ] = _mm_extract_epi16( C_tmp1, 0 ); - diff_Q14[ 2 ] = _mm_extract_epi16( C_tmp1, 2 ); - diff_Q14[ 3 ] = _mm_extract_epi16( C_tmp1, 4 ); - diff_Q14[ 4 ] = _mm_extract_epi16( C_tmp1, 6 ); - - /* Weighted rate */ - sum1_Q14 = silk_SMULBB( mu_Q9, cl_Q5[ k ] ); - - /* Penalty for too large gain */ - sum1_Q14 = silk_ADD_LSHIFT32( sum1_Q14, silk_max( silk_SUB32( gain_tmp_Q7, max_gain_Q7 ), 0 ), 10 ); - - silk_assert( sum1_Q14 >= 0 ); - - /* first row of W_Q18 */ - C_tmp3 = _mm_loadu_si128( (__m128i *)(&W_Q18[ 1 ] ) ); - C_tmp4 = _mm_mul_epi32( C_tmp3, C_tmp1 ); - C_tmp4 = _mm_srli_si128( C_tmp4, 2 ); - - C_tmp1 = _mm_shuffle_epi32( C_tmp1, _MM_SHUFFLE( 0, 3, 2, 1 ) ); /* shift right 4 bytes */ - C_tmp3 = _mm_shuffle_epi32( C_tmp3, _MM_SHUFFLE( 0, 3, 2, 1 ) ); /* shift right 4 bytes */ - - C_tmp5 = _mm_mul_epi32( C_tmp3, C_tmp1 ); - C_tmp5 = _mm_srli_si128( C_tmp5, 2 ); - - C_tmp5 = _mm_add_epi32( C_tmp4, C_tmp5 ); - C_tmp5 = _mm_slli_epi32( C_tmp5, 1 ); - - C_tmp5 = _mm_add_epi32( C_tmp5, _mm_shuffle_epi32( C_tmp5, _MM_SHUFFLE( 0, 0, 0, 2 ) ) ); - sum2_Q16 = _mm_cvtsi128_si32( C_tmp5 ); - - sum2_Q16 = silk_SMLAWB( sum2_Q16, W_Q18[ 0 ], diff_Q14[ 0 ] ); - sum1_Q14 = silk_SMLAWB( sum1_Q14, sum2_Q16, diff_Q14[ 0 ] ); - - /* second row of W_Q18 */ - sum2_Q16 = silk_SMULWB( W_Q18[ 7 ], diff_Q14[ 2 ] ); - sum2_Q16 = silk_SMLAWB( sum2_Q16, W_Q18[ 8 ], diff_Q14[ 3 ] ); - sum2_Q16 = silk_SMLAWB( sum2_Q16, W_Q18[ 9 ], diff_Q14[ 4 ] ); - sum2_Q16 = silk_LSHIFT( sum2_Q16, 1 ); - sum2_Q16 = silk_SMLAWB( sum2_Q16, W_Q18[ 6 ], diff_Q14[ 1 ] ); - sum1_Q14 = silk_SMLAWB( sum1_Q14, sum2_Q16, diff_Q14[ 1 ] ); - - /* third row of W_Q18 */ - sum2_Q16 = silk_SMULWB( W_Q18[ 13 ], diff_Q14[ 3 ] ); - sum2_Q16 = silk_SMLAWB( sum2_Q16, W_Q18[ 14 ], diff_Q14[ 4 ] ); - sum2_Q16 = silk_LSHIFT( sum2_Q16, 1 ); - sum2_Q16 = silk_SMLAWB( sum2_Q16, W_Q18[ 12 ], diff_Q14[ 2 ] ); - sum1_Q14 = silk_SMLAWB( sum1_Q14, sum2_Q16, diff_Q14[ 2 ] ); - - /* fourth row of W_Q18 */ - sum2_Q16 = silk_SMULWB( W_Q18[ 19 ], diff_Q14[ 4 ] ); - sum2_Q16 = silk_LSHIFT( sum2_Q16, 1 ); - sum2_Q16 = silk_SMLAWB( sum2_Q16, W_Q18[ 18 ], diff_Q14[ 3 ] ); - sum1_Q14 = silk_SMLAWB( sum1_Q14, sum2_Q16, diff_Q14[ 3 ] ); - - /* last row of W_Q18 */ - sum2_Q16 = silk_SMULWB( W_Q18[ 24 ], diff_Q14[ 4 ] ); - sum1_Q14 = silk_SMLAWB( sum1_Q14, sum2_Q16, diff_Q14[ 4 ] ); - - silk_assert( sum1_Q14 >= 0 ); - - /* find best */ - if( sum1_Q14 < *rate_dist_Q14 ) { - *rate_dist_Q14 = sum1_Q14; - *ind = (opus_int8)k; - *gain_Q7 = gain_tmp_Q7; - } - - /* Go to next cbk vector */ - cb_row_Q7 += LTP_ORDER; - } -} diff --git a/libs/libopus/silk/x86/VQ_WMat_EC_sse4_1.c b/libs/libopus/silk/x86/VQ_WMat_EC_sse4_1.c new file mode 100644 index 000000000..2c7d18d05 --- /dev/null +++ b/libs/libopus/silk/x86/VQ_WMat_EC_sse4_1.c @@ -0,0 +1,173 @@ +/* 
Copyright (c) 2014-2020, Cisco Systems, INC + Written by XiangMingZhu WeiZhou MinPeng YanWang FrancisQuiers + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + - Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + - Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER + OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifdef HAVE_CONFIG_H +#include "config.h" +#endif + +#include +#include +#include +#include "main.h" +#include "celt/x86/x86cpu.h" + +/* Entropy constrained matrix-weighted VQ, hard-coded to 5-element vectors, for a single input data vector */ +void silk_VQ_WMat_EC_sse4_1( + opus_int8 *ind, /* O index of best codebook vector */ + opus_int32 *res_nrg_Q15, /* O best residual energy */ + opus_int32 *rate_dist_Q8, /* O best total bitrate */ + opus_int *gain_Q7, /* O sum of absolute LTP coefficients */ + const opus_int32 *XX_Q17, /* I correlation matrix */ + const opus_int32 *xX_Q17, /* I correlation vector */ + const opus_int8 *cb_Q7, /* I codebook */ + const opus_uint8 *cb_gain_Q7, /* I codebook effective gain */ + const opus_uint8 *cl_Q5, /* I code length for each codebook vector */ + const opus_int subfr_len, /* I number of samples per subframe */ + const opus_int32 max_gain_Q7, /* I maximum sum of absolute LTP coefficients */ + const opus_int L /* I number of vectors in codebook */ +) +{ + opus_int k, gain_tmp_Q7; + const opus_int8 *cb_row_Q7; + opus_int32 neg_xX_Q24[ 5 ]; + opus_int32 sum1_Q15, sum2_Q24; + opus_int32 bits_res_Q8, bits_tot_Q8; + __m128i v_XX_31_Q17, v_XX_42_Q17, v_cb_row_31_Q7, v_cb_row_42_Q7, v_acc1_Q24, v_acc2_Q24; + + /* Negate and convert to new Q domain */ + neg_xX_Q24[ 0 ] = -silk_LSHIFT32( xX_Q17[ 0 ], 7 ); + neg_xX_Q24[ 1 ] = -silk_LSHIFT32( xX_Q17[ 1 ], 7 ); + neg_xX_Q24[ 2 ] = -silk_LSHIFT32( xX_Q17[ 2 ], 7 ); + neg_xX_Q24[ 3 ] = -silk_LSHIFT32( xX_Q17[ 3 ], 7 ); + neg_xX_Q24[ 4 ] = -silk_LSHIFT32( xX_Q17[ 4 ], 7 ); + + v_XX_31_Q17 = _mm_loadu_si128( (__m128i *)(&XX_Q17[ 1 ] ) ); + v_XX_42_Q17 = _mm_shuffle_epi32( v_XX_31_Q17, _MM_SHUFFLE( 0, 3, 2, 1 ) ); + + /* Loop over codebook */ + *rate_dist_Q8 = silk_int32_MAX; + *res_nrg_Q15 = silk_int32_MAX; + cb_row_Q7 = cb_Q7; + /* If things go really bad, at least *ind is set to something safe. 
*/ + *ind = 0; + for( k = 0; k < L; k++ ) { + opus_int32 penalty; + gain_tmp_Q7 = cb_gain_Q7[k]; + /* Weighted rate */ + /* Quantization error: 1 - 2 * xX * cb + cb' * XX * cb */ + sum1_Q15 = SILK_FIX_CONST( 1.001, 15 ); + + /* Penalty for too large gain */ + penalty = silk_LSHIFT32( silk_max( silk_SUB32( gain_tmp_Q7, max_gain_Q7 ), 0 ), 11 ); + + /* first row of XX_Q17 */ + v_cb_row_31_Q7 = OP_CVTEPI8_EPI32_M32( &cb_row_Q7[ 1 ] ); + v_cb_row_42_Q7 = _mm_shuffle_epi32( v_cb_row_31_Q7, _MM_SHUFFLE( 0, 3, 2, 1 ) ); + v_cb_row_31_Q7 = _mm_mul_epi32( v_XX_31_Q17, v_cb_row_31_Q7 ); + v_cb_row_42_Q7 = _mm_mul_epi32( v_XX_42_Q17, v_cb_row_42_Q7 ); + v_acc1_Q24 = _mm_add_epi64( v_cb_row_31_Q7, v_cb_row_42_Q7); + v_acc2_Q24 = _mm_shuffle_epi32( v_acc1_Q24, _MM_SHUFFLE( 1, 0, 3, 2 ) ); + v_acc1_Q24 = _mm_add_epi64( v_acc1_Q24, v_acc2_Q24); + sum2_Q24 = _mm_cvtsi128_si32( v_acc1_Q24 ); + sum2_Q24 = silk_ADD32( neg_xX_Q24[ 0 ], sum2_Q24 ); + sum2_Q24 = silk_LSHIFT32( sum2_Q24, 1 ); + sum2_Q24 = silk_MLA( sum2_Q24, XX_Q17[ 0 ], cb_row_Q7[ 0 ] ); + sum1_Q15 = silk_SMLAWB( sum1_Q15, sum2_Q24, cb_row_Q7[ 0 ] ); + + /* second row of XX_Q17 */ + sum2_Q24 = silk_MLA( neg_xX_Q24[ 1 ], XX_Q17[ 7 ], cb_row_Q7[ 2 ] ); + sum2_Q24 = silk_MLA( sum2_Q24, XX_Q17[ 8 ], cb_row_Q7[ 3 ] ); + sum2_Q24 = silk_MLA( sum2_Q24, XX_Q17[ 9 ], cb_row_Q7[ 4 ] ); + sum2_Q24 = silk_LSHIFT32( sum2_Q24, 1 ); + sum2_Q24 = silk_MLA( sum2_Q24, XX_Q17[ 6 ], cb_row_Q7[ 1 ] ); + sum1_Q15 = silk_SMLAWB( sum1_Q15, sum2_Q24, cb_row_Q7[ 1 ] ); + + /* third row of XX_Q17 */ + sum2_Q24 = silk_MLA( neg_xX_Q24[ 2 ], XX_Q17[ 13 ], cb_row_Q7[ 3 ] ); + sum2_Q24 = silk_MLA( sum2_Q24, XX_Q17[ 14 ], cb_row_Q7[ 4 ] ); + sum2_Q24 = silk_LSHIFT32( sum2_Q24, 1 ); + sum2_Q24 = silk_MLA( sum2_Q24, XX_Q17[ 12 ], cb_row_Q7[ 2 ] ); + sum1_Q15 = silk_SMLAWB( sum1_Q15, sum2_Q24, cb_row_Q7[ 2 ] ); + + /* fourth row of XX_Q17 */ + sum2_Q24 = silk_MLA( neg_xX_Q24[ 3 ], XX_Q17[ 19 ], cb_row_Q7[ 4 ] ); + sum2_Q24 = silk_LSHIFT32( sum2_Q24, 1 ); + sum2_Q24 = silk_MLA( sum2_Q24, XX_Q17[ 18 ], cb_row_Q7[ 3 ] ); + sum1_Q15 = silk_SMLAWB( sum1_Q15, sum2_Q24, cb_row_Q7[ 3 ] ); + + /* last row of XX_Q17 */ + sum2_Q24 = silk_LSHIFT32( neg_xX_Q24[ 4 ], 1 ); + sum2_Q24 = silk_MLA( sum2_Q24, XX_Q17[ 24 ], cb_row_Q7[ 4 ] ); + sum1_Q15 = silk_SMLAWB( sum1_Q15, sum2_Q24, cb_row_Q7[ 4 ] ); + + /* find best */ + if( sum1_Q15 >= 0 ) { + /* Translate residual energy to bits using high-rate assumption (6 dB ==> 1 bit/sample) */ + bits_res_Q8 = silk_SMULBB( subfr_len, silk_lin2log( sum1_Q15 + penalty) - (15 << 7) ); + /* In the following line we reduce the codelength component by half ("-1"); seems to slightly improve quality */ + bits_tot_Q8 = silk_ADD_LSHIFT32( bits_res_Q8, cl_Q5[ k ], 3-1 ); + if( bits_tot_Q8 <= *rate_dist_Q8 ) { + *rate_dist_Q8 = bits_tot_Q8; + *res_nrg_Q15 = sum1_Q15 + penalty; + *ind = (opus_int8)k; + *gain_Q7 = gain_tmp_Q7; + } + } + + /* Go to next cbk vector */ + cb_row_Q7 += LTP_ORDER; + } + +#ifdef OPUS_CHECK_ASM + { + opus_int8 ind_c = 0; + opus_int32 res_nrg_Q15_c = 0; + opus_int32 rate_dist_Q8_c = 0; + opus_int gain_Q7_c = 0; + + silk_VQ_WMat_EC_c( + &ind_c, + &res_nrg_Q15_c, + &rate_dist_Q8_c, + &gain_Q7_c, + XX_Q17, + xX_Q17, + cb_Q7, + cb_gain_Q7, + cl_Q5, + subfr_len, + max_gain_Q7, + L + ); + + silk_assert( *ind == ind_c ); + silk_assert( *res_nrg_Q15 == res_nrg_Q15_c ); + silk_assert( *rate_dist_Q8 == rate_dist_Q8_c ); + silk_assert( *gain_Q7 == gain_Q7_c ); + } +#endif +} diff --git a/libs/libopus/silk/x86/main_sse.h b/libs/libopus/silk/x86/main_sse.h 
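(Not part of the patch — a hedged floating-point reference with hypothetical names, not the fixed-point code itself.) Each candidate in the search loop above is scored by expanding the quadratic form 1 - 2*xX'*cb + cb'*XX*cb, translating the resulting residual energy to bits with the high-rate assumption noted in the comments (6 dB, i.e. a factor of 4 in energy, per bit per sample), and adding the codelength term at half weight:

#include <math.h>

#define VQ_DIM 5   /* LTP_ORDER in the real code */

static float vq_candidate_bits_ref(const float XX[VQ_DIM * VQ_DIM],
                                   const float xX[VQ_DIM],
                                   const float cb[VQ_DIM],
                                   int subfr_len,
                                   float codelength_bits,
                                   float gain_penalty)
{
    float err = 1.0f;
    for (int i = 0; i < VQ_DIM; i++) {
        err -= 2.0f * xX[i] * cb[i];
        for (int j = 0; j < VQ_DIM; j++)
            err += cb[i] * XX[VQ_DIM * i + j] * cb[j];
    }
    float e = err + gain_penalty;
    if (e <= 0.0f)
        return HUGE_VALF;   /* the fixed-point loop simply skips sum1_Q15 < 0 */
    /* 0.5 * log2(energy) bits per sample ~= 6 dB per bit, plus half the codelength */
    return (float)subfr_len * 0.5f * log2f(e) + 0.5f * codelength_bits;
}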
index d8d61310e..0a0391a2d 100644 --- a/libs/libopus/silk/x86/main_sse.h +++ b/libs/libopus/silk/x86/main_sse.h @@ -38,66 +38,68 @@ void silk_VQ_WMat_EC_sse4_1( opus_int8 *ind, /* O index of best codebook vector */ - opus_int32 *rate_dist_Q14, /* O best weighted quant error + mu * rate */ + opus_int32 *res_nrg_Q15, /* O best residual energy */ + opus_int32 *rate_dist_Q8, /* O best total bitrate */ opus_int *gain_Q7, /* O sum of absolute LTP coefficients */ - const opus_int16 *in_Q14, /* I input vector to be quantized */ - const opus_int32 *W_Q18, /* I weighting matrix */ + const opus_int32 *XX_Q17, /* I correlation matrix */ + const opus_int32 *xX_Q17, /* I correlation vector */ const opus_int8 *cb_Q7, /* I codebook */ const opus_uint8 *cb_gain_Q7, /* I codebook effective gain */ const opus_uint8 *cl_Q5, /* I code length for each codebook vector */ - const opus_int mu_Q9, /* I tradeoff betw. weighted error and rate */ + const opus_int subfr_len, /* I number of samples per subframe */ const opus_int32 max_gain_Q7, /* I maximum sum of absolute LTP coefficients */ - opus_int L /* I number of vectors in codebook */ + const opus_int L /* I number of vectors in codebook */ ); #if defined OPUS_X86_PRESUME_SSE4_1 -#define silk_VQ_WMat_EC(ind, rate_dist_Q14, gain_Q7, in_Q14, W_Q18, cb_Q7, cb_gain_Q7, cl_Q5, \ - mu_Q9, max_gain_Q7, L, arch) \ - ((void)(arch),silk_VQ_WMat_EC_sse4_1(ind, rate_dist_Q14, gain_Q7, in_Q14, W_Q18, cb_Q7, cb_gain_Q7, cl_Q5, \ - mu_Q9, max_gain_Q7, L)) +#define silk_VQ_WMat_EC(ind, res_nrg_Q15, rate_dist_Q8, gain_Q7, XX_Q17, xX_Q17, cb_Q7, cb_gain_Q7, cl_Q5, \ + subfr_len, max_gain_Q7, L, arch) \ + ((void)(arch),silk_VQ_WMat_EC_sse4_1(ind, res_nrg_Q15, rate_dist_Q8, gain_Q7, XX_Q17, xX_Q17, cb_Q7, cb_gain_Q7, cl_Q5, \ + subfr_len, max_gain_Q7, L)) #else extern void (*const SILK_VQ_WMAT_EC_IMPL[OPUS_ARCHMASK + 1])( opus_int8 *ind, /* O index of best codebook vector */ - opus_int32 *rate_dist_Q14, /* O best weighted quant error + mu * rate */ + opus_int32 *res_nrg_Q15, /* O best residual energy */ + opus_int32 *rate_dist_Q8, /* O best total bitrate */ opus_int *gain_Q7, /* O sum of absolute LTP coefficients */ - const opus_int16 *in_Q14, /* I input vector to be quantized */ - const opus_int32 *W_Q18, /* I weighting matrix */ + const opus_int32 *XX_Q17, /* I correlation matrix */ + const opus_int32 *xX_Q17, /* I correlation vector */ const opus_int8 *cb_Q7, /* I codebook */ const opus_uint8 *cb_gain_Q7, /* I codebook effective gain */ const opus_uint8 *cl_Q5, /* I code length for each codebook vector */ - const opus_int mu_Q9, /* I tradeoff betw. 
weighted error and rate */ + const opus_int subfr_len, /* I number of samples per subframe */ const opus_int32 max_gain_Q7, /* I maximum sum of absolute LTP coefficients */ - opus_int L /* I number of vectors in codebook */ + const opus_int L /* I number of vectors in codebook */ ); -# define silk_VQ_WMat_EC(ind, rate_dist_Q14, gain_Q7, in_Q14, W_Q18, cb_Q7, cb_gain_Q7, cl_Q5, \ - mu_Q9, max_gain_Q7, L, arch) \ - ((*SILK_VQ_WMAT_EC_IMPL[(arch) & OPUS_ARCHMASK])(ind, rate_dist_Q14, gain_Q7, in_Q14, W_Q18, cb_Q7, cb_gain_Q7, cl_Q5, \ - mu_Q9, max_gain_Q7, L)) +# define silk_VQ_WMat_EC(ind, res_nrg_Q15, rate_dist_Q8, gain_Q7, XX_Q17, xX_Q17, cb_Q7, cb_gain_Q7, cl_Q5, \ + subfr_len, max_gain_Q7, L, arch) \ + ((*SILK_VQ_WMAT_EC_IMPL[(arch) & OPUS_ARCHMASK])(ind, res_nrg_Q15, rate_dist_Q8, gain_Q7, XX_Q17, xX_Q17, cb_Q7, cb_gain_Q7, cl_Q5, \ + subfr_len, max_gain_Q7, L)) #endif # define OVERRIDE_silk_NSQ void silk_NSQ_sse4_1( - const silk_encoder_state *psEncC, /* I/O Encoder State */ - silk_nsq_state *NSQ, /* I/O NSQ state */ - SideInfoIndices *psIndices, /* I/O Quantization Indices */ - const opus_int32 x_Q3[], /* I Prefiltered input signal */ - opus_int8 pulses[], /* O Quantized pulse signal */ - const opus_int16 PredCoef_Q12[ 2 * MAX_LPC_ORDER ], /* I Short term prediction coefs */ - const opus_int16 LTPCoef_Q14[ LTP_ORDER * MAX_NB_SUBFR ], /* I Long term prediction coefs */ - const opus_int16 AR2_Q13[ MAX_NB_SUBFR * MAX_SHAPE_LPC_ORDER ], /* I Noise shaping coefs */ - const opus_int HarmShapeGain_Q14[ MAX_NB_SUBFR ], /* I Long term shaping coefs */ - const opus_int Tilt_Q14[ MAX_NB_SUBFR ], /* I Spectral tilt */ - const opus_int32 LF_shp_Q14[ MAX_NB_SUBFR ], /* I Low frequency shaping coefs */ - const opus_int32 Gains_Q16[ MAX_NB_SUBFR ], /* I Quantization step sizes */ - const opus_int pitchL[ MAX_NB_SUBFR ], /* I Pitch lags */ - const opus_int Lambda_Q10, /* I Rate/distortion tradeoff */ - const opus_int LTP_scale_Q14 /* I LTP state scaling */ + const silk_encoder_state *psEncC, /* I Encoder State */ + silk_nsq_state *NSQ, /* I/O NSQ state */ + SideInfoIndices *psIndices, /* I/O Quantization Indices */ + const opus_int16 x16[], /* I Input */ + opus_int8 pulses[], /* O Quantized pulse signal */ + const opus_int16 PredCoef_Q12[ 2 * MAX_LPC_ORDER ], /* I Short term prediction coefs */ + const opus_int16 LTPCoef_Q14[ LTP_ORDER * MAX_NB_SUBFR ], /* I Long term prediction coefs */ + const opus_int16 AR_Q13[ MAX_NB_SUBFR * MAX_SHAPE_LPC_ORDER ], /* I Noise shaping coefs */ + const opus_int HarmShapeGain_Q14[ MAX_NB_SUBFR ], /* I Long term shaping coefs */ + const opus_int Tilt_Q14[ MAX_NB_SUBFR ], /* I Spectral tilt */ + const opus_int32 LF_shp_Q14[ MAX_NB_SUBFR ], /* I Low frequency shaping coefs */ + const opus_int32 Gains_Q16[ MAX_NB_SUBFR ], /* I Quantization step sizes */ + const opus_int pitchL[ MAX_NB_SUBFR ], /* I Pitch lags */ + const opus_int Lambda_Q10, /* I Rate/distortion tradeoff */ + const opus_int LTP_scale_Q14 /* I LTP state scaling */ ); #if defined OPUS_X86_PRESUME_SSE4_1 @@ -110,21 +112,21 @@ void silk_NSQ_sse4_1( #else extern void (*const SILK_NSQ_IMPL[OPUS_ARCHMASK + 1])( - const silk_encoder_state *psEncC, /* I/O Encoder State */ - silk_nsq_state *NSQ, /* I/O NSQ state */ - SideInfoIndices *psIndices, /* I/O Quantization Indices */ - const opus_int32 x_Q3[], /* I Prefiltered input signal */ - opus_int8 pulses[], /* O Quantized pulse signal */ - const opus_int16 PredCoef_Q12[ 2 * MAX_LPC_ORDER ], /* I Short term prediction coefs */ - const opus_int16 LTPCoef_Q14[ LTP_ORDER 
* MAX_NB_SUBFR ], /* I Long term prediction coefs */ - const opus_int16 AR2_Q13[ MAX_NB_SUBFR * MAX_SHAPE_LPC_ORDER ], /* I Noise shaping coefs */ - const opus_int HarmShapeGain_Q14[ MAX_NB_SUBFR ], /* I Long term shaping coefs */ - const opus_int Tilt_Q14[ MAX_NB_SUBFR ], /* I Spectral tilt */ - const opus_int32 LF_shp_Q14[ MAX_NB_SUBFR ], /* I Low frequency shaping coefs */ - const opus_int32 Gains_Q16[ MAX_NB_SUBFR ], /* I Quantization step sizes */ - const opus_int pitchL[ MAX_NB_SUBFR ], /* I Pitch lags */ - const opus_int Lambda_Q10, /* I Rate/distortion tradeoff */ - const opus_int LTP_scale_Q14 /* I LTP state scaling */ + const silk_encoder_state *psEncC, /* I Encoder State */ + silk_nsq_state *NSQ, /* I/O NSQ state */ + SideInfoIndices *psIndices, /* I/O Quantization Indices */ + const opus_int16 x16[], /* I Input */ + opus_int8 pulses[], /* O Quantized pulse signal */ + const opus_int16 PredCoef_Q12[ 2 * MAX_LPC_ORDER ], /* I Short term prediction coefs */ + const opus_int16 LTPCoef_Q14[ LTP_ORDER * MAX_NB_SUBFR ], /* I Long term prediction coefs */ + const opus_int16 AR_Q13[ MAX_NB_SUBFR * MAX_SHAPE_LPC_ORDER ], /* I Noise shaping coefs */ + const opus_int HarmShapeGain_Q14[ MAX_NB_SUBFR ], /* I Long term shaping coefs */ + const opus_int Tilt_Q14[ MAX_NB_SUBFR ], /* I Spectral tilt */ + const opus_int32 LF_shp_Q14[ MAX_NB_SUBFR ], /* I Low frequency shaping coefs */ + const opus_int32 Gains_Q16[ MAX_NB_SUBFR ], /* I Quantization step sizes */ + const opus_int pitchL[ MAX_NB_SUBFR ], /* I Pitch lags */ + const opus_int Lambda_Q10, /* I Rate/distortion tradeoff */ + const opus_int LTP_scale_Q14 /* I LTP state scaling */ ); # define silk_NSQ(psEncC, NSQ, psIndices, x_Q3, pulses, PredCoef_Q12, LTPCoef_Q14, AR2_Q13, \ @@ -137,53 +139,53 @@ extern void (*const SILK_NSQ_IMPL[OPUS_ARCHMASK + 1])( # define OVERRIDE_silk_NSQ_del_dec void silk_NSQ_del_dec_sse4_1( - const silk_encoder_state *psEncC, /* I/O Encoder State */ - silk_nsq_state *NSQ, /* I/O NSQ state */ - SideInfoIndices *psIndices, /* I/O Quantization Indices */ - const opus_int32 x_Q3[], /* I Prefiltered input signal */ - opus_int8 pulses[], /* O Quantized pulse signal */ - const opus_int16 PredCoef_Q12[ 2 * MAX_LPC_ORDER ], /* I Short term prediction coefs */ - const opus_int16 LTPCoef_Q14[ LTP_ORDER * MAX_NB_SUBFR ], /* I Long term prediction coefs */ - const opus_int16 AR2_Q13[ MAX_NB_SUBFR * MAX_SHAPE_LPC_ORDER ], /* I Noise shaping coefs */ - const opus_int HarmShapeGain_Q14[ MAX_NB_SUBFR ], /* I Long term shaping coefs */ - const opus_int Tilt_Q14[ MAX_NB_SUBFR ], /* I Spectral tilt */ - const opus_int32 LF_shp_Q14[ MAX_NB_SUBFR ], /* I Low frequency shaping coefs */ - const opus_int32 Gains_Q16[ MAX_NB_SUBFR ], /* I Quantization step sizes */ - const opus_int pitchL[ MAX_NB_SUBFR ], /* I Pitch lags */ - const opus_int Lambda_Q10, /* I Rate/distortion tradeoff */ - const opus_int LTP_scale_Q14 /* I LTP state scaling */ + const silk_encoder_state *psEncC, /* I Encoder State */ + silk_nsq_state *NSQ, /* I/O NSQ state */ + SideInfoIndices *psIndices, /* I/O Quantization Indices */ + const opus_int16 x16[], /* I Input */ + opus_int8 pulses[], /* O Quantized pulse signal */ + const opus_int16 PredCoef_Q12[ 2 * MAX_LPC_ORDER ], /* I Short term prediction coefs */ + const opus_int16 LTPCoef_Q14[ LTP_ORDER * MAX_NB_SUBFR ], /* I Long term prediction coefs */ + const opus_int16 AR_Q13[ MAX_NB_SUBFR * MAX_SHAPE_LPC_ORDER ], /* I Noise shaping coefs */ + const opus_int HarmShapeGain_Q14[ MAX_NB_SUBFR ], /* I Long term shaping 
coefs */ + const opus_int Tilt_Q14[ MAX_NB_SUBFR ], /* I Spectral tilt */ + const opus_int32 LF_shp_Q14[ MAX_NB_SUBFR ], /* I Low frequency shaping coefs */ + const opus_int32 Gains_Q16[ MAX_NB_SUBFR ], /* I Quantization step sizes */ + const opus_int pitchL[ MAX_NB_SUBFR ], /* I Pitch lags */ + const opus_int Lambda_Q10, /* I Rate/distortion tradeoff */ + const opus_int LTP_scale_Q14 /* I LTP state scaling */ ); #if defined OPUS_X86_PRESUME_SSE4_1 -#define silk_NSQ_del_dec(psEncC, NSQ, psIndices, x_Q3, pulses, PredCoef_Q12, LTPCoef_Q14, AR2_Q13, \ +#define silk_NSQ_del_dec(psEncC, NSQ, psIndices, x16, pulses, PredCoef_Q12, LTPCoef_Q14, AR_Q13, \ HarmShapeGain_Q14, Tilt_Q14, LF_shp_Q14, Gains_Q16, pitchL, Lambda_Q10, LTP_scale_Q14, arch) \ - ((void)(arch),silk_NSQ_del_dec_sse4_1(psEncC, NSQ, psIndices, x_Q3, pulses, PredCoef_Q12, LTPCoef_Q14, AR2_Q13, \ + ((void)(arch),silk_NSQ_del_dec_sse4_1(psEncC, NSQ, psIndices, x16, pulses, PredCoef_Q12, LTPCoef_Q14, AR_Q13, \ HarmShapeGain_Q14, Tilt_Q14, LF_shp_Q14, Gains_Q16, pitchL, Lambda_Q10, LTP_scale_Q14)) #else extern void (*const SILK_NSQ_DEL_DEC_IMPL[OPUS_ARCHMASK + 1])( - const silk_encoder_state *psEncC, /* I/O Encoder State */ - silk_nsq_state *NSQ, /* I/O NSQ state */ - SideInfoIndices *psIndices, /* I/O Quantization Indices */ - const opus_int32 x_Q3[], /* I Prefiltered input signal */ - opus_int8 pulses[], /* O Quantized pulse signal */ - const opus_int16 PredCoef_Q12[ 2 * MAX_LPC_ORDER ], /* I Short term prediction coefs */ - const opus_int16 LTPCoef_Q14[ LTP_ORDER * MAX_NB_SUBFR ], /* I Long term prediction coefs */ - const opus_int16 AR2_Q13[ MAX_NB_SUBFR * MAX_SHAPE_LPC_ORDER ], /* I Noise shaping coefs */ - const opus_int HarmShapeGain_Q14[ MAX_NB_SUBFR ], /* I Long term shaping coefs */ - const opus_int Tilt_Q14[ MAX_NB_SUBFR ], /* I Spectral tilt */ - const opus_int32 LF_shp_Q14[ MAX_NB_SUBFR ], /* I Low frequency shaping coefs */ - const opus_int32 Gains_Q16[ MAX_NB_SUBFR ], /* I Quantization step sizes */ - const opus_int pitchL[ MAX_NB_SUBFR ], /* I Pitch lags */ - const opus_int Lambda_Q10, /* I Rate/distortion tradeoff */ - const opus_int LTP_scale_Q14 /* I LTP state scaling */ + const silk_encoder_state *psEncC, /* I Encoder State */ + silk_nsq_state *NSQ, /* I/O NSQ state */ + SideInfoIndices *psIndices, /* I/O Quantization Indices */ + const opus_int16 x16[], /* I Input */ + opus_int8 pulses[], /* O Quantized pulse signal */ + const opus_int16 PredCoef_Q12[ 2 * MAX_LPC_ORDER ], /* I Short term prediction coefs */ + const opus_int16 LTPCoef_Q14[ LTP_ORDER * MAX_NB_SUBFR ], /* I Long term prediction coefs */ + const opus_int16 AR_Q13[ MAX_NB_SUBFR * MAX_SHAPE_LPC_ORDER ], /* I Noise shaping coefs */ + const opus_int HarmShapeGain_Q14[ MAX_NB_SUBFR ], /* I Long term shaping coefs */ + const opus_int Tilt_Q14[ MAX_NB_SUBFR ], /* I Spectral tilt */ + const opus_int32 LF_shp_Q14[ MAX_NB_SUBFR ], /* I Low frequency shaping coefs */ + const opus_int32 Gains_Q16[ MAX_NB_SUBFR ], /* I Quantization step sizes */ + const opus_int pitchL[ MAX_NB_SUBFR ], /* I Pitch lags */ + const opus_int Lambda_Q10, /* I Rate/distortion tradeoff */ + const opus_int LTP_scale_Q14 /* I LTP state scaling */ ); -# define silk_NSQ_del_dec(psEncC, NSQ, psIndices, x_Q3, pulses, PredCoef_Q12, LTPCoef_Q14, AR2_Q13, \ +# define silk_NSQ_del_dec(psEncC, NSQ, psIndices, x16, pulses, PredCoef_Q12, LTPCoef_Q14, AR_Q13, \ HarmShapeGain_Q14, Tilt_Q14, LF_shp_Q14, Gains_Q16, pitchL, Lambda_Q10, LTP_scale_Q14, arch) \ - ((*SILK_NSQ_DEL_DEC_IMPL[(arch) & 
OPUS_ARCHMASK])(psEncC, NSQ, psIndices, x_Q3, pulses, PredCoef_Q12, LTPCoef_Q14, AR2_Q13, \ + ((*SILK_NSQ_DEL_DEC_IMPL[(arch) & OPUS_ARCHMASK])(psEncC, NSQ, psIndices, x16, pulses, PredCoef_Q12, LTPCoef_Q14, AR_Q13, \ HarmShapeGain_Q14, Tilt_Q14, LF_shp_Q14, Gains_Q16, pitchL, Lambda_Q10, LTP_scale_Q14)) #endif @@ -238,39 +240,6 @@ extern opus_int (*const SILK_VAD_GETSA_Q8_IMPL[OPUS_ARCHMASK + 1])( silk_encoder_state *psEnC, const opus_int16 pIn[]); -# define OVERRIDE_silk_warped_LPC_analysis_filter_FIX - -#endif - -void silk_warped_LPC_analysis_filter_FIX_sse4_1( - opus_int32 state[], /* I/O State [order + 1] */ - opus_int32 res_Q2[], /* O Residual signal [length] */ - const opus_int16 coef_Q13[], /* I Coefficients [order] */ - const opus_int16 input[], /* I Input signal [length] */ - const opus_int16 lambda_Q16, /* I Warping factor */ - const opus_int length, /* I Length of input signal */ - const opus_int order /* I Filter order (even) */ -); - -#if defined(OPUS_X86_PRESUME_SSE4_1) -#define silk_warped_LPC_analysis_filter_FIX(state, res_Q2, coef_Q13, input, lambda_Q16, length, order, arch) \ - ((void)(arch),silk_warped_LPC_analysis_filter_FIX_c(state, res_Q2, coef_Q13, input, lambda_Q16, length, order)) - -#else - -extern void (*const SILK_WARPED_LPC_ANALYSIS_FILTER_FIX_IMPL[OPUS_ARCHMASK + 1])( - opus_int32 state[], /* I/O State [order + 1] */ - opus_int32 res_Q2[], /* O Residual signal [length] */ - const opus_int16 coef_Q13[], /* I Coefficients [order] */ - const opus_int16 input[], /* I Input signal [length] */ - const opus_int16 lambda_Q16, /* I Warping factor */ - const opus_int length, /* I Length of input signal */ - const opus_int order /* I Filter order (even) */ -); - -# define silk_warped_LPC_analysis_filter_FIX(state, res_Q2, coef_Q13, input, lambda_Q16, length, order, arch) \ - ((*SILK_WARPED_LPC_ANALYSIS_FILTER_FIX_IMPL[(arch) & OPUS_ARCHMASK])(state, res_Q2, coef_Q13, input, lambda_Q16, length, order)) - #endif # endif diff --git a/libs/libopus/silk/x86/x86_silk_map.c b/libs/libopus/silk/x86/x86_silk_map.c index 818841f2c..ca13cde91 100644 --- a/libs/libopus/silk/x86/x86_silk_map.c +++ b/libs/libopus/silk/x86/x86_silk_map.c @@ -41,16 +41,16 @@ #include "fixed/main_FIX.h" -opus_int64 (*const SILK_INNER_PROD16_ALIGNED_64_IMPL[ OPUS_ARCHMASK + 1 ] )( +opus_int64 (*const SILK_INNER_PROD16_IMPL[ OPUS_ARCHMASK + 1 ] )( const opus_int16 *inVec1, const opus_int16 *inVec2, const opus_int len ) = { - silk_inner_prod16_aligned_64_c, /* non-sse */ - silk_inner_prod16_aligned_64_c, - silk_inner_prod16_aligned_64_c, - MAY_HAVE_SSE4_1( silk_inner_prod16_aligned_64 ), /* sse4.1 */ - MAY_HAVE_SSE4_1( silk_inner_prod16_aligned_64 ) /* avx */ + silk_inner_prod16_c, /* non-sse */ + silk_inner_prod16_c, + silk_inner_prod16_c, + MAY_HAVE_SSE4_1( silk_inner_prod16 ), /* sse4.1 */ + MAY_HAVE_SSE4_1( silk_inner_prod16 ) /* avx */ }; #endif @@ -67,21 +67,21 @@ opus_int (*const SILK_VAD_GETSA_Q8_IMPL[ OPUS_ARCHMASK + 1 ] )( }; void (*const SILK_NSQ_IMPL[ OPUS_ARCHMASK + 1 ] )( - const silk_encoder_state *psEncC, /* I/O Encoder State */ - silk_nsq_state *NSQ, /* I/O NSQ state */ - SideInfoIndices *psIndices, /* I/O Quantization Indices */ - const opus_int32 x_Q3[], /* I Prefiltered input signal */ - opus_int8 pulses[], /* O Quantized pulse signal */ - const opus_int16 PredCoef_Q12[ 2 * MAX_LPC_ORDER ], /* I Short term prediction coefs */ - const opus_int16 LTPCoef_Q14[ LTP_ORDER * MAX_NB_SUBFR ], /* I Long term prediction coefs */ - const opus_int16 AR2_Q13[ MAX_NB_SUBFR * MAX_SHAPE_LPC_ORDER ], /* 
I Noise shaping coefs */ - const opus_int HarmShapeGain_Q14[ MAX_NB_SUBFR ], /* I Long term shaping coefs */ - const opus_int Tilt_Q14[ MAX_NB_SUBFR ], /* I Spectral tilt */ - const opus_int32 LF_shp_Q14[ MAX_NB_SUBFR ], /* I Low frequency shaping coefs */ - const opus_int32 Gains_Q16[ MAX_NB_SUBFR ], /* I Quantization step sizes */ - const opus_int pitchL[ MAX_NB_SUBFR ], /* I Pitch lags */ - const opus_int Lambda_Q10, /* I Rate/distortion tradeoff */ - const opus_int LTP_scale_Q14 /* I LTP state scaling */ + const silk_encoder_state *psEncC, /* I Encoder State */ + silk_nsq_state *NSQ, /* I/O NSQ state */ + SideInfoIndices *psIndices, /* I/O Quantization Indices */ + const opus_int16 x16[], /* I Input */ + opus_int8 pulses[], /* O Quantized pulse signal */ + const opus_int16 PredCoef_Q12[ 2 * MAX_LPC_ORDER ], /* I Short term prediction coefs */ + const opus_int16 LTPCoef_Q14[ LTP_ORDER * MAX_NB_SUBFR ], /* I Long term prediction coefs */ + const opus_int16 AR_Q13[ MAX_NB_SUBFR * MAX_SHAPE_LPC_ORDER ], /* I Noise shaping coefs */ + const opus_int HarmShapeGain_Q14[ MAX_NB_SUBFR ], /* I Long term shaping coefs */ + const opus_int Tilt_Q14[ MAX_NB_SUBFR ], /* I Spectral tilt */ + const opus_int32 LF_shp_Q14[ MAX_NB_SUBFR ], /* I Low frequency shaping coefs */ + const opus_int32 Gains_Q16[ MAX_NB_SUBFR ], /* I Quantization step sizes */ + const opus_int pitchL[ MAX_NB_SUBFR ], /* I Pitch lags */ + const opus_int Lambda_Q10, /* I Rate/distortion tradeoff */ + const opus_int LTP_scale_Q14 /* I LTP state scaling */ ) = { silk_NSQ_c, /* non-sse */ silk_NSQ_c, @@ -92,16 +92,17 @@ void (*const SILK_NSQ_IMPL[ OPUS_ARCHMASK + 1 ] )( void (*const SILK_VQ_WMAT_EC_IMPL[ OPUS_ARCHMASK + 1 ] )( opus_int8 *ind, /* O index of best codebook vector */ - opus_int32 *rate_dist_Q14, /* O best weighted quant error + mu * rate */ + opus_int32 *res_nrg_Q15, /* O best residual energy */ + opus_int32 *rate_dist_Q8, /* O best total bitrate */ opus_int *gain_Q7, /* O sum of absolute LTP coefficients */ - const opus_int16 *in_Q14, /* I input vector to be quantized */ - const opus_int32 *W_Q18, /* I weighting matrix */ + const opus_int32 *XX_Q17, /* I correlation matrix */ + const opus_int32 *xX_Q17, /* I correlation vector */ const opus_int8 *cb_Q7, /* I codebook */ const opus_uint8 *cb_gain_Q7, /* I codebook effective gain */ const opus_uint8 *cl_Q5, /* I code length for each codebook vector */ - const opus_int mu_Q9, /* I tradeoff betw. 
weighted error and rate */ + const opus_int subfr_len, /* I number of samples per subframe */ const opus_int32 max_gain_Q7, /* I maximum sum of absolute LTP coefficients */ - opus_int L /* I number of vectors in codebook */ + const opus_int L /* I number of vectors in codebook */ ) = { silk_VQ_WMat_EC_c, /* non-sse */ silk_VQ_WMat_EC_c, @@ -111,21 +112,21 @@ void (*const SILK_VQ_WMAT_EC_IMPL[ OPUS_ARCHMASK + 1 ] )( }; void (*const SILK_NSQ_DEL_DEC_IMPL[ OPUS_ARCHMASK + 1 ] )( - const silk_encoder_state *psEncC, /* I/O Encoder State */ - silk_nsq_state *NSQ, /* I/O NSQ state */ - SideInfoIndices *psIndices, /* I/O Quantization Indices */ - const opus_int32 x_Q3[], /* I Prefiltered input signal */ - opus_int8 pulses[], /* O Quantized pulse signal */ - const opus_int16 PredCoef_Q12[ 2 * MAX_LPC_ORDER ], /* I Short term prediction coefs */ - const opus_int16 LTPCoef_Q14[ LTP_ORDER * MAX_NB_SUBFR ], /* I Long term prediction coefs */ - const opus_int16 AR2_Q13[ MAX_NB_SUBFR * MAX_SHAPE_LPC_ORDER ], /* I Noise shaping coefs */ - const opus_int HarmShapeGain_Q14[ MAX_NB_SUBFR ], /* I Long term shaping coefs */ - const opus_int Tilt_Q14[ MAX_NB_SUBFR ], /* I Spectral tilt */ - const opus_int32 LF_shp_Q14[ MAX_NB_SUBFR ], /* I Low frequency shaping coefs */ - const opus_int32 Gains_Q16[ MAX_NB_SUBFR ], /* I Quantization step sizes */ - const opus_int pitchL[ MAX_NB_SUBFR ], /* I Pitch lags */ - const opus_int Lambda_Q10, /* I Rate/distortion tradeoff */ - const opus_int LTP_scale_Q14 /* I LTP state scaling */ + const silk_encoder_state *psEncC, /* I Encoder State */ + silk_nsq_state *NSQ, /* I/O NSQ state */ + SideInfoIndices *psIndices, /* I/O Quantization Indices */ + const opus_int16 x16[], /* I Input */ + opus_int8 pulses[], /* O Quantized pulse signal */ + const opus_int16 PredCoef_Q12[ 2 * MAX_LPC_ORDER ], /* I Short term prediction coefs */ + const opus_int16 LTPCoef_Q14[ LTP_ORDER * MAX_NB_SUBFR ], /* I Long term prediction coefs */ + const opus_int16 AR_Q13[ MAX_NB_SUBFR * MAX_SHAPE_LPC_ORDER ], /* I Noise shaping coefs */ + const opus_int HarmShapeGain_Q14[ MAX_NB_SUBFR ], /* I Long term shaping coefs */ + const opus_int Tilt_Q14[ MAX_NB_SUBFR ], /* I Spectral tilt */ + const opus_int32 LF_shp_Q14[ MAX_NB_SUBFR ], /* I Low frequency shaping coefs */ + const opus_int32 Gains_Q16[ MAX_NB_SUBFR ], /* I Quantization step sizes */ + const opus_int pitchL[ MAX_NB_SUBFR ], /* I Pitch lags */ + const opus_int Lambda_Q10, /* I Rate/distortion tradeoff */ + const opus_int LTP_scale_Q14 /* I LTP state scaling */ ) = { silk_NSQ_del_dec_c, /* non-sse */ silk_NSQ_del_dec_c, @@ -136,22 +137,6 @@ void (*const SILK_NSQ_DEL_DEC_IMPL[ OPUS_ARCHMASK + 1 ] )( #if defined(FIXED_POINT) -void (*const SILK_WARPED_LPC_ANALYSIS_FILTER_FIX_IMPL[ OPUS_ARCHMASK + 1 ] )( - opus_int32 state[], /* I/O State [order + 1] */ - opus_int32 res_Q2[], /* O Residual signal [length] */ - const opus_int16 coef_Q13[], /* I Coefficients [order] */ - const opus_int16 input[], /* I Input signal [length] */ - const opus_int16 lambda_Q16, /* I Warping factor */ - const opus_int length, /* I Length of input signal */ - const opus_int order /* I Filter order (even) */ -) = { - silk_warped_LPC_analysis_filter_FIX_c, /* non-sse */ - silk_warped_LPC_analysis_filter_FIX_c, - silk_warped_LPC_analysis_filter_FIX_c, - MAY_HAVE_SSE4_1( silk_warped_LPC_analysis_filter_FIX ), /* sse4.1 */ - MAY_HAVE_SSE4_1( silk_warped_LPC_analysis_filter_FIX ) /* avx */ -}; - void (*const SILK_BURG_MODIFIED_IMPL[ OPUS_ARCHMASK + 1 ] )( opus_int32 *res_nrg, /* O 
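(Not part of the patch — an illustrative sketch with hypothetical names.) The tables in x86_silk_map.c above all follow the same run-time-dispatch pattern: one function pointer per supported x86 level (non-SSE, SSE, SSE2, SSE4.1, AVX), with the wrapper macro in main_sse.h indexing the table by the detected arch value, so callers never name the SSE4.1 variant directly:

#include <stdint.h>

typedef int64_t (*inner_prod16_fn)(const int16_t *a, const int16_t *b, int len);

static int64_t inner_prod16_c(const int16_t *a, const int16_t *b, int len)
{
    int64_t sum = 0;
    for (int i = 0; i < len; i++)
        sum += (int64_t)a[i] * b[i];
    return sum;
}

/* A real SIMD variant would fill the last two slots; the C one stands in here. */
static inner_prod16_fn const INNER_PROD16_IMPL[5] = {
    inner_prod16_c,   /* non-sse */
    inner_prod16_c,   /* sse     */
    inner_prod16_c,   /* sse2    */
    inner_prod16_c,   /* sse4.1  */
    inner_prod16_c    /* avx     */
};

/* arch is assumed to be 0..4 here; the real macros mask it with OPUS_ARCHMASK. */
#define inner_prod16(a, b, len, arch) ((*INNER_PROD16_IMPL[(arch)])(a, b, len))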
Residual energy */ opus_int *res_nrg_Q, /* O Residual energy Q value */ diff --git a/libs/libopus/sources.mozbuild b/libs/libopus/sources.mozbuild index 34de399f4..a939dd53c 100644 --- a/libs/libopus/sources.mozbuild +++ b/libs/libopus/sources.mozbuild @@ -1,7 +1,6 @@ # THIS FILE WAS AUTOMATICALLY GENERATED BY gen-sources.py. DO NOT EDIT. celt_sources = [ 'celt/bands.c', - 'celt/celt.c', 'celt/celt_lpc.c', 'celt/cwrs.c', 'celt/entcode.c', @@ -19,6 +18,8 @@ celt_sources = [ ] opus_nonunified_sources = [ + # Disabled because of undefined reference to celt_fatal at link time + 'celt/celt.c', # Disabled because of name clash of opus_custom_encoder_get_size. 'celt/celt_decoder.c', 'celt/celt_encoder.c', @@ -35,10 +36,11 @@ celt_sources_sse = [ celt_sources_sse2 = [ 'celt/x86/pitch_sse2.c', + 'celt/x86/vq_sse2.c', ] celt_sources_sse4_1 = [ - 'celt/x86/celt_lpc_sse.c', + 'celt/x86/celt_lpc_sse4_1.c', 'celt/x86/pitch_sse4_1.c', ] @@ -56,19 +58,25 @@ celt_am_sources_arm_asm = [ ] celt_sources_arm_neon_intr = [ - 'celt/arm/celt_ne10_fft.c', - 'celt/arm/celt_ne10_mdct.c', 'celt/arm/celt_neon_intr.c', - 'CELT_SOURCES_ARM_NE10=', + 'celt/arm/pitch_neon_intr.c', +] + +celt_sources_arm_ne10 = [ + 'celt/arm/celt_fft_ne10.c', + 'celt/arm/celt_mdct_ne10.c', ] opus_sources = [ + 'src/mapping_matrix.c', 'src/opus.c', 'src/opus_decoder.c', 'src/opus_encoder.c', 'src/opus_multistream.c', 'src/opus_multistream_decoder.c', 'src/opus_multistream_encoder.c', + 'src/opus_projection_decoder.c', + 'src/opus_projection_encoder.c', 'src/repacketizer.c', ] @@ -112,6 +120,7 @@ silk_sources = [ 'silk/log2lin.c', 'silk/LP_variable_cutoff.c', 'silk/LPC_analysis_filter.c', + 'silk/LPC_fit.c', 'silk/NLSF_decode.c', 'silk/NLSF_del_dec_quant.c', 'silk/NLSF_encode.c', @@ -156,15 +165,18 @@ silk_sources = [ ] silk_sources_sse4_1 = [ - 'silk/x86/NSQ_del_dec_sse.c', - 'silk/x86/NSQ_sse.c', - 'silk/x86/VAD_sse.c', - 'silk/x86/VQ_WMat_EC_sse.c', + 'silk/x86/NSQ_del_dec_sse4_1.c', + 'silk/x86/NSQ_sse4_1.c', + 'silk/x86/VAD_sse4_1.c', + 'silk/x86/VQ_WMat_EC_sse4_1.c', 'silk/x86/x86_silk_map.c', ] silk_sources_arm_neon_intr = [ 'silk/arm/arm_silk_map.c', + 'silk/arm/biquad_alt_neon_intr.c', + 'silk/arm/LPC_inv_pred_gain_neon_intr.c', + 'silk/arm/NSQ_del_dec_neon_intr.c', 'silk/arm/NSQ_neon.c', ] @@ -184,22 +196,23 @@ silk_sources_fixed = [ 'silk/fixed/LTP_scale_ctrl_FIX.c', 'silk/fixed/noise_shape_analysis_FIX.c', 'silk/fixed/pitch_analysis_core_FIX.c', - 'silk/fixed/prefilter_FIX.c', 'silk/fixed/process_gains_FIX.c', 'silk/fixed/regularize_correlations_FIX.c', 'silk/fixed/residual_energy16_FIX.c', 'silk/fixed/residual_energy_FIX.c', 'silk/fixed/schur64_FIX.c', 'silk/fixed/schur_FIX.c', - 'silk/fixed/solve_LS_FIX.c', 'silk/fixed/vector_ops_FIX.c', 'silk/fixed/warped_autocorrelation_FIX.c', ] silk_sources_fixed_sse4_1 = [ - 'silk/fixed/x86/burg_modified_FIX_sse.c', - 'silk/fixed/x86/prefilter_FIX_sse.c', - 'silk/fixed/x86/vector_ops_FIX_sse.c', + 'silk/fixed/x86/burg_modified_FIX_sse4_1.c', + 'silk/fixed/x86/vector_ops_FIX_sse4_1.c', +] + +silk_sources_fixed_arm_neon_intr = [ + 'silk/fixed/arm/warped_autocorrelation_FIX_neon_intr.c', ] silk_sources_float = [ @@ -216,21 +229,18 @@ silk_sources_float = [ 'silk/float/find_pred_coefs_FLP.c', 'silk/float/inner_product_FLP.c', 'silk/float/k2a_FLP.c', - 'silk/float/levinsondurbin_FLP.c', 'silk/float/LPC_analysis_filter_FLP.c', 'silk/float/LPC_inv_pred_gain_FLP.c', 'silk/float/LTP_analysis_filter_FLP.c', 'silk/float/LTP_scale_ctrl_FLP.c', 'silk/float/noise_shape_analysis_FLP.c', 
'silk/float/pitch_analysis_core_FLP.c', - 'silk/float/prefilter_FLP.c', 'silk/float/process_gains_FLP.c', 'silk/float/regularize_correlations_FLP.c', 'silk/float/residual_energy_FLP.c', 'silk/float/scale_copy_vector_FLP.c', 'silk/float/scale_vector_FLP.c', 'silk/float/schur_FLP.c', - 'silk/float/solve_LS_FLP.c', 'silk/float/sort_FLP.c', 'silk/float/warped_autocorrelation_FLP.c', 'silk/float/wrappers_FLP.c', diff --git a/libs/libopus/src/analysis.c b/libs/libopus/src/analysis.c index 663431a43..058328f0f 100644 --- a/libs/libopus/src/analysis.c +++ b/libs/libopus/src/analysis.c @@ -29,20 +29,31 @@ #include "config.h" #endif +#define ANALYSIS_C + +#ifdef MLP_TRAINING +#include +#endif + +#include "mathops.h" #include "kiss_fft.h" #include "celt.h" #include "modes.h" #include "arch.h" #include "quant_bands.h" -#include #include "analysis.h" #include "mlp.h" #include "stack_alloc.h" +#include "float_cast.h" #ifndef M_PI #define M_PI 3.141592653 #endif +#ifndef DISABLE_FLOAT_API + +#define TRANSITION_PENALTY 10 + static const float dct_table[128] = { 0.250000f, 0.250000f, 0.250000f, 0.250000f, 0.250000f, 0.250000f, 0.250000f, 0.250000f, 0.250000f, 0.250000f, 0.250000f, 0.250000f, 0.250000f, 0.250000f, 0.250000f, 0.250000f, @@ -96,52 +107,118 @@ static const float analysis_window[240] = { }; static const int tbands[NB_TBANDS+1] = { - 2, 4, 6, 8, 10, 12, 14, 16, 20, 24, 28, 32, 40, 48, 56, 68, 80, 96, 120 -}; - -static const int extra_bands[NB_TOT_BANDS+1] = { - 1, 2, 4, 6, 8, 10, 12, 14, 16, 20, 24, 28, 32, 40, 48, 56, 68, 80, 96, 120, 160, 200 + 4, 8, 12, 16, 20, 24, 28, 32, 40, 48, 56, 64, 80, 96, 112, 136, 160, 192, 240 }; -/*static const float tweight[NB_TBANDS+1] = { - .3, .4, .5, .6, .7, .8, .9, 1., 1., 1., 1., 1., 1., 1., .8, .7, .6, .5 -};*/ - #define NB_TONAL_SKIP_BANDS 9 -#define cA 0.43157974f -#define cB 0.67848403f -#define cC 0.08595542f -#define cE ((float)M_PI/2) -static OPUS_INLINE float fast_atan2f(float y, float x) { - float x2, y2; - /* Should avoid underflow on the values we'll get */ - if (ABS16(x)+ABS16(y)<1e-9f) +static opus_val32 silk_resampler_down2_hp( + opus_val32 *S, /* I/O State vector [ 2 ] */ + opus_val32 *out, /* O Output signal [ floor(len/2) ] */ + const opus_val32 *in, /* I Input signal [ len ] */ + int inLen /* I Number of input samples */ +) +{ + int k, len2 = inLen/2; + opus_val32 in32, out32, out32_hp, Y, X; + opus_val64 hp_ener = 0; + /* Internal variables and state are in Q10 format */ + for( k = 0; k < len2; k++ ) { + /* Convert to Q10 */ + in32 = in[ 2 * k ]; + + /* All-pass section for even input sample */ + Y = SUB32( in32, S[ 0 ] ); + X = MULT16_32_Q15(QCONST16(0.6074371f, 15), Y); + out32 = ADD32( S[ 0 ], X ); + S[ 0 ] = ADD32( in32, X ); + out32_hp = out32; + /* Convert to Q10 */ + in32 = in[ 2 * k + 1 ]; + + /* All-pass section for odd input sample, and add to output of previous section */ + Y = SUB32( in32, S[ 1 ] ); + X = MULT16_32_Q15(QCONST16(0.15063f, 15), Y); + out32 = ADD32( out32, S[ 1 ] ); + out32 = ADD32( out32, X ); + S[ 1 ] = ADD32( in32, X ); + + Y = SUB32( -in32, S[ 2 ] ); + X = MULT16_32_Q15(QCONST16(0.15063f, 15), Y); + out32_hp = ADD32( out32_hp, S[ 2 ] ); + out32_hp = ADD32( out32_hp, X ); + S[ 2 ] = ADD32( -in32, X ); + + hp_ener += out32_hp*(opus_val64)out32_hp; + /* Add, convert back to int16 and store to output */ + out[ k ] = HALF32(out32); + } +#ifdef FIXED_POINT + /* len2 can be up to 480, so we shift by 8 more to make it fit. 
*/ + hp_ener = hp_ener >> (2*SIG_SHIFT + 8); +#endif + return (opus_val32)hp_ener; +} + +static opus_val32 downmix_and_resample(downmix_func downmix, const void *_x, opus_val32 *y, opus_val32 S[3], int subframe, int offset, int c1, int c2, int C, int Fs) +{ + VARDECL(opus_val32, tmp); + opus_val32 scale; + int j; + opus_val32 ret = 0; + SAVE_STACK; + + if (subframe==0) return 0; + if (Fs == 48000) { - x*=1e12f; - y*=1e12f; + subframe *= 2; + offset *= 2; + } else if (Fs == 16000) { + subframe = subframe*2/3; + offset = offset*2/3; } - x2 = x*x; - y2 = y*y; - if(x2-1) + scale /= 2; + for (j=0;jarch = opus_select_arch(); + tonal->Fs = Fs; /* Clear remaining fields. */ tonality_analysis_reset(tonal); } @@ -157,15 +234,34 @@ void tonality_get_info(TonalityAnalysisState *tonal, AnalysisInfo *info_out, int { int pos; int curr_lookahead; - float psum; + float tonality_max; + float tonality_avg; + int tonality_count; int i; + int pos0; + float prob_avg; + float prob_count; + float prob_min, prob_max; + float vad_prob; + int mpos, vpos; + int bandwidth_span; pos = tonal->read_pos; curr_lookahead = tonal->write_pos-tonal->read_pos; if (curr_lookahead<0) curr_lookahead += DETECT_SIZE; - if (len > 480 && pos != tonal->write_pos) + tonal->read_subframe += len/(tonal->Fs/400); + while (tonal->read_subframe>=8) + { + tonal->read_subframe -= 8; + tonal->read_pos++; + } + if (tonal->read_pos>=DETECT_SIZE) + tonal->read_pos-=DETECT_SIZE; + + /* On long frames, look at the second analysis window rather than the first. */ + if (len > tonal->Fs/50 && pos != tonal->write_pos) { pos++; if (pos==DETECT_SIZE) @@ -175,33 +271,178 @@ void tonality_get_info(TonalityAnalysisState *tonal, AnalysisInfo *info_out, int pos--; if (pos<0) pos = DETECT_SIZE-1; + pos0 = pos; OPUS_COPY(info_out, &tonal->info[pos], 1); - tonal->read_subframe += len/120; - while (tonal->read_subframe>=4) + if (!info_out->valid) + return; + tonality_max = tonality_avg = info_out->tonality; + tonality_count = 1; + /* Look at the neighbouring frames and pick largest bandwidth found (to be safe). */ + bandwidth_span = 6; + /* If possible, look ahead for a tone to compensate for the delay in the tone detector. */ + for (i=0;i<3;i++) { - tonal->read_subframe -= 4; - tonal->read_pos++; + pos++; + if (pos==DETECT_SIZE) + pos = 0; + if (pos == tonal->write_pos) + break; + tonality_max = MAX32(tonality_max, tonal->info[pos].tonality); + tonality_avg += tonal->info[pos].tonality; + tonality_count++; + info_out->bandwidth = IMAX(info_out->bandwidth, tonal->info[pos].bandwidth); + bandwidth_span--; } - if (tonal->read_pos>=DETECT_SIZE) - tonal->read_pos-=DETECT_SIZE; + pos = pos0; + /* Look back in time to see if any has a wider bandwidth than the current frame. */ + for (i=0;iwrite_pos) + break; + info_out->bandwidth = IMAX(info_out->bandwidth, tonal->info[pos].bandwidth); + } + info_out->tonality = MAX32(tonality_avg/tonality_count, tonality_max-.2f); + + mpos = vpos = pos0; + /* If we have enough look-ahead, compensate for the ~5-frame delay in the music prob and + ~1 frame delay in the VAD prob. */ + if (curr_lookahead > 15) + { + mpos += 5; + if (mpos>=DETECT_SIZE) + mpos -= DETECT_SIZE; + vpos += 1; + if (vpos>=DETECT_SIZE) + vpos -= DETECT_SIZE; + } + + /* The following calculations attempt to minimize a "badness function" + for the transition. 
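The read pointer above advances in 2.5 ms sub-frames (Fs/400 samples), eight of which make up one 20 ms analysis window. A minimal standalone sketch of the same arithmetic, assuming Fs = 48000, one 20 ms input frame, and the DETECT_SIZE of 100 defined later in this patch; the main() wrapper and the printed values are only illustrative:

#include <stdio.h>

int main(void)
{
   const int Fs = 48000;
   const int detect_size = 100;         /* DETECT_SIZE from analysis.h */
   int read_pos = 0, read_subframe = 0;
   int len = 960;                       /* one 20 ms frame at 48 kHz */
   read_subframe += len/(Fs/400);       /* 960/120 = 8 sub-frames of 2.5 ms */
   while (read_subframe >= 8) {         /* a full 20 ms window was consumed */
      read_subframe -= 8;
      read_pos++;                       /* move to the next AnalysisInfo slot */
   }
   if (read_pos >= detect_size)         /* wrap around the circular buffer */
      read_pos -= detect_size;
   printf("read_pos=%d read_subframe=%d\n", read_pos, read_subframe); /* 1 0 */
   return 0;
}
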
When switching from speech to music, the badness + of switching at frame k is + b_k = S*v_k + \sum_{i=0}^{k-1} v_i*(p_i - T) + where + v_i is the activity probability (VAD) at frame i, + p_i is the music probability at frame i + T is the probability threshold for switching + S is the penalty for switching during active audio rather than silence + the current frame has index i=0 + + Rather than apply badness to directly decide when to switch, what we compute + instead is the threshold for which the optimal switching point is now. When + considering whether to switch now (frame 0) or at frame k, we have: + S*v_0 = S*v_k + \sum_{i=0}^{k-1} v_i*(p_i - T) + which gives us: + T = ( \sum_{i=0}^{k-1} v_i*p_i + S*(v_k-v_0) ) / ( \sum_{i=0}^{k-1} v_i ) + We take the min threshold across all positive values of k (up to the maximum + amount of lookahead we have) to give us the threshold for which the current + frame is the optimal switch point. + + The last step is that we need to consider whether we want to switch at all. + For that we use the average of the music probability over the entire window. + If the threshold is higher than that average we're not going to + switch, so we compute a min with the average as well. The result of all these + min operations is music_prob_min, which gives the threshold for switching to music + if we're currently encoding for speech. + + We do the exact opposite to compute music_prob_max which is used for switching + from music to speech. + */ + prob_min = 1.f; + prob_max = 0.f; + vad_prob = tonal->info[vpos].activity_probability; + prob_count = MAX16(.1f, vad_prob); + prob_avg = MAX16(.1f, vad_prob)*tonal->info[mpos].music_prob; + while (1) + { + float pos_vad; + mpos++; + if (mpos==DETECT_SIZE) + mpos = 0; + if (mpos == tonal->write_pos) + break; + vpos++; + if (vpos==DETECT_SIZE) + vpos = 0; + if (vpos == tonal->write_pos) + break; + pos_vad = tonal->info[vpos].activity_probability; + prob_min = MIN16((prob_avg - TRANSITION_PENALTY*(vad_prob - pos_vad))/prob_count, prob_min); + prob_max = MAX16((prob_avg + TRANSITION_PENALTY*(vad_prob - pos_vad))/prob_count, prob_max); + prob_count += MAX16(.1f, pos_vad); + prob_avg += MAX16(.1f, pos_vad)*tonal->info[mpos].music_prob; + } + info_out->music_prob = prob_avg/prob_count; + prob_min = MIN16(prob_avg/prob_count, prob_min); + prob_max = MAX16(prob_avg/prob_count, prob_max); + prob_min = MAX16(prob_min, 0.f); + prob_max = MIN16(prob_max, 1.f); + + /* If we don't have enough look-ahead, do our best to make a decent decision. */ + if (curr_lookahead < 10) + { + float pmin, pmax; + pmin = prob_min; + pmax = prob_max; + pos = pos0; + /* Look for min/max in the past. */ + for (i=0;icount-1, 15);i++) + { + pos--; + if (pos < 0) + pos = DETECT_SIZE-1; + pmin = MIN16(pmin, tonal->info[pos].music_prob); + pmax = MAX16(pmax, tonal->info[pos].music_prob); + } + /* Bias against switching on active audio. */ + pmin = MAX16(0.f, pmin - .1f*vad_prob); + pmax = MIN16(1.f, pmax + .1f*vad_prob); + prob_min += (1.f-.1f*curr_lookahead)*(pmin - prob_min); + prob_max += (1.f-.1f*curr_lookahead)*(pmax - prob_max); + } + info_out->music_prob_min = prob_min; + info_out->music_prob_max = prob_max; - /* Compensate for the delay in the features themselves. 
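A toy evaluation of that threshold, assuming a hypothetical four-frame window of VAD probabilities v[] and music probabilities p[], with the switching penalty S set to the TRANSITION_PENALTY of 10 defined above; the real loop additionally floors each v_i at 0.1 and clamps the resulting threshold into [0, 1]:

#include <stdio.h>

int main(void)
{
   const float S = 10.f;                            /* TRANSITION_PENALTY */
   const float v[4] = {0.90f, 0.90f, 0.90f, 0.95f}; /* hypothetical VAD probs */
   const float p[4] = {0.60f, 0.70f, 0.80f, 0.90f}; /* hypothetical music probs */
   float sum_vp = v[0]*p[0], sum_v = v[0];
   float T_min = 1.f;
   int k;
   for (k = 1; k < 4; k++) {
      /* T = ( sum_{i<k} v_i*p_i + S*(v_k - v_0) ) / sum_{i<k} v_i */
      float T = (sum_vp + S*(v[k] - v[0]))/sum_v;
      if (T < T_min)
         T_min = T;
      sum_vp += v[k]*p[k];
      sum_v  += v[k];
   }
   printf("threshold for switching to music now: %.2f\n", T_min); /* 0.60 */
   return 0;
}
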
- FIXME: Need a better estimate the 10 I just made up */ - curr_lookahead = IMAX(curr_lookahead-10, 0); - - psum=0; - /* Summing the probability of transition patterns that involve music at - time (DETECT_SIZE-curr_lookahead-1) */ - for (i=0;ipmusic[i]; - for (;ipspeech[i]; - psum = psum*tonal->music_confidence + (1-psum)*tonal->speech_confidence; - /*printf("%f %f %f\n", psum, info_out->music_prob, info_out->tonality);*/ - - info_out->music_prob = psum; + /* printf("%f %f %f %f %f\n", prob_min, prob_max, prob_avg/prob_count, vad_prob, info_out->music_prob); */ } +static const float std_feature_bias[9] = { + 5.684947f, 3.475288f, 1.770634f, 1.599784f, 3.773215f, + 2.163313f, 1.260756f, 1.116868f, 1.918795f +}; + +#define LEAKAGE_OFFSET 2.5f +#define LEAKAGE_SLOPE 2.f + +#ifdef FIXED_POINT +/* For fixed-point, the input is +/-2^15 shifted up by SIG_SHIFT, so we need to + compensate for that in the energy. */ +#define SCALE_COMPENS (1.f/((opus_int32)1<<(15+SIG_SHIFT))) +#define SCALE_ENER(e) ((SCALE_COMPENS*SCALE_COMPENS)*(e)) +#else +#define SCALE_ENER(e) (e) +#endif + +#ifdef FIXED_POINT +static int is_digital_silence32(const opus_val32* pcm, int frame_size, int channels, int lsb_depth) +{ + int silence = 0; + opus_val32 sample_max = 0; +#ifdef MLP_TRAINING + return 0; +#endif + sample_max = celt_maxabs32(pcm, frame_size*channels); + + silence = (sample_max == 0); + (void)lsb_depth; + return silence; +} +#else +#define is_digital_silence32(pcm, frame_size, channels, lsb_depth) is_digital_silence(pcm, frame_size, channels, lsb_depth) +#endif + static void tonality_analysis(TonalityAnalysisState *tonal, const CELTMode *celt_mode, const void *x, int len, int offset, int c1, int c2, int C, int lsb_depth, downmix_func downmix) { int i, b; @@ -230,24 +471,50 @@ static void tonality_analysis(TonalityAnalysisState *tonal, const CELTMode *celt float alpha, alphaE, alphaE2; float frame_loudness; float bandwidth_mask; + int is_masked[NB_TBANDS+1]; int bandwidth=0; float maxE = 0; float noise_floor; int remaining; AnalysisInfo *info; + float hp_ener; + float tonality2[240]; + float midE[8]; + float spec_variability=0; + float band_log2[NB_TBANDS+1]; + float leakage_from[NB_TBANDS+1]; + float leakage_to[NB_TBANDS+1]; + float layer_out[MAX_NEURONS]; + float below_max_pitch; + float above_max_pitch; + int is_silence; SAVE_STACK; - tonal->last_transition++; - alpha = 1.f/IMIN(20, 1+tonal->count); - alphaE = 1.f/IMIN(50, 1+tonal->count); - alphaE2 = 1.f/IMIN(1000, 1+tonal->count); + if (!tonal->initialized) + { + tonal->mem_fill = 240; + tonal->initialized = 1; + } + alpha = 1.f/IMIN(10, 1+tonal->count); + alphaE = 1.f/IMIN(25, 1+tonal->count); + /* Noise floor related decay for bandwidth detection: -2.2 dB/second */ + alphaE2 = 1.f/IMIN(100, 1+tonal->count); + if (tonal->count <= 1) alphaE2 = 1; + + if (tonal->Fs == 48000) + { + /* len and offset are now at 24 kHz. 
*/ + len/= 2; + offset /= 2; + } else if (tonal->Fs == 16000) { + len = 3*len/2; + offset = 3*offset/2; + } - if (tonal->count<4) - tonal->music_prob = .5; kfft = celt_mode->mdct.kfft[0]; - if (tonal->count==0) - tonal->mem_fill = 240; - downmix(x, &tonal->inmem[tonal->mem_fill], IMIN(len, ANALYSIS_BUF_SIZE-tonal->mem_fill), offset, c1, c2, C); + tonal->hp_ener_accum += (float)downmix_and_resample(downmix, x, + &tonal->inmem[tonal->mem_fill], tonal->downmix_state, + IMIN(len, ANALYSIS_BUF_SIZE-tonal->mem_fill), offset, c1, c2, C, tonal->Fs); if (tonal->mem_fill+len < ANALYSIS_BUF_SIZE) { tonal->mem_fill += len; @@ -255,10 +522,13 @@ static void tonality_analysis(TonalityAnalysisState *tonal, const CELTMode *celt RESTORE_STACK; return; } + hp_ener = tonal->hp_ener_accum; info = &tonal->info[tonal->write_pos++]; if (tonal->write_pos>=DETECT_SIZE) tonal->write_pos-=DETECT_SIZE; + is_silence = is_digital_silence32(tonal->inmem, ANALYSIS_BUF_SIZE, 1, lsb_depth); + ALLOC(in, 480, kiss_fft_cpx); ALLOC(out, 480, kiss_fft_cpx); ALLOC(tonality, 240, float); @@ -273,8 +543,20 @@ static void tonality_analysis(TonalityAnalysisState *tonal, const CELTMode *celt } OPUS_MOVE(tonal->inmem, tonal->inmem+ANALYSIS_BUF_SIZE-240, 240); remaining = len - (ANALYSIS_BUF_SIZE-tonal->mem_fill); - downmix(x, &tonal->inmem[240], remaining, offset+ANALYSIS_BUF_SIZE-tonal->mem_fill, c1, c2, C); + tonal->hp_ener_accum = (float)downmix_and_resample(downmix, x, + &tonal->inmem[240], tonal->downmix_state, remaining, + offset+ANALYSIS_BUF_SIZE-tonal->mem_fill, c1, c2, C, tonal->Fs); tonal->mem_fill = 240 + remaining; + if (is_silence) + { + /* On silence, copy the previous analysis. */ + int prev_pos = tonal->write_pos-2; + if (prev_pos < 0) + prev_pos += DETECT_SIZE; + OPUS_COPY(info, &tonal->info[prev_pos], 1); + RESTORE_STACK; + return; + } opus_fft(kfft, in, out, tonal->arch); #ifndef FIXED_POINT /* If there's any NaN on the input, the entire output will be NaN, so we only need to check one value. */ @@ -305,24 +587,31 @@ static void tonality_analysis(TonalityAnalysisState *tonal, const CELTMode *celt d_angle2 = angle2 - angle; d2_angle2 = d_angle2 - d_angle; - mod1 = d2_angle - (float)floor(.5+d2_angle); + mod1 = d2_angle - (float)float2int(d2_angle); noisiness[i] = ABS16(mod1); mod1 *= mod1; mod1 *= mod1; - mod2 = d2_angle2 - (float)floor(.5+d2_angle2); + mod2 = d2_angle2 - (float)float2int(d2_angle2); noisiness[i] += ABS16(mod2); mod2 *= mod2; mod2 *= mod2; - avg_mod = .25f*(d2A[i]+2.f*mod1+mod2); + avg_mod = .25f*(d2A[i]+mod1+2*mod2); + /* This introduces an extra delay of 2 frames in the detection. */ tonality[i] = 1.f/(1.f+40.f*16.f*pi4*avg_mod)-.015f; + /* No delay on this detection, but it's less reliable. 
*/ + tonality2[i] = 1.f/(1.f+40.f*16.f*pi4*mod2)-.015f; A[i] = angle2; dA[i] = d_angle2; d2A[i] = mod2; } - + for (i=2;ilowE[b] = MIN32(logE[b], tonal->lowE[b]+.01f); - tonal->highE[b] = MAX32(logE[b], tonal->highE[b]-.1f); - if (tonal->highE[b] < tonal->lowE[b]+1.f) + band_log2[b+1] = .5f*1.442695f*(float)log(E+1e-10f); + tonal->logE[tonal->E_count][b] = logE[b]; + if (tonal->count==0) + tonal->highE[b] = tonal->lowE[b] = logE[b]; + if (tonal->highE[b] > tonal->lowE[b] + 7.5) { - tonal->highE[b]+=.5f; - tonal->lowE[b]-=.5f; + if (tonal->highE[b] - logE[b] > logE[b] - tonal->lowE[b]) + tonal->highE[b] -= .01f; + else + tonal->lowE[b] += .01f; } - relativeE += (logE[b]-tonal->lowE[b])/(1e-15f+tonal->highE[b]-tonal->lowE[b]); + if (logE[b] > tonal->highE[b]) + { + tonal->highE[b] = logE[b]; + tonal->lowE[b] = MAX32(tonal->highE[b]-15, tonal->lowE[b]); + } else if (logE[b] < tonal->lowE[b]) + { + tonal->lowE[b] = logE[b]; + tonal->highE[b] = MIN32(tonal->lowE[b]+15, tonal->highE[b]); + } + relativeE += (logE[b]-tonal->lowE[b])/(1e-5f + (tonal->highE[b]-tonal->lowE[b])); L1=L2=0; for (i=0;iprev_band_tonality[b] = band_tonality[b]; } + leakage_from[0] = band_log2[0]; + leakage_to[0] = band_log2[0] - LEAKAGE_OFFSET; + for (b=1;b=0;b--) + { + float leak_slope = LEAKAGE_SLOPE*(tbands[b+1]-tbands[b])/4; + leakage_from[b] = MIN16(leakage_from[b+1]+leak_slope, leakage_from[b]); + leakage_to[b] = MAX16(leakage_to[b+1]-leak_slope, leakage_to[b]); + } + celt_assert(NB_TBANDS+1 <= LEAK_BANDS); + for (b=0;bleak_boost[b] = IMIN(255, (int)floor(.5 + 64.f*boost)); + } + for (;bleak_boost[b] = 0; + + for (i=0;ilogE[i][k] - tonal->logE[j][k]; + dist += tmp*tmp; + } + if (j!=i) + mindist = MIN32(mindist, dist); + } + spec_variability += mindist; + } + spec_variability = (float)sqrt(spec_variability/NB_FRAMES/NB_TBANDS); bandwidth_mask = 0; bandwidth = 0; maxE = 0; noise_floor = 5.7e-4f/(1<<(IMAX(0,lsb_depth-8))); -#ifdef FIXED_POINT - noise_floor *= 1<<(15+SIG_SHIFT); -#endif noise_floor *= noise_floor; - for (b=0;bmeanE[b] = MAX32((1-alphaE2)*tonal->meanE[b], E); - E = MAX32(E, tonal->meanE[b]); - /* Use a simple follower with 13 dB/Bark slope for spreading function */ - bandwidth_mask = MAX32(.05f*bandwidth_mask, E); + Em = MAX32(E, tonal->meanE[b]); /* Consider the band "active" only if all these conditions are met: - 1) less than 10 dB below the simple follower - 2) less than 90 dB below the peak band (maximal masking possible considering + 1) less than 90 dB below the peak band (maximal masking possible considering both the ATH and the loudness-dependent slope of the spreading function) - 3) above the PCM quantization noise floor + 2) above the PCM quantization noise floor + We use b+1 because the first CELT band isn't included in tbands[] */ - if (E>.1*bandwidth_mask && E*1e9f > maxE && E > noise_floor*(band_end-band_start)) - bandwidth = b; + if (E*1e9f > maxE && (Em > 3*noise_floor*(band_end-band_start) || E > noise_floor*(band_end-band_start))) + bandwidth = b+1; + /* Check if the band is masked (see below). */ + is_masked[b] = E < (tonal->prev_bandwidth >= b+1 ? .01f : .05f)*bandwidth_mask; + /* Use a simple follower with 13 dB/Bark slope for spreading function. */ + bandwidth_mask = MAX32(.05f*bandwidth_mask, E); } + /* Special case for the last two bands, for which we don't have spectrum but only + the energy above 12 kHz. The difficulty here is that the high-pass we use + leaks some LF energy, so we need to increase the threshold without accidentally cutting + off the band. 
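For reference, a worked evaluation of the PCM noise-floor test used in the band loop above, assuming 16-bit input (lsb_depth = 16) and a band 8 bins wide; the loop also accepts a band whose long-term mean energy Em exceeds three times the same threshold:

#include <stdio.h>

int main(void)
{
   int lsb_depth = 16;
   int band_width = 8;                               /* band_end - band_start */
   float noise_floor = 5.7e-4f/(1<<(lsb_depth-8 > 0 ? lsb_depth-8 : 0));
   noise_floor *= noise_floor;                       /* amplitude -> energy */
   printf("per-band energy threshold: %g\n", band_width*noise_floor); /* ~4e-11 */
   return 0;
}
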
*/ + if (tonal->Fs == 48000) { + float noise_ratio; + float Em; + float E = hp_ener*(1.f/(60*60)); + noise_ratio = tonal->prev_bandwidth==20 ? 10.f : 30.f; + +#ifdef FIXED_POINT + /* silk_resampler_down2_hp() shifted right by an extra 8 bits. */ + E *= 256.f*(1.f/Q15ONE)*(1.f/Q15ONE); +#endif + above_max_pitch += E; + tonal->meanE[b] = MAX32((1-alphaE2)*tonal->meanE[b], E); + Em = MAX32(E, tonal->meanE[b]); + if (Em > 3*noise_ratio*noise_floor*160 || E > noise_ratio*noise_floor*160) + bandwidth = 20; + /* Check if the band is masked (see below). */ + is_masked[b] = E < (tonal->prev_bandwidth == 20 ? .01f : .05f)*bandwidth_mask; + } + if (above_max_pitch > below_max_pitch) + info->max_pitch_ratio = below_max_pitch/above_max_pitch; + else + info->max_pitch_ratio = 1; + /* In some cases, resampling aliasing can create a small amount of energy in the first band + being cut. So if the last band is masked, we don't include it. */ + if (bandwidth == 20 && is_masked[NB_TBANDS]) + bandwidth-=2; + else if (bandwidth > 0 && bandwidth <= NB_TBANDS && is_masked[bandwidth-1]) + bandwidth--; if (tonal->count<=2) bandwidth = 20; frame_loudness = 20*(float)log10(frame_loudness); - tonal->Etracker = MAX32(tonal->Etracker-.03f, frame_loudness); + tonal->Etracker = MAX32(tonal->Etracker-.003f, frame_loudness); tonal->lowECount *= (1-alphaE); if (frame_loudness < tonal->Etracker-30) tonal->lowECount += alphaE; @@ -460,11 +865,18 @@ static void tonality_analysis(TonalityAnalysisState *tonal, const CELTMode *celt sum += dct_table[i*16+b]*logE[b]; BFCC[i] = sum; } + for (i=0;i<8;i++) + { + float sum=0; + for (b=0;b<16;b++) + sum += dct_table[i*16+b]*.5f*(tonal->highE[b]+tonal->lowE[b]); + midE[i] = sum; + } frame_stationarity /= NB_TBANDS; relativeE /= NB_TBANDS; if (tonal->count<10) - relativeE = .5; + relativeE = .5f; frame_noisiness /= NB_TBANDS; #if 1 info->activity = frame_noisiness + (1-frame_noisiness)*relativeE; @@ -479,7 +891,7 @@ static void tonality_analysis(TonalityAnalysisState *tonal, const CELTMode *celt info->tonality_slope = slope; tonal->E_count = (tonal->E_count+1)%NB_FRAMES; - tonal->count++; + tonal->count = IMIN(tonal->count+1, ANALYSIS_COUNT_MAX); info->tonality = frame_tonality; for (i=0;i<4;i++) @@ -498,6 +910,8 @@ static void tonality_analysis(TonalityAnalysisState *tonal, const CELTMode *celt for (i=0;i<9;i++) tonal->std[i] = (1-alpha)*tonal->std[i] + alpha*features[i]*features[i]; } + for (i=0;i<4;i++) + features[i] = BFCC[i]-midE[i]; for (i=0;i<8;i++) { @@ -507,136 +921,31 @@ static void tonality_analysis(TonalityAnalysisState *tonal, const CELTMode *celt tonal->mem[i] = BFCC[i]; } for (i=0;i<9;i++) - features[11+i] = (float)sqrt(tonal->std[i]); - features[20] = info->tonality; - features[21] = info->activity; - features[22] = frame_stationarity; - features[23] = info->tonality_slope; - features[24] = tonal->lowECount; - -#ifndef DISABLE_FLOAT_API - mlp_process(&net, features, frame_probs); - frame_probs[0] = .5f*(frame_probs[0]+1); - /* Curve fitting between the MLP probability and the actual probability */ - frame_probs[0] = .01f + 1.21f*frame_probs[0]*frame_probs[0] - .23f*(float)pow(frame_probs[0], 10); - /* Probability of active audio (as opposed to silence) */ - frame_probs[1] = .5f*frame_probs[1]+.5f; - /* Consider that silence has a 50-50 probability. 
*/ - frame_probs[0] = frame_probs[1]*frame_probs[0] + (1-frame_probs[1])*.5f; - - /*printf("%f %f ", frame_probs[0], frame_probs[1]);*/ - { - /* Probability of state transition */ - float tau; - /* Represents independence of the MLP probabilities, where - beta=1 means fully independent. */ - float beta; - /* Denormalized probability of speech (p0) and music (p1) after update */ - float p0, p1; - /* Probabilities for "all speech" and "all music" */ - float s0, m0; - /* Probability sum for renormalisation */ - float psum; - /* Instantaneous probability of speech and music, with beta pre-applied. */ - float speech0; - float music0; - float p, q; - - /* One transition every 3 minutes of active audio */ - tau = .00005f*frame_probs[1]; - /* Adapt beta based on how "unexpected" the new prob is */ - p = MAX16(.05f,MIN16(.95f,frame_probs[0])); - q = MAX16(.05f,MIN16(.95f,tonal->music_prob)); - beta = .01f+.05f*ABS16(p-q)/(p*(1-q)+q*(1-p)); - /* p0 and p1 are the probabilities of speech and music at this frame - using only information from previous frame and applying the - state transition model */ - p0 = (1-tonal->music_prob)*(1-tau) + tonal->music_prob *tau; - p1 = tonal->music_prob *(1-tau) + (1-tonal->music_prob)*tau; - /* We apply the current probability with exponent beta to work around - the fact that the probability estimates aren't independent. */ - p0 *= (float)pow(1-frame_probs[0], beta); - p1 *= (float)pow(frame_probs[0], beta); - /* Normalise the probabilities to get the Marokv probability of music. */ - tonal->music_prob = p1/(p0+p1); - info->music_prob = tonal->music_prob; - - /* This chunk of code deals with delayed decision. */ - psum=1e-20f; - /* Instantaneous probability of speech and music, with beta pre-applied. */ - speech0 = (float)pow(1-frame_probs[0], beta); - music0 = (float)pow(frame_probs[0], beta); - if (tonal->count==1) - { - tonal->pspeech[0]=.5; - tonal->pmusic [0]=.5; - } - /* Updated probability of having only speech (s0) or only music (m0), - before considering the new observation. */ - s0 = tonal->pspeech[0] + tonal->pspeech[1]; - m0 = tonal->pmusic [0] + tonal->pmusic [1]; - /* Updates s0 and m0 with instantaneous probability. */ - tonal->pspeech[0] = s0*(1-tau)*speech0; - tonal->pmusic [0] = m0*(1-tau)*music0; - /* Propagate the transition probabilities */ - for (i=1;ipspeech[i] = tonal->pspeech[i+1]*speech0; - tonal->pmusic [i] = tonal->pmusic [i+1]*music0; - } - /* Probability that the latest frame is speech, when all the previous ones were music. */ - tonal->pspeech[DETECT_SIZE-1] = m0*tau*speech0; - /* Probability that the latest frame is music, when all the previous ones were speech. 
*/ - tonal->pmusic [DETECT_SIZE-1] = s0*tau*music0; - - /* Renormalise probabilities to 1 */ - for (i=0;ipspeech[i] + tonal->pmusic[i]; - psum = 1.f/psum; - for (i=0;ipspeech[i] *= psum; - tonal->pmusic [i] *= psum; - } - psum = tonal->pmusic[0]; - for (i=1;ipspeech[i]; - - /* Estimate our confidence in the speech/music decisions */ - if (frame_probs[1]>.75) - { - if (tonal->music_prob>.9) - { - float adapt; - adapt = 1.f/(++tonal->music_confidence_count); - tonal->music_confidence_count = IMIN(tonal->music_confidence_count, 500); - tonal->music_confidence += adapt*MAX16(-.2f,frame_probs[0]-tonal->music_confidence); - } - if (tonal->music_prob<.1) - { - float adapt; - adapt = 1.f/(++tonal->speech_confidence_count); - tonal->speech_confidence_count = IMIN(tonal->speech_confidence_count, 500); - tonal->speech_confidence += adapt*MIN16(.2f,frame_probs[0]-tonal->speech_confidence); - } - } else { - if (tonal->music_confidence_count==0) - tonal->music_confidence = .9f; - if (tonal->speech_confidence_count==0) - tonal->speech_confidence = .1f; - } - } - if (tonal->last_music != (tonal->music_prob>.5f)) - tonal->last_transition=0; - tonal->last_music = tonal->music_prob>.5f; -#else - info->music_prob = 0; -#endif - /*for (i=0;i<25;i++) + features[11+i] = (float)sqrt(tonal->std[i]) - std_feature_bias[i]; + features[18] = spec_variability - 0.78f; + features[20] = info->tonality - 0.154723f; + features[21] = info->activity - 0.724643f; + features[22] = frame_stationarity - 0.743717f; + features[23] = info->tonality_slope + 0.069216f; + features[24] = tonal->lowECount - 0.067930f; + + compute_dense(&layer0, layer_out, features); + compute_gru(&layer1, tonal->rnn_state, layer_out); + compute_dense(&layer2, frame_probs, tonal->rnn_state); + + /* Probability of speech or music vs noise */ + info->activity_probability = frame_probs[1]; + info->music_prob = frame_probs[0]; + + /*printf("%f %f %f\n", frame_probs[0], frame_probs[1], info->music_prob);*/ +#ifdef MLP_TRAINING + for (i=0;i<25;i++) printf("%f ", features[i]); - printf("\n");*/ + printf("\n"); +#endif info->bandwidth = bandwidth; + tonal->prev_bandwidth = bandwidth; /*printf("%d %d\n", info->bandwidth, info->opus_bandwidth);*/ info->noisiness = frame_noisiness; info->valid = 1; @@ -650,23 +959,25 @@ void run_analysis(TonalityAnalysisState *analysis, const CELTMode *celt_mode, co int offset; int pcm_len; + analysis_frame_size -= analysis_frame_size&1; if (analysis_pcm != NULL) { /* Avoid overflow/wrap-around of the analysis buffer */ - analysis_frame_size = IMIN((DETECT_SIZE-5)*Fs/100, analysis_frame_size); + analysis_frame_size = IMIN((DETECT_SIZE-5)*Fs/50, analysis_frame_size); pcm_len = analysis_frame_size - analysis->analysis_offset; offset = analysis->analysis_offset; - do { - tonality_analysis(analysis, celt_mode, analysis_pcm, IMIN(480, pcm_len), offset, c1, c2, C, lsb_depth, downmix); - offset += 480; - pcm_len -= 480; - } while (pcm_len>0); + while (pcm_len>0) { + tonality_analysis(analysis, celt_mode, analysis_pcm, IMIN(Fs/50, pcm_len), offset, c1, c2, C, lsb_depth, downmix); + offset += Fs/50; + pcm_len -= Fs/50; + } analysis->analysis_offset = analysis_frame_size; analysis->analysis_offset -= frame_size; } - analysis_info->valid = 0; tonality_get_info(analysis, analysis_info, frame_size); } + +#endif /* DISABLE_FLOAT_API */ diff --git a/libs/libopus/src/analysis.h b/libs/libopus/src/analysis.h index 9eae56a52..0b66555f2 100644 --- a/libs/libopus/src/analysis.h +++ b/libs/libopus/src/analysis.h @@ -30,16 +30,24 @@ #include "celt.h" 
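run_analysis() now walks the look-ahead buffer in steps of Fs/50 samples instead of a fixed 480, so each call to tonality_analysis() covers one 20 ms window at any supported rate (960 samples at 48 kHz, 480 at 24 kHz, 320 at 16 kHz). A minimal sketch of that chunking, where chunked_analysis() and the process_chunk() callback are hypothetical stand-ins for run_analysis() and tonality_analysis():

static void chunked_analysis(const short *pcm, int pcm_len, int Fs,
                             void (*process_chunk)(const short *x, int len))
{
   int offset = 0;
   while (pcm_len > 0) {
      /* The last chunk may be shorter than 20 ms; the step size never changes. */
      int len = pcm_len < Fs/50 ? pcm_len : Fs/50;
      process_chunk(pcm + offset, len);
      offset  += Fs/50;
      pcm_len -= Fs/50;
   }
}
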
#include "opus_private.h" +#include "mlp.h" #define NB_FRAMES 8 #define NB_TBANDS 18 -#define NB_TOT_BANDS 21 -#define ANALYSIS_BUF_SIZE 720 /* 15 ms at 48 kHz */ +#define ANALYSIS_BUF_SIZE 720 /* 30 ms at 24 kHz */ -#define DETECT_SIZE 200 +/* At that point we can stop counting frames because it no longer matters. */ +#define ANALYSIS_COUNT_MAX 10000 + +#define DETECT_SIZE 100 + +/* Uncomment this to print the MLP features on stdout. */ +/*#define MLP_TRAINING*/ typedef struct { int arch; + int application; + opus_int32 Fs; #define TONALITY_ANALYSIS_RESET_START angle float angle[240]; float d_angle[240]; @@ -48,35 +56,27 @@ typedef struct { int mem_fill; /* number of usable samples in the buffer */ float prev_band_tonality[NB_TBANDS]; float prev_tonality; + int prev_bandwidth; float E[NB_FRAMES][NB_TBANDS]; + float logE[NB_FRAMES][NB_TBANDS]; float lowE[NB_TBANDS]; float highE[NB_TBANDS]; - float meanE[NB_TOT_BANDS]; + float meanE[NB_TBANDS+1]; float mem[32]; float cmean[8]; float std[9]; - float music_prob; float Etracker; float lowECount; int E_count; - int last_music; - int last_transition; int count; - float subframe_mem[3]; int analysis_offset; - /** Probability of having speech for time i to DETECT_SIZE-1 (and music before). - pspeech[0] is the probability that all frames in the window are speech. */ - float pspeech[DETECT_SIZE]; - /** Probability of having music for time i to DETECT_SIZE-1 (and speech before). - pmusic[0] is the probability that all frames in the window are music. */ - float pmusic[DETECT_SIZE]; - float speech_confidence; - float music_confidence; - int speech_confidence_count; - int music_confidence_count; int write_pos; int read_pos; int read_subframe; + float hp_ener_accum; + int initialized; + float rnn_state[MAX_NEURONS]; + opus_val32 downmix_state[3]; AnalysisInfo info[DETECT_SIZE]; } TonalityAnalysisState; @@ -86,7 +86,7 @@ typedef struct { * not be repeated every analysis step. No allocated memory is retained * by the state struct, so no cleanup call is required. */ -void tonality_analysis_init(TonalityAnalysisState *analysis); +void tonality_analysis_init(TonalityAnalysisState *analysis, opus_int32 Fs); /** Reset a TonalityAnalysisState stuct. * diff --git a/libs/libopus/src/mapping_matrix.c b/libs/libopus/src/mapping_matrix.c new file mode 100644 index 000000000..31298af05 --- /dev/null +++ b/libs/libopus/src/mapping_matrix.c @@ -0,0 +1,378 @@ +/* Copyright (c) 2017 Google Inc. + Written by Andrew Allen */ +/* + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + - Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + - Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER + OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifdef HAVE_CONFIG_H +#include "config.h" +#endif + +#include "arch.h" +#include "float_cast.h" +#include "opus_private.h" +#include "opus_defines.h" +#include "mapping_matrix.h" + +#define MATRIX_INDEX(nb_rows, row, col) (nb_rows * col + row) + +opus_int32 mapping_matrix_get_size(int rows, int cols) +{ + opus_int32 size; + + /* Mapping Matrix must only support up to 255 channels in or out. + * Additionally, the total cell count must be <= 65004 octets in order + * for the matrix to be stored in an OGG header. + */ + if (rows > 255 || cols > 255) + return 0; + size = rows * (opus_int32)cols * sizeof(opus_int16); + if (size > 65004) + return 0; + + return align(sizeof(MappingMatrix)) + align(size); +} + +opus_int16 *mapping_matrix_get_data(const MappingMatrix *matrix) +{ + /* void* cast avoids clang -Wcast-align warning */ + return (opus_int16*)(void*)((char*)matrix + align(sizeof(MappingMatrix))); +} + +void mapping_matrix_init(MappingMatrix * const matrix, + int rows, int cols, int gain, const opus_int16 *data, opus_int32 data_size) +{ + int i; + opus_int16 *ptr; + +#if !defined(ENABLE_ASSERTIONS) + (void)data_size; +#endif + celt_assert(align(data_size) == align(rows * cols * sizeof(opus_int16))); + + matrix->rows = rows; + matrix->cols = cols; + matrix->gain = gain; + ptr = mapping_matrix_get_data(matrix); + for (i = 0; i < rows * cols; i++) + { + ptr[i] = data[i]; + } +} + +#ifndef DISABLE_FLOAT_API +void mapping_matrix_multiply_channel_in_float( + const MappingMatrix *matrix, + const float *input, + int input_rows, + opus_val16 *output, + int output_row, + int output_rows, + int frame_size) +{ + /* Matrix data is ordered col-wise. */ + opus_int16* matrix_data; + int i, col; + + celt_assert(input_rows <= matrix->cols && output_rows <= matrix->rows); + + matrix_data = mapping_matrix_get_data(matrix); + + for (i = 0; i < frame_size; i++) + { + float tmp = 0; + for (col = 0; col < input_rows; col++) + { + tmp += + matrix_data[MATRIX_INDEX(matrix->rows, output_row, col)] * + input[MATRIX_INDEX(input_rows, col, i)]; + } +#if defined(FIXED_POINT) + output[output_rows * i] = FLOAT2INT16((1/32768.f)*tmp); +#else + output[output_rows * i] = (1/32768.f)*tmp; +#endif + } +} + +void mapping_matrix_multiply_channel_out_float( + const MappingMatrix *matrix, + const opus_val16 *input, + int input_row, + int input_rows, + float *output, + int output_rows, + int frame_size +) +{ + /* Matrix data is ordered col-wise. 
*/ + opus_int16* matrix_data; + int i, row; + float input_sample; + + celt_assert(input_rows <= matrix->cols && output_rows <= matrix->rows); + + matrix_data = mapping_matrix_get_data(matrix); + + for (i = 0; i < frame_size; i++) + { +#if defined(FIXED_POINT) + input_sample = (1/32768.f)*input[input_rows * i]; +#else + input_sample = input[input_rows * i]; +#endif + for (row = 0; row < output_rows; row++) + { + float tmp = + (1/32768.f)*matrix_data[MATRIX_INDEX(matrix->rows, row, input_row)] * + input_sample; + output[MATRIX_INDEX(output_rows, row, i)] += tmp; + } + } +} +#endif /* DISABLE_FLOAT_API */ + +void mapping_matrix_multiply_channel_in_short( + const MappingMatrix *matrix, + const opus_int16 *input, + int input_rows, + opus_val16 *output, + int output_row, + int output_rows, + int frame_size) +{ + /* Matrix data is ordered col-wise. */ + opus_int16* matrix_data; + int i, col; + + celt_assert(input_rows <= matrix->cols && output_rows <= matrix->rows); + + matrix_data = mapping_matrix_get_data(matrix); + + for (i = 0; i < frame_size; i++) + { + opus_val32 tmp = 0; + for (col = 0; col < input_rows; col++) + { +#if defined(FIXED_POINT) + tmp += + ((opus_int32)matrix_data[MATRIX_INDEX(matrix->rows, output_row, col)] * + (opus_int32)input[MATRIX_INDEX(input_rows, col, i)]) >> 8; +#else + tmp += + matrix_data[MATRIX_INDEX(matrix->rows, output_row, col)] * + input[MATRIX_INDEX(input_rows, col, i)]; +#endif + } +#if defined(FIXED_POINT) + output[output_rows * i] = (opus_int16)((tmp + 64) >> 7); +#else + output[output_rows * i] = (1/(32768.f*32768.f))*tmp; +#endif + } +} + +void mapping_matrix_multiply_channel_out_short( + const MappingMatrix *matrix, + const opus_val16 *input, + int input_row, + int input_rows, + opus_int16 *output, + int output_rows, + int frame_size) +{ + /* Matrix data is ordered col-wise. 
*/ + opus_int16* matrix_data; + int i, row; + opus_int32 input_sample; + + celt_assert(input_rows <= matrix->cols && output_rows <= matrix->rows); + + matrix_data = mapping_matrix_get_data(matrix); + + for (i = 0; i < frame_size; i++) + { +#if defined(FIXED_POINT) + input_sample = (opus_int32)input[input_rows * i]; +#else + input_sample = (opus_int32)FLOAT2INT16(input[input_rows * i]); +#endif + for (row = 0; row < output_rows; row++) + { + opus_int32 tmp = + (opus_int32)matrix_data[MATRIX_INDEX(matrix->rows, row, input_row)] * + input_sample; + output[MATRIX_INDEX(output_rows, row, i)] += (tmp + 16384) >> 15; + } + } +} + +const MappingMatrix mapping_matrix_foa_mixing = { 6, 6, 0 }; +const opus_int16 mapping_matrix_foa_mixing_data[36] = { + 16384, 0, -16384, 23170, 0, 0, 16384, 23170, + 16384, 0, 0, 0, 16384, 0, -16384, -23170, + 0, 0, 16384, -23170, 16384, 0, 0, 0, + 0, 0, 0, 0, 32767, 0, 0, 0, + 0, 0, 0, 32767 +}; + +const MappingMatrix mapping_matrix_soa_mixing = { 11, 11, 0 }; +const opus_int16 mapping_matrix_soa_mixing_data[121] = { + 10923, 7723, 13377, -13377, 11585, 9459, 7723, -16384, + -6689, 0, 0, 10923, 7723, 13377, 13377, -11585, + 9459, 7723, 16384, -6689, 0, 0, 10923, -15447, + 13377, 0, 0, -18919, 7723, 0, 13377, 0, + 0, 10923, 7723, -13377, -13377, 11585, -9459, 7723, + 16384, -6689, 0, 0, 10923, -7723, 0, 13377, + -16384, 0, -15447, 0, 9459, 0, 0, 10923, + -7723, 0, -13377, 16384, 0, -15447, 0, 9459, + 0, 0, 10923, 15447, 0, 0, 0, 0, + -15447, 0, -18919, 0, 0, 10923, 7723, -13377, + 13377, -11585, -9459, 7723, -16384, -6689, 0, 0, + 10923, -15447, -13377, 0, 0, 18919, 7723, 0, + 13377, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 32767, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 32767 +}; + +const MappingMatrix mapping_matrix_toa_mixing = { 18, 18, 0 }; +const opus_int16 mapping_matrix_toa_mixing_data[324] = { + 8208, 0, -881, 14369, 0, 0, -8192, -4163, + 13218, 0, 0, 0, 11095, -8836, -6218, 14833, + 0, 0, 8208, -10161, 881, 10161, -13218, -2944, + -8192, 2944, 0, -10488, -6218, 6248, -11095, -6248, + 0, -10488, 0, 0, 8208, 10161, 881, -10161, + -13218, 2944, -8192, -2944, 0, 10488, -6218, -6248, + -11095, 6248, 0, 10488, 0, 0, 8176, 5566, + -11552, 5566, 9681, -11205, 8192, -11205, 0, 4920, + -15158, 9756, -3334, 9756, 0, -4920, 0, 0, + 8176, 7871, 11552, 0, 0, 15846, 8192, 0, + -9681, -6958, 0, 13797, 3334, 0, -15158, 0, + 0, 0, 8176, 0, 11552, 7871, 0, 0, + 8192, 15846, 9681, 0, 0, 0, 3334, 13797, + 15158, 6958, 0, 0, 8176, 5566, -11552, -5566, + -9681, -11205, 8192, 11205, 0, 4920, 15158, 9756, + -3334, -9756, 0, 4920, 0, 0, 8208, 14369, + -881, 0, 0, -4163, -8192, 0, -13218, -14833, + 0, -8836, 11095, 0, 6218, 0, 0, 0, + 8208, 10161, 881, 10161, 13218, 2944, -8192, 2944, + 0, 10488, 6218, -6248, -11095, -6248, 0, -10488, + 0, 0, 8208, -14369, -881, 0, 0, 4163, + -8192, 0, -13218, 14833, 0, 8836, 11095, 0, + 6218, 0, 0, 0, 8208, 0, -881, -14369, + 0, 0, -8192, 4163, 13218, 0, 0, 0, + 11095, 8836, -6218, -14833, 0, 0, 8176, -5566, + -11552, 5566, -9681, 11205, 8192, -11205, 0, -4920, + 15158, -9756, -3334, 9756, 0, -4920, 0, 0, + 8176, 0, 11552, -7871, 0, 0, 8192, -15846, + 9681, 0, 0, 0, 3334, -13797, 15158, -6958, + 0, 0, 8176, -7871, 11552, 0, 0, -15846, + 8192, 0, -9681, 6958, 0, -13797, 3334, 0, + -15158, 0, 0, 0, 8176, -5566, -11552, -5566, + 9681, 11205, 8192, 11205, 0, -4920, -15158, -9756, + -3334, -9756, 0, 4920, 0, 0, 8208, -10161, + 881, -10161, 13218, -2944, -8192, -2944, 0, -10488, + 6218, 6248, -11095, 6248, 0, 10488, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 
0, 0, + 32767, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 32767 +}; + +const MappingMatrix mapping_matrix_foa_demixing = { 6, 6, 0 }; +const opus_int16 mapping_matrix_foa_demixing_data[36] = { + 16384, 16384, 16384, 16384, 0, 0, 0, 23170, + 0, -23170, 0, 0, -16384, 16384, -16384, 16384, + 0, 0, 23170, 0, -23170, 0, 0, 0, + 0, 0, 0, 0, 32767, 0, 0, 0, + 0, 0, 0, 32767 +}; + +const MappingMatrix mapping_matrix_soa_demixing = { 11, 11, 3050 }; +const opus_int16 mapping_matrix_soa_demixing_data[121] = { + 2771, 2771, 2771, 2771, 2771, 2771, 2771, 2771, + 2771, 0, 0, 10033, 10033, -20066, 10033, 14189, + 14189, -28378, 10033, -20066, 0, 0, 3393, 3393, + 3393, -3393, 0, 0, 0, -3393, -3393, 0, + 0, -17378, 17378, 0, -17378, -24576, 24576, 0, + 17378, 0, 0, 0, -14189, 14189, 0, -14189, + -28378, 28378, 0, 14189, 0, 0, 0, 2399, + 2399, -4799, -2399, 0, 0, 0, -2399, 4799, + 0, 0, 1959, 1959, 1959, 1959, -3918, -3918, + -3918, 1959, 1959, 0, 0, -4156, 4156, 0, + 4156, 0, 0, 0, -4156, 0, 0, 0, + 8192, 8192, -16384, 8192, 16384, 16384, -32768, 8192, + -16384, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 8312, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 8312 +}; + +const MappingMatrix mapping_matrix_toa_demixing = { 18, 18, 0 }; +const opus_int16 mapping_matrix_toa_demixing_data[324] = { + 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, + 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, + 0, 0, 0, -9779, 9779, 6263, 8857, 0, + 6263, 13829, 9779, -13829, 0, -6263, 0, -8857, + -6263, -9779, 0, 0, -3413, 3413, 3413, -11359, + 11359, 11359, -11359, -3413, 3413, -3413, -3413, -11359, + 11359, 11359, -11359, 3413, 0, 0, 13829, 9779, + -9779, 6263, 0, 8857, -6263, 0, 9779, 0, + -13829, 6263, -8857, 0, -6263, -9779, 0, 0, + 0, -15617, -15617, 6406, 0, 0, -6406, 0, + 15617, 0, 0, -6406, 0, 0, 6406, 15617, + 0, 0, 0, -5003, 5003, -10664, 15081, 0, + -10664, -7075, 5003, 7075, 0, 10664, 0, -15081, + 10664, -5003, 0, 0, -8176, -8176, -8176, 8208, + 8208, 8208, 8208, -8176, -8176, -8176, -8176, 8208, + 8208, 8208, 8208, -8176, 0, 0, -7075, 5003, + -5003, -10664, 0, 15081, 10664, 0, 5003, 0, + 7075, -10664, -15081, 0, 10664, -5003, 0, 0, + 15617, 0, 0, 0, -6406, 6406, 0, -15617, + 0, -15617, 15617, 0, 6406, -6406, 0, 0, + 0, 0, 0, -11393, 11393, 2993, -4233, 0, + 2993, -16112, 11393, 16112, 0, -2993, 0, 4233, + -2993, -11393, 0, 0, 0, -9974, -9974, -13617, + 0, 0, 13617, 0, 9974, 0, 0, 13617, + 0, 0, -13617, 9974, 0, 0, 0, 5579, + -5579, 10185, 14403, 0, 10185, -7890, -5579, 7890, + 0, -10185, 0, -14403, -10185, 5579, 0, 0, + 11826, -11826, -11826, -901, 901, 901, -901, 11826, + -11826, 11826, 11826, -901, 901, 901, -901, -11826, + 0, 0, -7890, -5579, 5579, 10185, 0, 14403, + -10185, 0, -5579, 0, 7890, 10185, -14403, 0, + -10185, 5579, 0, 0, -9974, 0, 0, 0, + -13617, 13617, 0, 9974, 0, 9974, -9974, 0, + 13617, -13617, 0, 0, 0, 0, 16112, -11393, + 11393, -2993, 0, 4233, 2993, 0, -11393, 0, + -16112, -2993, -4233, 0, 2993, 11393, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 32767, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 32767 +}; + diff --git a/libs/libopus/src/mapping_matrix.h b/libs/libopus/src/mapping_matrix.h new file mode 100644 index 000000000..98bc82df3 --- /dev/null +++ b/libs/libopus/src/mapping_matrix.h @@ -0,0 +1,133 @@ +/* Copyright (c) 2017 Google Inc. 
+ Written by Andrew Allen */ +/* + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + - Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + - Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER + OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +/** + * @file mapping_matrix.h + * @brief Opus reference implementation mapping matrix API + */ + +#ifndef MAPPING_MATRIX_H +#define MAPPING_MATRIX_H + +#include "opus_types.h" +#include "opus_projection.h" + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct MappingMatrix +{ + int rows; /* number of channels outputted from matrix. */ + int cols; /* number of channels inputted to matrix. */ + int gain; /* in dB. S7.8-format. */ + /* Matrix cell data goes here using col-wise ordering. */ +} MappingMatrix; + +opus_int32 mapping_matrix_get_size(int rows, int cols); + +opus_int16 *mapping_matrix_get_data(const MappingMatrix *matrix); + +void mapping_matrix_init( + MappingMatrix * const matrix, + int rows, + int cols, + int gain, + const opus_int16 *data, + opus_int32 data_size +); + +#ifndef DISABLE_FLOAT_API +void mapping_matrix_multiply_channel_in_float( + const MappingMatrix *matrix, + const float *input, + int input_rows, + opus_val16 *output, + int output_row, + int output_rows, + int frame_size +); + +void mapping_matrix_multiply_channel_out_float( + const MappingMatrix *matrix, + const opus_val16 *input, + int input_row, + int input_rows, + float *output, + int output_rows, + int frame_size +); +#endif /* DISABLE_FLOAT_API */ + +void mapping_matrix_multiply_channel_in_short( + const MappingMatrix *matrix, + const opus_int16 *input, + int input_rows, + opus_val16 *output, + int output_row, + int output_rows, + int frame_size +); + +void mapping_matrix_multiply_channel_out_short( + const MappingMatrix *matrix, + const opus_val16 *input, + int input_row, + int input_rows, + opus_int16 *output, + int output_rows, + int frame_size +); + +/* Pre-computed mixing and demixing matrices for 1st to 3rd-order ambisonics. 
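A minimal sketch, using only the helpers declared in this header, of how a caller might build the first-order (FOA) mixing matrix; the make_foa_mixing_matrix() wrapper itself is hypothetical. Cells are stored col-wise, so element (row, col) of a rows x cols matrix sits at index rows*col + row, and each cell is a Q15 coefficient (32767 is roughly unit gain).

#include <stdlib.h>
#include "mapping_matrix.h"

static MappingMatrix *make_foa_mixing_matrix(void)
{
   /* Size covers the aligned header plus 6*6 opus_int16 cells; it is 0 when
      the dimensions exceed 255 or the cell data would not fit in 65004 octets. */
   opus_int32 size = mapping_matrix_get_size(6, 6);
   MappingMatrix *m;
   if (size == 0)
      return NULL;
   m = (MappingMatrix *)malloc(size);
   if (m != NULL)
      mapping_matrix_init(m, 6, 6, 0, mapping_matrix_foa_mixing_data,
                          sizeof(mapping_matrix_foa_mixing_data));
   return m;
}

Each subsequent call to mapping_matrix_multiply_channel_in_float() then fills one output channel (output_row) from all input channels of a frame, scaling by 1/32768 so the Q15 cells act as plain mixing coefficients.
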
+ * foa: first-order ambisonics + * soa: second-order ambisonics + * toa: third-order ambisonics + */ +extern const MappingMatrix mapping_matrix_foa_mixing; +extern const opus_int16 mapping_matrix_foa_mixing_data[36]; + +extern const MappingMatrix mapping_matrix_soa_mixing; +extern const opus_int16 mapping_matrix_soa_mixing_data[121]; + +extern const MappingMatrix mapping_matrix_toa_mixing; +extern const opus_int16 mapping_matrix_toa_mixing_data[324]; + +extern const MappingMatrix mapping_matrix_foa_demixing; +extern const opus_int16 mapping_matrix_foa_demixing_data[36]; + +extern const MappingMatrix mapping_matrix_soa_demixing; +extern const opus_int16 mapping_matrix_soa_demixing_data[121]; + +extern const MappingMatrix mapping_matrix_toa_demixing; +extern const opus_int16 mapping_matrix_toa_demixing_data[324]; + +#ifdef __cplusplus +} +#endif + +#endif /* MAPPING_MATRIX_H */ diff --git a/libs/libopus/src/mlp.c b/libs/libopus/src/mlp.c index ff9e50df4..964c6a98f 100644 --- a/libs/libopus/src/mlp.c +++ b/libs/libopus/src/mlp.c @@ -1,5 +1,5 @@ /* Copyright (c) 2008-2011 Octasic Inc. - Written by Jean-Marc Valin */ + 2012-2017 Jean-Marc Valin */ /* Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions @@ -29,42 +29,13 @@ #include "config.h" #endif +#include #include "opus_types.h" #include "opus_defines.h" - -#include -#include "mlp.h" #include "arch.h" #include "tansig_table.h" -#define MAX_NEURONS 100 +#include "mlp.h" -#if 0 -static OPUS_INLINE opus_val16 tansig_approx(opus_val32 _x) /* Q19 */ -{ - int i; - opus_val16 xx; /* Q11 */ - /*double x, y;*/ - opus_val16 dy, yy; /* Q14 */ - /*x = 1.9073e-06*_x;*/ - if (_x>=QCONST32(8,19)) - return QCONST32(1.,14); - if (_x<=-QCONST32(8,19)) - return -QCONST32(1.,14); - xx = EXTRACT16(SHR32(_x, 8)); - /*i = lrint(25*x);*/ - i = SHR32(ADD32(1024,MULT16_16(25, xx)),11); - /*x -= .04*i;*/ - xx -= EXTRACT16(SHR32(MULT16_16(20972,i),8)); - /*x = xx*(1./2048);*/ - /*y = tansig_table[250+i];*/ - yy = tansig_table[250+i]; - /*y = yy*(1./16384);*/ - dy = 16384-MULT16_16_Q14(yy,yy); - yy = yy + MULT16_16_Q14(MULT16_16_Q11(xx,dy),(16384 - MULT16_16_Q11(yy,xx))); - return yy; -} -#else -/*extern const float tansig_table[501];*/ static OPUS_INLINE float tansig_approx(float x) { int i; @@ -92,54 +63,82 @@ static OPUS_INLINE float tansig_approx(float x) y = y + x*dy*(1 - y*x); return sign*y; } -#endif -#if 0 -void mlp_process(const MLP *m, const opus_val16 *in, opus_val16 *out) +static OPUS_INLINE float sigmoid_approx(float x) { - int j; - opus_val16 hidden[MAX_NEURONS]; - const opus_val16 *W = m->weights; - /* Copy to tmp_in */ - for (j=0;jtopo[1];j++) - { - int k; - opus_val32 sum = SHL32(EXTEND32(*W++),8); - for (k=0;ktopo[0];k++) - sum = MAC16_16(sum, in[k],*W++); - hidden[j] = tansig_approx(sum); - } - for (j=0;jtopo[2];j++) - { - int k; - opus_val32 sum = SHL32(EXTEND32(*W++),14); - for (k=0;ktopo[1];k++) - sum = MAC16_16(sum, hidden[k], *W++); - out[j] = tansig_approx(EXTRACT16(PSHR32(sum,17))); - } + return .5f + .5f*tansig_approx(.5f*x); +} + +static void gemm_accum(float *out, const opus_int8 *weights, int rows, int cols, int col_stride, const float *x) +{ + int i, j; + for (i=0;iweights; - /* Copy to tmp_in */ - for (j=0;jtopo[1];j++) - { - int k; - float sum = *W++; - for (k=0;ktopo[0];k++) - sum = sum + in[k]**W++; - hidden[j] = tansig_approx(sum); - } - for (j=0;jtopo[2];j++) - { - int k; - float sum = *W++; - for (k=0;ktopo[1];k++) - sum = sum + hidden[k]**W++; - 
out[j] = tansig_approx(sum); - } + int i; + int N, M; + int stride; + M = layer->nb_inputs; + N = layer->nb_neurons; + stride = N; + for (i=0;ibias[i]; + gemm_accum(output, layer->input_weights, N, M, stride, input); + for (i=0;isigmoid) { + for (i=0;inb_inputs; + N = gru->nb_neurons; + stride = 3*N; + /* Compute update gate. */ + for (i=0;ibias[i]; + gemm_accum(z, gru->input_weights, N, M, stride, input); + gemm_accum(z, gru->recurrent_weights, N, N, stride, state); + for (i=0;ibias[N + i]; + gemm_accum(r, &gru->input_weights[N], N, M, stride, input); + gemm_accum(r, &gru->recurrent_weights[N], N, N, stride, state); + for (i=0;ibias[2*N + i]; + for (i=0;iinput_weights[2*N], N, M, stride, input); + gemm_accum(h, &gru->recurrent_weights[2*N], N, N, stride, tmp); + for (i=0;i