Diffstat (limited to 'media/libaom/src/aom_dsp/x86/convolve_avx2.h')
-rw-r--r--  media/libaom/src/aom_dsp/x86/convolve_avx2.h  274
1 file changed, 269 insertions(+), 5 deletions(-)
diff --git a/media/libaom/src/aom_dsp/x86/convolve_avx2.h b/media/libaom/src/aom_dsp/x86/convolve_avx2.h
index 30253f65c..d516de5f2 100644
--- a/media/libaom/src/aom_dsp/x86/convolve_avx2.h
+++ b/media/libaom/src/aom_dsp/x86/convolve_avx2.h
@@ -34,6 +34,239 @@ DECLARE_ALIGNED(32, static const uint8_t, filt4_d4_global_avx2[]) = {
2, 3, 4, 5, 3, 4, 5, 6, 4, 5, 6, 7, 5, 6, 7, 8,
};
+DECLARE_ALIGNED(32, static const uint8_t, filt_center_global_avx2[32]) = {
+ 3, 255, 4, 255, 5, 255, 6, 255, 7, 255, 8, 255, 9, 255, 10, 255,
+ 3, 255, 4, 255, 5, 255, 6, 255, 7, 255, 8, 255, 9, 255, 10, 255
+};
+
+DECLARE_ALIGNED(32, static const uint8_t,
+ filt1_global_avx2[32]) = { 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5,
+ 6, 6, 7, 7, 8, 0, 1, 1, 2, 2, 3,
+ 3, 4, 4, 5, 5, 6, 6, 7, 7, 8 };
+
+DECLARE_ALIGNED(32, static const uint8_t,
+ filt2_global_avx2[32]) = { 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
+ 8, 8, 9, 9, 10, 2, 3, 3, 4, 4, 5,
+ 5, 6, 6, 7, 7, 8, 8, 9, 9, 10 };
+
+DECLARE_ALIGNED(32, static const uint8_t, filt3_global_avx2[32]) = {
+ 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12,
+ 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12
+};
+
+DECLARE_ALIGNED(32, static const uint8_t, filt4_global_avx2[32]) = {
+ 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14,
+ 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14
+};
+
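The four filtN_global_avx2 masks feed _mm256_shuffle_epi8 so that each 128-bit lane is rearranged into the overlapping source-byte pairs consumed by _mm256_maddubs_epi16: filt1 pairs taps 0/1, filt2 taps 2/3, filt3 taps 4/5 and filt4 taps 6/7 for eight consecutive output pixels. filt_center_global_avx2 instead picks one center byte per pixel; the 255 entries have the high bit set, so pshufb zeroes the partner byte and the result is already zero-extended to 16 bits. A minimal scalar sketch of what the four shuffle+maddubs passes add up to for one output pixel (sketch_8tap_pixel is a hypothetical helper, not part of this header):

#include <stdint.h>

// Hedged scalar sketch: the value the four shuffle+maddubs passes accumulate
// for a single output pixel of an 8-tap horizontal filter. src points at the
// first tap of the window; pixel bytes are unsigned and taps are signed,
// exactly as _mm256_maddubs_epi16 expects.
static int32_t sketch_8tap_pixel(const uint8_t *src, const int8_t *taps) {
  int32_t sum = 0;
  for (int pair = 0; pair < 4; ++pair) {  // filt1..filt4 == pairs 0..3
    sum += src[2 * pair + 0] * taps[2 * pair + 0] +
           src[2 * pair + 1] * taps[2 * pair + 1];
  }
  return sum;  // rounding/shifting is applied by the calling kernel
}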
+#define CONVOLVE_SR_HORIZONTAL_FILTER_8TAP \
+ for (i = 0; i < (im_h - 2); i += 2) { \
+ __m256i data = _mm256_castsi128_si256( \
+ _mm_loadu_si128((__m128i *)&src_ptr[(i * src_stride) + j])); \
+ data = _mm256_inserti128_si256( \
+ data, \
+ _mm_loadu_si128( \
+ (__m128i *)&src_ptr[(i * src_stride) + j + src_stride]), \
+ 1); \
+ \
+ __m256i res = convolve_lowbd_x(data, coeffs_h, filt); \
+ res = \
+ _mm256_sra_epi16(_mm256_add_epi16(res, round_const_h), round_shift_h); \
+ _mm256_store_si256((__m256i *)&im_block[i * im_stride], res); \
+ } \
+ \
+ __m256i data_1 = _mm256_castsi128_si256( \
+ _mm_loadu_si128((__m128i *)&src_ptr[(i * src_stride) + j])); \
+ \
+ __m256i res = convolve_lowbd_x(data_1, coeffs_h, filt); \
+ \
+ res = _mm256_sra_epi16(_mm256_add_epi16(res, round_const_h), round_shift_h); \
+ \
+ _mm256_store_si256((__m256i *)&im_block[i * im_stride], res);
+
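CONVOLVE_SR_HORIZONTAL_FILTER_8TAP expects i, j, im_h, src_ptr, src_stride, im_block, im_stride, filt, coeffs_h, round_const_h and round_shift_h to be in scope. It filters two source rows per iteration, one per 128-bit lane, and (since the callers use im_stride == 8) a single 256-bit store writes both rows of 16-bit intermediates; a final single-row pass handles the last row. A hedged scalar model of the stage, which ignores the coefficient pre-shift done by prepare_coeffs_lowbd and uses an illustrative rounding parameter:

#include <stdint.h>

// Hedged scalar model of the horizontal stage; round_0 is illustrative and
// the offset the real callers fold into round_const_h is omitted here.
static void sketch_horiz_stage(const uint8_t *src_ptr, int src_stride,
                               int16_t *im_block, int im_stride, int im_h,
                               int j, const int16_t *x_taps, int round_0) {
  for (int i = 0; i < im_h; ++i) {
    for (int k = 0; k < 8; ++k) {  // 8 output columns per j-block (one ymm lane)
      int32_t sum = 0;
      for (int t = 0; t < 8; ++t)
        sum += src_ptr[i * src_stride + j + k + t] * x_taps[t];
      im_block[i * im_stride + k] =
          (int16_t)((sum + (1 << (round_0 - 1))) >> round_0);
    }
  }
}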
+#define CONVOLVE_SR_VERTICAL_FILTER_8TAP \
+ __m256i src_0 = _mm256_loadu_si256((__m256i *)(im_block + 0 * im_stride)); \
+ __m256i src_1 = _mm256_loadu_si256((__m256i *)(im_block + 1 * im_stride)); \
+ __m256i src_2 = _mm256_loadu_si256((__m256i *)(im_block + 2 * im_stride)); \
+ __m256i src_3 = _mm256_loadu_si256((__m256i *)(im_block + 3 * im_stride)); \
+ __m256i src_4 = _mm256_loadu_si256((__m256i *)(im_block + 4 * im_stride)); \
+ __m256i src_5 = _mm256_loadu_si256((__m256i *)(im_block + 5 * im_stride)); \
+ \
+ __m256i s[8]; \
+ s[0] = _mm256_unpacklo_epi16(src_0, src_1); \
+ s[1] = _mm256_unpacklo_epi16(src_2, src_3); \
+ s[2] = _mm256_unpacklo_epi16(src_4, src_5); \
+ \
+ s[4] = _mm256_unpackhi_epi16(src_0, src_1); \
+ s[5] = _mm256_unpackhi_epi16(src_2, src_3); \
+ s[6] = _mm256_unpackhi_epi16(src_4, src_5); \
+ \
+ for (i = 0; i < h; i += 2) { \
+ const int16_t *data = &im_block[i * im_stride]; \
+ \
+ const __m256i s6 = _mm256_loadu_si256((__m256i *)(data + 6 * im_stride)); \
+ const __m256i s7 = _mm256_loadu_si256((__m256i *)(data + 7 * im_stride)); \
+ \
+ s[3] = _mm256_unpacklo_epi16(s6, s7); \
+ s[7] = _mm256_unpackhi_epi16(s6, s7); \
+ \
+ __m256i res_a = convolve(s, coeffs_v); \
+ __m256i res_b = convolve(s + 4, coeffs_v); \
+ \
+ res_a = \
+ _mm256_sra_epi32(_mm256_add_epi32(res_a, sum_round_v), sum_shift_v); \
+ res_b = \
+ _mm256_sra_epi32(_mm256_add_epi32(res_b, sum_round_v), sum_shift_v); \
+ \
+ const __m256i res_a_round = _mm256_sra_epi32( \
+ _mm256_add_epi32(res_a, round_const_v), round_shift_v); \
+ const __m256i res_b_round = _mm256_sra_epi32( \
+ _mm256_add_epi32(res_b, round_const_v), round_shift_v); \
+ \
+ const __m256i res_16bit = _mm256_packs_epi32(res_a_round, res_b_round); \
+ const __m256i res_8b = _mm256_packus_epi16(res_16bit, res_16bit); \
+ \
+ const __m128i res_0 = _mm256_castsi256_si128(res_8b); \
+ const __m128i res_1 = _mm256_extracti128_si256(res_8b, 1); \
+ \
+ __m128i *const p_0 = (__m128i *)&dst[i * dst_stride + j]; \
+ __m128i *const p_1 = (__m128i *)&dst[i * dst_stride + j + dst_stride]; \
+ if (w - j > 4) { \
+ _mm_storel_epi64(p_0, res_0); \
+ _mm_storel_epi64(p_1, res_1); \
+ } else if (w == 4) { \
+ xx_storel_32(p_0, res_0); \
+ xx_storel_32(p_1, res_1); \
+ } else { \
+ *(uint16_t *)p_0 = (uint16_t)_mm_cvtsi128_si32(res_0); \
+ *(uint16_t *)p_1 = (uint16_t)_mm_cvtsi128_si32(res_1); \
+ } \
+ \
+ s[0] = s[1]; \
+ s[1] = s[2]; \
+ s[2] = s[3]; \
+ \
+ s[4] = s[5]; \
+ s[5] = s[6]; \
+ s[6] = s[7]; \
+ }
+
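The vertical counterpart keeps a sliding window of eight intermediate rows in s[0..7] (low/high 16-bit unpacks of adjacent row pairs), produces two output rows per iteration, applies two rounding stages (sum_round_v/sum_shift_v, then round_const_v/round_shift_v), saturates to 8 bits via packs/packus, and picks an 8-, 4- or 2-byte store depending on the remaining width. A hedged per-pixel model follows; the real round constants also fold in an intermediate offset that this sketch omits:

#include <stdint.h>

// Hedged per-pixel model of the vertical stage above; round_1 and bits are
// illustrative stand-ins for the conv_params-derived shift amounts.
static uint8_t sketch_vert_pixel(const int16_t *im, int im_stride,
                                 const int16_t *y_taps, int round_1, int bits) {
  int32_t sum = 0;
  for (int t = 0; t < 8; ++t) sum += im[t * im_stride] * y_taps[t];
  sum = (sum + (1 << (round_1 - 1))) >> round_1;  // sum_round_v / sum_shift_v
  sum = (sum + (1 << (bits - 1))) >> bits;        // round_const_v / round_shift_v
  if (sum < 0) sum = 0;
  if (sum > 255) sum = 255;                       // packs/packus saturation
  return (uint8_t)sum;
}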
+#define DIST_WTD_CONVOLVE_HORIZONTAL_FILTER_8TAP \
+ for (i = 0; i < im_h; i += 2) { \
+ __m256i data = _mm256_castsi128_si256(_mm_loadu_si128((__m128i *)src_h)); \
+ if (i + 1 < im_h) \
+ data = _mm256_inserti128_si256( \
+ data, _mm_loadu_si128((__m128i *)(src_h + src_stride)), 1); \
+ src_h += (src_stride << 1); \
+ __m256i res = convolve_lowbd_x(data, coeffs_x, filt); \
+ \
+ res = \
+ _mm256_sra_epi16(_mm256_add_epi16(res, round_const_h), round_shift_h); \
+ \
+ _mm256_store_si256((__m256i *)&im_block[i * im_stride], res); \
+ }
+
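This is the same horizontal pass as above, except that it advances src_h itself and tolerates an odd im_h: on the last iteration the second lane is never filled, so row i + 1 of im_block receives don't-care values that the vertical stage never reads. A hedged outline of that loop structure, with the shuffle/maddubs/rounding work abstracted behind a hypothetical row-filter callback:

#include <stdint.h>

// Hedged outline of the loop structure above. filter_row_into is a
// hypothetical callback standing in for the per-row shuffle/maddubs work.
typedef void (*row_filter_fn)(int16_t *dst_row, const uint8_t *src_row);

static void sketch_dist_wtd_horiz(row_filter_fn filter_row_into,
                                  const uint8_t *src_h, int src_stride,
                                  int16_t *im_block, int im_stride, int im_h) {
  for (int i = 0; i < im_h; i += 2) {
    filter_row_into(&im_block[(i + 0) * im_stride], src_h);
    if (i + 1 < im_h)  // odd im_h: the second row is simply skipped
      filter_row_into(&im_block[(i + 1) * im_stride], src_h + src_stride);
    src_h += 2 * src_stride;
  }
}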
+#define DIST_WTD_CONVOLVE_VERTICAL_FILTER_8TAP \
+ __m256i s[8]; \
+ __m256i s0 = _mm256_loadu_si256((__m256i *)(im_block + 0 * im_stride)); \
+ __m256i s1 = _mm256_loadu_si256((__m256i *)(im_block + 1 * im_stride)); \
+ __m256i s2 = _mm256_loadu_si256((__m256i *)(im_block + 2 * im_stride)); \
+ __m256i s3 = _mm256_loadu_si256((__m256i *)(im_block + 3 * im_stride)); \
+ __m256i s4 = _mm256_loadu_si256((__m256i *)(im_block + 4 * im_stride)); \
+ __m256i s5 = _mm256_loadu_si256((__m256i *)(im_block + 5 * im_stride)); \
+ \
+ s[0] = _mm256_unpacklo_epi16(s0, s1); \
+ s[1] = _mm256_unpacklo_epi16(s2, s3); \
+ s[2] = _mm256_unpacklo_epi16(s4, s5); \
+ \
+ s[4] = _mm256_unpackhi_epi16(s0, s1); \
+ s[5] = _mm256_unpackhi_epi16(s2, s3); \
+ s[6] = _mm256_unpackhi_epi16(s4, s5); \
+ \
+ for (i = 0; i < h; i += 2) { \
+ const int16_t *data = &im_block[i * im_stride]; \
+ \
+ const __m256i s6 = _mm256_loadu_si256((__m256i *)(data + 6 * im_stride)); \
+ const __m256i s7 = _mm256_loadu_si256((__m256i *)(data + 7 * im_stride)); \
+ \
+ s[3] = _mm256_unpacklo_epi16(s6, s7); \
+ s[7] = _mm256_unpackhi_epi16(s6, s7); \
+ \
+ const __m256i res_a = convolve(s, coeffs_y); \
+ const __m256i res_a_round = _mm256_sra_epi32( \
+ _mm256_add_epi32(res_a, round_const_v), round_shift_v); \
+ \
+ if (w - j > 4) { \
+ const __m256i res_b = convolve(s + 4, coeffs_y); \
+ const __m256i res_b_round = _mm256_sra_epi32( \
+ _mm256_add_epi32(res_b, round_const_v), round_shift_v); \
+ const __m256i res_16b = _mm256_packs_epi32(res_a_round, res_b_round); \
+ const __m256i res_unsigned = _mm256_add_epi16(res_16b, offset_const); \
+ \
+ if (do_average) { \
+ const __m256i data_ref_0 = load_line2_avx2( \
+ &dst[i * dst_stride + j], &dst[i * dst_stride + j + dst_stride]); \
+ const __m256i comp_avg_res = \
+ comp_avg(&data_ref_0, &res_unsigned, &wt, use_dist_wtd_comp_avg); \
+ \
+ const __m256i round_result = convolve_rounding( \
+ &comp_avg_res, &offset_const, &rounding_const, rounding_shift); \
+ \
+ const __m256i res_8 = _mm256_packus_epi16(round_result, round_result); \
+ const __m128i res_0 = _mm256_castsi256_si128(res_8); \
+ const __m128i res_1 = _mm256_extracti128_si256(res_8, 1); \
+ \
+ _mm_storel_epi64((__m128i *)(&dst0[i * dst_stride0 + j]), res_0); \
+ _mm_storel_epi64( \
+ (__m128i *)((&dst0[i * dst_stride0 + j + dst_stride0])), res_1); \
+ } else { \
+ const __m128i res_0 = _mm256_castsi256_si128(res_unsigned); \
+ _mm_store_si128((__m128i *)(&dst[i * dst_stride + j]), res_0); \
+ \
+ const __m128i res_1 = _mm256_extracti128_si256(res_unsigned, 1); \
+ _mm_store_si128((__m128i *)(&dst[i * dst_stride + j + dst_stride]), \
+ res_1); \
+ } \
+ } else { \
+ const __m256i res_16b = _mm256_packs_epi32(res_a_round, res_a_round); \
+ const __m256i res_unsigned = _mm256_add_epi16(res_16b, offset_const); \
+ \
+ if (do_average) { \
+ const __m256i data_ref_0 = load_line2_avx2( \
+ &dst[i * dst_stride + j], &dst[i * dst_stride + j + dst_stride]); \
+ \
+ const __m256i comp_avg_res = \
+ comp_avg(&data_ref_0, &res_unsigned, &wt, use_dist_wtd_comp_avg); \
+ \
+ const __m256i round_result = convolve_rounding( \
+ &comp_avg_res, &offset_const, &rounding_const, rounding_shift); \
+ \
+ const __m256i res_8 = _mm256_packus_epi16(round_result, round_result); \
+ const __m128i res_0 = _mm256_castsi256_si128(res_8); \
+ const __m128i res_1 = _mm256_extracti128_si256(res_8, 1); \
+ \
+ *(uint32_t *)(&dst0[i * dst_stride0 + j]) = _mm_cvtsi128_si32(res_0); \
+ *(uint32_t *)(&dst0[i * dst_stride0 + j + dst_stride0]) = \
+ _mm_cvtsi128_si32(res_1); \
+ \
+ } else { \
+ const __m128i res_0 = _mm256_castsi256_si128(res_unsigned); \
+ _mm_store_si128((__m128i *)(&dst[i * dst_stride + j]), res_0); \
+ \
+ const __m128i res_1 = _mm256_extracti128_si256(res_unsigned, 1); \
+ _mm_store_si128((__m128i *)(&dst[i * dst_stride + j + dst_stride]), \
+ res_1); \
+ } \
+ } \
+ \
+ s[0] = s[1]; \
+ s[1] = s[2]; \
+ s[2] = s[3]; \
+ \
+ s[4] = s[5]; \
+ s[5] = s[6]; \
+ s[6] = s[7]; \
+ }
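The distance-weighted vertical macro either writes the offset 16-bit prediction straight to the CONV_BUF_TYPE buffer (first pass of a compound prediction) or, when do_average is set, blends it with the stored prediction via comp_avg, removes the offset with convolve_rounding, and writes 8-bit pixels to dst0. A hedged per-pixel model of the do_average path; the weight, offset and shift values are assumptions of the sketch rather than values read out of the callers:

#include <stdint.h>

// Hedged per-pixel model of the do_average branch: blend the stored first
// prediction (ref) with the new one (cur), either distance-weighted or as a
// plain average, then remove the compound offset and round back to 8 bits.
static uint8_t sketch_compound_pixel(int32_t ref, int32_t cur, int wt0, int wt1,
                                     int use_dist_wtd_comp_avg, int offset,
                                     int rounding_shift) {
  int32_t avg = use_dist_wtd_comp_avg
                    ? (ref * wt0 + cur * wt1) >> 4 /* DIST_PRECISION_BITS, assumed */
                    : (ref + cur) >> 1;
  int32_t out = (avg - offset + (1 << (rounding_shift - 1))) >> rounding_shift;
  if (out < 0) out = 0;
  if (out > 255) out = 255;  // packus saturation
  return (uint8_t)out;
}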
static INLINE void prepare_coeffs_lowbd(
const InterpFilterParams *const filter_params, const int subpel_q4,
__m256i *const coeffs /* [4] */) {
@@ -48,7 +281,7 @@ static INLINE void prepare_coeffs_lowbd(
// Since all filter co-efficients are even, this change will not affect the
// end result
assert(_mm_test_all_zeros(_mm_and_si128(coeffs_8, _mm_set1_epi16(1)),
- _mm_set1_epi16(0xffff)));
+ _mm_set1_epi16((short)0xffff)));
const __m256i coeffs_1 = _mm256_srai_epi16(filter_coeffs, 1);
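The cast added in this hunk only silences a narrowing diagnostic: _mm_set1_epi16 takes a short, and the int constant 0xffff (65535) is out of range for a signed 16-bit value, so some compilers warn on the implicit conversion. The bit pattern is unchanged, as the hedged snippet below illustrates:

#include <immintrin.h>

// Both spellings produce an all-ones 16-bit pattern in every lane; the cast
// just makes the narrowing explicit so constant-conversion warnings stay quiet.
static __m128i all_ones_epi16(void) {
  return _mm_set1_epi16((short)0xffff);  // equivalent to _mm_set1_epi16(-1)
}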
@@ -95,6 +328,17 @@ static INLINE __m256i convolve_lowbd(const __m256i *const s,
return res;
}
+static INLINE __m256i convolve_lowbd_4tap(const __m256i *const s,
+ const __m256i *const coeffs) {
+ const __m256i res_23 = _mm256_maddubs_epi16(s[0], coeffs[0]);
+ const __m256i res_45 = _mm256_maddubs_epi16(s[1], coeffs[1]);
+
+ // order: 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
+ const __m256i res = _mm256_add_epi16(res_45, res_23);
+
+ return res;
+}
+
static INLINE __m256i convolve(const __m256i *const s,
const __m256i *const coeffs) {
const __m256i res_0 = _mm256_madd_epi16(s[0], coeffs[0]);
@@ -108,6 +352,15 @@ static INLINE __m256i convolve(const __m256i *const s,
return res;
}
+static INLINE __m256i convolve_4tap(const __m256i *const s,
+ const __m256i *const coeffs) {
+ const __m256i res_1 = _mm256_madd_epi16(s[0], coeffs[0]);
+ const __m256i res_2 = _mm256_madd_epi16(s[1], coeffs[1]);
+
+ const __m256i res = _mm256_add_epi32(res_1, res_2);
+ return res;
+}
+
static INLINE __m256i convolve_lowbd_x(const __m256i data,
const __m256i *const coeffs,
const __m256i *const filt) {
@@ -121,6 +374,17 @@ static INLINE __m256i convolve_lowbd_x(const __m256i data,
return convolve_lowbd(s, coeffs);
}
+static INLINE __m256i convolve_lowbd_x_4tap(const __m256i data,
+ const __m256i *const coeffs,
+ const __m256i *const filt) {
+ __m256i s[2];
+
+ s[0] = _mm256_shuffle_epi8(data, filt[0]);
+ s[1] = _mm256_shuffle_epi8(data, filt[1]);
+
+ return convolve_lowbd_4tap(s, coeffs);
+}
+
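The *_4tap variants mirror convolve_lowbd, convolve and convolve_lowbd_x but keep only two shuffle-and-multiply pairs, which is enough when the selected interpolation filter has at most four non-zero taps and saves two madd/maddubs (and two shuffles) per output. A hedged scalar equivalent of the 4-tap arithmetic:

#include <stdint.h>

// Hedged scalar equivalent of convolve_4tap / convolve_lowbd_4tap: four
// products grouped into two pairs instead of the eight products in four
// pairs used by the full 8-tap helpers.
static int32_t sketch_4tap(const int16_t s[4], const int16_t taps[4]) {
  return (int32_t)s[0] * taps[0] + (int32_t)s[1] * taps[1] +
         (int32_t)s[2] * taps[2] + (int32_t)s[3] * taps[3];
}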
static INLINE void add_store_aligned_256(CONV_BUF_TYPE *const dst,
const __m256i *const res,
const int do_average) {
@@ -138,9 +402,9 @@ static INLINE void add_store_aligned_256(CONV_BUF_TYPE *const dst,
static INLINE __m256i comp_avg(const __m256i *const data_ref_0,
const __m256i *const res_unsigned,
const __m256i *const wt,
- const int use_jnt_comp_avg) {
+ const int use_dist_wtd_comp_avg) {
__m256i res;
- if (use_jnt_comp_avg) {
+ if (use_dist_wtd_comp_avg) {
const __m256i data_lo = _mm256_unpacklo_epi16(*data_ref_0, *res_unsigned);
const __m256i data_hi = _mm256_unpackhi_epi16(*data_ref_0, *res_unsigned);
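The visible change in comp_avg (and in highbd_comp_avg below) is the rename of use_jnt_comp_avg to use_dist_wtd_comp_avg, tracking libaom's switch from "jnt" (joint) to "dist_wtd" (distance-weighted) compound naming; the arithmetic is untouched. For reference, the hedged snippet below shows the interleave-plus-madd trick the low-bitdepth path relies on; the weight pair is an illustrative assumption:

#include <immintrin.h>

// Hedged illustration of the interleave + madd trick inside comp_avg: *wt
// holds the 16-bit pair (wt0, wt1) replicated across the register, so a
// single _mm256_madd_epi16 on unpacked (ref, cur) pairs yields
// ref*wt0 + cur*wt1 as 32-bit sums.
static __m256i sketch_dist_wtd_lo(__m256i ref, __m256i cur) {
  const int wt0 = 9, wt1 = 7;  // illustrative distance-weight pair
  const __m256i wt = _mm256_set1_epi32(wt0 | (wt1 << 16));
  const __m256i pairs = _mm256_unpacklo_epi16(ref, cur);  // ref0,cur0,ref1,cur1,...
  return _mm256_madd_epi16(pairs, wt);                    // ref*wt0 + cur*wt1
}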
@@ -172,9 +436,9 @@ static INLINE __m256i highbd_comp_avg(const __m256i *const data_ref_0,
const __m256i *const res_unsigned,
const __m256i *const wt0,
const __m256i *const wt1,
- const int use_jnt_comp_avg) {
+ const int use_dist_wtd_comp_avg) {
__m256i res;
- if (use_jnt_comp_avg) {
+ if (use_dist_wtd_comp_avg) {
const __m256i wt0_res = _mm256_mullo_epi32(*data_ref_0, *wt0);
const __m256i wt1_res = _mm256_mullo_epi32(*res_unsigned, *wt1);
const __m256i wt_res = _mm256_add_epi32(wt0_res, wt1_res);