author     trav90 <travawine@palemoon.org>  2018-10-17 05:59:08 -0500
committer  trav90 <travawine@palemoon.org>  2018-10-17 05:59:08 -0500
commit     9fa6624569db3980469289b124d12ecba8b4fbee
tree       c4fdd5d1b09d08c0514f208246260fc87372cb56  /third_party/aom/aom_dsp
parent     9f182a2351dc7f12ad7f43979665f7dc1ebd7ddb
download   uxp-9fa6624569db3980469289b124d12ecba8b4fbee.tar.gz
Update aom to slightly newer commit ID
Diffstat (limited to 'third_party/aom/aom_dsp')
-rw-r--r--  third_party/aom/aom_dsp/aom_convolve.c  267
-rw-r--r--  third_party/aom/aom_dsp/aom_convolve.h  5
-rw-r--r--  third_party/aom/aom_dsp/aom_dsp.cmake  84
-rw-r--r--  third_party/aom/aom_dsp/aom_dsp.mk  42
-rw-r--r--  third_party/aom/aom_dsp/aom_dsp_common.h  2
-rwxr-xr-x  third_party/aom/aom_dsp/aom_dsp_rtcd_defs.pl  177
-rw-r--r--  third_party/aom/aom_dsp/arm/avg_neon.c  38
-rw-r--r--  third_party/aom/aom_dsp/arm/bilinear_filter_media.asm  240
-rw-r--r--  third_party/aom/aom_dsp/arm/sad_media.asm  98
-rw-r--r--  third_party/aom/aom_dsp/arm/subpel_variance_media.c  81
-rw-r--r--  third_party/aom/aom_dsp/arm/variance_halfpixvar16x16_h_media.asm  185
-rw-r--r--  third_party/aom/aom_dsp/arm/variance_halfpixvar16x16_hv_media.asm  225
-rw-r--r--  third_party/aom/aom_dsp/arm/variance_halfpixvar16x16_v_media.asm  187
-rw-r--r--  third_party/aom/aom_dsp/arm/variance_media.asm  361
-rw-r--r--  third_party/aom/aom_dsp/avg.c  42
-rw-r--r--  third_party/aom/aom_dsp/binary_codes_reader.c  53
-rw-r--r--  third_party/aom/aom_dsp/binary_codes_reader.h  34
-rw-r--r--  third_party/aom/aom_dsp/bitreader.h  62
-rw-r--r--  third_party/aom/aom_dsp/bitreader_buffer.c  2
-rw-r--r--  third_party/aom/aom_dsp/bitwriter.h  63
-rw-r--r--  third_party/aom/aom_dsp/dkboolreader.c  110
-rw-r--r--  third_party/aom/aom_dsp/dkboolreader.h  181
-rw-r--r--  third_party/aom/aom_dsp/dkboolwriter.c  44
-rw-r--r--  third_party/aom/aom_dsp/dkboolwriter.h  104
-rw-r--r--  third_party/aom/aom_dsp/intrapred.c  162
-rw-r--r--  third_party/aom/aom_dsp/inv_txfm.c  864
-rw-r--r--  third_party/aom/aom_dsp/loopfilter.c  110
-rw-r--r--  third_party/aom/aom_dsp/mips/avg_msa.c  57
-rw-r--r--  third_party/aom/aom_dsp/prob.c  17
-rw-r--r--  third_party/aom/aom_dsp/prob.h  12
-rw-r--r--  third_party/aom/aom_dsp/sad.c  54
-rw-r--r--  third_party/aom/aom_dsp/simd/v64_intrinsics.h  4
-rw-r--r--  third_party/aom/aom_dsp/variance.c  365
-rw-r--r--  third_party/aom/aom_dsp/variance.h  13
-rw-r--r--  third_party/aom/aom_dsp/x86/aom_convolve_hip_sse2.c  195
-rw-r--r--  third_party/aom/aom_dsp/x86/aom_highbd_convolve_hip_ssse3.c  203
-rw-r--r--  third_party/aom/aom_dsp/x86/avg_intrin_sse2.c  46
-rw-r--r--  third_party/aom/aom_dsp/x86/highbd_convolve_avx2.c  656
-rw-r--r--  third_party/aom/aom_dsp/x86/inv_txfm_avx2.c  1238
-rw-r--r--  third_party/aom/aom_dsp/x86/inv_txfm_common_avx2.h  80
-rw-r--r--  third_party/aom/aom_dsp/x86/inv_txfm_sse2.c  103
-rw-r--r--  third_party/aom/aom_dsp/x86/masked_sad_intrin_ssse3.c  498
-rw-r--r--  third_party/aom/aom_dsp/x86/masked_variance_intrin_ssse3.c  2799
-rw-r--r--  third_party/aom/aom_dsp/x86/obmc_intrinsic_ssse3.h  45
-rw-r--r--  third_party/aom/aom_dsp/x86/obmc_sad_sse4.c  1
-rw-r--r--  third_party/aom/aom_dsp/x86/obmc_variance_sse4.c  3
-rw-r--r--  third_party/aom/aom_dsp/x86/synonyms.h  28
-rw-r--r--  third_party/aom/aom_dsp/x86/txfm_common_avx2.h  44
48 files changed, 5193 insertions, 5091 deletions
diff --git a/third_party/aom/aom_dsp/aom_convolve.c b/third_party/aom/aom_dsp/aom_convolve.c
index 74f4c00fbb..4dac6aacc8 100644
--- a/third_party/aom/aom_dsp/aom_convolve.c
+++ b/third_party/aom/aom_dsp/aom_convolve.c
@@ -337,14 +337,14 @@ static void convolve_add_src_horiz(const uint8_t *src, ptrdiff_t src_stride,
uint8_t *dst, ptrdiff_t dst_stride,
const InterpKernel *x_filters, int x0_q4,
int x_step_q4, int w, int h) {
- int x, y;
+ int x, y, k;
src -= SUBPEL_TAPS / 2 - 1;
for (y = 0; y < h; ++y) {
int x_q4 = x0_q4;
for (x = 0; x < w; ++x) {
const uint8_t *const src_x = &src[x_q4 >> SUBPEL_BITS];
const int16_t *const x_filter = x_filters[x_q4 & SUBPEL_MASK];
- int k, sum = 0;
+ int sum = 0;
for (k = 0; k < SUBPEL_TAPS; ++k) sum += src_x[k] * x_filter[k];
dst[x] = clip_pixel(ROUND_POWER_OF_TWO(sum, FILTER_BITS) +
src_x[SUBPEL_TAPS / 2 - 1]);
@@ -359,7 +359,7 @@ static void convolve_add_src_vert(const uint8_t *src, ptrdiff_t src_stride,
uint8_t *dst, ptrdiff_t dst_stride,
const InterpKernel *y_filters, int y0_q4,
int y_step_q4, int w, int h) {
- int x, y;
+ int x, y, k;
src -= src_stride * (SUBPEL_TAPS / 2 - 1);
for (x = 0; x < w; ++x) {
@@ -367,7 +367,7 @@ static void convolve_add_src_vert(const uint8_t *src, ptrdiff_t src_stride,
for (y = 0; y < h; ++y) {
const unsigned char *src_y = &src[(y_q4 >> SUBPEL_BITS) * src_stride];
const int16_t *const y_filter = y_filters[y_q4 & SUBPEL_MASK];
- int k, sum = 0;
+ int sum = 0;
for (k = 0; k < SUBPEL_TAPS; ++k)
sum += src_y[k * src_stride] * y_filter[k];
dst[y * dst_stride] =
@@ -446,6 +446,127 @@ void aom_convolve8_add_src_c(const uint8_t *src, ptrdiff_t src_stride,
convolve_add_src(src, src_stride, dst, dst_stride, filters_x, x0_q4,
x_step_q4, filters_y, y0_q4, y_step_q4, w, h);
}
+
+static void convolve_add_src_horiz_hip(const uint8_t *src, ptrdiff_t src_stride,
+ uint16_t *dst, ptrdiff_t dst_stride,
+ const InterpKernel *x_filters, int x0_q4,
+ int x_step_q4, int w, int h) {
+ const int bd = 8;
+ int x, y, k;
+ src -= SUBPEL_TAPS / 2 - 1;
+ for (y = 0; y < h; ++y) {
+ int x_q4 = x0_q4;
+ for (x = 0; x < w; ++x) {
+ const uint8_t *const src_x = &src[x_q4 >> SUBPEL_BITS];
+ const int16_t *const x_filter = x_filters[x_q4 & SUBPEL_MASK];
+ int sum = ((int)src_x[SUBPEL_TAPS / 2 - 1] << FILTER_BITS) +
+ (1 << (bd + FILTER_BITS - 1));
+ for (k = 0; k < SUBPEL_TAPS; ++k) sum += src_x[k] * x_filter[k];
+ dst[x] =
+ (uint16_t)clamp(ROUND_POWER_OF_TWO(sum, FILTER_BITS - EXTRAPREC_BITS),
+ 0, EXTRAPREC_CLAMP_LIMIT(bd) - 1);
+ x_q4 += x_step_q4;
+ }
+ src += src_stride;
+ dst += dst_stride;
+ }
+}
+
+static void convolve_add_src_vert_hip(const uint16_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const InterpKernel *y_filters, int y0_q4,
+ int y_step_q4, int w, int h) {
+ const int bd = 8;
+ int x, y, k;
+ src -= src_stride * (SUBPEL_TAPS / 2 - 1);
+
+ for (x = 0; x < w; ++x) {
+ int y_q4 = y0_q4;
+ for (y = 0; y < h; ++y) {
+ const uint16_t *src_y = &src[(y_q4 >> SUBPEL_BITS) * src_stride];
+ const int16_t *const y_filter = y_filters[y_q4 & SUBPEL_MASK];
+ int sum =
+ ((int)src_y[(SUBPEL_TAPS / 2 - 1) * src_stride] << FILTER_BITS) -
+ (1 << (bd + FILTER_BITS + EXTRAPREC_BITS - 1));
+ for (k = 0; k < SUBPEL_TAPS; ++k)
+ sum += src_y[k * src_stride] * y_filter[k];
+ dst[y * dst_stride] =
+ clip_pixel(ROUND_POWER_OF_TWO(sum, FILTER_BITS + EXTRAPREC_BITS));
+ y_q4 += y_step_q4;
+ }
+ ++src;
+ ++dst;
+ }
+}
+
+static void convolve_add_src_hip(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const InterpKernel *const x_filters, int x0_q4,
+ int x_step_q4,
+ const InterpKernel *const y_filters, int y0_q4,
+ int y_step_q4, int w, int h) {
+ uint16_t temp[MAX_EXT_SIZE * MAX_SB_SIZE];
+ int intermediate_height =
+ (((h - 1) * y_step_q4 + y0_q4) >> SUBPEL_BITS) + SUBPEL_TAPS;
+
+ assert(w <= MAX_SB_SIZE);
+ assert(h <= MAX_SB_SIZE);
+
+ assert(y_step_q4 <= 32);
+ assert(x_step_q4 <= 32);
+
+ convolve_add_src_horiz_hip(src - src_stride * (SUBPEL_TAPS / 2 - 1),
+ src_stride, temp, MAX_SB_SIZE, x_filters, x0_q4,
+ x_step_q4, w, intermediate_height);
+ convolve_add_src_vert_hip(temp + MAX_SB_SIZE * (SUBPEL_TAPS / 2 - 1),
+ MAX_SB_SIZE, dst, dst_stride, y_filters, y0_q4,
+ y_step_q4, w, h);
+}
+
+void aom_convolve8_add_src_horiz_hip_c(const uint8_t *src, ptrdiff_t src_stride,
+ uint16_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x, int x_step_q4,
+ const int16_t *filter_y, int y_step_q4,
+ int w, int h) {
+ const InterpKernel *const filters_x = get_filter_base(filter_x);
+ const int x0_q4 = get_filter_offset(filter_x, filters_x);
+
+ (void)filter_y;
+ (void)y_step_q4;
+
+ convolve_add_src_horiz_hip(src, src_stride, dst, dst_stride, filters_x, x0_q4,
+ x_step_q4, w, h);
+}
+
+void aom_convolve8_add_src_vert_hip_c(const uint16_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x, int x_step_q4,
+ const int16_t *filter_y, int y_step_q4,
+ int w, int h) {
+ const InterpKernel *const filters_y = get_filter_base(filter_y);
+ const int y0_q4 = get_filter_offset(filter_y, filters_y);
+
+ (void)filter_x;
+ (void)x_step_q4;
+
+ convolve_add_src_vert_hip(src, src_stride, dst, dst_stride, filters_y, y0_q4,
+ y_step_q4, w, h);
+}
+
+void aom_convolve8_add_src_hip_c(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x, int x_step_q4,
+ const int16_t *filter_y, int y_step_q4, int w,
+ int h) {
+ const InterpKernel *const filters_x = get_filter_base(filter_x);
+ const int x0_q4 = get_filter_offset(filter_x, filters_x);
+
+ const InterpKernel *const filters_y = get_filter_base(filter_y);
+ const int y0_q4 = get_filter_offset(filter_y, filters_y);
+
+ convolve_add_src_hip(src, src_stride, dst, dst_stride, filters_x, x0_q4,
+ x_step_q4, filters_y, y0_q4, y_step_q4, w, h);
+}
#endif // CONFIG_LOOP_RESTORATION
#if CONFIG_HIGHBITDEPTH
@@ -721,7 +842,7 @@ static void highbd_convolve_add_src_horiz(const uint8_t *src8,
const InterpKernel *x_filters,
int x0_q4, int x_step_q4, int w,
int h, int bd) {
- int x, y;
+ int x, y, k;
uint16_t *src = CONVERT_TO_SHORTPTR(src8);
uint16_t *dst = CONVERT_TO_SHORTPTR(dst8);
src -= SUBPEL_TAPS / 2 - 1;
@@ -730,7 +851,7 @@ static void highbd_convolve_add_src_horiz(const uint8_t *src8,
for (x = 0; x < w; ++x) {
const uint16_t *const src_x = &src[x_q4 >> SUBPEL_BITS];
const int16_t *const x_filter = x_filters[x_q4 & SUBPEL_MASK];
- int k, sum = 0;
+ int sum = 0;
for (k = 0; k < SUBPEL_TAPS; ++k) sum += src_x[k] * x_filter[k];
dst[x] = clip_pixel_highbd(
ROUND_POWER_OF_TWO(sum, FILTER_BITS) + src_x[SUBPEL_TAPS / 2 - 1],
@@ -748,7 +869,7 @@ static void highbd_convolve_add_src_vert(const uint8_t *src8,
const InterpKernel *y_filters,
int y0_q4, int y_step_q4, int w, int h,
int bd) {
- int x, y;
+ int x, y, k;
uint16_t *src = CONVERT_TO_SHORTPTR(src8);
uint16_t *dst = CONVERT_TO_SHORTPTR(dst8);
src -= src_stride * (SUBPEL_TAPS / 2 - 1);
@@ -757,7 +878,7 @@ static void highbd_convolve_add_src_vert(const uint8_t *src8,
for (y = 0; y < h; ++y) {
const uint16_t *src_y = &src[(y_q4 >> SUBPEL_BITS) * src_stride];
const int16_t *const y_filter = y_filters[y_q4 & SUBPEL_MASK];
- int k, sum = 0;
+ int sum = 0;
for (k = 0; k < SUBPEL_TAPS; ++k)
sum += src_y[k * src_stride] * y_filter[k];
dst[y * dst_stride] =
@@ -850,5 +971,135 @@ void aom_highbd_convolve8_add_src_c(const uint8_t *src, ptrdiff_t src_stride,
highbd_convolve_add_src(src, src_stride, dst, dst_stride, filters_x, x0_q4,
x_step_q4, filters_y, y0_q4, y_step_q4, w, h, bd);
}
+
+static void highbd_convolve_add_src_horiz_hip(
+ const uint8_t *src8, ptrdiff_t src_stride, uint16_t *dst,
+ ptrdiff_t dst_stride, const InterpKernel *x_filters, int x0_q4,
+ int x_step_q4, int w, int h, int bd) {
+ const int extraprec_clamp_limit = EXTRAPREC_CLAMP_LIMIT(bd);
+ int x, y, k;
+ uint16_t *src = CONVERT_TO_SHORTPTR(src8);
+ src -= SUBPEL_TAPS / 2 - 1;
+ for (y = 0; y < h; ++y) {
+ int x_q4 = x0_q4;
+ for (x = 0; x < w; ++x) {
+ const uint16_t *const src_x = &src[x_q4 >> SUBPEL_BITS];
+ const int16_t *const x_filter = x_filters[x_q4 & SUBPEL_MASK];
+ int sum = ((int)src_x[SUBPEL_TAPS / 2 - 1] << FILTER_BITS) +
+ (1 << (bd + FILTER_BITS - 1));
+ for (k = 0; k < SUBPEL_TAPS; ++k) sum += src_x[k] * x_filter[k];
+ dst[x] =
+ (uint16_t)clamp(ROUND_POWER_OF_TWO(sum, FILTER_BITS - EXTRAPREC_BITS),
+ 0, extraprec_clamp_limit - 1);
+ x_q4 += x_step_q4;
+ }
+ src += src_stride;
+ dst += dst_stride;
+ }
+}
+
+static void highbd_convolve_add_src_vert_hip(
+ const uint16_t *src, ptrdiff_t src_stride, uint8_t *dst8,
+ ptrdiff_t dst_stride, const InterpKernel *y_filters, int y0_q4,
+ int y_step_q4, int w, int h, int bd) {
+ int x, y, k;
+ uint16_t *dst = CONVERT_TO_SHORTPTR(dst8);
+ src -= src_stride * (SUBPEL_TAPS / 2 - 1);
+ for (x = 0; x < w; ++x) {
+ int y_q4 = y0_q4;
+ for (y = 0; y < h; ++y) {
+ const uint16_t *src_y = &src[(y_q4 >> SUBPEL_BITS) * src_stride];
+ const int16_t *const y_filter = y_filters[y_q4 & SUBPEL_MASK];
+ int sum =
+ ((int)src_y[(SUBPEL_TAPS / 2 - 1) * src_stride] << FILTER_BITS) -
+ (1 << (bd + FILTER_BITS + EXTRAPREC_BITS - 1));
+ for (k = 0; k < SUBPEL_TAPS; ++k)
+ sum += src_y[k * src_stride] * y_filter[k];
+ dst[y * dst_stride] = clip_pixel_highbd(
+ ROUND_POWER_OF_TWO(sum, FILTER_BITS + EXTRAPREC_BITS), bd);
+ y_q4 += y_step_q4;
+ }
+ ++src;
+ ++dst;
+ }
+}
+
+static void highbd_convolve_add_src_hip(
+ const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,
+ ptrdiff_t dst_stride, const InterpKernel *const x_filters, int x0_q4,
+ int x_step_q4, const InterpKernel *const y_filters, int y0_q4,
+ int y_step_q4, int w, int h, int bd) {
+ // Note: Fixed size intermediate buffer, temp, places limits on parameters.
+ // 2d filtering proceeds in 2 steps:
+ // (1) Interpolate horizontally into an intermediate buffer, temp.
+ // (2) Interpolate temp vertically to derive the sub-pixel result.
+ // Deriving the maximum number of rows in the temp buffer (135):
+ // --Smallest scaling factor is x1/2 ==> y_step_q4 = 32 (Normative).
+ // --Largest block size is 64x64 pixels.
+ // --64 rows in the downscaled frame span a distance of (64 - 1) * 32 in the
+ // original frame (in 1/16th pixel units).
+ // --Must round-up because block may be located at sub-pixel position.
+ // --Require an additional SUBPEL_TAPS rows for the 8-tap filter tails.
+ // --((64 - 1) * 32 + 15) >> 4 + 8 = 135.
+ uint16_t temp[MAX_EXT_SIZE * MAX_SB_SIZE];
+ int intermediate_height =
+ (((h - 1) * y_step_q4 + y0_q4) >> SUBPEL_BITS) + SUBPEL_TAPS;
+
+ assert(w <= MAX_SB_SIZE);
+ assert(h <= MAX_SB_SIZE);
+ assert(y_step_q4 <= 32);
+ assert(x_step_q4 <= 32);
+
+ highbd_convolve_add_src_horiz_hip(
+ src - src_stride * (SUBPEL_TAPS / 2 - 1), src_stride, temp, MAX_SB_SIZE,
+ x_filters, x0_q4, x_step_q4, w, intermediate_height, bd);
+ highbd_convolve_add_src_vert_hip(temp + MAX_SB_SIZE * (SUBPEL_TAPS / 2 - 1),
+ MAX_SB_SIZE, dst, dst_stride, y_filters,
+ y0_q4, y_step_q4, w, h, bd);
+}
+
+void aom_highbd_convolve8_add_src_horiz_hip_c(
+ const uint8_t *src, ptrdiff_t src_stride, uint16_t *dst,
+ ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4,
+ const int16_t *filter_y, int y_step_q4, int w, int h, int bd) {
+ const InterpKernel *const filters_x = get_filter_base(filter_x);
+ const int x0_q4 = get_filter_offset(filter_x, filters_x);
+ (void)filter_y;
+ (void)y_step_q4;
+
+ highbd_convolve_add_src_horiz_hip(src, src_stride, dst, dst_stride, filters_x,
+ x0_q4, x_step_q4, w, h, bd);
+}
+
+void aom_highbd_convolve8_add_src_vert_hip_c(
+ const uint16_t *src, ptrdiff_t src_stride, uint8_t *dst,
+ ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4,
+ const int16_t *filter_y, int y_step_q4, int w, int h, int bd) {
+ const InterpKernel *const filters_y = get_filter_base(filter_y);
+ const int y0_q4 = get_filter_offset(filter_y, filters_y);
+ (void)filter_x;
+ (void)x_step_q4;
+
+ highbd_convolve_add_src_vert_hip(src, src_stride, dst, dst_stride, filters_y,
+ y0_q4, y_step_q4, w, h, bd);
+}
+
+void aom_highbd_convolve8_add_src_hip_c(const uint8_t *src,
+ ptrdiff_t src_stride, uint8_t *dst,
+ ptrdiff_t dst_stride,
+ const int16_t *filter_x, int x_step_q4,
+ const int16_t *filter_y, int y_step_q4,
+ int w, int h, int bd) {
+ const InterpKernel *const filters_x = get_filter_base(filter_x);
+ const int x0_q4 = get_filter_offset(filter_x, filters_x);
+
+ const InterpKernel *const filters_y = get_filter_base(filter_y);
+ const int y0_q4 = get_filter_offset(filter_y, filters_y);
+
+ highbd_convolve_add_src_hip(src, src_stride, dst, dst_stride, filters_x,
+ x0_q4, x_step_q4, filters_y, y0_q4, y_step_q4, w,
+ h, bd);
+}
+
#endif // CONFIG_LOOP_RESTORATION
#endif // CONFIG_HIGHBITDEPTH
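The hip ("high intermediate precision") paths added above keep EXTRAPREC_BITS = 2 extra fractional bits between the two filter passes: the horizontal pass folds the centre pixel in at FILTER_BITS scale, adds a 1 << (bd + FILTER_BITS - 1) bias so the intermediate stays unsigned, and shifts right by only FILTER_BITS - EXTRAPREC_BITS; the vertical pass subtracts that bias (now scaled up by 2^EXTRAPREC_BITS) and shifts by FILTER_BITS + EXTRAPREC_BITS. A minimal standalone sketch, assuming FILTER_BITS = 7 as in aom_filter.h and an all-zero residual kernel (the add_src variants add the centre pixel separately, as the non-hip code above shows), checks that the round trip reproduces the source exactly:

#include <assert.h>
#include <stdio.h>

/* Assumed constants matching the aom_dsp headers. */
#define FILTER_BITS 7
#define EXTRAPREC_BITS 2
#define ROUND_POWER_OF_TWO(value, n) (((value) + (1 << ((n)-1))) >> (n))

int main(void) {
  const int bd = 8;
  for (int s = 0; s < 256; ++s) {
    /* Horizontal pass with a zero residual kernel: only the folded-in
       centre pixel and the unsigned-range bias contribute. */
    int sum_h = (s << FILTER_BITS) + (1 << (bd + FILTER_BITS - 1));
    int t = ROUND_POWER_OF_TWO(sum_h, FILTER_BITS - EXTRAPREC_BITS);
    assert(t < (1 << (bd + 1 + EXTRAPREC_BITS))); /* EXTRAPREC_CLAMP_LIMIT */
    /* Vertical pass removes the bias, now scaled by 2^EXTRAPREC_BITS. */
    int sum_v =
        (t << FILTER_BITS) - (1 << (bd + FILTER_BITS + EXTRAPREC_BITS - 1));
    int out = ROUND_POWER_OF_TWO(sum_v, FILTER_BITS + EXTRAPREC_BITS);
    assert(out == s); /* identity round trip */
  }
  printf("hip identity round-trip OK\n");
  return 0;
}

With a real Wiener kernel the zero taps are replaced by the residual filter; the two extra bits then simply reduce the rounding error carried from the first pass into the second.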
diff --git a/third_party/aom/aom_dsp/aom_convolve.h b/third_party/aom/aom_dsp/aom_convolve.h
index d0de6c5d20..c7943dced9 100644
--- a/third_party/aom/aom_dsp/aom_convolve.h
+++ b/third_party/aom/aom_dsp/aom_convolve.h
@@ -36,6 +36,11 @@ extern "C" {
#define MAX_EXT_SIZE 135
#endif // CONFIG_AV1 && CONFIG_EXT_PARTITION
+#if CONFIG_AV1 && CONFIG_LOOP_RESTORATION
+#define EXTRAPREC_BITS 2
+#define EXTRAPREC_CLAMP_LIMIT(bd) (1 << ((bd) + 1 + EXTRAPREC_BITS))
+#endif
+
typedef void (*convolve_fn_t)(const uint8_t *src, ptrdiff_t src_stride,
uint8_t *dst, ptrdiff_t dst_stride,
const int16_t *filter_x, int x_step_q4,
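As a quick numeric check of the fixed intermediate buffer bound above (a sketch assuming SUBPEL_BITS = 4 and MAX_SB_SIZE = 64, matching the MAX_EXT_SIZE = 135 branch shown, with the normative y_step_q4 <= 32 asserted in aom_convolve.c), the worst case lands just inside the buffer:

#include <assert.h>

#define SUBPEL_BITS 4
#define SUBPEL_TAPS 8
#define MAX_EXT_SIZE 135

int main(void) {
  /* Worst case from the derivation comment in highbd_convolve_add_src_hip:
     64-row block, x1/2 scaling (y_step_q4 = 32), maximal sub-pixel start
     offset (y0_q4 = 15). */
  const int h = 64, y_step_q4 = 32, y0_q4 = 15;
  const int intermediate_height =
      (((h - 1) * y_step_q4 + y0_q4) >> SUBPEL_BITS) + SUBPEL_TAPS;
  assert(intermediate_height <= MAX_EXT_SIZE); /* 134 <= 135 */
  return 0;
}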
diff --git a/third_party/aom/aom_dsp/aom_dsp.cmake b/third_party/aom/aom_dsp/aom_dsp.cmake
index f00348cbcf..5a49ae8178 100644
--- a/third_party/aom/aom_dsp/aom_dsp.cmake
+++ b/third_party/aom/aom_dsp/aom_dsp.cmake
@@ -8,6 +8,9 @@
## Media Patent License 1.0 was not distributed with this source code in the
## PATENTS file, you can obtain it at www.aomedia.org/license/patent.
##
+if (NOT AOM_AOM_DSP_AOM_DSP_CMAKE_)
+set(AOM_AOM_DSP_AOM_DSP_CMAKE_ 1)
+
set(AOM_DSP_COMMON_SOURCES
"${AOM_ROOT}/aom_dsp/aom_convolve.c"
"${AOM_ROOT}/aom_dsp/aom_convolve.h"
@@ -23,7 +26,6 @@ set(AOM_DSP_COMMON_SOURCES
"${AOM_ROOT}/aom_dsp/loopfilter.c"
"${AOM_ROOT}/aom_dsp/prob.c"
"${AOM_ROOT}/aom_dsp/prob.h"
- "${AOM_ROOT}/aom_dsp/sad.c"
"${AOM_ROOT}/aom_dsp/simd/v128_intrinsics.h"
"${AOM_ROOT}/aom_dsp/simd/v128_intrinsics_c.h"
"${AOM_ROOT}/aom_dsp/simd/v256_intrinsics.h"
@@ -62,8 +64,10 @@ set(AOM_DSP_COMMON_INTRIN_SSE4_1
set(AOM_DSP_COMMON_INTRIN_AVX2
"${AOM_ROOT}/aom_dsp/x86/aom_subpixel_8t_intrin_avx2.c"
- "${AOM_ROOT}/aom_dsp/x86/fwd_txfm_avx2.c"
- "${AOM_ROOT}/aom_dsp/x86/loopfilter_avx2.c")
+ "${AOM_ROOT}/aom_dsp/x86/loopfilter_avx2.c"
+ "${AOM_ROOT}/aom_dsp/x86/inv_txfm_avx2.c"
+ "${AOM_ROOT}/aom_dsp/x86/inv_txfm_common_avx2.h"
+ "${AOM_ROOT}/aom_dsp/x86/txfm_common_avx2.h")
set(AOM_DSP_COMMON_ASM_NEON
"${AOM_ROOT}/aom_dsp/arm/aom_convolve8_avg_neon_asm.asm"
@@ -175,6 +179,8 @@ set(AOM_DSP_COMMON_INTRIN_MSA
if (CONFIG_HIGHBITDEPTH)
set(AOM_DSP_COMMON_ASM_SSE2
${AOM_DSP_COMMON_ASM_SSE2}
+ "${AOM_ROOT}/aom_dsp/x86/aom_high_subpixel_8t_sse2.asm"
+ "${AOM_ROOT}/aom_dsp/x86/aom_high_subpixel_bilinear_sse2.asm"
"${AOM_ROOT}/aom_dsp/x86/highbd_intrapred_sse2.asm")
set(AOM_DSP_COMMON_INTRIN_SSE2
@@ -198,7 +204,7 @@ if (CONFIG_ANS)
set(AOM_DSP_COMMON_SOURCES
${AOM_DSP_COMMON_SOURCES}
"${AOM_ROOT}/aom_dsp/ans.h")
-elseif (CONFIG_DAALA_EC)
+else ()
set(AOM_DSP_COMMON_SOURCES
${AOM_DSP_COMMON_SOURCES}
"${AOM_ROOT}/aom_dsp/entcode.c"
@@ -221,7 +227,7 @@ if (CONFIG_AV1)
"${AOM_ROOT}/aom_dsp/x86/inv_txfm_sse2.h")
endif ()
-if (CONFIG_DECODERS)
+if (CONFIG_AV1_DECODER)
set(AOM_DSP_DECODER_SOURCES
"${AOM_ROOT}/aom_dsp/binary_codes_reader.c"
"${AOM_ROOT}/aom_dsp/binary_codes_reader.h"
@@ -233,22 +239,17 @@ if (CONFIG_DECODERS)
set(AOM_DSP_DECODER_SOURCES
${AOM_DSP_DECODER_SOURCES}
"${AOM_ROOT}/aom_dsp/ansreader.h")
- elseif (CONFIG_DAALA_EC)
+ else ()
set(AOM_DSP_DECODER_SOURCES
${AOM_DSP_DECODER_SOURCES}
"${AOM_ROOT}/aom_dsp/daalaboolreader.c"
"${AOM_ROOT}/aom_dsp/daalaboolreader.h"
"${AOM_ROOT}/aom_dsp/entdec.c"
"${AOM_ROOT}/aom_dsp/entdec.h")
- else ()
- set(AOM_DSP_DECODER_SOURCES
- ${AOM_DSP_DECODER_SOURCES}
- "${AOM_ROOT}/aom_dsp/dkboolreader.c"
- "${AOM_ROOT}/aom_dsp/dkboolreader.h")
endif ()
endif ()
-if (CONFIG_ENCODERS)
+if (CONFIG_AV1_ENCODER)
set(AOM_DSP_ENCODER_SOURCES
"${AOM_ROOT}/aom_dsp/binary_codes_writer.c"
"${AOM_ROOT}/aom_dsp/binary_codes_writer.h"
@@ -257,6 +258,7 @@ if (CONFIG_ENCODERS)
"${AOM_ROOT}/aom_dsp/bitwriter_buffer.h"
"${AOM_ROOT}/aom_dsp/psnr.c"
"${AOM_ROOT}/aom_dsp/psnr.h"
+ "${AOM_ROOT}/aom_dsp/sad.c"
"${AOM_ROOT}/aom_dsp/variance.c"
"${AOM_ROOT}/aom_dsp/variance.h")
@@ -282,6 +284,9 @@ if (CONFIG_ENCODERS)
set(AOM_DSP_ENCODER_ASM_SSE4_1 "${AOM_ROOT}/aom_dsp/x86/sad_sse4.asm")
set(AOM_DSP_ENCODER_INTRIN_AVX2
+ "${AOM_ROOT}/aom_dsp/x86/fwd_dct32x32_impl_avx2.h"
+ "${AOM_ROOT}/aom_dsp/x86/fwd_txfm_avx2.c"
+ "${AOM_ROOT}/aom_dsp/x86/fwd_txfm_avx2.h"
"${AOM_ROOT}/aom_dsp/x86/sad4d_avx2.c"
"${AOM_ROOT}/aom_dsp/x86/sad_avx2.c"
"${AOM_ROOT}/aom_dsp/x86/sad_impl_avx2.c"
@@ -310,11 +315,6 @@ if (CONFIG_ENCODERS)
"${AOM_ROOT}/aom_dsp/x86/variance_sse2.c"
"${AOM_ROOT}/aom_dsp/x86/sum_squares_sse2.c")
- set(AOM_DSP_ENCODER_INTRIN_SSSE3
- ${AOM_DSP_ENCODER_INTRIN_SSSE3}
- "${AOM_ROOT}/aom_dsp/x86/masked_sad_intrin_ssse3.c"
- "${AOM_ROOT}/aom_dsp/x86/masked_variance_intrin_ssse3.c")
-
set(AOM_DSP_ENCODER_ASM_SSSE3_X86_64
${AOM_DSP_ENCODER_ASM_SSSE3_X86_64}
"${AOM_ROOT}/aom_dsp/x86/avg_ssse3_x86_64.asm"
@@ -325,7 +325,6 @@ if (CONFIG_ENCODERS)
"${AOM_ROOT}/aom_dsp/x86/quantize_avx_x86_64.asm")
set(AOM_DSP_ENCODER_INTRIN_MSA
- "${AOM_ROOT}/aom_dsp/mips/avg_msa.c"
"${AOM_ROOT}/aom_dsp/mips/sad_msa.c"
"${AOM_ROOT}/aom_dsp/mips/subtract_msa.c"
"${AOM_ROOT}/aom_dsp/mips/variance_msa.c"
@@ -345,9 +344,7 @@ if (CONFIG_ENCODERS)
"${AOM_ROOT}/aom_dsp/x86/highbd_sad4d_sse2.asm"
"${AOM_ROOT}/aom_dsp/x86/highbd_sad_sse2.asm"
"${AOM_ROOT}/aom_dsp/x86/highbd_subpel_variance_impl_sse2.asm"
- "${AOM_ROOT}/aom_dsp/x86/highbd_variance_impl_sse2.asm"
- "${AOM_ROOT}/aom_dsp/x86/aom_high_subpixel_8t_sse2.asm"
- "${AOM_ROOT}/aom_dsp/x86/aom_high_subpixel_bilinear_sse2.asm")
+ "${AOM_ROOT}/aom_dsp/x86/highbd_variance_impl_sse2.asm")
set(AOM_DSP_ENCODER_INTRIN_SSE2
${AOM_DSP_ENCODER_INTRIN_SSE2}
@@ -368,18 +365,13 @@ if (CONFIG_ENCODERS)
"${AOM_ROOT}/aom_dsp/answriter.h"
"${AOM_ROOT}/aom_dsp/buf_ans.c"
"${AOM_ROOT}/aom_dsp/buf_ans.h")
- elseif (CONFIG_DAALA_EC)
+ else ()
set(AOM_DSP_ENCODER_SOURCES
${AOM_DSP_ENCODER_SOURCES}
"${AOM_ROOT}/aom_dsp/daalaboolwriter.c"
"${AOM_ROOT}/aom_dsp/daalaboolwriter.h"
"${AOM_ROOT}/aom_dsp/entenc.c"
"${AOM_ROOT}/aom_dsp/entenc.h")
- else ()
- set(AOM_DSP_ENCODER_SOURCES
- ${AOM_DSP_ENCODER_SOURCES}
- "${AOM_ROOT}/aom_dsp/dkboolwriter.c"
- "${AOM_ROOT}/aom_dsp/dkboolwriter.h")
endif ()
if (CONFIG_INTERNAL_STATS)
@@ -392,6 +384,18 @@ if (CONFIG_ENCODERS)
endif ()
endif ()
+if (CONFIG_LOOP_RESTORATION)
+ set(AOM_DSP_COMMON_INTRIN_SSE2
+ ${AOM_DSP_COMMON_INTRIN_SSE2}
+ "${AOM_ROOT}/aom_dsp/x86/aom_convolve_hip_sse2.c")
+
+ if (CONFIG_HIGHBITDEPTH)
+ set(AOM_DSP_COMMON_INTRIN_SSSE3
+ ${AOM_DSP_COMMON_INTRIN_SSSE3}
+ "${AOM_ROOT}/aom_dsp/x86/aom_highbd_convolve_hip_ssse3.c")
+ endif ()
+endif ()
+
if (CONFIG_MOTION_VAR)
set(AOM_DSP_ENCODER_INTRIN_SSE4_1
${AOM_DSP_ENCODER_INTRIN_SSE4_1}
@@ -406,13 +410,13 @@ function (setup_aom_dsp_targets)
set(AOM_LIB_TARGETS ${AOM_LIB_TARGETS} aom_dsp_common)
target_sources(aom PUBLIC $<TARGET_OBJECTS:aom_dsp_common>)
- if (CONFIG_DECODERS)
+ if (CONFIG_AV1_DECODER)
add_library(aom_dsp_decoder OBJECT ${AOM_DSP_DECODER_SOURCES})
set(AOM_LIB_TARGETS ${AOM_LIB_TARGETS} aom_dsp_decoder)
target_sources(aom PUBLIC $<TARGET_OBJECTS:aom_dsp_decoder>)
endif ()
- if (CONFIG_ENCODERS)
+ if (CONFIG_AV1_ENCODER)
add_library(aom_dsp_encoder OBJECT ${AOM_DSP_ENCODER_SOURCES})
set(AOM_LIB_TARGETS ${AOM_LIB_TARGETS} aom_dsp_encoder)
target_sources(aom PUBLIC $<TARGET_OBJECTS:aom_dsp_encoder>)
@@ -422,14 +426,14 @@ function (setup_aom_dsp_targets)
add_asm_library("aom_dsp_common_sse2" "AOM_DSP_COMMON_ASM_SSE2" "aom")
add_intrinsics_object_library("-msse2" "sse2" "aom_dsp_common"
"AOM_DSP_COMMON_INTRIN_SSE2")
- if (CONFIG_ENCODERS)
+ if (CONFIG_AV1_ENCODER)
add_asm_library("aom_dsp_encoder_sse2" "AOM_DSP_ENCODER_ASM_SSE2" "aom")
add_intrinsics_object_library("-msse2" "sse2" "aom_dsp_encoder"
"AOM_DSP_ENCODER_INTRIN_SSE2")
endif()
endif ()
- if (HAVE_SSE3 AND CONFIG_ENCODERS)
+ if (HAVE_SSE3 AND CONFIG_AV1_ENCODER)
add_asm_library("aom_dsp_encoder_sse3" "AOM_DSP_ENCODER_INTRIN_SSE3" "aom")
endif ()
@@ -438,21 +442,19 @@ function (setup_aom_dsp_targets)
add_intrinsics_object_library("-mssse3" "ssse3" "aom_dsp_common"
"AOM_DSP_COMMON_INTRIN_SSSE3")
- if (CONFIG_ENCODERS)
+ if (CONFIG_AV1_ENCODER)
if ("${AOM_TARGET_CPU}" STREQUAL "x86_64")
list(APPEND AOM_DSP_ENCODER_ASM_SSSE3
${AOM_DSP_ENCODER_ASM_SSSE3_X86_64})
endif ()
add_asm_library("aom_dsp_encoder_ssse3" "AOM_DSP_ENCODER_ASM_SSSE3" "aom")
- add_intrinsics_object_library("-mssse3" "ssse3" "aom_dsp_encoder"
- "AOM_DSP_ENCODER_INTRIN_SSSE3")
endif ()
endif ()
if (HAVE_SSE4_1)
add_intrinsics_object_library("-msse4.1" "sse4_1" "aom_dsp_common"
"AOM_DSP_COMMON_INTRIN_SSE4_1")
- if (CONFIG_ENCODERS)
+ if (CONFIG_AV1_ENCODER)
if (AOM_DSP_ENCODER_INTRIN_SSE4_1)
add_intrinsics_object_library("-msse4.1" "sse4_1" "aom_dsp_encoder"
"AOM_DSP_ENCODER_INTRIN_SSE4_1")
@@ -463,14 +465,16 @@ function (setup_aom_dsp_targets)
endif ()
if (HAVE_AVX AND "${AOM_TARGET_CPU}" STREQUAL "x86_64")
- add_asm_library("aom_dsp_encoder_avx" "AOM_DSP_ENCODER_AVX_ASM_X86_64"
- "aom")
+ if (CONFIG_AV1_ENCODER)
+ add_asm_library("aom_dsp_encoder_avx" "AOM_DSP_ENCODER_AVX_ASM_X86_64"
+ "aom")
+ endif ()
endif ()
if (HAVE_AVX2)
add_intrinsics_object_library("-mavx2" "avx2" "aom_dsp_common"
"AOM_DSP_COMMON_INTRIN_AVX2")
- if (CONFIG_ENCODERS)
+ if (CONFIG_AV1_ENCODER)
add_intrinsics_object_library("-mavx2" "avx2" "aom_dsp_encoder"
"AOM_DSP_ENCODER_INTRIN_AVX2")
endif ()
@@ -497,7 +501,7 @@ function (setup_aom_dsp_targets)
if (HAVE_MSA)
add_intrinsics_object_library("" "msa" "aom_dsp_common"
"AOM_DSP_COMMON_INTRIN_MSA")
- if (CONFIG_ENCODERS)
+ if (CONFIG_AV1_ENCODER)
add_intrinsics_object_library("" "msa" "aom_dsp_encoder"
"AOM_DSP_ENCODER_INTRIN_MSA")
endif ()
@@ -507,3 +511,5 @@ function (setup_aom_dsp_targets)
# $AOM_LIB_TARGETS.
set(AOM_LIB_TARGETS ${AOM_LIB_TARGETS} PARENT_SCOPE)
endfunction ()
+
+endif () # AOM_AOM_DSP_AOM_DSP_CMAKE_
diff --git a/third_party/aom/aom_dsp/aom_dsp.mk b/third_party/aom/aom_dsp/aom_dsp.mk
index 8c7241b831..6e2d5630ef 100644
--- a/third_party/aom/aom_dsp/aom_dsp.mk
+++ b/third_party/aom/aom_dsp/aom_dsp.mk
@@ -22,19 +22,16 @@ DSP_SRCS-yes += prob.h
DSP_SRCS-yes += prob.c
DSP_SRCS-$(CONFIG_ANS) += ans.h
-ifeq ($(CONFIG_ENCODERS),yes)
+ifeq ($(CONFIG_AV1_ENCODER),yes)
ifeq ($(CONFIG_ANS),yes)
DSP_SRCS-yes += answriter.h
DSP_SRCS-yes += buf_ans.h
DSP_SRCS-yes += buf_ans.c
-else ifeq ($(CONFIG_DAALA_EC),yes)
+else
DSP_SRCS-yes += entenc.c
DSP_SRCS-yes += entenc.h
DSP_SRCS-yes += daalaboolwriter.c
DSP_SRCS-yes += daalaboolwriter.h
-else
-DSP_SRCS-yes += dkboolwriter.h
-DSP_SRCS-yes += dkboolwriter.c
endif
DSP_SRCS-yes += bitwriter.h
DSP_SRCS-yes += bitwriter_buffer.c
@@ -49,17 +46,14 @@ DSP_SRCS-$(CONFIG_INTERNAL_STATS) += psnrhvs.c
DSP_SRCS-$(CONFIG_INTERNAL_STATS) += fastssim.c
endif
-ifeq ($(CONFIG_DECODERS),yes)
+ifeq ($(CONFIG_AV1_DECODER),yes)
ifeq ($(CONFIG_ANS),yes)
DSP_SRCS-yes += ansreader.h
-else ifeq ($(CONFIG_DAALA_EC),yes)
+else
DSP_SRCS-yes += entdec.c
DSP_SRCS-yes += entdec.h
DSP_SRCS-yes += daalaboolreader.c
DSP_SRCS-yes += daalaboolreader.h
-else
-DSP_SRCS-yes += dkboolreader.h
-DSP_SRCS-yes += dkboolreader.c
endif
DSP_SRCS-yes += bitreader.h
DSP_SRCS-yes += bitreader_buffer.c
@@ -71,7 +65,7 @@ endif
# intra predictions
DSP_SRCS-yes += intrapred.c
-ifeq ($(CONFIG_DAALA_EC),yes)
+ifneq ($(CONFIG_ANS),yes)
DSP_SRCS-yes += entcode.c
DSP_SRCS-yes += entcode.h
endif
@@ -205,6 +199,7 @@ endif # CONFIG_HIGHBITDEPTH
DSP_SRCS-yes += txfm_common.h
DSP_SRCS-yes += x86/txfm_common_intrin.h
DSP_SRCS-$(HAVE_SSE2) += x86/txfm_common_sse2.h
+DSP_SRCS-$(HAVE_SSSE3) += x86/obmc_intrinsic_ssse3.h
DSP_SRCS-$(HAVE_MSA) += mips/txfm_macros_msa.h
# forward transform
@@ -239,6 +234,8 @@ DSP_SRCS-$(HAVE_SSE2) += x86/inv_txfm_sse2.h
DSP_SRCS-$(HAVE_SSE2) += x86/inv_txfm_sse2.c
DSP_SRCS-$(HAVE_SSE2) += x86/inv_wht_sse2.asm
DSP_SRCS-$(HAVE_SSSE3) += x86/inv_txfm_ssse3.c
+DSP_SRCS-$(HAVE_AVX2) += x86/inv_txfm_common_avx2.h
+DSP_SRCS-$(HAVE_AVX2) += x86/inv_txfm_avx2.c
ifeq ($(HAVE_NEON_ASM),yes)
DSP_SRCS-yes += arm/save_reg_neon$(ASM)
@@ -278,6 +275,13 @@ DSP_SRCS-$(HAVE_DSPR2) += mips/itrans16_dspr2.c
DSP_SRCS-$(HAVE_DSPR2) += mips/itrans32_dspr2.c
DSP_SRCS-$(HAVE_DSPR2) += mips/itrans32_cols_dspr2.c
endif # CONFIG_HIGHBITDEPTH
+
+ifeq ($(CONFIG_LOOP_RESTORATION),yes)
+DSP_SRCS-$(HAVE_SSE2) += x86/aom_convolve_hip_sse2.c
+ifeq ($(CONFIG_HIGHBITDEPTH),yes)
+DSP_SRCS-$(HAVE_SSSE3) += x86/aom_highbd_convolve_hip_ssse3.c
+endif
+endif # CONFIG_LOOP_RESTORATION
endif # CONFIG_AV1
# quantization
@@ -298,7 +302,6 @@ endif
DSP_SRCS-yes += avg.c
DSP_SRCS-$(HAVE_SSE2) += x86/avg_intrin_sse2.c
DSP_SRCS-$(HAVE_NEON) += arm/avg_neon.c
-DSP_SRCS-$(HAVE_MSA) += mips/avg_msa.c
DSP_SRCS-$(HAVE_NEON) += arm/hadamard_neon.c
ifeq ($(ARCH_X86_64),yes)
DSP_SRCS-$(HAVE_SSSE3) += x86/avg_ssse3_x86_64.asm
@@ -317,11 +320,10 @@ DSP_SRCS-yes += sum_squares.c
DSP_SRCS-$(HAVE_SSE2) += x86/sum_squares_sse2.c
endif # CONFIG_AV1_ENCODER
-ifeq ($(CONFIG_ENCODERS),yes)
+ifeq ($(CONFIG_AV1_ENCODER),yes)
DSP_SRCS-yes += sad.c
DSP_SRCS-yes += subtract.c
-DSP_SRCS-$(HAVE_MEDIA) += arm/sad_media$(ASM)
DSP_SRCS-$(HAVE_NEON) += arm/sad4d_neon.c
DSP_SRCS-$(HAVE_NEON) += arm/sad_neon.c
DSP_SRCS-$(HAVE_NEON) += arm/subtract_neon.c
@@ -364,18 +366,12 @@ DSP_SRCS-$(HAVE_SSE2) += x86/highbd_sad4d_sse2.asm
DSP_SRCS-$(HAVE_SSE2) += x86/highbd_sad_sse2.asm
endif # CONFIG_HIGHBITDEPTH
-endif # CONFIG_ENCODERS
+endif # CONFIG_AV1_ENCODER
-ifneq ($(filter yes,$(CONFIG_ENCODERS)),)
+ifneq ($(filter yes,$(CONFIG_AV1_ENCODER)),)
DSP_SRCS-yes += variance.c
DSP_SRCS-yes += variance.h
-DSP_SRCS-$(HAVE_MEDIA) += arm/bilinear_filter_media$(ASM)
-DSP_SRCS-$(HAVE_MEDIA) += arm/subpel_variance_media.c
-DSP_SRCS-$(HAVE_MEDIA) += arm/variance_halfpixvar16x16_h_media$(ASM)
-DSP_SRCS-$(HAVE_MEDIA) += arm/variance_halfpixvar16x16_hv_media$(ASM)
-DSP_SRCS-$(HAVE_MEDIA) += arm/variance_halfpixvar16x16_v_media$(ASM)
-DSP_SRCS-$(HAVE_MEDIA) += arm/variance_media$(ASM)
DSP_SRCS-$(HAVE_NEON) += arm/subpel_variance_neon.c
DSP_SRCS-$(HAVE_NEON) += arm/variance_neon.c
@@ -402,7 +398,7 @@ DSP_SRCS-$(HAVE_SSE4_1) += x86/highbd_variance_sse4.c
DSP_SRCS-$(HAVE_SSE2) += x86/highbd_variance_impl_sse2.asm
DSP_SRCS-$(HAVE_SSE2) += x86/highbd_subpel_variance_impl_sse2.asm
endif # CONFIG_HIGHBITDEPTH
-endif # CONFIG_ENCODERS
+endif # CONFIG_AV1_ENCODER
DSP_SRCS-no += $(DSP_SRCS_REMOVE-yes)
diff --git a/third_party/aom/aom_dsp/aom_dsp_common.h b/third_party/aom/aom_dsp/aom_dsp_common.h
index 47ffbeb6cc..82f9a95e9a 100644
--- a/third_party/aom/aom_dsp/aom_dsp_common.h
+++ b/third_party/aom/aom_dsp/aom_dsp_common.h
@@ -31,6 +31,8 @@ extern "C" {
#define AOMMIN(x, y) (((x) < (y)) ? (x) : (y))
#define AOMMAX(x, y) (((x) > (y)) ? (x) : (y))
+#define NELEMENTS(x) (sizeof((x)) / sizeof((x)[0]))
+
#define IMPLIES(a, b) (!(a) || (b)) // Logical 'a implies b' (or 'a -> b')
#define IS_POWER_OF_TWO(x) (((x) & ((x)-1)) == 0)
diff --git a/third_party/aom/aom_dsp/aom_dsp_rtcd_defs.pl b/third_party/aom/aom_dsp/aom_dsp_rtcd_defs.pl
index b4ef0d92f3..8047cbc095 100755
--- a/third_party/aom/aom_dsp/aom_dsp_rtcd_defs.pl
+++ b/third_party/aom/aom_dsp/aom_dsp_rtcd_defs.pl
@@ -49,6 +49,9 @@ if (aom_config("CONFIG_TX64X64") eq "yes") {
@pred_names = qw/dc dc_top dc_left dc_128 v h d207e d63e d45e d117 d135 d153/;
if (aom_config("CONFIG_ALT_INTRA") eq "yes") {
push @pred_names, qw/paeth smooth/;
+ if (aom_config("CONFIG_SMOOTH_HV") eq "yes") {
+ push @pred_names, qw/smooth_v smooth_h/;
+ }
} else {
push @pred_names, 'tm';
}
@@ -168,10 +171,14 @@ if (aom_config("CONFIG_LOOP_RESTORATION") eq "yes") {
add_proto qw/void aom_convolve8_add_src/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
add_proto qw/void aom_convolve8_add_src_horiz/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
add_proto qw/void aom_convolve8_add_src_vert/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
+ add_proto qw/void aom_convolve8_add_src_hip/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
+ add_proto qw/void aom_convolve8_add_src_horiz_hip/, "const uint8_t *src, ptrdiff_t src_stride, uint16_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
+ add_proto qw/void aom_convolve8_add_src_vert_hip/, "const uint16_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
specialize qw/aom_convolve8_add_src ssse3/;
specialize qw/aom_convolve8_add_src_horiz ssse3/;
specialize qw/aom_convolve8_add_src_vert ssse3/;
+ specialize qw/aom_convolve8_add_src_hip sse2/;
} # CONFIG_LOOP_RESTORATION
# TODO(any): These need to be extended to up to 128x128 block sizes
@@ -215,8 +222,12 @@ if (aom_config("CONFIG_HIGHBITDEPTH") eq "yes") {
add_proto qw/void aom_highbd_convolve8_add_src/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
add_proto qw/void aom_highbd_convolve8_add_src_horiz/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
add_proto qw/void aom_highbd_convolve8_add_src_vert/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
+ add_proto qw/void aom_highbd_convolve8_add_src_hip/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
+ add_proto qw/void aom_highbd_convolve8_add_src_horiz_hip/, "const uint8_t *src, ptrdiff_t src_stride, uint16_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
+ add_proto qw/void aom_highbd_convolve8_add_src_vert_hip/, "const uint16_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
specialize qw/aom_highbd_convolve8_add_src/, "$sse2_x86_64";
+ specialize qw/aom_highbd_convolve8_add_src_hip ssse3/;
# The _horiz/_vert functions are currently unused, so we don't bother
# specialising them.
} # CONFIG_LOOP_RESTORATION
@@ -434,29 +445,30 @@ if (aom_config("CONFIG_HIGHBITDEPTH") eq "yes") {
specialize qw/aom_idct8x8_1_add sse2/;
add_proto qw/void aom_idct16x16_256_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/aom_idct16x16_256_add sse2/;
+ specialize qw/aom_idct16x16_256_add sse2 avx2/;
add_proto qw/void aom_idct16x16_38_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/aom_idct16x16_38_add avx2/;
add_proto qw/void aom_idct16x16_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/aom_idct16x16_10_add sse2/;
+ specialize qw/aom_idct16x16_10_add sse2 avx2/;
add_proto qw/void aom_idct16x16_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/aom_idct16x16_1_add sse2/;
+ specialize qw/aom_idct16x16_1_add sse2 avx2/;
add_proto qw/void aom_idct32x32_1024_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/aom_idct32x32_1024_add sse2 ssse3/;
+ specialize qw/aom_idct32x32_1024_add sse2 ssse3 avx2/;
add_proto qw/void aom_idct32x32_135_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/aom_idct32x32_135_add sse2 ssse3/;
+ specialize qw/aom_idct32x32_135_add sse2 ssse3 avx2/;
# Need to add 135 eob idct32x32 implementations.
$aom_idct32x32_135_add_sse2=aom_idct32x32_1024_add_sse2;
add_proto qw/void aom_idct32x32_34_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/aom_idct32x32_34_add sse2 ssse3/;
+ specialize qw/aom_idct32x32_34_add sse2 ssse3 avx2/;
add_proto qw/void aom_idct32x32_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/aom_idct32x32_1_add sse2/;
+ specialize qw/aom_idct32x32_1_add sse2 avx2/;
add_proto qw/void aom_highbd_idct4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
specialize qw/aom_highbd_idct4x4_16_add sse2/;
@@ -479,21 +491,22 @@ if (aom_config("CONFIG_HIGHBITDEPTH") eq "yes") {
specialize qw/aom_idct8x8_12_add sse2 ssse3 neon dspr2 msa/;
add_proto qw/void aom_idct16x16_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/aom_idct16x16_1_add sse2 neon dspr2 msa/;
+ specialize qw/aom_idct16x16_1_add sse2 avx2 neon dspr2 msa/;
add_proto qw/void aom_idct16x16_256_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/aom_idct16x16_256_add sse2 neon dspr2 msa/;
+ specialize qw/aom_idct16x16_256_add sse2 avx2 neon dspr2 msa/;
add_proto qw/void aom_idct16x16_38_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+ specialize qw/aom_idct16x16_38_add avx2/;
add_proto qw/void aom_idct16x16_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/aom_idct16x16_10_add sse2 neon dspr2 msa/;
+ specialize qw/aom_idct16x16_10_add sse2 avx2 neon dspr2 msa/;
add_proto qw/void aom_idct32x32_1024_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/aom_idct32x32_1024_add sse2 ssse3 neon dspr2 msa/;
+ specialize qw/aom_idct32x32_1024_add sse2 ssse3 avx2 neon dspr2 msa/;
add_proto qw/void aom_idct32x32_135_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/aom_idct32x32_135_add sse2 ssse3 neon dspr2 msa/;
+ specialize qw/aom_idct32x32_135_add sse2 ssse3 avx2 neon dspr2 msa/;
# Need to add 135 eob idct32x32 implementations.
$aom_idct32x32_135_add_sse2=aom_idct32x32_1024_add_sse2;
$aom_idct32x32_135_add_neon=aom_idct32x32_1024_add_neon;
@@ -501,12 +514,12 @@ if (aom_config("CONFIG_HIGHBITDEPTH") eq "yes") {
$aom_idct32x32_135_add_msa=aom_idct32x32_1024_add_msa;
add_proto qw/void aom_idct32x32_34_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/aom_idct32x32_34_add sse2 ssse3 neon dspr2 msa/;
+ specialize qw/aom_idct32x32_34_add sse2 ssse3 avx2 neon dspr2 msa/;
# Need to add 34 eob idct32x32 neon implementation.
$aom_idct32x32_34_add_neon=aom_idct32x32_1024_add_neon;
add_proto qw/void aom_idct32x32_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/aom_idct32x32_1_add sse2 neon dspr2 msa/;
+ specialize qw/aom_idct32x32_1_add sse2 avx2 neon dspr2 msa/;
add_proto qw/void aom_iwht4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
specialize qw/aom_iwht4x4_1_add msa/;
@@ -578,7 +591,7 @@ if (aom_config("CONFIG_AV1") eq "yes") {
}
} # CONFIG_AV1
-if (aom_config("CONFIG_ENCODERS") eq "yes") {
+if (aom_config("CONFIG_AV1_ENCODER") eq "yes") {
#
# Block subtraction
#
@@ -604,13 +617,8 @@ if (aom_config("CONFIG_AV1_ENCODER") eq "yes") {
#
# Avg
#
- add_proto qw/unsigned int aom_avg_8x8/, "const uint8_t *, int p";
specialize qw/aom_avg_8x8 sse2 neon msa/;
- add_proto qw/unsigned int aom_avg_4x4/, "const uint8_t *, int p";
- specialize qw/aom_avg_4x4 sse2 neon msa/;
if (aom_config("CONFIG_HIGHBITDEPTH") eq "yes") {
- add_proto qw/unsigned int aom_highbd_avg_8x8/, "const uint8_t *, int p";
- add_proto qw/unsigned int aom_highbd_avg_4x4/, "const uint8_t *, int p";
add_proto qw/void aom_highbd_subtract_block/, "int rows, int cols, int16_t *diff_ptr, ptrdiff_t diff_stride, const uint8_t *src_ptr, ptrdiff_t src_stride, const uint8_t *pred_ptr, ptrdiff_t pred_stride, int bd";
specialize qw/aom_highbd_subtract_block sse2/;
}
@@ -652,22 +660,22 @@ foreach (@block_sizes) {
add_proto qw/unsigned int/, "aom_sad${w}x${h}_avg", "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred";
}
-specialize qw/aom_sad128x128 avx2 sse2/;
-specialize qw/aom_sad128x64 avx2 sse2/;
-specialize qw/aom_sad64x128 avx2 sse2/;
-specialize qw/aom_sad64x64 avx2 neon msa sse2/;
-specialize qw/aom_sad64x32 avx2 msa sse2/;
-specialize qw/aom_sad32x64 avx2 msa sse2/;
-specialize qw/aom_sad32x32 avx2 neon msa sse2/;
-specialize qw/aom_sad32x16 avx2 msa sse2/;
-specialize qw/aom_sad16x32 msa sse2/;
-specialize qw/aom_sad16x16 media neon msa sse2/;
-specialize qw/aom_sad16x8 neon msa sse2/;
-specialize qw/aom_sad8x16 neon msa sse2/;
-specialize qw/aom_sad8x8 neon msa sse2/;
-specialize qw/aom_sad8x4 msa sse2/;
-specialize qw/aom_sad4x8 msa sse2/;
-specialize qw/aom_sad4x4 neon msa sse2/;
+specialize qw/aom_sad128x128 avx2 sse2/;
+specialize qw/aom_sad128x64 avx2 sse2/;
+specialize qw/aom_sad64x128 avx2 sse2/;
+specialize qw/aom_sad64x64 avx2 neon msa sse2/;
+specialize qw/aom_sad64x32 avx2 msa sse2/;
+specialize qw/aom_sad32x64 avx2 msa sse2/;
+specialize qw/aom_sad32x32 avx2 neon msa sse2/;
+specialize qw/aom_sad32x16 avx2 msa sse2/;
+specialize qw/aom_sad16x32 msa sse2/;
+specialize qw/aom_sad16x16 neon msa sse2/;
+specialize qw/aom_sad16x8 neon msa sse2/;
+specialize qw/aom_sad8x16 neon msa sse2/;
+specialize qw/aom_sad8x8 neon msa sse2/;
+specialize qw/aom_sad8x4 msa sse2/;
+specialize qw/aom_sad4x8 msa sse2/;
+specialize qw/aom_sad4x4 neon msa sse2/;
specialize qw/aom_sad128x128_avg avx2 sse2/;
specialize qw/aom_sad128x64_avg avx2 sse2/;
@@ -727,14 +735,14 @@ if (aom_config("CONFIG_HIGHBITDEPTH") eq "yes") {
if (aom_config("CONFIG_EXT_INTER") eq "yes") {
foreach (@block_sizes) {
($w, $h) = @$_;
- add_proto qw/unsigned int/, "aom_masked_sad${w}x${h}", "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride";
+ add_proto qw/unsigned int/, "aom_masked_sad${w}x${h}", "const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, const uint8_t *second_pred, const uint8_t *msk, int msk_stride, int invert_mask";
specialize "aom_masked_sad${w}x${h}", qw/ssse3/;
}
if (aom_config("CONFIG_HIGHBITDEPTH") eq "yes") {
foreach (@block_sizes) {
($w, $h) = @$_;
- add_proto qw/unsigned int/, "aom_highbd_masked_sad${w}x${h}", "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride";
+ add_proto qw/unsigned int/, "aom_highbd_masked_sad${w}x${h}", "const uint8_t *src8, int src_stride, const uint8_t *ref8, int ref_stride, const uint8_t *second_pred8, const uint8_t *msk, int msk_stride, int invert_mask";
specialize "aom_highbd_masked_sad${w}x${h}", qw/ssse3/;
}
}
@@ -876,9 +884,9 @@ if (aom_config("CONFIG_INTERNAL_STATS") eq "yes") {
add_proto qw/void aom_highbd_ssim_parms_8x8/, "const uint16_t *s, int sp, const uint16_t *r, int rp, uint32_t *sum_s, uint32_t *sum_r, uint32_t *sum_sq_s, uint32_t *sum_sq_r, uint32_t *sum_sxr";
}
}
-} # CONFIG_ENCODERS
+} # CONFIG_AV1_ENCODER
-if (aom_config("CONFIG_ENCODERS") eq "yes") {
+if (aom_config("CONFIG_AV1_ENCODER") eq "yes") {
#
# Specialty Variance
@@ -896,10 +904,10 @@ add_proto qw/unsigned int aom_mse16x8/, "const uint8_t *src_ptr, int source_str
add_proto qw/unsigned int aom_mse8x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int recon_stride, unsigned int *sse";
add_proto qw/unsigned int aom_mse8x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int recon_stride, unsigned int *sse";
-specialize qw/aom_mse16x16 sse2 avx2 media neon msa/;
-specialize qw/aom_mse16x8 sse2 msa/;
-specialize qw/aom_mse8x16 sse2 msa/;
-specialize qw/aom_mse8x8 sse2 msa/;
+specialize qw/aom_mse16x16 sse2 avx2 neon msa/;
+specialize qw/aom_mse16x8 sse2 msa/;
+specialize qw/aom_mse8x16 sse2 msa/;
+specialize qw/aom_mse8x8 sse2 msa/;
if (aom_config("CONFIG_HIGHBITDEPTH") eq "yes") {
foreach $bd (8, 10, 12) {
@@ -956,33 +964,33 @@ foreach (@block_sizes) {
add_proto qw/uint32_t/, "aom_sub_pixel_avg_variance${w}x${h}", "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
}
-specialize qw/aom_variance64x64 sse2 avx2 neon msa/;
-specialize qw/aom_variance64x32 sse2 avx2 neon msa/;
-specialize qw/aom_variance32x64 sse2 neon msa/;
-specialize qw/aom_variance32x32 sse2 avx2 neon msa/;
-specialize qw/aom_variance32x16 sse2 avx2 msa/;
-specialize qw/aom_variance16x32 sse2 msa/;
-specialize qw/aom_variance16x16 sse2 avx2 media neon msa/;
-specialize qw/aom_variance16x8 sse2 neon msa/;
-specialize qw/aom_variance8x16 sse2 neon msa/;
-specialize qw/aom_variance8x8 sse2 media neon msa/;
-specialize qw/aom_variance8x4 sse2 msa/;
-specialize qw/aom_variance4x8 sse2 msa/;
-specialize qw/aom_variance4x4 sse2 msa/;
-
-specialize qw/aom_sub_pixel_variance64x64 avx2 neon msa sse2 ssse3/;
-specialize qw/aom_sub_pixel_variance64x32 msa sse2 ssse3/;
-specialize qw/aom_sub_pixel_variance32x64 msa sse2 ssse3/;
-specialize qw/aom_sub_pixel_variance32x32 avx2 neon msa sse2 ssse3/;
-specialize qw/aom_sub_pixel_variance32x16 msa sse2 ssse3/;
-specialize qw/aom_sub_pixel_variance16x32 msa sse2 ssse3/;
-specialize qw/aom_sub_pixel_variance16x16 media neon msa sse2 ssse3/;
-specialize qw/aom_sub_pixel_variance16x8 msa sse2 ssse3/;
-specialize qw/aom_sub_pixel_variance8x16 msa sse2 ssse3/;
-specialize qw/aom_sub_pixel_variance8x8 media neon msa sse2 ssse3/;
-specialize qw/aom_sub_pixel_variance8x4 msa sse2 ssse3/;
-specialize qw/aom_sub_pixel_variance4x8 msa sse2 ssse3/;
-specialize qw/aom_sub_pixel_variance4x4 msa sse2 ssse3/;
+specialize qw/aom_variance64x64 sse2 avx2 neon msa/;
+specialize qw/aom_variance64x32 sse2 avx2 neon msa/;
+specialize qw/aom_variance32x64 sse2 neon msa/;
+specialize qw/aom_variance32x32 sse2 avx2 neon msa/;
+specialize qw/aom_variance32x16 sse2 avx2 msa/;
+specialize qw/aom_variance16x32 sse2 msa/;
+specialize qw/aom_variance16x16 sse2 avx2 neon msa/;
+specialize qw/aom_variance16x8 sse2 neon msa/;
+specialize qw/aom_variance8x16 sse2 neon msa/;
+specialize qw/aom_variance8x8 sse2 neon msa/;
+specialize qw/aom_variance8x4 sse2 msa/;
+specialize qw/aom_variance4x8 sse2 msa/;
+specialize qw/aom_variance4x4 sse2 msa/;
+
+specialize qw/aom_sub_pixel_variance64x64 avx2 neon msa sse2 ssse3/;
+specialize qw/aom_sub_pixel_variance64x32 msa sse2 ssse3/;
+specialize qw/aom_sub_pixel_variance32x64 msa sse2 ssse3/;
+specialize qw/aom_sub_pixel_variance32x32 avx2 neon msa sse2 ssse3/;
+specialize qw/aom_sub_pixel_variance32x16 msa sse2 ssse3/;
+specialize qw/aom_sub_pixel_variance16x32 msa sse2 ssse3/;
+specialize qw/aom_sub_pixel_variance16x16 neon msa sse2 ssse3/;
+specialize qw/aom_sub_pixel_variance16x8 msa sse2 ssse3/;
+specialize qw/aom_sub_pixel_variance8x16 msa sse2 ssse3/;
+specialize qw/aom_sub_pixel_variance8x8 neon msa sse2 ssse3/;
+specialize qw/aom_sub_pixel_variance8x4 msa sse2 ssse3/;
+specialize qw/aom_sub_pixel_variance4x8 msa sse2 ssse3/;
+specialize qw/aom_sub_pixel_variance4x4 msa sse2 ssse3/;
specialize qw/aom_sub_pixel_avg_variance64x64 avx2 msa sse2 ssse3/;
specialize qw/aom_sub_pixel_avg_variance64x32 msa sse2 ssse3/;
@@ -1034,19 +1042,15 @@ if (aom_config("CONFIG_EXT_INTER") eq "yes") {
#
foreach (@block_sizes) {
($w, $h) = @$_;
- add_proto qw/unsigned int/, "aom_masked_variance${w}x${h}", "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride, unsigned int *sse";
- add_proto qw/unsigned int/, "aom_masked_sub_pixel_variance${w}x${h}", "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride, unsigned int *sse";
- specialize "aom_masked_variance${w}x${h}", qw/ssse3/;
+ add_proto qw/unsigned int/, "aom_masked_sub_pixel_variance${w}x${h}", "const uint8_t *src, int src_stride, int xoffset, int yoffset, const uint8_t *ref, int ref_stride, const uint8_t *second_pred, const uint8_t *msk, int msk_stride, int invert_mask, unsigned int *sse";
specialize "aom_masked_sub_pixel_variance${w}x${h}", qw/ssse3/;
}
if (aom_config("CONFIG_HIGHBITDEPTH") eq "yes") {
- foreach $bd ("_", "_10_", "_12_") {
+ foreach $bd ("_8_", "_10_", "_12_") {
foreach (@block_sizes) {
($w, $h) = @$_;
- add_proto qw/unsigned int/, "aom_highbd${bd}masked_variance${w}x${h}", "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride, unsigned int *sse";
- add_proto qw/unsigned int/, "aom_highbd${bd}masked_sub_pixel_variance${w}x${h}", "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, const uint8_t *m, int m_stride, unsigned int *sse";
- specialize "aom_highbd${bd}masked_variance${w}x${h}", qw/ssse3/;
+ add_proto qw/unsigned int/, "aom_highbd${bd}masked_sub_pixel_variance${w}x${h}", "const uint8_t *src, int src_stride, int xoffset, int yoffset, const uint8_t *ref, int ref_stride, const uint8_t *second_pred, const uint8_t *msk, int msk_stride, int invert_mask, unsigned int *sse";
specialize "aom_highbd${bd}masked_sub_pixel_variance${w}x${h}", qw/ssse3/;
}
}
@@ -1119,13 +1123,13 @@ add_proto qw/uint32_t aom_sub_pixel_avg_variance4x4/, "const uint8_t *src_ptr, i
# Specialty Subpixel
#
add_proto qw/uint32_t aom_variance_halfpixvar16x16_h/, "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, uint32_t *sse";
- specialize qw/aom_variance_halfpixvar16x16_h sse2 media/;
+ specialize qw/aom_variance_halfpixvar16x16_h sse2/;
add_proto qw/uint32_t aom_variance_halfpixvar16x16_v/, "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, uint32_t *sse";
- specialize qw/aom_variance_halfpixvar16x16_v sse2 media/;
+ specialize qw/aom_variance_halfpixvar16x16_v sse2/;
add_proto qw/uint32_t aom_variance_halfpixvar16x16_hv/, "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, uint32_t *sse";
- specialize qw/aom_variance_halfpixvar16x16_hv sse2 media/;
+ specialize qw/aom_variance_halfpixvar16x16_hv sse2/;
#
# Comp Avg
@@ -1490,6 +1494,15 @@ if (aom_config("CONFIG_HIGHBITDEPTH") eq "yes") {
} # CONFIG_HIGHBITDEPTH
-} # CONFIG_ENCODERS
+if (aom_config("CONFIG_EXT_INTER") eq "yes") {
+ add_proto qw/void aom_comp_mask_pred/, "uint8_t *comp_pred, const uint8_t *pred, int width, int height, const uint8_t *ref, int ref_stride, const uint8_t *mask, int mask_stride, int invert_mask";
+ add_proto qw/void aom_comp_mask_upsampled_pred/, "uint8_t *comp_pred, const uint8_t *pred, int width, int height, const uint8_t *ref, int ref_stride, const uint8_t *mask, int mask_stride, int invert_mask";
+ if (aom_config("CONFIG_HIGHBITDEPTH") eq "yes") {
+ add_proto qw/void aom_highbd_comp_mask_pred/, "uint16_t *comp_pred, const uint8_t *pred8, int width, int height, const uint8_t *ref8, int ref_stride, const uint8_t *mask, int mask_stride, int invert_mask";
+ add_proto qw/void aom_highbd_comp_mask_upsampled_pred/, "uint16_t *comp_pred, const uint8_t *pred8, int width, int height, const uint8_t *ref8, int ref_stride, const uint8_t *mask, int mask_stride, int invert_mask";
+ }
+}
+
+} # CONFIG_AV1_ENCODER
1;
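For readers unfamiliar with the rtcd definitions above: each add_proto line declares a dispatched function and each specialize line registers SIMD implementations for it; the machine-generated aom_dsp_rtcd.h then resolves the name at runtime roughly as in this simplified sketch (the stub kernels and flag values here are illustrative, not the generator's actual output):

#include <stdint.h>

typedef int32_t tran_low_t; /* as in CONFIG_HIGHBITDEPTH builds */

/* Illustrative SIMD capability flags; the real values come from the CPU
   detection in aom_ports. */
#define HAS_SSE2 (1 << 0)
#define HAS_AVX2 (1 << 1)

/* Stub kernels standing in for the real C/SSE2/AVX2 implementations. */
static void idct_c(const tran_low_t *in, uint8_t *dst, int stride) {
  (void)in; (void)dst; (void)stride;
}
static void idct_sse2(const tran_low_t *in, uint8_t *dst, int stride) {
  (void)in; (void)dst; (void)stride;
}
static void idct_avx2(const tran_low_t *in, uint8_t *dst, int stride) {
  (void)in; (void)dst; (void)stride;
}

/* The generated header exposes each dispatched name as a function pointer
   initialised to the plain C version... */
static void (*aom_idct16x16_256_add)(const tran_low_t *, uint8_t *, int) =
    idct_c;

/* ...and a setup routine upgrades it to the best kernel the CPU supports. */
static void setup_rtcd_internal(int flags) {
  if (flags & HAS_SSE2) aom_idct16x16_256_add = idct_sse2;
  if (flags & HAS_AVX2) aom_idct16x16_256_add = idct_avx2;
}

int main(void) {
  setup_rtcd_internal(HAS_SSE2 | HAS_AVX2);
  aom_idct16x16_256_add(0, 0, 0);
  return 0;
}

Aliases such as $aom_idct32x32_135_add_sse2=aom_idct32x32_1024_add_sse2 above simply point a missing specialisation at an existing kernel before the header is generated.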
diff --git a/third_party/aom/aom_dsp/arm/avg_neon.c b/third_party/aom/aom_dsp/arm/avg_neon.c
index e730ccbccc..6ff760017b 100644
--- a/third_party/aom/aom_dsp/arm/avg_neon.c
+++ b/third_party/aom/aom_dsp/arm/avg_neon.c
@@ -25,44 +25,6 @@ static INLINE unsigned int horizontal_add_u16x8(const uint16x8_t v_16x8) {
return vget_lane_u32(c, 0);
}
-unsigned int aom_avg_4x4_neon(const uint8_t *s, int p) {
- uint16x8_t v_sum;
- uint32x2_t v_s0 = vdup_n_u32(0);
- uint32x2_t v_s1 = vdup_n_u32(0);
- v_s0 = vld1_lane_u32((const uint32_t *)s, v_s0, 0);
- v_s0 = vld1_lane_u32((const uint32_t *)(s + p), v_s0, 1);
- v_s1 = vld1_lane_u32((const uint32_t *)(s + 2 * p), v_s1, 0);
- v_s1 = vld1_lane_u32((const uint32_t *)(s + 3 * p), v_s1, 1);
- v_sum = vaddl_u8(vreinterpret_u8_u32(v_s0), vreinterpret_u8_u32(v_s1));
- return (horizontal_add_u16x8(v_sum) + 8) >> 4;
-}
-
-unsigned int aom_avg_8x8_neon(const uint8_t *s, int p) {
- uint8x8_t v_s0 = vld1_u8(s);
- const uint8x8_t v_s1 = vld1_u8(s + p);
- uint16x8_t v_sum = vaddl_u8(v_s0, v_s1);
-
- v_s0 = vld1_u8(s + 2 * p);
- v_sum = vaddw_u8(v_sum, v_s0);
-
- v_s0 = vld1_u8(s + 3 * p);
- v_sum = vaddw_u8(v_sum, v_s0);
-
- v_s0 = vld1_u8(s + 4 * p);
- v_sum = vaddw_u8(v_sum, v_s0);
-
- v_s0 = vld1_u8(s + 5 * p);
- v_sum = vaddw_u8(v_sum, v_s0);
-
- v_s0 = vld1_u8(s + 6 * p);
- v_sum = vaddw_u8(v_sum, v_s0);
-
- v_s0 = vld1_u8(s + 7 * p);
- v_sum = vaddw_u8(v_sum, v_s0);
-
- return (horizontal_add_u16x8(v_sum) + 32) >> 6;
-}
-
// coeff: 16 bits, dynamic range [-32640, 32640].
// length: value range {16, 64, 256, 1024}.
int aom_satd_neon(const int16_t *coeff, int length) {
diff --git a/third_party/aom/aom_dsp/arm/bilinear_filter_media.asm b/third_party/aom/aom_dsp/arm/bilinear_filter_media.asm
deleted file mode 100644
index 17b7d25f97..0000000000
--- a/third_party/aom/aom_dsp/arm/bilinear_filter_media.asm
+++ /dev/null
@@ -1,240 +0,0 @@
-;
-; Copyright (c) 2016, Alliance for Open Media. All rights reserved
-;
-; This source code is subject to the terms of the BSD 2 Clause License and
-; the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
-; was not distributed with this source code in the LICENSE file, you can
-; obtain it at www.aomedia.org/license/software. If the Alliance for Open
-; Media Patent License 1.0 was not distributed with this source code in the
-; PATENTS file, you can obtain it at www.aomedia.org/license/patent.
-;
-
-;
-
-
- EXPORT |aom_filter_block2d_bil_first_pass_media|
- EXPORT |aom_filter_block2d_bil_second_pass_media|
-
- AREA |.text|, CODE, READONLY ; name this block of code
-
-;-------------------------------------
-; r0 unsigned char *src_ptr,
-; r1 unsigned short *dst_ptr,
-; r2 unsigned int src_pitch,
-; r3 unsigned int height,
-; stack unsigned int width,
-; stack const short *aom_filter
-;-------------------------------------
-; The output is transposed and stored in the output array to make it easy for second pass filtering.
-|aom_filter_block2d_bil_first_pass_media| PROC
- stmdb sp!, {r4 - r11, lr}
-
- ldr r11, [sp, #40] ; aom_filter address
- ldr r4, [sp, #36] ; width
-
- mov r12, r3 ; outer-loop counter
-
- add r7, r2, r4 ; preload next row
- pld [r0, r7]
-
- sub r2, r2, r4 ; src increment for height loop
-
- ldr r5, [r11] ; load up filter coefficients
-
- mov r3, r3, lsl #1 ; height*2
- add r3, r3, #2 ; plus 2 to make output buffer 4-bit aligned since height is actually (height+1)
-
- mov r11, r1 ; save dst_ptr for each row
-
- cmp r5, #128 ; if filter coef = 128, then skip the filter
- beq bil_null_1st_filter
-
-|bil_height_loop_1st_v6|
- ldrb r6, [r0] ; load source data
- ldrb r7, [r0, #1]
- ldrb r8, [r0, #2]
- mov lr, r4, lsr #2 ; 4-in-parallel loop counter
-
-|bil_width_loop_1st_v6|
- ldrb r9, [r0, #3]
- ldrb r10, [r0, #4]
-
- pkhbt r6, r6, r7, lsl #16 ; src[1] | src[0]
- pkhbt r7, r7, r8, lsl #16 ; src[2] | src[1]
-
- smuad r6, r6, r5 ; apply the filter
- pkhbt r8, r8, r9, lsl #16 ; src[3] | src[2]
- smuad r7, r7, r5
- pkhbt r9, r9, r10, lsl #16 ; src[4] | src[3]
-
- smuad r8, r8, r5
- smuad r9, r9, r5
-
- add r0, r0, #4
- subs lr, lr, #1
-
- add r6, r6, #0x40 ; round_shift_and_clamp
- add r7, r7, #0x40
- usat r6, #16, r6, asr #7
- usat r7, #16, r7, asr #7
-
- strh r6, [r1], r3 ; result is transposed and stored
-
- add r8, r8, #0x40 ; round_shift_and_clamp
- strh r7, [r1], r3
- add r9, r9, #0x40
- usat r8, #16, r8, asr #7
- usat r9, #16, r9, asr #7
-
- strh r8, [r1], r3 ; result is transposed and stored
-
- ldrneb r6, [r0] ; load source data
- strh r9, [r1], r3
-
- ldrneb r7, [r0, #1]
- ldrneb r8, [r0, #2]
-
- bne bil_width_loop_1st_v6
-
- add r0, r0, r2 ; move to next input row
- subs r12, r12, #1
-
- add r9, r2, r4, lsl #1 ; adding back block width
- pld [r0, r9] ; preload next row
-
- add r11, r11, #2 ; move over to next column
- mov r1, r11
-
- bne bil_height_loop_1st_v6
-
- ldmia sp!, {r4 - r11, pc}
-
-|bil_null_1st_filter|
-|bil_height_loop_null_1st|
- mov lr, r4, lsr #2 ; loop counter
-
-|bil_width_loop_null_1st|
- ldrb r6, [r0] ; load data
- ldrb r7, [r0, #1]
- ldrb r8, [r0, #2]
- ldrb r9, [r0, #3]
-
- strh r6, [r1], r3 ; store it to immediate buffer
- add r0, r0, #4
- strh r7, [r1], r3
- subs lr, lr, #1
- strh r8, [r1], r3
- strh r9, [r1], r3
-
- bne bil_width_loop_null_1st
-
- subs r12, r12, #1
- add r0, r0, r2 ; move to next input line
- add r11, r11, #2 ; move over to next column
- mov r1, r11
-
- bne bil_height_loop_null_1st
-
- ldmia sp!, {r4 - r11, pc}
-
- ENDP ; |aom_filter_block2d_bil_first_pass_media|
-
-
-;---------------------------------
-; r0 unsigned short *src_ptr,
-; r1 unsigned char *dst_ptr,
-; r2 int dst_pitch,
-; r3 unsigned int height,
-; stack unsigned int width,
-; stack const short *aom_filter
-;---------------------------------
-|aom_filter_block2d_bil_second_pass_media| PROC
- stmdb sp!, {r4 - r11, lr}
-
- ldr r11, [sp, #40] ; aom_filter address
- ldr r4, [sp, #36] ; width
-
- ldr r5, [r11] ; load up filter coefficients
- mov r12, r4 ; outer-loop counter = width, since we work on transposed data matrix
- mov r11, r1
-
- cmp r5, #128 ; if filter coef = 128, then skip the filter
- beq bil_null_2nd_filter
-
-|bil_height_loop_2nd|
- ldr r6, [r0] ; load the data
- ldr r8, [r0, #4]
- ldrh r10, [r0, #8]
- mov lr, r3, lsr #2 ; loop counter
-
-|bil_width_loop_2nd|
- pkhtb r7, r6, r8 ; src[1] | src[2]
- pkhtb r9, r8, r10 ; src[3] | src[4]
-
- smuad r6, r6, r5 ; apply filter
- smuad r8, r8, r5 ; apply filter
-
- subs lr, lr, #1
-
- smuadx r7, r7, r5 ; apply filter
- smuadx r9, r9, r5 ; apply filter
-
- add r0, r0, #8
-
- add r6, r6, #0x40 ; round_shift_and_clamp
- add r7, r7, #0x40
- usat r6, #8, r6, asr #7
- usat r7, #8, r7, asr #7
- strb r6, [r1], r2 ; the result is transposed back and stored
-
- add r8, r8, #0x40 ; round_shift_and_clamp
- strb r7, [r1], r2
- add r9, r9, #0x40
- usat r8, #8, r8, asr #7
- usat r9, #8, r9, asr #7
- strb r8, [r1], r2 ; the result is transposed back and stored
-
- ldrne r6, [r0] ; load data
- strb r9, [r1], r2
- ldrne r8, [r0, #4]
- ldrneh r10, [r0, #8]
-
- bne bil_width_loop_2nd
-
- subs r12, r12, #1
- add r0, r0, #4 ; update src for next row
- add r11, r11, #1
- mov r1, r11
-
- bne bil_height_loop_2nd
- ldmia sp!, {r4 - r11, pc}
-
-|bil_null_2nd_filter|
-|bil_height_loop_null_2nd|
- mov lr, r3, lsr #2
-
-|bil_width_loop_null_2nd|
- ldr r6, [r0], #4 ; load data
- subs lr, lr, #1
- ldr r8, [r0], #4
-
- strb r6, [r1], r2 ; store data
- mov r7, r6, lsr #16
- strb r7, [r1], r2
- mov r9, r8, lsr #16
- strb r8, [r1], r2
- strb r9, [r1], r2
-
- bne bil_width_loop_null_2nd
-
- subs r12, r12, #1
- add r0, r0, #4
- add r11, r11, #1
- mov r1, r11
-
- bne bil_height_loop_null_2nd
-
- ldmia sp!, {r4 - r11, pc}
- ENDP ; |aom_filter_block2d_bil_second_pass_media|
-
- END
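The deleted pair above amounts to a separable 2-tap bilinear filter: the
first pass filters horizontally into a transposed 16-bit scratch buffer so
that the second (vertical) pass can run the same row-wise loop while
clamping back to 8 bits. A rough C model of one such pass, ignoring the
transposed intermediate layout (names and signature are illustrative, not
the library's API):

#include <stdint.h>

/* 2-tap bilinear pass; filter[0] + filter[1] == 128 and rounding is
 * (x + 64) >> 7, matching the usat/asr #7 sequences in the asm.
 * pixel_step is 1 for a horizontal pass and src_stride for a
 * vertical one. */
static void bilinear_pass(const uint8_t *src, uint8_t *dst,
                          int src_stride, int height, int width,
                          int pixel_step, const int16_t *filter) {
  for (int i = 0; i < height; ++i) {
    for (int j = 0; j < width; ++j)
      dst[j] = (uint8_t)((src[j] * filter[0] +
                          src[j + pixel_step] * filter[1] + 64) >> 7);
    src += src_stride;
    dst += width;
  }
}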
diff --git a/third_party/aom/aom_dsp/arm/sad_media.asm b/third_party/aom/aom_dsp/arm/sad_media.asm
deleted file mode 100644
index 49ddb67642..0000000000
--- a/third_party/aom/aom_dsp/arm/sad_media.asm
+++ /dev/null
@@ -1,98 +0,0 @@
-;
-; Copyright (c) 2016, Alliance for Open Media. All rights reserved
-;
-; This source code is subject to the terms of the BSD 2 Clause License and
-; the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
-; was not distributed with this source code in the LICENSE file, you can
-; obtain it at www.aomedia.org/license/software. If the Alliance for Open
-; Media Patent License 1.0 was not distributed with this source code in the
-; PATENTS file, you can obtain it at www.aomedia.org/license/patent.
-;
-
-;
-
-
- EXPORT |aom_sad16x16_media|
-
- ARM
- REQUIRE8
- PRESERVE8
-
- AREA ||.text||, CODE, READONLY, ALIGN=2
-
-; r0 const unsigned char *src_ptr
-; r1 int src_stride
-; r2 const unsigned char *ref_ptr
-; r3 int ref_stride
-|aom_sad16x16_media| PROC
- stmfd sp!, {r4-r12, lr}
-
- pld [r0, r1, lsl #0]
- pld [r2, r3, lsl #0]
- pld [r0, r1, lsl #1]
- pld [r2, r3, lsl #1]
-
- mov r4, #0 ; sad = 0;
- mov r5, #8 ; loop count
-
-loop
- ; 1st row
- ldr r6, [r0, #0x0] ; load 4 src pixels (1A)
- ldr r8, [r2, #0x0] ; load 4 ref pixels (1A)
- ldr r7, [r0, #0x4] ; load 4 src pixels (1A)
- ldr r9, [r2, #0x4] ; load 4 ref pixels (1A)
- ldr r10, [r0, #0x8] ; load 4 src pixels (1B)
- ldr r11, [r0, #0xC] ; load 4 src pixels (1B)
-
- usada8 r4, r8, r6, r4 ; calculate sad for 4 pixels
- usad8 r8, r7, r9 ; calculate sad for 4 pixels
-
- ldr r12, [r2, #0x8] ; load 4 ref pixels (1B)
- ldr lr, [r2, #0xC] ; load 4 ref pixels (1B)
-
- add r0, r0, r1 ; set src pointer to next row
- add r2, r2, r3 ; set dst pointer to next row
-
- pld [r0, r1, lsl #1]
- pld [r2, r3, lsl #1]
-
- usada8 r4, r10, r12, r4 ; calculate sad for 4 pixels
- usada8 r8, r11, lr, r8 ; calculate sad for 4 pixels
-
- ldr r6, [r0, #0x0] ; load 4 src pixels (2A)
- ldr r7, [r0, #0x4] ; load 4 src pixels (2A)
- add r4, r4, r8 ; add partial sad values
-
- ; 2nd row
- ldr r8, [r2, #0x0] ; load 4 ref pixels (2A)
- ldr r9, [r2, #0x4] ; load 4 ref pixels (2A)
- ldr r10, [r0, #0x8] ; load 4 src pixels (2B)
- ldr r11, [r0, #0xC] ; load 4 src pixels (2B)
-
- usada8 r4, r6, r8, r4 ; calculate sad for 4 pixels
- usad8 r8, r7, r9 ; calculate sad for 4 pixels
-
- ldr r12, [r2, #0x8] ; load 4 ref pixels (2B)
- ldr lr, [r2, #0xC] ; load 4 ref pixels (2B)
-
- add r0, r0, r1 ; set src pointer to next row
- add r2, r2, r3 ; set dst pointer to next row
-
- usada8 r4, r10, r12, r4 ; calculate sad for 4 pixels
- usada8 r8, r11, lr, r8 ; calculate sad for 4 pixels
-
- pld [r0, r1, lsl #1]
- pld [r2, r3, lsl #1]
-
- subs r5, r5, #1 ; decrement loop counter
- add r4, r4, r8 ; add partial sad values
-
- bne loop
-
- mov r0, r4 ; return sad
- ldmfd sp!, {r4-r12, pc}
-
- ENDP
-
- END
-
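For reference, the deleted aom_sad16x16_media computes a plain sum of
absolute differences; the usad8/usada8 instructions above perform this
reduction four packed bytes at a time. An equivalent scalar sketch
(illustrative helper, not the library's API):

#include <stdint.h>
#include <stdlib.h>

static unsigned int sad16x16(const uint8_t *src, int src_stride,
                             const uint8_t *ref, int ref_stride) {
  unsigned int sad = 0;
  for (int r = 0; r < 16; ++r) {
    /* accumulate |src - ref| across the 16-pixel row */
    for (int c = 0; c < 16; ++c) sad += (unsigned int)abs(src[c] - ref[c]);
    src += src_stride;
    ref += ref_stride;
  }
  return sad;
}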
diff --git a/third_party/aom/aom_dsp/arm/subpel_variance_media.c b/third_party/aom/aom_dsp/arm/subpel_variance_media.c
deleted file mode 100644
index 46ec028d37..0000000000
--- a/third_party/aom/aom_dsp/arm/subpel_variance_media.c
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * Copyright (c) 2016, Alliance for Open Media. All rights reserved
- *
- * This source code is subject to the terms of the BSD 2 Clause License and
- * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
- * was not distributed with this source code in the LICENSE file, you can
- * obtain it at www.aomedia.org/license/software. If the Alliance for Open
- * Media Patent License 1.0 was not distributed with this source code in the
- * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
- */
-
-#include "./aom_config.h"
-#include "./aom_dsp_rtcd.h"
-#include "aom/aom_integer.h"
-#include "aom_ports/mem.h"
-
-#if HAVE_MEDIA
-static const int16_t bilinear_filters_media[8][2] = { { 128, 0 }, { 112, 16 },
- { 96, 32 }, { 80, 48 },
- { 64, 64 }, { 48, 80 },
- { 32, 96 }, { 16, 112 } };
-
-extern void aom_filter_block2d_bil_first_pass_media(
- const uint8_t *src_ptr, uint16_t *dst_ptr, uint32_t src_pitch,
- uint32_t height, uint32_t width, const int16_t *filter);
-
-extern void aom_filter_block2d_bil_second_pass_media(
- const uint16_t *src_ptr, uint8_t *dst_ptr, int32_t src_pitch,
- uint32_t height, uint32_t width, const int16_t *filter);
-
-unsigned int aom_sub_pixel_variance8x8_media(
- const uint8_t *src_ptr, int src_pixels_per_line, int xoffset, int yoffset,
- const uint8_t *dst_ptr, int dst_pixels_per_line, unsigned int *sse) {
- uint16_t first_pass[10 * 8];
- uint8_t second_pass[8 * 8];
- const int16_t *HFilter, *VFilter;
-
- HFilter = bilinear_filters_media[xoffset];
- VFilter = bilinear_filters_media[yoffset];
-
- aom_filter_block2d_bil_first_pass_media(src_ptr, first_pass,
- src_pixels_per_line, 9, 8, HFilter);
- aom_filter_block2d_bil_second_pass_media(first_pass, second_pass, 8, 8, 8,
- VFilter);
-
- return aom_variance8x8_media(second_pass, 8, dst_ptr, dst_pixels_per_line,
- sse);
-}
-
-unsigned int aom_sub_pixel_variance16x16_media(
- const uint8_t *src_ptr, int src_pixels_per_line, int xoffset, int yoffset,
- const uint8_t *dst_ptr, int dst_pixels_per_line, unsigned int *sse) {
- uint16_t first_pass[36 * 16];
- uint8_t second_pass[20 * 16];
- const int16_t *HFilter, *VFilter;
- unsigned int var;
-
- if (xoffset == 4 && yoffset == 0) {
- var = aom_variance_halfpixvar16x16_h_media(
- src_ptr, src_pixels_per_line, dst_ptr, dst_pixels_per_line, sse);
- } else if (xoffset == 0 && yoffset == 4) {
- var = aom_variance_halfpixvar16x16_v_media(
- src_ptr, src_pixels_per_line, dst_ptr, dst_pixels_per_line, sse);
- } else if (xoffset == 4 && yoffset == 4) {
- var = aom_variance_halfpixvar16x16_hv_media(
- src_ptr, src_pixels_per_line, dst_ptr, dst_pixels_per_line, sse);
- } else {
- HFilter = bilinear_filters_media[xoffset];
- VFilter = bilinear_filters_media[yoffset];
-
- aom_filter_block2d_bil_first_pass_media(
- src_ptr, first_pass, src_pixels_per_line, 17, 16, HFilter);
- aom_filter_block2d_bil_second_pass_media(first_pass, second_pass, 16, 16,
- 16, VFilter);
-
- var = aom_variance16x16_media(second_pass, 16, dst_ptr, dst_pixels_per_line,
- sse);
- }
- return var;
-}
-#endif // HAVE_MEDIA
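Each row of the deleted bilinear_filters_media table above is a 2-tap
filter indexed by the 1/8-pel offset, with coefficients that sum to 128.
A worked example of applying one such tap (an illustrative helper, not
part of the library):

#include <stdint.h>

/* xoffset == 3 selects { 80, 48 }: a sample 3/8 of the way from a to b,
 * since b's weight is 48/128 = 3/8; +64 rounds before the >> 7. */
static uint8_t interp_3_8(uint8_t a, uint8_t b) {
  return (uint8_t)((80 * a + 48 * b + 64) >> 7);
}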
diff --git a/third_party/aom/aom_dsp/arm/variance_halfpixvar16x16_h_media.asm b/third_party/aom/aom_dsp/arm/variance_halfpixvar16x16_h_media.asm
deleted file mode 100644
index 1e5c9178e6..0000000000
--- a/third_party/aom/aom_dsp/arm/variance_halfpixvar16x16_h_media.asm
+++ /dev/null
@@ -1,185 +0,0 @@
-;
-; Copyright (c) 2016, Alliance for Open Media. All rights reserved
-;
-; This source code is subject to the terms of the BSD 2 Clause License and
-; the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
-; was not distributed with this source code in the LICENSE file, you can
-; obtain it at www.aomedia.org/license/software. If the Alliance for Open
-; Media Patent License 1.0 was not distributed with this source code in the
-; PATENTS file, you can obtain it at www.aomedia.org/license/patent.
-;
-
-;
-
-
- EXPORT |aom_variance_halfpixvar16x16_h_media|
-
- ARM
- REQUIRE8
- PRESERVE8
-
- AREA ||.text||, CODE, READONLY, ALIGN=2
-
-; r0 unsigned char *src_ptr
-; r1 int source_stride
-; r2 unsigned char *ref_ptr
-; r3 int recon_stride
-; stack unsigned int *sse
-|aom_variance_halfpixvar16x16_h_media| PROC
-
- stmfd sp!, {r4-r12, lr}
-
- pld [r0, r1, lsl #0]
- pld [r2, r3, lsl #0]
-
- mov r8, #0 ; initialize sum = 0
- ldr r10, c80808080
- mov r11, #0 ; initialize sse = 0
- mov r12, #16 ; set loop counter to 16 (=block height)
- mov lr, #0 ; constant zero
-loop
- ; 1st 4 pixels
- ldr r4, [r0, #0] ; load 4 src pixels
- ldr r6, [r0, #1] ; load 4 src pixels with 1 byte offset
- ldr r5, [r2, #0] ; load 4 ref pixels
-
- ; bilinear interpolation
- mvn r6, r6
- uhsub8 r4, r4, r6
- eor r4, r4, r10
-
- usub8 r6, r4, r5 ; calculate difference
- pld [r0, r1, lsl #1]
- sel r7, r6, lr ; select bytes with positive difference
- usub8 r6, r5, r4 ; calculate difference with reversed operands
- pld [r2, r3, lsl #1]
- sel r6, r6, lr ; select bytes with negative difference
-
- ; calculate partial sums
- usad8 r4, r7, lr ; calculate sum of positive differences
- usad8 r5, r6, lr ; calculate sum of negative differences
- orr r6, r6, r7 ; differences of all 4 pixels
- ; calculate total sum
- adds r8, r8, r4 ; add positive differences to sum
- subs r8, r8, r5 ; subtract negative differences from sum
-
- ; calculate sse
- uxtb16 r5, r6 ; byte (two pixels) to halfwords
- uxtb16 r7, r6, ror #8 ; another two pixels to halfwords
- smlad r11, r5, r5, r11 ; dual signed multiply, add and accumulate (1)
-
- ; 2nd 4 pixels
- ldr r4, [r0, #4] ; load 4 src pixels
- ldr r6, [r0, #5] ; load 4 src pixels with 1 byte offset
- ldr r5, [r2, #4] ; load 4 ref pixels
-
- ; bilinear interpolation
- mvn r6, r6
- uhsub8 r4, r4, r6
- eor r4, r4, r10
-
- smlad r11, r7, r7, r11 ; dual signed multiply, add and accumulate (2)
-
- usub8 r6, r4, r5 ; calculate difference
- sel r7, r6, lr ; select bytes with positive difference
- usub8 r6, r5, r4 ; calculate difference with reversed operands
- sel r6, r6, lr ; select bytes with negative difference
-
- ; calculate partial sums
- usad8 r4, r7, lr ; calculate sum of positive differences
- usad8 r5, r6, lr ; calculate sum of negative differences
- orr r6, r6, r7 ; differences of all 4 pixels
-
- ; calculate total sum
- add r8, r8, r4 ; add positive differences to sum
- sub r8, r8, r5 ; subtract negative differences from sum
-
- ; calculate sse
- uxtb16 r5, r6 ; byte (two pixels) to halfwords
- uxtb16 r7, r6, ror #8 ; another two pixels to halfwords
- smlad r11, r5, r5, r11 ; dual signed multiply, add and accumulate (1)
-
- ; 3rd 4 pixels
- ldr r4, [r0, #8] ; load 4 src pixels
- ldr r6, [r0, #9] ; load 4 src pixels with 1 byte offset
- ldr r5, [r2, #8] ; load 4 ref pixels
-
- ; bilinear interpolation
- mvn r6, r6
- uhsub8 r4, r4, r6
- eor r4, r4, r10
-
- smlad r11, r7, r7, r11 ; dual signed multiply, add and accumulate (2)
-
- usub8 r6, r4, r5 ; calculate difference
- sel r7, r6, lr ; select bytes with positive difference
- usub8 r6, r5, r4 ; calculate difference with reversed operands
- sel r6, r6, lr ; select bytes with negative difference
-
- ; calculate partial sums
- usad8 r4, r7, lr ; calculate sum of positive differences
- usad8 r5, r6, lr ; calculate sum of negative differences
- orr r6, r6, r7 ; differences of all 4 pixels
-
- ; calculate total sum
- add r8, r8, r4 ; add positive differences to sum
- sub r8, r8, r5 ; subtract negative differences from sum
-
- ; calculate sse
- uxtb16 r5, r6 ; byte (two pixels) to halfwords
- uxtb16 r7, r6, ror #8 ; another two pixels to halfwords
- smlad r11, r5, r5, r11 ; dual signed multiply, add and accumulate (1)
-
- ; 4th 4 pixels
- ldr r4, [r0, #12] ; load 4 src pixels
- ldr r6, [r0, #13] ; load 4 src pixels with 1 byte offset
- ldr r5, [r2, #12] ; load 4 ref pixels
-
- ; bilinear interpolation
- mvn r6, r6
- uhsub8 r4, r4, r6
- eor r4, r4, r10
-
- smlad r11, r7, r7, r11 ; dual signed multiply, add and accumulate (2)
-
- usub8 r6, r4, r5 ; calculate difference
- add r0, r0, r1 ; set src_ptr to next row
- sel r7, r6, lr ; select bytes with positive difference
- usub8 r6, r5, r4 ; calculate difference with reversed operands
- add r2, r2, r3 ; set dst_ptr to next row
- sel r6, r6, lr ; select bytes with negative difference
-
- ; calculate partial sums
- usad8 r4, r7, lr ; calculate sum of positive differences
- usad8 r5, r6, lr ; calculate sum of negative differences
- orr r6, r6, r7 ; differences of all 4 pixels
-
- ; calculate total sum
- add r8, r8, r4 ; add positive differences to sum
- sub r8, r8, r5 ; subtract negative differences from sum
-
- ; calculate sse
- uxtb16 r5, r6 ; byte (two pixels) to halfwords
- uxtb16 r7, r6, ror #8 ; another two pixels to halfwords
- smlad r11, r5, r5, r11 ; dual signed multiply, add and accumulate (1)
- smlad r11, r7, r7, r11 ; dual signed multiply, add and accumulate (2)
-
- subs r12, r12, #1
-
- bne loop
-
- ; return stuff
- ldr r6, [sp, #40] ; get address of sse
- mul r0, r8, r8 ; sum * sum
- str r11, [r6] ; store sse
- sub r0, r11, r0, lsr #8 ; return (sse - ((sum * sum) >> 8))
-
- ldmfd sp!, {r4-r12, pc}
-
- ENDP
-
-c80808080
- DCD 0x80808080
-
- END
-
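The mvn/uhsub8/eor sequences in the file above and in the two half-pel
variants that follow are a SIMD-within-a-register idiom: per byte lane,
((a - ~b) >> 1) ^ 0x80 equals the rounded average (a + b + 1) >> 1, with
uhsub8 halving each 8-bit difference without cross-lane carries. A
portable model of what one such sequence produces (helper name is
illustrative):

#include <stdint.h>

static uint32_t avg4_round(uint32_t a, uint32_t b) {
  uint32_t r = 0;
  for (int i = 0; i < 32; i += 8) {
    const uint32_t ai = (a >> i) & 0xff, bi = (b >> i) & 0xff;
    r |= ((ai + bi + 1) >> 1) << i;  /* rounded average of each byte lane */
  }
  return r;
}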
diff --git a/third_party/aom/aom_dsp/arm/variance_halfpixvar16x16_hv_media.asm b/third_party/aom/aom_dsp/arm/variance_halfpixvar16x16_hv_media.asm
deleted file mode 100644
index 9e0af830ee..0000000000
--- a/third_party/aom/aom_dsp/arm/variance_halfpixvar16x16_hv_media.asm
+++ /dev/null
@@ -1,225 +0,0 @@
-;
-; Copyright (c) 2016, Alliance for Open Media. All rights reserved
-;
-; This source code is subject to the terms of the BSD 2 Clause License and
-; the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
-; was not distributed with this source code in the LICENSE file, you can
-; obtain it at www.aomedia.org/license/software. If the Alliance for Open
-; Media Patent License 1.0 was not distributed with this source code in the
-; PATENTS file, you can obtain it at www.aomedia.org/license/patent.
-;
-
-;
-
-
- EXPORT |aom_variance_halfpixvar16x16_hv_media|
-
- ARM
- REQUIRE8
- PRESERVE8
-
- AREA ||.text||, CODE, READONLY, ALIGN=2
-
-; r0 unsigned char *src_ptr
-; r1 int source_stride
-; r2 unsigned char *ref_ptr
-; r3 int recon_stride
-; stack unsigned int *sse
-|aom_variance_halfpixvar16x16_hv_media| PROC
-
- stmfd sp!, {r4-r12, lr}
-
- pld [r0, r1, lsl #0]
- pld [r2, r3, lsl #0]
-
- mov r8, #0 ; initialize sum = 0
- ldr r10, c80808080
- mov r11, #0 ; initialize sse = 0
- mov r12, #16 ; set loop counter to 16 (=block height)
- mov lr, #0 ; constant zero
-loop
- add r9, r0, r1 ; pointer to pixels on the next row
- ; 1st 4 pixels
- ldr r4, [r0, #0] ; load source pixels a, row N
- ldr r6, [r0, #1] ; load source pixels b, row N
- ldr r5, [r9, #0] ; load source pixels c, row N+1
- ldr r7, [r9, #1] ; load source pixels d, row N+1
-
- ; x = (a + b + 1) >> 1, interpolate pixels horizontally on row N
- mvn r6, r6
- uhsub8 r4, r4, r6
- eor r4, r4, r10
- ; y = (c + d + 1) >> 1, interpolate pixels horizontally on row N+1
- mvn r7, r7
- uhsub8 r5, r5, r7
- eor r5, r5, r10
- ; z = (x + y + 1) >> 1, interpolate half pixel values vertically
- mvn r5, r5
- uhsub8 r4, r4, r5
- ldr r5, [r2, #0] ; load 4 ref pixels
- eor r4, r4, r10
-
- usub8 r6, r4, r5 ; calculate difference
- pld [r0, r1, lsl #1]
- sel r7, r6, lr ; select bytes with positive difference
- usub8 r6, r5, r4 ; calculate difference with reversed operands
- pld [r2, r3, lsl #1]
- sel r6, r6, lr ; select bytes with negative difference
-
- ; calculate partial sums
- usad8 r4, r7, lr ; calculate sum of positive differences
- usad8 r5, r6, lr ; calculate sum of negative differences
- orr r6, r6, r7 ; differences of all 4 pixels
- ; calculate total sum
- adds r8, r8, r4 ; add positive differences to sum
- subs r8, r8, r5 ; subtract negative differences from sum
-
- ; calculate sse
- uxtb16 r5, r6 ; byte (two pixels) to halfwords
- uxtb16 r7, r6, ror #8 ; another two pixels to halfwords
- smlad r11, r5, r5, r11 ; dual signed multiply, add and accumulate (1)
-
- ; 2nd 4 pixels
- ldr r4, [r0, #4] ; load source pixels a, row N
- ldr r6, [r0, #5] ; load source pixels b, row N
- ldr r5, [r9, #4] ; load source pixels c, row N+1
-
- smlad r11, r7, r7, r11 ; dual signed multiply, add and accumulate (2)
-
- ldr r7, [r9, #5] ; load source pixels d, row N+1
-
- ; x = (a + b + 1) >> 1, interpolate pixels horizontally on row N
- mvn r6, r6
- uhsub8 r4, r4, r6
- eor r4, r4, r10
- ; y = (c + d + 1) >> 1, interpolate pixels horizontally on row N+1
- mvn r7, r7
- uhsub8 r5, r5, r7
- eor r5, r5, r10
- ; z = (x + y + 1) >> 1, interpolate half pixel values vertically
- mvn r5, r5
- uhsub8 r4, r4, r5
- ldr r5, [r2, #4] ; load 4 ref pixels
- eor r4, r4, r10
-
- usub8 r6, r4, r5 ; calculate difference
- sel r7, r6, lr ; select bytes with positive difference
- usub8 r6, r5, r4 ; calculate difference with reversed operands
- sel r6, r6, lr ; select bytes with negative difference
-
- ; calculate partial sums
- usad8 r4, r7, lr ; calculate sum of positive differences
- usad8 r5, r6, lr ; calculate sum of negative differences
- orr r6, r6, r7 ; differences of all 4 pixels
-
- ; calculate total sum
- add r8, r8, r4 ; add positive differences to sum
- sub r8, r8, r5 ; subtract negative differences from sum
-
- ; calculate sse
- uxtb16 r5, r6 ; byte (two pixels) to halfwords
- uxtb16 r7, r6, ror #8 ; another two pixels to halfwords
- smlad r11, r5, r5, r11 ; dual signed multiply, add and accumulate (1)
-
- ; 3rd 4 pixels
- ldr r4, [r0, #8] ; load source pixels a, row N
- ldr r6, [r0, #9] ; load source pixels b, row N
- ldr r5, [r9, #8] ; load source pixels c, row N+1
-
- smlad r11, r7, r7, r11 ; dual signed multiply, add and accumulate (2)
-
- ldr r7, [r9, #9] ; load source pixels d, row N+1
-
- ; x = (a + b + 1) >> 1, interpolate pixels horizontally on row N
- mvn r6, r6
- uhsub8 r4, r4, r6
- eor r4, r4, r10
- ; y = (c + d + 1) >> 1, interpolate pixels horizontally on row N+1
- mvn r7, r7
- uhsub8 r5, r5, r7
- eor r5, r5, r10
- ; z = (x + y + 1) >> 1, interpolate half pixel values vertically
- mvn r5, r5
- uhsub8 r4, r4, r5
- ldr r5, [r2, #8] ; load 4 ref pixels
- eor r4, r4, r10
-
- usub8 r6, r4, r5 ; calculate difference
- sel r7, r6, lr ; select bytes with positive difference
- usub8 r6, r5, r4 ; calculate difference with reversed operands
- sel r6, r6, lr ; select bytes with negative difference
-
- ; calculate partial sums
- usad8 r4, r7, lr ; calculate sum of positive differences
- usad8 r5, r6, lr ; calculate sum of negative differences
- orr r6, r6, r7 ; differences of all 4 pixels
-
- ; calculate total sum
- add r8, r8, r4 ; add positive differences to sum
- sub r8, r8, r5 ; subtract negative differences from sum
-
- ; calculate sse
- uxtb16 r5, r6 ; byte (two pixels) to halfwords
- uxtb16 r7, r6, ror #8 ; another two pixels to halfwords
- smlad r11, r5, r5, r11 ; dual signed multiply, add and accumulate (1)
-
- ; 4th 4 pixels
- ldr r4, [r0, #12] ; load source pixels a, row N
- ldr r6, [r0, #13] ; load source pixels b, row N
- ldr r5, [r9, #12] ; load source pixels c, row N+1
- smlad r11, r7, r7, r11 ; dual signed multiply, add and accumulate (2)
- ldr r7, [r9, #13] ; load source pixels d, row N+1
-
- ; x = (a + b + 1) >> 1, interpolate pixels horizontally on row N
- mvn r6, r6
- uhsub8 r4, r4, r6
- eor r4, r4, r10
- ; y = (c + d + 1) >> 1, interpolate pixels horizontally on row N+1
- mvn r7, r7
- uhsub8 r5, r5, r7
- eor r5, r5, r10
- ; z = (x + y + 1) >> 1, interpolate half pixel values vertically
- mvn r5, r5
- uhsub8 r4, r4, r5
- ldr r5, [r2, #12] ; load 4 ref pixels
- eor r4, r4, r10
-
- usub8 r6, r4, r5 ; calculate difference
- add r0, r0, r1 ; set src_ptr to next row
- sel r7, r6, lr ; select bytes with positive difference
- usub8 r6, r5, r4 ; calculate difference with reversed operands
- add r2, r2, r3 ; set dst_ptr to next row
- sel r6, r6, lr ; select bytes with negative difference
-
- ; calculate partial sums
- usad8 r4, r7, lr ; calculate sum of positive differences
- usad8 r5, r6, lr ; calculate sum of negative differences
- orr r6, r6, r7 ; differences of all 4 pixels
-
- ; calculate total sum
- add r8, r8, r4 ; add positive differences to sum
- sub r8, r8, r5 ; subtract negative differences from sum
-
- ; calculate sse
- uxtb16 r5, r6 ; byte (two pixels) to halfwords
- uxtb16 r7, r6, ror #8 ; another two pixels to halfwords
- smlad r11, r5, r5, r11 ; dual signed multiply, add and accumulate (1)
- subs r12, r12, #1
- smlad r11, r7, r7, r11 ; dual signed multiply, add and accumulate (2)
-
- bne loop
-
- ; return stuff
- ldr r6, [sp, #40] ; get address of sse
- mul r0, r8, r8 ; sum * sum
- str r11, [r6] ; store sse
- sub r0, r11, r0, lsr #8 ; return (sse - ((sum * sum) >> 8))
-
- ldmfd sp!, {r4-r12, pc}
-
- ENDP
-
-c80808080
- DCD 0x80808080
-
- END
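The hv variant above composes two of those rounded averages: first
horizontally on rows N and N+1, then vertically on the two results. Per
pixel, the arithmetic is (a sketch of one lane, not of the packed 4-lane
code):

#include <stdint.h>

static uint8_t halfpel_hv(uint8_t a, uint8_t b, uint8_t c, uint8_t d) {
  const uint8_t x = (uint8_t)((a + b + 1) >> 1);  /* row N   */
  const uint8_t y = (uint8_t)((c + d + 1) >> 1);  /* row N+1 */
  return (uint8_t)((x + y + 1) >> 1);             /* combine */
}

The nested rounding can differ from the exact bilinear value
(a + b + c + d + 2) >> 2 by at most 1, the trade-off for staying inside
the packed-byte uhsub8 idiom.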
diff --git a/third_party/aom/aom_dsp/arm/variance_halfpixvar16x16_v_media.asm b/third_party/aom/aom_dsp/arm/variance_halfpixvar16x16_v_media.asm
deleted file mode 100644
index 545b681794..0000000000
--- a/third_party/aom/aom_dsp/arm/variance_halfpixvar16x16_v_media.asm
+++ /dev/null
@@ -1,187 +0,0 @@
-;
-; Copyright (c) 2016, Alliance for Open Media. All rights reserved
-;
-; This source code is subject to the terms of the BSD 2 Clause License and
-; the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
-; was not distributed with this source code in the LICENSE file, you can
-; obtain it at www.aomedia.org/license/software. If the Alliance for Open
-; Media Patent License 1.0 was not distributed with this source code in the
-; PATENTS file, you can obtain it at www.aomedia.org/license/patent.
-;
-
-;
-
-
- EXPORT |aom_variance_halfpixvar16x16_v_media|
-
- ARM
- REQUIRE8
- PRESERVE8
-
- AREA ||.text||, CODE, READONLY, ALIGN=2
-
-; r0 unsigned char *src_ptr
-; r1 int source_stride
-; r2 unsigned char *ref_ptr
-; r3 int recon_stride
-; stack unsigned int *sse
-|aom_variance_halfpixvar16x16_v_media| PROC
-
- stmfd sp!, {r4-r12, lr}
-
- pld [r0, r1, lsl #0]
- pld [r2, r3, lsl #0]
-
- mov r8, #0 ; initialize sum = 0
- ldr r10, c80808080
- mov r11, #0 ; initialize sse = 0
- mov r12, #16 ; set loop counter to 16 (=block height)
- mov lr, #0 ; constant zero
-loop
- add r9, r0, r1 ; set src pointer to next row
- ; 1st 4 pixels
- ldr r4, [r0, #0] ; load 4 src pixels
- ldr r6, [r9, #0] ; load 4 src pixels from next row
- ldr r5, [r2, #0] ; load 4 ref pixels
-
- ; bilinear interpolation
- mvn r6, r6
- uhsub8 r4, r4, r6
- eor r4, r4, r10
-
- usub8 r6, r4, r5 ; calculate difference
- pld [r0, r1, lsl #1]
- sel r7, r6, lr ; select bytes with positive difference
- usub8 r6, r5, r4 ; calculate difference with reversed operands
- pld [r2, r3, lsl #1]
- sel r6, r6, lr ; select bytes with negative difference
-
- ; calculate partial sums
- usad8 r4, r7, lr ; calculate sum of positive differences
- usad8 r5, r6, lr ; calculate sum of negative differences
- orr r6, r6, r7 ; differences of all 4 pixels
- ; calculate total sum
- adds r8, r8, r4 ; add positive differences to sum
- subs r8, r8, r5 ; subtract negative differences from sum
-
- ; calculate sse
- uxtb16 r5, r6 ; byte (two pixels) to halfwords
- uxtb16 r7, r6, ror #8 ; another two pixels to halfwords
- smlad r11, r5, r5, r11 ; dual signed multiply, add and accumulate (1)
-
- ; 2nd 4 pixels
- ldr r4, [r0, #4] ; load 4 src pixels
- ldr r6, [r9, #4] ; load 4 src pixels from next row
- ldr r5, [r2, #4] ; load 4 ref pixels
-
- ; bilinear interpolation
- mvn r6, r6
- uhsub8 r4, r4, r6
- eor r4, r4, r10
-
- smlad r11, r7, r7, r11 ; dual signed multiply, add and accumulate (2)
-
- usub8 r6, r4, r5 ; calculate difference
- sel r7, r6, lr ; select bytes with positive difference
- usub8 r6, r5, r4 ; calculate difference with reversed operands
- sel r6, r6, lr ; select bytes with negative difference
-
- ; calculate partial sums
- usad8 r4, r7, lr ; calculate sum of positive differences
- usad8 r5, r6, lr ; calculate sum of negative differences
- orr r6, r6, r7 ; differences of all 4 pixels
-
- ; calculate total sum
- add r8, r8, r4 ; add positive differences to sum
- sub r8, r8, r5 ; subtract negative differences from sum
-
- ; calculate sse
- uxtb16 r5, r6 ; byte (two pixels) to halfwords
- uxtb16 r7, r6, ror #8 ; another two pixels to halfwords
- smlad r11, r5, r5, r11 ; dual signed multiply, add and accumulate (1)
-
- ; 3rd 4 pixels
- ldr r4, [r0, #8] ; load 4 src pixels
- ldr r6, [r9, #8] ; load 4 src pixels from next row
- ldr r5, [r2, #8] ; load 4 ref pixels
-
- ; bilinear interpolation
- mvn r6, r6
- uhsub8 r4, r4, r6
- eor r4, r4, r10
-
- smlad r11, r7, r7, r11 ; dual signed multiply, add and accumulate (2)
-
- usub8 r6, r4, r5 ; calculate difference
- sel r7, r6, lr ; select bytes with positive difference
- usub8 r6, r5, r4 ; calculate difference with reversed operands
- sel r6, r6, lr ; select bytes with negative difference
-
- ; calculate partial sums
- usad8 r4, r7, lr ; calculate sum of positive differences
- usad8 r5, r6, lr ; calculate sum of negative differences
- orr r6, r6, r7 ; differences of all 4 pixels
-
- ; calculate total sum
- add r8, r8, r4 ; add positive differences to sum
- sub r8, r8, r5 ; subtract negative differences from sum
-
- ; calculate sse
- uxtb16 r5, r6 ; byte (two pixels) to halfwords
- uxtb16 r7, r6, ror #8 ; another two pixels to halfwords
- smlad r11, r5, r5, r11 ; dual signed multiply, add and accumulate (1)
-
- ; 4th 4 pixels
- ldr r4, [r0, #12] ; load 4 src pixels
- ldr r6, [r9, #12] ; load 4 src pixels from next row
- ldr r5, [r2, #12] ; load 4 ref pixels
-
- ; bilinear interpolation
- mvn r6, r6
- uhsub8 r4, r4, r6
- eor r4, r4, r10
-
- smlad r11, r7, r7, r11 ; dual signed multiply, add and accumulate (2)
-
- usub8 r6, r4, r5 ; calculate difference
- add r0, r0, r1 ; set src_ptr to next row
- sel r7, r6, lr ; select bytes with positive difference
- usub8 r6, r5, r4 ; calculate difference with reversed operands
- add r2, r2, r3 ; set dst_ptr to next row
- sel r6, r6, lr ; select bytes with negative difference
-
- ; calculate partial sums
- usad8 r4, r7, lr ; calculate sum of positive differences
- usad8 r5, r6, lr ; calculate sum of negative differences
- orr r6, r6, r7 ; differences of all 4 pixels
-
- ; calculate total sum
- add r8, r8, r4 ; add positive differences to sum
- sub r8, r8, r5 ; subtract negative differences from sum
-
- ; calculate sse
- uxtb16 r5, r6 ; byte (two pixels) to halfwords
- uxtb16 r7, r6, ror #8 ; another two pixels to halfwords
- smlad r11, r5, r5, r11 ; dual signed multiply, add and accumulate (1)
- smlad r11, r7, r7, r11 ; dual signed multiply, add and accumulate (2)
-
-
- subs r12, r12, #1
-
- bne loop
-
- ; return stuff
- ldr r6, [sp, #40] ; get address of sse
- mul r0, r8, r8 ; sum * sum
- str r11, [r6] ; store sse
- sub r0, r11, r0, lsr #8 ; return (sse - ((sum * sum) >> 8))
-
- ldmfd sp!, {r4-r12, pc}
-
- ENDP
-
-c80808080
- DCD 0x80808080
-
- END
-
diff --git a/third_party/aom/aom_dsp/arm/variance_media.asm b/third_party/aom/aom_dsp/arm/variance_media.asm
deleted file mode 100644
index fdc311a81c..0000000000
--- a/third_party/aom/aom_dsp/arm/variance_media.asm
+++ /dev/null
@@ -1,361 +0,0 @@
-;
-; Copyright (c) 2016, Alliance for Open Media. All rights reserved
-;
-; This source code is subject to the terms of the BSD 2 Clause License and
-; the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
-; was not distributed with this source code in the LICENSE file, you can
-; obtain it at www.aomedia.org/license/software. If the Alliance for Open
-; Media Patent License 1.0 was not distributed with this source code in the
-; PATENTS file, you can obtain it at www.aomedia.org/license/patent.
-;
-
-;
-
-
- EXPORT |aom_variance16x16_media|
- EXPORT |aom_variance8x8_media|
- EXPORT |aom_mse16x16_media|
-
- ARM
- REQUIRE8
- PRESERVE8
-
- AREA ||.text||, CODE, READONLY, ALIGN=2
-
-; r0 unsigned char *src_ptr
-; r1 int source_stride
-; r2 unsigned char *ref_ptr
-; r3 int recon_stride
-; stack unsigned int *sse
-|aom_variance16x16_media| PROC
-
- stmfd sp!, {r4-r12, lr}
-
- pld [r0, r1, lsl #0]
- pld [r2, r3, lsl #0]
-
- mov r8, #0 ; initialize sum = 0
- mov r11, #0 ; initialize sse = 0
- mov r12, #16 ; set loop counter to 16 (=block height)
-
-loop16x16
- ; 1st 4 pixels
- ldr r4, [r0, #0] ; load 4 src pixels
- ldr r5, [r2, #0] ; load 4 ref pixels
-
- mov lr, #0 ; constant zero
-
- usub8 r6, r4, r5 ; calculate difference
- pld [r0, r1, lsl #1]
- sel r7, r6, lr ; select bytes with positive difference
- usub8 r9, r5, r4 ; calculate difference with reversed operands
- pld [r2, r3, lsl #1]
- sel r6, r9, lr ; select bytes with negative difference
-
- ; calculate partial sums
- usad8 r4, r7, lr ; calculate sum of positive differences
- usad8 r5, r6, lr ; calculate sum of negative differences
- orr r6, r6, r7 ; differences of all 4 pixels
- ; calculate total sum
- adds r8, r8, r4 ; add positive differences to sum
- subs r8, r8, r5 ; subtract negative differences from sum
-
- ; calculate sse
- uxtb16 r5, r6 ; byte (two pixels) to halfwords
- uxtb16 r10, r6, ror #8 ; another two pixels to halfwords
- smlad r11, r5, r5, r11 ; dual signed multiply, add and accumulate (1)
-
- ; 2nd 4 pixels
- ldr r4, [r0, #4] ; load 4 src pixels
- ldr r5, [r2, #4] ; load 4 ref pixels
- smlad r11, r10, r10, r11 ; dual signed multiply, add and accumulate (2)
-
- usub8 r6, r4, r5 ; calculate difference
- sel r7, r6, lr ; select bytes with positive difference
- usub8 r9, r5, r4 ; calculate difference with reversed operands
- sel r6, r9, lr ; select bytes with negative difference
-
- ; calculate partial sums
- usad8 r4, r7, lr ; calculate sum of positive differences
- usad8 r5, r6, lr ; calculate sum of negative differences
- orr r6, r6, r7 ; differences of all 4 pixels
-
- ; calculate total sum
- add r8, r8, r4 ; add positive differences to sum
- sub r8, r8, r5 ; subtract negative differences from sum
-
- ; calculate sse
- uxtb16 r5, r6 ; byte (two pixels) to halfwords
- uxtb16 r10, r6, ror #8 ; another two pixels to halfwords
- smlad r11, r5, r5, r11 ; dual signed multiply, add and accumulate (1)
-
- ; 3rd 4 pixels
- ldr r4, [r0, #8] ; load 4 src pixels
- ldr r5, [r2, #8] ; load 4 ref pixels
- smlad r11, r10, r10, r11 ; dual signed multiply, add and accumulate (2)
-
- usub8 r6, r4, r5 ; calculate difference
- sel r7, r6, lr ; select bytes with positive difference
- usub8 r9, r5, r4 ; calculate difference with reversed operands
- sel r6, r9, lr ; select bytes with negative difference
-
- ; calculate partial sums
- usad8 r4, r7, lr ; calculate sum of positive differences
- usad8 r5, r6, lr ; calculate sum of negative differences
- orr r6, r6, r7 ; differences of all 4 pixels
-
- ; calculate total sum
- add r8, r8, r4 ; add positive differences to sum
- sub r8, r8, r5 ; subtract negative differences from sum
-
- ; calculate sse
- uxtb16 r5, r6 ; byte (two pixels) to halfwords
- uxtb16 r10, r6, ror #8 ; another two pixels to halfwords
- smlad r11, r5, r5, r11 ; dual signed multiply, add and accumulate (1)
-
- ; 4th 4 pixels
- ldr r4, [r0, #12] ; load 4 src pixels
- ldr r5, [r2, #12] ; load 4 ref pixels
- smlad r11, r10, r10, r11 ; dual signed multiply, add and accumulate (2)
-
- usub8 r6, r4, r5 ; calculate difference
- add r0, r0, r1 ; set src_ptr to next row
- sel r7, r6, lr ; select bytes with positive difference
- usub8 r9, r5, r4 ; calculate difference with reversed operands
- add r2, r2, r3 ; set dst_ptr to next row
- sel r6, r9, lr ; select bytes with negative difference
-
- ; calculate partial sums
- usad8 r4, r7, lr ; calculate sum of positive differences
- usad8 r5, r6, lr ; calculate sum of negative differences
- orr r6, r6, r7 ; differences of all 4 pixels
-
- ; calculate total sum
- add r8, r8, r4 ; add positive differences to sum
- sub r8, r8, r5 ; subtract negative differences from sum
-
- ; calculate sse
- uxtb16 r5, r6 ; byte (two pixels) to halfwords
- uxtb16 r10, r6, ror #8 ; another two pixels to halfwords
- smlad r11, r5, r5, r11 ; dual signed multiply, add and accumulate (1)
- smlad r11, r10, r10, r11 ; dual signed multiply, add and accumulate (2)
-
-
- subs r12, r12, #1
-
- bne loop16x16
-
- ; return stuff
- ldr r6, [sp, #40] ; get address of sse
- mul r0, r8, r8 ; sum * sum
- str r11, [r6] ; store sse
- sub r0, r11, r0, lsr #8 ; return (sse - ((sum * sum) >> 8))
-
- ldmfd sp!, {r4-r12, pc}
-
- ENDP
-
-; r0 unsigned char *src_ptr
-; r1 int source_stride
-; r2 unsigned char *ref_ptr
-; r3 int recon_stride
-; stack unsigned int *sse
-|aom_variance8x8_media| PROC
-
- push {r4-r10, lr}
-
- pld [r0, r1, lsl #0]
- pld [r2, r3, lsl #0]
-
- mov r12, #8 ; set loop counter to 8 (=block height)
- mov r4, #0 ; initialize sum = 0
- mov r5, #0 ; initialize sse = 0
-
-loop8x8
- ; 1st 4 pixels
- ldr r6, [r0, #0x0] ; load 4 src pixels
- ldr r7, [r2, #0x0] ; load 4 ref pixels
-
- mov lr, #0 ; constant zero
-
- usub8 r8, r6, r7 ; calculate difference
- pld [r0, r1, lsl #1]
- sel r10, r8, lr ; select bytes with positive difference
- usub8 r9, r7, r6 ; calculate difference with reversed operands
- pld [r2, r3, lsl #1]
- sel r8, r9, lr ; select bytes with negative difference
-
- ; calculate partial sums
- usad8 r6, r10, lr ; calculate sum of positive differences
- usad8 r7, r8, lr ; calculate sum of negative differences
- orr r8, r8, r10 ; differences of all 4 pixels
- ; calculate total sum
- add r4, r4, r6 ; add positive differences to sum
- sub r4, r4, r7 ; subtract negative differences from sum
-
- ; calculate sse
- uxtb16 r7, r8 ; byte (two pixels) to halfwords
- uxtb16 r10, r8, ror #8 ; another two pixels to halfwords
- smlad r5, r7, r7, r5 ; dual signed multiply, add and accumulate (1)
-
- ; 2nd 4 pixels
- ldr r6, [r0, #0x4] ; load 4 src pixels
- ldr r7, [r2, #0x4] ; load 4 ref pixels
- smlad r5, r10, r10, r5 ; dual signed multiply, add and accumulate (2)
-
- usub8 r8, r6, r7 ; calculate difference
- add r0, r0, r1 ; set src_ptr to next row
- sel r10, r8, lr ; select bytes with positive difference
- usub8 r9, r7, r6 ; calculate difference with reversed operands
- add r2, r2, r3 ; set dst_ptr to next row
- sel r8, r9, lr ; select bytes with negative difference
-
- ; calculate partial sums
- usad8 r6, r10, lr ; calculate sum of positive differences
- usad8 r7, r8, lr ; calculate sum of negative differences
- orr r8, r8, r10 ; differences of all 4 pixels
-
- ; calculate total sum
- add r4, r4, r6 ; add positive differences to sum
- sub r4, r4, r7 ; subtract negative differences from sum
-
- ; calculate sse
- uxtb16 r7, r8 ; byte (two pixels) to halfwords
- uxtb16 r10, r8, ror #8 ; another two pixels to halfwords
- smlad r5, r7, r7, r5 ; dual signed multiply, add and accumulate (1)
- subs r12, r12, #1 ; next row
- smlad r5, r10, r10, r5 ; dual signed multiply, add and accumulate (2)
-
- bne loop8x8
-
- ; return stuff
- ldr r8, [sp, #32] ; get address of sse
- mul r1, r4, r4 ; sum * sum
- str r5, [r8] ; store sse
- sub r0, r5, r1, ASR #6 ; return (sse - ((sum * sum) >> 6))
-
- pop {r4-r10, pc}
-
- ENDP
-
-; r0 unsigned char *src_ptr
-; r1 int source_stride
-; r2 unsigned char *ref_ptr
-; r3 int recon_stride
-; stack unsigned int *sse
-;
-;note: Based on aom_variance16x16_media. In this function the sum is never
-; used, so that part of the calculation is removed.
-
-|aom_mse16x16_media| PROC
-
- push {r4-r9, lr}
-
- pld [r0, r1, lsl #0]
- pld [r2, r3, lsl #0]
-
- mov r12, #16 ; set loop counter to 16 (=block height)
- mov r4, #0 ; initialize sse = 0
-
-loopmse
- ; 1st 4 pixels
- ldr r5, [r0, #0x0] ; load 4 src pixels
- ldr r6, [r2, #0x0] ; load 4 ref pixels
-
- mov lr, #0 ; constant zero
-
- usub8 r8, r5, r6 ; calculate difference
- pld [r0, r1, lsl #1]
- sel r7, r8, lr ; select bytes with positive difference
- usub8 r9, r6, r5 ; calculate difference with reversed operands
- pld [r2, r3, lsl #1]
- sel r8, r9, lr ; select bytes with negative difference
-
- ; calculate partial sums
- usad8 r5, r7, lr ; calculate sum of positive differences
- usad8 r6, r8, lr ; calculate sum of negative differences
- orr r8, r8, r7 ; differences of all 4 pixels
-
- ldr r5, [r0, #0x4] ; load 4 src pixels
-
- ; calculate sse
- uxtb16 r6, r8 ; byte (two pixels) to halfwords
- uxtb16 r7, r8, ror #8 ; another two pixels to halfwords
- smlad r4, r6, r6, r4 ; dual signed multiply, add and accumulate (1)
-
- ; 2nd 4 pixels
- ldr r6, [r2, #0x4] ; load 4 ref pixels
- smlad r4, r7, r7, r4 ; dual signed multiply, add and accumulate (2)
-
- usub8 r8, r5, r6 ; calculate difference
- sel r7, r8, lr ; select bytes with positive difference
- usub8 r9, r6, r5 ; calculate difference with reversed operands
- sel r8, r9, lr ; select bytes with negative difference
-
- ; calculate partial sums
- usad8 r5, r7, lr ; calculate sum of positive differences
- usad8 r6, r8, lr ; calculate sum of negative differences
- orr r8, r8, r7 ; differences of all 4 pixels
- ldr r5, [r0, #0x8] ; load 4 src pixels
- ; calculate sse
- uxtb16 r6, r8 ; byte (two pixels) to halfwords
- uxtb16 r7, r8, ror #8 ; another two pixels to halfwords
- smlad r4, r6, r6, r4 ; dual signed multiply, add and accumulate (1)
-
- ; 3rd 4 pixels
- ldr r6, [r2, #0x8] ; load 4 ref pixels
- smlad r4, r7, r7, r4 ; dual signed multiply, add and accumulate (2)
-
- usub8 r8, r5, r6 ; calculate difference
- sel r7, r8, lr ; select bytes with positive difference
- usub8 r9, r6, r5 ; calculate difference with reversed operands
- sel r8, r9, lr ; select bytes with negative difference
-
- ; calculate partial sums
- usad8 r5, r7, lr ; calculate sum of positive differences
- usad8 r6, r8, lr ; calculate sum of negative differences
- orr r8, r8, r7 ; differences of all 4 pixels
-
- ldr r5, [r0, #0xc] ; load 4 src pixels
-
- ; calculate sse
- uxtb16 r6, r8 ; byte (two pixels) to halfwords
- uxtb16 r7, r8, ror #8 ; another two pixels to halfwords
- smlad r4, r6, r6, r4 ; dual signed multiply, add and accumulate (1)
-
- ; 4th 4 pixels
- ldr r6, [r2, #0xc] ; load 4 ref pixels
- smlad r4, r7, r7, r4 ; dual signed multiply, add and accumulate (2)
-
- usub8 r8, r5, r6 ; calculate difference
- add r0, r0, r1 ; set src_ptr to next row
- sel r7, r8, lr ; select bytes with positive difference
- usub8 r9, r6, r5 ; calculate difference with reversed operands
- add r2, r2, r3 ; set dst_ptr to next row
- sel r8, r9, lr ; select bytes with negative difference
-
- ; calculate partial sums
- usad8 r5, r7, lr ; calculate sum of positive differences
- usad8 r6, r8, lr ; calculate sum of negative differences
- orr r8, r8, r7 ; differences of all 4 pixels
-
- subs r12, r12, #1 ; next row
-
- ; calculate sse
- uxtb16 r6, r8 ; byte (two pixels) to halfwords
- uxtb16 r7, r8, ror #8 ; another two pixels to halfwords
- smlad r4, r6, r6, r4 ; dual signed multiply, add and accumulate (1)
- smlad r4, r7, r7, r4 ; dual signed multiply, add and accumulate (2)
-
- bne loopmse
-
- ; return stuff
- ldr r1, [sp, #28] ; get address of sse
- mov r0, r4 ; return sse
- str r4, [r1] ; store sse
-
- pop {r4-r9, pc}
-
- ENDP
-
- END
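All of these variance kernels share the same epilogue: accumulate the
pixel-difference sum and the sum of squared differences, then return
sse - sum * sum / N, where N is the pixel count (the lsr #8 above for
16x16, asr #6 for 8x8); aom_mse16x16_media skips the sum and returns sse
directly. A compact model of that finishing step (illustrative only):

#include <stdint.h>

static uint32_t variance_finish(int32_t sum, uint32_t sse, int log2_n) {
  /* 64-bit product guards against overflow before the shift */
  return sse - (uint32_t)(((int64_t)sum * sum) >> log2_n);
}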
diff --git a/third_party/aom/aom_dsp/avg.c b/third_party/aom/aom_dsp/avg.c
index eb60597052..f732224fd9 100644
--- a/third_party/aom/aom_dsp/avg.c
+++ b/third_party/aom/aom_dsp/avg.c
@@ -13,26 +13,6 @@
#include "./aom_dsp_rtcd.h"
#include "aom_ports/mem.h"
-unsigned int aom_avg_8x8_c(const uint8_t *src, int stride) {
- int i, j;
- int sum = 0;
- for (i = 0; i < 8; ++i, src += stride)
- for (j = 0; j < 8; sum += src[j], ++j) {
- }
-
- return ROUND_POWER_OF_TWO(sum, 6);
-}
-
-unsigned int aom_avg_4x4_c(const uint8_t *src, int stride) {
- int i, j;
- int sum = 0;
- for (i = 0; i < 4; ++i, src += stride)
- for (j = 0; j < 4; sum += src[j], ++j) {
- }
-
- return ROUND_POWER_OF_TWO(sum, 4);
-}
-
// src_diff: first pass, 9 bit, dynamic range [-255, 255]
// second pass, 12 bit, dynamic range [-2040, 2040]
static void hadamard_col8(const int16_t *src_diff, int src_stride,
@@ -192,28 +172,6 @@ void aom_minmax_8x8_c(const uint8_t *src, int src_stride, const uint8_t *ref,
}
#if CONFIG_HIGHBITDEPTH
-unsigned int aom_highbd_avg_8x8_c(const uint8_t *src, int stride) {
- int i, j;
- int sum = 0;
- const uint16_t *s = CONVERT_TO_SHORTPTR(src);
- for (i = 0; i < 8; ++i, s += stride)
- for (j = 0; j < 8; sum += s[j], ++j) {
- }
-
- return ROUND_POWER_OF_TWO(sum, 6);
-}
-
-unsigned int aom_highbd_avg_4x4_c(const uint8_t *src, int stride) {
- int i, j;
- int sum = 0;
- const uint16_t *s = CONVERT_TO_SHORTPTR(src);
- for (i = 0; i < 4; ++i, s += stride)
- for (j = 0; j < 4; sum += s[j], ++j) {
- }
-
- return ROUND_POWER_OF_TWO(sum, 4);
-}
-
void aom_highbd_minmax_8x8_c(const uint8_t *s8, int p, const uint8_t *d8,
int dp, int *min, int *max) {
int i, j;
diff --git a/third_party/aom/aom_dsp/binary_codes_reader.c b/third_party/aom/aom_dsp/binary_codes_reader.c
index 96c4cb436d..bf304dadaa 100644
--- a/third_party/aom/aom_dsp/binary_codes_reader.c
+++ b/third_party/aom/aom_dsp/binary_codes_reader.c
@@ -9,7 +9,7 @@
* PATENTS file, you can obtain it at www.aomedia.org/license/patent.
*/
-#include "aom_dsp/bitreader.h"
+#include "aom_dsp/binary_codes_reader.h"
#include "av1/common/common.h"
@@ -33,26 +33,28 @@ static uint16_t inv_recenter_finite_nonneg(uint16_t n, uint16_t r, uint16_t v) {
}
}
-int16_t aom_read_primitive_symmetric(aom_reader *r, unsigned int mag_bits) {
- if (aom_read_bit(r, NULL)) {
- int s = aom_read_bit(r, NULL);
- int16_t x = aom_read_literal(r, mag_bits, NULL) + 1;
+int16_t aom_read_primitive_symmetric_(aom_reader *r,
+ unsigned int mag_bits ACCT_STR_PARAM) {
+ if (aom_read_bit(r, ACCT_STR_NAME)) {
+ int s = aom_read_bit(r, ACCT_STR_NAME);
+ int16_t x = aom_read_literal(r, mag_bits, ACCT_STR_NAME) + 1;
return (s > 0 ? -x : x);
} else {
return 0;
}
}
-uint16_t aom_read_primitive_quniform(aom_reader *r, uint16_t n) {
+uint16_t aom_read_primitive_quniform_(aom_reader *r,
+ uint16_t n ACCT_STR_PARAM) {
if (n <= 1) return 0;
const int l = get_msb(n - 1) + 1;
const int m = (1 << l) - n;
- const int v = aom_read_literal(r, l - 1, NULL);
- return v < m ? v : (v << 1) - m + aom_read_bit(r, NULL);
+ const int v = aom_read_literal(r, l - 1, ACCT_STR_NAME);
+ return v < m ? v : (v << 1) - m + aom_read_bit(r, ACCT_STR_NAME);
}
-uint16_t aom_read_primitive_refbilevel(aom_reader *r, uint16_t n, uint16_t p,
- uint16_t ref) {
+uint16_t aom_read_primitive_refbilevel_(aom_reader *r, uint16_t n, uint16_t p,
+ uint16_t ref ACCT_STR_PARAM) {
if (n <= 1) return 0;
assert(p > 0 && p <= n);
assert(ref < n);
@@ -64,10 +66,10 @@ uint16_t aom_read_primitive_refbilevel(aom_reader *r, uint16_t n, uint16_t p,
lolimit = n - p;
}
int v;
- if (aom_read_bit(r, NULL)) {
- v = aom_read_primitive_quniform(r, p) + lolimit;
+ if (aom_read_bit(r, ACCT_STR_NAME)) {
+ v = aom_read_primitive_quniform(r, p, ACCT_STR_NAME) + lolimit;
} else {
- v = aom_read_primitive_quniform(r, n - p);
+ v = aom_read_primitive_quniform(r, n - p, ACCT_STR_NAME);
if (v >= lolimit) v += p;
}
return v;
@@ -75,7 +77,8 @@ uint16_t aom_read_primitive_refbilevel(aom_reader *r, uint16_t n, uint16_t p,
// Decode a finite subexponential code for a symbol v in [0, n-1] with
// parameter k
-uint16_t aom_read_primitive_subexpfin(aom_reader *r, uint16_t n, uint16_t k) {
+uint16_t aom_read_primitive_subexpfin_(aom_reader *r, uint16_t n,
+ uint16_t k ACCT_STR_PARAM) {
int i = 0;
int mk = 0;
uint16_t v;
@@ -83,14 +86,14 @@ uint16_t aom_read_primitive_subexpfin(aom_reader *r, uint16_t n, uint16_t k) {
int b = (i ? k + i - 1 : k);
int a = (1 << b);
if (n <= mk + 3 * a) {
- v = aom_read_primitive_quniform(r, n - mk) + mk;
+ v = aom_read_primitive_quniform(r, n - mk, ACCT_STR_NAME) + mk;
break;
} else {
- if (aom_read_bit(r, NULL)) {
+ if (aom_read_bit(r, ACCT_STR_NAME)) {
i = i + 1;
mk += a;
} else {
- v = aom_read_literal(r, b, NULL) + mk;
+ v = aom_read_literal(r, b, ACCT_STR_NAME) + mk;
break;
}
}
@@ -101,17 +104,19 @@ uint16_t aom_read_primitive_subexpfin(aom_reader *r, uint16_t n, uint16_t k) {
// Decode a finite subexponential code for a symbol v in [0, n-1] with
// parameter k,
// based on a reference ref also in [0, n-1].
-uint16_t aom_read_primitive_refsubexpfin(aom_reader *r, uint16_t n, uint16_t k,
- uint16_t ref) {
- return inv_recenter_finite_nonneg(n, ref,
- aom_read_primitive_subexpfin(r, n, k));
+uint16_t aom_read_primitive_refsubexpfin_(aom_reader *r, uint16_t n, uint16_t k,
+ uint16_t ref ACCT_STR_PARAM) {
+ return inv_recenter_finite_nonneg(
+ n, ref, aom_read_primitive_subexpfin(r, n, k, ACCT_STR_NAME));
}
// Decode a finite subexponential code for a symbol v in [-(n-1), n-1] with
// parameter k, based on a reference ref also in [-(n-1), n-1].
-int16_t aom_read_signed_primitive_refsubexpfin(aom_reader *r, uint16_t n,
- uint16_t k, int16_t ref) {
+int16_t aom_read_signed_primitive_refsubexpfin_(aom_reader *r, uint16_t n,
+ uint16_t k,
+ int16_t ref ACCT_STR_PARAM) {
ref += n - 1;
const uint16_t scaled_n = (n << 1) - 1;
- return aom_read_primitive_refsubexpfin(r, scaled_n, k, ref) - n + 1;
+ return aom_read_primitive_refsubexpfin(r, scaled_n, k, ref, ACCT_STR_NAME) -
+ n + 1;
}
diff --git a/third_party/aom/aom_dsp/binary_codes_reader.h b/third_party/aom/aom_dsp/binary_codes_reader.h
index 738d91da83..1540cf46b3 100644
--- a/third_party/aom/aom_dsp/binary_codes_reader.h
+++ b/third_party/aom/aom_dsp/binary_codes_reader.h
@@ -21,16 +21,32 @@ extern "C" {
#include "aom/aom_integer.h"
#include "aom_dsp/bitreader.h"
-int16_t aom_read_primitive_symmetric(aom_reader *r, unsigned int mag_bits);
+#define aom_read_primitive_symmetric(r, n, ACCT_STR_NAME) \
+ aom_read_primitive_symmetric_(r, n ACCT_STR_ARG(ACCT_STR_NAME))
+#define aom_read_primitive_quniform(r, n, ACCT_STR_NAME) \
+ aom_read_primitive_quniform_(r, n ACCT_STR_ARG(ACCT_STR_NAME))
+#define aom_read_primitive_refbilevel(r, n, p, ref, ACCT_STR_NAME) \
+ aom_read_primitive_refbilevel_(r, n, p, ref ACCT_STR_ARG(ACCT_STR_NAME))
+#define aom_read_primitive_subexpfin(r, n, k, ACCT_STR_NAME) \
+ aom_read_primitive_subexpfin_(r, n, k ACCT_STR_ARG(ACCT_STR_NAME))
+#define aom_read_primitive_refsubexpfin(r, n, k, ref, ACCT_STR_NAME) \
+ aom_read_primitive_refsubexpfin_(r, n, k, ref ACCT_STR_ARG(ACCT_STR_NAME))
+#define aom_read_signed_primitive_refsubexpfin(r, n, k, ref, ACCT_STR_NAME) \
+ aom_read_signed_primitive_refsubexpfin_(r, n, k, \
+ ref ACCT_STR_ARG(ACCT_STR_NAME))
-uint16_t aom_read_primitive_quniform(aom_reader *r, uint16_t n);
-uint16_t aom_read_primitive_refbilevel(aom_reader *r, uint16_t n, uint16_t p,
- uint16_t ref);
-uint16_t aom_read_primitive_subexpfin(aom_reader *r, uint16_t n, uint16_t k);
-uint16_t aom_read_primitive_refsubexpfin(aom_reader *r, uint16_t n, uint16_t k,
- uint16_t ref);
-int16_t aom_read_signed_primitive_refsubexpfin(aom_reader *r, uint16_t n,
- uint16_t k, int16_t ref);
+int16_t aom_read_primitive_symmetric_(aom_reader *r,
+ unsigned int mag_bits ACCT_STR_PARAM);
+uint16_t aom_read_primitive_quniform_(aom_reader *r, uint16_t n ACCT_STR_PARAM);
+uint16_t aom_read_primitive_refbilevel_(aom_reader *r, uint16_t n, uint16_t p,
+ uint16_t ref ACCT_STR_PARAM);
+uint16_t aom_read_primitive_subexpfin_(aom_reader *r, uint16_t n,
+ uint16_t k ACCT_STR_PARAM);
+uint16_t aom_read_primitive_refsubexpfin_(aom_reader *r, uint16_t n, uint16_t k,
+ uint16_t ref ACCT_STR_PARAM);
+int16_t aom_read_signed_primitive_refsubexpfin_(aom_reader *r, uint16_t n,
+ uint16_t k,
+ int16_t ref ACCT_STR_PARAM);
#ifdef __cplusplus
} // extern "C"
#endif
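With these wrappers, call sites pass an accounting label as the trailing
macro argument; ACCT_STR_ARG forwards it to the underscore-suffixed
function when CONFIG_ACCOUNTING is enabled and drops it otherwise. A
hypothetical call site (the label string is made up for illustration):

#include "aom_dsp/binary_codes_reader.h"

static int16_t read_delta(aom_reader *r, uint16_t n, uint16_t k,
                          int16_t ref) {
  return aom_read_signed_primitive_refsubexpfin(r, n, k, ref, "my_delta");
}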
diff --git a/third_party/aom/aom_dsp/bitreader.h b/third_party/aom/aom_dsp/bitreader.h
index 9cd34dd483..5bad70cb3b 100644
--- a/third_party/aom/aom_dsp/bitreader.h
+++ b/third_party/aom/aom_dsp/bitreader.h
@@ -16,18 +16,13 @@
#include <limits.h>
#include "./aom_config.h"
-#if CONFIG_EC_ADAPT && !CONFIG_EC_MULTISYMBOL
-#error "CONFIG_EC_ADAPT is enabled without enabling CONFIG_EC_MULTISYMBOL."
-#endif
#include "aom/aomdx.h"
#include "aom/aom_integer.h"
#if CONFIG_ANS
#include "aom_dsp/ansreader.h"
-#elif CONFIG_DAALA_EC
-#include "aom_dsp/daalaboolreader.h"
#else
-#include "aom_dsp/dkboolreader.h"
+#include "aom_dsp/daalaboolreader.h"
#endif
#include "aom_dsp/prob.h"
#include "av1/common/odintrin.h"
@@ -61,26 +56,20 @@ extern "C" {
#if CONFIG_ANS
typedef struct AnsDecoder aom_reader;
-#elif CONFIG_DAALA_EC
-typedef struct daala_reader aom_reader;
#else
-typedef struct aom_dk_reader aom_reader;
+typedef struct daala_reader aom_reader;
#endif
static INLINE int aom_reader_init(aom_reader *r, const uint8_t *buffer,
size_t size, aom_decrypt_cb decrypt_cb,
void *decrypt_state) {
-#if CONFIG_ANS
(void)decrypt_cb;
(void)decrypt_state;
+#if CONFIG_ANS
if (size > INT_MAX) return 1;
return ans_read_init(r, buffer, (int)size);
-#elif CONFIG_DAALA_EC
- (void)decrypt_cb;
- (void)decrypt_state;
- return aom_daala_reader_init(r, buffer, (int)size);
#else
- return aom_dk_reader_init(r, buffer, size, decrypt_cb, decrypt_state);
+ return aom_daala_reader_init(r, buffer, (int)size);
#endif
}
@@ -89,20 +78,16 @@ static INLINE const uint8_t *aom_reader_find_end(aom_reader *r) {
(void)r;
assert(0 && "Use the raw buffer size with ANS");
return NULL;
-#elif CONFIG_DAALA_EC
- return aom_daala_reader_find_end(r);
#else
- return aom_dk_reader_find_end(r);
+ return aom_daala_reader_find_end(r);
#endif
}
static INLINE int aom_reader_has_error(aom_reader *r) {
#if CONFIG_ANS
return ans_reader_has_error(r);
-#elif CONFIG_DAALA_EC
- return aom_daala_reader_has_error(r);
#else
- return aom_dk_reader_has_error(r);
+ return aom_daala_reader_has_error(r);
#endif
}
@@ -112,10 +97,8 @@ static INLINE uint32_t aom_reader_tell(const aom_reader *r) {
(void)r;
assert(0 && "aom_reader_tell() is unimplemented for ANS");
return 0;
-#elif CONFIG_DAALA_EC
- return aom_daala_reader_tell(r);
#else
- return aom_dk_reader_tell(r);
+ return aom_daala_reader_tell(r);
#endif
}
@@ -125,10 +108,8 @@ static INLINE uint32_t aom_reader_tell_frac(const aom_reader *r) {
(void)r;
assert(0 && "aom_reader_tell_frac() is unimplemented for ANS");
return 0;
-#elif CONFIG_DAALA_EC
- return aom_daala_reader_tell_frac(r);
#else
- return aom_dk_reader_tell_frac(r);
+ return aom_daala_reader_tell_frac(r);
#endif
}
@@ -155,10 +136,8 @@ static INLINE int aom_read_(aom_reader *r, int prob ACCT_STR_PARAM) {
int ret;
#if CONFIG_ANS
ret = rabs_read(r, prob);
-#elif CONFIG_DAALA_EC
- ret = aom_daala_read(r, prob);
#else
- ret = aom_dk_read(r, prob);
+ ret = aom_daala_read(r, prob);
#endif
#if CONFIG_ACCOUNTING
if (ACCT_STR_NAME) aom_process_accounting(r, ACCT_STR_NAME);
@@ -171,7 +150,7 @@ static INLINE int aom_read_bit_(aom_reader *r ACCT_STR_PARAM) {
int ret;
#if CONFIG_ANS
ret = rabs_read_bit(r); // Non-trivial optimization at half probability
-#elif CONFIG_DAALA_EC && CONFIG_RAWBITS
+#elif CONFIG_RAWBITS
// Note this uses raw bits and is not the same as aom_daala_read(r, 128);
// Calls to this function are omitted from raw symbol accounting.
ret = aom_daala_read_bit(r);
@@ -194,28 +173,14 @@ static INLINE int aom_read_literal_(aom_reader *r, int bits ACCT_STR_PARAM) {
return literal;
}
-static INLINE int aom_read_tree_as_bits(aom_reader *r,
- const aom_tree_index *tree,
- const aom_prob *probs) {
- aom_tree_index i = 0;
-
- while ((i = tree[i + aom_read(r, probs[i >> 1], NULL)]) > 0) continue;
- return -i;
-}
-
-#if CONFIG_EC_MULTISYMBOL
static INLINE int aom_read_cdf_(aom_reader *r, const aom_cdf_prob *cdf,
int nsymbs ACCT_STR_PARAM) {
int ret;
#if CONFIG_ANS
(void)nsymbs;
ret = rans_read(r, cdf);
-#elif CONFIG_DAALA_EC
- ret = daala_read_symbol(r, cdf, nsymbs);
#else
-#error \
- "CONFIG_EC_MULTISYMBOL is selected without a valid backing entropy " \
- "coder. Enable daala_ec or ans for a valid configuration."
+ ret = daala_read_symbol(r, cdf, nsymbs);
#endif
#if CONFIG_ACCOUNTING
@@ -253,16 +218,11 @@ static INLINE int aom_read_tree_as_cdf(aom_reader *r,
} while (i > 0);
return -i;
}
-#endif // CONFIG_EC_MULTISYMBOL
static INLINE int aom_read_tree_(aom_reader *r, const aom_tree_index *tree,
const aom_prob *probs ACCT_STR_PARAM) {
int ret;
-#if CONFIG_EC_MULTISYMBOL
ret = aom_read_tree_as_cdf(r, tree, probs);
-#else
- ret = aom_read_tree_as_bits(r, tree, probs);
-#endif
#if CONFIG_ACCOUNTING
if (ACCT_STR_NAME) aom_process_accounting(r, ACCT_STR_NAME);
#endif
diff --git a/third_party/aom/aom_dsp/bitreader_buffer.c b/third_party/aom/aom_dsp/bitreader_buffer.c
index 009682b4c8..e51b1cc3a3 100644
--- a/third_party/aom/aom_dsp/bitreader_buffer.c
+++ b/third_party/aom/aom_dsp/bitreader_buffer.c
@@ -24,7 +24,7 @@ int aom_rb_read_bit(struct aom_read_bit_buffer *rb) {
rb->bit_offset = off + 1;
return bit;
} else {
- rb->error_handler(rb->error_handler_data);
+ if (rb->error_handler) rb->error_handler(rb->error_handler_data);
return 0;
}
}
diff --git a/third_party/aom/aom_dsp/bitwriter.h b/third_party/aom/aom_dsp/bitwriter.h
index 6e3fac2607..588e47bf3b 100644
--- a/third_party/aom/aom_dsp/bitwriter.h
+++ b/third_party/aom/aom_dsp/bitwriter.h
@@ -14,16 +14,11 @@
#include <assert.h>
#include "./aom_config.h"
-#if CONFIG_EC_ADAPT && !CONFIG_EC_MULTISYMBOL
-#error "CONFIG_EC_ADAPT is enabled without enabling CONFIG_EC_MULTISYMBOL"
-#endif
#if CONFIG_ANS
#include "aom_dsp/buf_ans.h"
-#elif CONFIG_DAALA_EC
-#include "aom_dsp/daalaboolwriter.h"
#else
-#include "aom_dsp/dkboolwriter.h"
+#include "aom_dsp/daalaboolwriter.h"
#endif
#include "aom_dsp/prob.h"
@@ -38,10 +33,8 @@ extern "C" {
#if CONFIG_ANS
typedef struct BufAnsCoder aom_writer;
-#elif CONFIG_DAALA_EC
-typedef struct daala_writer aom_writer;
#else
-typedef struct aom_dk_writer aom_writer;
+typedef struct daala_writer aom_writer;
#endif
typedef struct TOKEN_STATS {
@@ -72,10 +65,8 @@ static INLINE void aom_start_encode(aom_writer *bc, uint8_t *buffer) {
(void)bc;
(void)buffer;
assert(0 && "buf_ans requires a more complicated startup procedure");
-#elif CONFIG_DAALA_EC
- aom_daala_start_encode(bc, buffer);
#else
- aom_dk_start_encode(bc, buffer);
+ aom_daala_start_encode(bc, buffer);
#endif
}
@@ -83,20 +74,16 @@ static INLINE void aom_stop_encode(aom_writer *bc) {
#if CONFIG_ANS
(void)bc;
assert(0 && "buf_ans requires a more complicated shutdown procedure");
-#elif CONFIG_DAALA_EC
- aom_daala_stop_encode(bc);
#else
- aom_dk_stop_encode(bc);
+ aom_daala_stop_encode(bc);
#endif
}
static INLINE void aom_write(aom_writer *br, int bit, int probability) {
#if CONFIG_ANS
buf_rabs_write(br, bit, probability);
-#elif CONFIG_DAALA_EC
- aom_daala_write(br, bit, probability);
#else
- aom_dk_write(br, bit, probability);
+ aom_daala_write(br, bit, probability);
#endif
}
@@ -113,7 +100,7 @@ static INLINE void aom_write_record(aom_writer *br, int bit, int probability,
static INLINE void aom_write_bit(aom_writer *w, int bit) {
#if CONFIG_ANS
buf_rabs_write_bit(w, bit);
-#elif CONFIG_DAALA_EC && CONFIG_RAWBITS
+#elif CONFIG_RAWBITS
// Note this uses raw bits and is not the same as aom_daala_write(r, 128);
aom_daala_write_bit(w, bit);
#else
@@ -137,28 +124,6 @@ static INLINE void aom_write_literal(aom_writer *w, int data, int bits) {
for (bit = bits - 1; bit >= 0; bit--) aom_write_bit(w, 1 & (data >> bit));
}
-static INLINE void aom_write_tree_as_bits(aom_writer *w,
- const aom_tree_index *tr,
- const aom_prob *probs, int bits,
- int len, aom_tree_index i) {
- do {
- const int bit = (bits >> --len) & 1;
- aom_write(w, bit, probs[i >> 1]);
- i = tr[i + bit];
- } while (len);
-}
-
-static INLINE void aom_write_tree_as_bits_record(
- aom_writer *w, const aom_tree_index *tr, const aom_prob *probs, int bits,
- int len, aom_tree_index i, TOKEN_STATS *token_stats) {
- do {
- const int bit = (bits >> --len) & 1;
- aom_write_record(w, bit, probs[i >> 1], token_stats);
- i = tr[i + bit];
- } while (len);
-}
-
-#if CONFIG_EC_MULTISYMBOL
static INLINE void aom_write_cdf(aom_writer *w, int symb,
const aom_cdf_prob *cdf, int nsymbs) {
#if CONFIG_ANS
@@ -167,12 +132,8 @@ static INLINE void aom_write_cdf(aom_writer *w, int symb,
const aom_cdf_prob cum_prob = symb > 0 ? cdf[symb - 1] : 0;
const aom_cdf_prob prob = cdf[symb] - cum_prob;
buf_rans_write(w, cum_prob, prob);
-#elif CONFIG_DAALA_EC
- daala_write_symbol(w, symb, cdf, nsymbs);
#else
-#error \
- "CONFIG_EC_MULTISYMBOL is selected without a valid backing entropy " \
- "coder. Enable daala_ec or ans for a valid configuration."
+ daala_write_symbol(w, symb, cdf, nsymbs);
#endif
}
@@ -223,16 +184,10 @@ static INLINE void aom_write_tree_as_cdf(aom_writer *w,
} while (len);
}
-#endif // CONFIG_EC_MULTISYMBOL
-
static INLINE void aom_write_tree(aom_writer *w, const aom_tree_index *tree,
const aom_prob *probs, int bits, int len,
aom_tree_index i) {
-#if CONFIG_EC_MULTISYMBOL
aom_write_tree_as_cdf(w, tree, probs, bits, len, i);
-#else
- aom_write_tree_as_bits(w, tree, probs, bits, len, i);
-#endif
}
static INLINE void aom_write_tree_record(aom_writer *w,
@@ -240,12 +195,8 @@ static INLINE void aom_write_tree_record(aom_writer *w,
const aom_prob *probs, int bits,
int len, aom_tree_index i,
TOKEN_STATS *token_stats) {
-#if CONFIG_EC_MULTISYMBOL
(void)token_stats;
aom_write_tree_as_cdf(w, tree, probs, bits, len, i);
-#else
- aom_write_tree_as_bits_record(w, tree, probs, bits, len, i, token_stats);
-#endif
}
#ifdef __cplusplus
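
With the dk writer gone, every non-ANS build now routes through the Daala range coder, so call sites only ever see the generic API above. A minimal usage sketch of that API (a non-ANS build is assumed, where these calls map onto the Daala entropy coder as in the hunks above):

    #include "aom_dsp/bitwriter.h"

    /* Sketch: encode one coin flip and a 4-bit literal into `buf`. */
    static void write_example(uint8_t *buf) {
      aom_writer w;
      aom_start_encode(&w, buf);
      aom_write(&w, 1, 128);         /* one bit with probability 128/256 */
      aom_write_literal(&w, 0xA, 4); /* four raw, equiprobable bits */
      aom_stop_encode(&w);
    }
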
diff --git a/third_party/aom/aom_dsp/dkboolreader.c b/third_party/aom/aom_dsp/dkboolreader.c
deleted file mode 100644
index 288d5f1ce4..0000000000
--- a/third_party/aom/aom_dsp/dkboolreader.c
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
- * Copyright (c) 2016, Alliance for Open Media. All rights reserved
- *
- * This source code is subject to the terms of the BSD 2 Clause License and
- * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
- * was not distributed with this source code in the LICENSE file, you can
- * obtain it at www.aomedia.org/license/software. If the Alliance for Open
- * Media Patent License 1.0 was not distributed with this source code in the
- * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
- */
-
-#include <stdlib.h>
-
-#include "./aom_config.h"
-
-#include "aom_dsp/dkboolreader.h"
-#include "aom_dsp/prob.h"
-#include "aom_dsp/aom_dsp_common.h"
-#include "aom_ports/mem.h"
-#include "aom_mem/aom_mem.h"
-#include "aom_util/endian_inl.h"
-
-static INLINE int aom_dk_read_bit(struct aom_dk_reader *r) {
- return aom_dk_read(r, 128); // aom_prob_half
-}
-
-int aom_dk_reader_init(struct aom_dk_reader *r, const uint8_t *buffer,
- size_t size, aom_decrypt_cb decrypt_cb,
- void *decrypt_state) {
- if (size && !buffer) {
- return 1;
- } else {
- r->buffer_end = buffer + size;
- r->buffer_start = r->buffer = buffer;
- r->value = 0;
- r->count = -8;
- r->range = 255;
- r->decrypt_cb = decrypt_cb;
- r->decrypt_state = decrypt_state;
- aom_dk_reader_fill(r);
-#if CONFIG_ACCOUNTING
- r->accounting = NULL;
-#endif
- return aom_dk_read_bit(r) != 0; // marker bit
- }
-}
-
-void aom_dk_reader_fill(struct aom_dk_reader *r) {
- const uint8_t *const buffer_end = r->buffer_end;
- const uint8_t *buffer = r->buffer;
- const uint8_t *buffer_start = buffer;
- BD_VALUE value = r->value;
- int count = r->count;
- const size_t bytes_left = buffer_end - buffer;
- const size_t bits_left = bytes_left * CHAR_BIT;
- int shift = BD_VALUE_SIZE - CHAR_BIT - (count + CHAR_BIT);
-
- if (r->decrypt_cb) {
- size_t n = AOMMIN(sizeof(r->clear_buffer), bytes_left);
- r->decrypt_cb(r->decrypt_state, buffer, r->clear_buffer, (int)n);
- buffer = r->clear_buffer;
- buffer_start = r->clear_buffer;
- }
- if (bits_left > BD_VALUE_SIZE) {
- const int bits = (shift & 0xfffffff8) + CHAR_BIT;
- BD_VALUE nv;
- BD_VALUE big_endian_values;
- memcpy(&big_endian_values, buffer, sizeof(BD_VALUE));
-#if SIZE_MAX == 0xffffffffffffffffULL
- big_endian_values = HToBE64(big_endian_values);
-#else
- big_endian_values = HToBE32(big_endian_values);
-#endif
- nv = big_endian_values >> (BD_VALUE_SIZE - bits);
- count += bits;
- buffer += (bits >> 3);
- value = r->value | (nv << (shift & 0x7));
- } else {
- const int bits_over = (int)(shift + CHAR_BIT - (int)bits_left);
- int loop_end = 0;
- if (bits_over >= 0) {
- count += LOTS_OF_BITS;
- loop_end = bits_over;
- }
-
- if (bits_over < 0 || bits_left) {
- while (shift >= loop_end) {
- count += CHAR_BIT;
- value |= (BD_VALUE)*buffer++ << shift;
- shift -= CHAR_BIT;
- }
- }
- }
-
- // NOTE: Variable 'buffer' may not relate to 'r->buffer' after decryption,
- // so we increase 'r->buffer' by the amount that 'buffer' moved, rather than
- // assign 'buffer' to 'r->buffer'.
- r->buffer += buffer - buffer_start;
- r->value = value;
- r->count = count;
-}
-
-const uint8_t *aom_dk_reader_find_end(struct aom_dk_reader *r) {
- // Find the end of the coded buffer
- while (r->count > CHAR_BIT && r->count < BD_VALUE_SIZE) {
- r->count -= CHAR_BIT;
- r->buffer--;
- }
- return r->buffer;
-}
diff --git a/third_party/aom/aom_dsp/dkboolreader.h b/third_party/aom/aom_dsp/dkboolreader.h
deleted file mode 100644
index f0bc843813..0000000000
--- a/third_party/aom/aom_dsp/dkboolreader.h
+++ /dev/null
@@ -1,181 +0,0 @@
-/*
- * Copyright (c) 2016, Alliance for Open Media. All rights reserved
- *
- * This source code is subject to the terms of the BSD 2 Clause License and
- * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
- * was not distributed with this source code in the LICENSE file, you can
- * obtain it at www.aomedia.org/license/software. If the Alliance for Open
- * Media Patent License 1.0 was not distributed with this source code in the
- * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
- */
-
-#ifndef AOM_DSP_DKBOOLREADER_H_
-#define AOM_DSP_DKBOOLREADER_H_
-
-#include <assert.h>
-#include <stddef.h>
-#include <limits.h>
-
-#include "./aom_config.h"
-#if CONFIG_BITSTREAM_DEBUG
-#include <assert.h>
-#include <stdio.h>
-#include "aom_util/debug_util.h"
-#endif // CONFIG_BITSTREAM_DEBUG
-
-#include "aom_ports/mem.h"
-#include "aom/aomdx.h"
-#include "aom/aom_integer.h"
-#include "aom_dsp/prob.h"
-#if CONFIG_ACCOUNTING
-#include "av1/decoder/accounting.h"
-#endif
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-typedef size_t BD_VALUE;
-
-#define BD_VALUE_SIZE ((int)sizeof(BD_VALUE) * CHAR_BIT)
-
-// This is meant to be a large, positive constant that can still be efficiently
-// loaded as an immediate (on platforms like ARM, for example).
-// Even relatively modest values like 100 would work fine.
-#define LOTS_OF_BITS 0x40000000
-
-struct aom_dk_reader {
- // Be careful when reordering this struct, it may impact the cache negatively.
- BD_VALUE value;
- unsigned int range;
- int count;
- const uint8_t *buffer_start;
- const uint8_t *buffer_end;
- const uint8_t *buffer;
- aom_decrypt_cb decrypt_cb;
- void *decrypt_state;
- uint8_t clear_buffer[sizeof(BD_VALUE) + 1];
-#if CONFIG_ACCOUNTING
- Accounting *accounting;
-#endif
-};
-
-int aom_dk_reader_init(struct aom_dk_reader *r, const uint8_t *buffer,
- size_t size, aom_decrypt_cb decrypt_cb,
- void *decrypt_state);
-
-void aom_dk_reader_fill(struct aom_dk_reader *r);
-
-const uint8_t *aom_dk_reader_find_end(struct aom_dk_reader *r);
-
-static INLINE uint32_t aom_dk_reader_tell(const struct aom_dk_reader *r) {
- const uint32_t bits_read =
- (uint32_t)((r->buffer - r->buffer_start) * CHAR_BIT);
- const int count =
- (r->count < LOTS_OF_BITS) ? r->count : r->count - LOTS_OF_BITS;
- assert(r->buffer >= r->buffer_start);
- return bits_read - (count + CHAR_BIT);
-}
-
-/*The resolution of fractional-precision bit usage measurements, i.e.,
- 3 => 1/8th bits.*/
-#define DK_BITRES (3)
-
-static INLINE uint32_t aom_dk_reader_tell_frac(const struct aom_dk_reader *r) {
- uint32_t num_bits;
- uint32_t range;
- int l;
- int i;
- num_bits = aom_dk_reader_tell(r) << DK_BITRES;
- range = r->range;
- l = 0;
- for (i = DK_BITRES; i-- > 0;) {
- int b;
- range = range * range >> 7;
- b = (int)(range >> 8);
- l = l << 1 | b;
- range >>= b;
- }
- return num_bits - l;
-}
-
-static INLINE int aom_dk_reader_has_error(struct aom_dk_reader *r) {
- // Check if we have reached the end of the buffer.
- //
- // Variable 'count' stores the number of bits in the 'value' buffer, minus
- // 8. The top byte is part of the algorithm, and the remainder is buffered
- // to be shifted into it. So if count == 8, the top 16 bits of 'value' are
- // occupied, 8 for the algorithm and 8 in the buffer.
- //
- // When reading a byte from the user's buffer, count is filled with 8 and
- // one byte is filled into the value buffer. When we reach the end of the
- // data, count is additionally filled with LOTS_OF_BITS. So when
- // count == LOTS_OF_BITS - 1, the user's data has been exhausted.
- //
- // Returns 1 if we have tried to decode bits after the end of the stream
- // was encountered, and 0 if no error occurred.
- return r->count > BD_VALUE_SIZE && r->count < LOTS_OF_BITS;
-}
-
-static INLINE int aom_dk_read(struct aom_dk_reader *r, int prob) {
- unsigned int bit = 0;
- BD_VALUE value;
- BD_VALUE bigsplit;
- int count;
- unsigned int range;
- unsigned int split = (r->range * prob + (256 - prob)) >> CHAR_BIT;
-
- if (r->count < 0) aom_dk_reader_fill(r);
-
- value = r->value;
- count = r->count;
-
- bigsplit = (BD_VALUE)split << (BD_VALUE_SIZE - CHAR_BIT);
-
- range = split;
-
- if (value >= bigsplit) {
- range = r->range - split;
- value = value - bigsplit;
- bit = 1;
- }
-
- {
- register int shift = aom_norm[range];
- range <<= shift;
- value <<= shift;
- count -= shift;
- }
- r->value = value;
- r->count = count;
- r->range = range;
-
-#if CONFIG_BITSTREAM_DEBUG
- {
- int ref_bit, ref_prob;
- const int queue_r = bitstream_queue_get_read();
- const int frame_idx = bitstream_queue_get_frame_read();
- bitstream_queue_pop(&ref_bit, &ref_prob);
- if (prob != ref_prob) {
- fprintf(
- stderr,
- "\n *** prob error, frame_idx_r %d prob %d ref_prob %d queue_r %d\n",
- frame_idx, prob, ref_prob, queue_r);
- assert(0);
- }
- if ((int)bit != ref_bit) {
- fprintf(stderr, "\n *** bit error, frame_idx_r %d bit %d ref_bit %d\n",
- frame_idx, bit, ref_bit);
- assert(0);
- }
- }
-#endif // CONFIG_BITSTREAM_DEBUG
-
- return bit;
-}
-
-#ifdef __cplusplus
-} // extern "C"
-#endif
-
-#endif // AOM_DSP_DKBOOLREADER_H_
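
The `count` bookkeeping described in the deleted reader's comments is easiest to see with concrete numbers. A standalone sketch of the exhaustion test (constants match the deleted header; the assertions are illustrative):

    #include <assert.h>
    #include <limits.h>
    #include <stddef.h>

    #define BD_VALUE_SIZE ((int)sizeof(size_t) * CHAR_BIT)
    #define LOTS_OF_BITS 0x40000000

    /* Once the fill routine runs out of input it adds LOTS_OF_BITS to
     * count, so any count strictly between BD_VALUE_SIZE and
     * LOTS_OF_BITS means bits were consumed past the end of stream. */
    static int has_error(int count) {
      return count > BD_VALUE_SIZE && count < LOTS_OF_BITS;
    }

    int main(void) {
      assert(!has_error(24));                /* normal mid-stream state */
      assert(!has_error(LOTS_OF_BITS + 24)); /* end reached, padding unused */
      assert(has_error(LOTS_OF_BITS - 1));   /* decoded past the end */
      return 0;
    }
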
diff --git a/third_party/aom/aom_dsp/dkboolwriter.c b/third_party/aom/aom_dsp/dkboolwriter.c
deleted file mode 100644
index fc98e7c9be..0000000000
--- a/third_party/aom/aom_dsp/dkboolwriter.c
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Copyright (c) 2016, Alliance for Open Media. All rights reserved
- *
- * This source code is subject to the terms of the BSD 2 Clause License and
- * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
- * was not distributed with this source code in the LICENSE file, you can
- * obtain it at www.aomedia.org/license/software. If the Alliance for Open
- * Media Patent License 1.0 was not distributed with this source code in the
- * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
- */
-
-#include <assert.h>
-
-#include "./dkboolwriter.h"
-
-static INLINE void aom_dk_write_bit(aom_dk_writer *w, int bit) {
- aom_dk_write(w, bit, 128); // aom_prob_half
-}
-
-void aom_dk_start_encode(aom_dk_writer *br, uint8_t *source) {
- br->lowvalue = 0;
- br->range = 255;
- br->count = -24;
- br->buffer = source;
- br->pos = 0;
- aom_dk_write_bit(br, 0);
-}
-
-void aom_dk_stop_encode(aom_dk_writer *br) {
- int i;
-
-#if CONFIG_BITSTREAM_DEBUG
- bitstream_queue_set_skip_write(1);
-#endif // CONFIG_BITSTREAM_DEBUG
-
- for (i = 0; i < 32; i++) aom_dk_write_bit(br, 0);
-
-#if CONFIG_BITSTREAM_DEBUG
- bitstream_queue_set_skip_write(0);
-#endif // CONFIG_BITSTREAM_DEBUG
-
- // Ensure there's no ambiguous collision with any index marker bytes
- if ((br->buffer[br->pos - 1] & 0xe0) == 0xc0) br->buffer[br->pos++] = 0;
-}
diff --git a/third_party/aom/aom_dsp/dkboolwriter.h b/third_party/aom/aom_dsp/dkboolwriter.h
deleted file mode 100644
index 835436885b..0000000000
--- a/third_party/aom/aom_dsp/dkboolwriter.h
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * Copyright (c) 2016, Alliance for Open Media. All rights reserved
- *
- * This source code is subject to the terms of the BSD 2 Clause License and
- * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
- * was not distributed with this source code in the LICENSE file, you can
- * obtain it at www.aomedia.org/license/software. If the Alliance for Open
- * Media Patent License 1.0 was not distributed with this source code in the
- * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
- */
-
-#ifndef AOM_DSP_DKBOOLWRITER_H_
-#define AOM_DSP_DKBOOLWRITER_H_
-
-#include "./aom_config.h"
-
-#if CONFIG_BITSTREAM_DEBUG
-#include <stdio.h>
-#include "aom_util/debug_util.h"
-#endif // CONFIG_BITSTREAM_DEBUG
-
-#include "aom_dsp/prob.h"
-#include "aom_ports/mem.h"
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-typedef struct aom_dk_writer {
- unsigned int lowvalue;
- unsigned int range;
- int count;
- unsigned int pos;
- uint8_t *buffer;
-} aom_dk_writer;
-
-void aom_dk_start_encode(aom_dk_writer *bc, uint8_t *buffer);
-void aom_dk_stop_encode(aom_dk_writer *bc);
-
-static INLINE void aom_dk_write(aom_dk_writer *br, int bit, int probability) {
- unsigned int split;
- int count = br->count;
- unsigned int range = br->range;
- unsigned int lowvalue = br->lowvalue;
- register int shift;
-
-#if CONFIG_BITSTREAM_DEBUG
- // int queue_r = 0;
- // int frame_idx_r = 0;
- // int queue_w = bitstream_queue_get_write();
- // int frame_idx_w = bitstream_queue_get_frame_write();
- // if (frame_idx_w == frame_idx_r && queue_w == queue_r) {
- // fprintf(stderr, "\n *** bitstream queue at frame_idx_w %d queue_w %d\n",
- // frame_idx_w, queue_w);
- // }
- bitstream_queue_push(bit, probability);
-#endif // CONFIG_BITSTREAM_DEBUG
-
- split = 1 + (((range - 1) * probability) >> 8);
-
- range = split;
-
- if (bit) {
- lowvalue += split;
- range = br->range - split;
- }
-
- shift = aom_norm[range];
-
- range <<= shift;
- count += shift;
-
- if (count >= 0) {
- int offset = shift - count;
-
- if ((lowvalue << (offset - 1)) & 0x80000000) {
- int x = br->pos - 1;
-
- while (x >= 0 && br->buffer[x] == 0xff) {
- br->buffer[x] = 0;
- x--;
- }
-
- br->buffer[x] += 1;
- }
-
- br->buffer[br->pos++] = (lowvalue >> (24 - offset));
- lowvalue <<= offset;
- shift = count;
- lowvalue &= 0xffffff;
- count -= 8;
- }
-
- lowvalue <<= shift;
- br->count = count;
- br->lowvalue = lowvalue;
- br->range = range;
-}
-
-#ifdef __cplusplus
-} // extern "C"
-#endif
-
-#endif // AOM_DSP_DKBOOLWRITER_H_
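
One detail of the deleted writer worth noting is its carry handling: when adding `split` overflows the low value, the carry must ripple back through any 0xff bytes already emitted. A standalone sketch of that step (the bounds check on the final increment is added here for safety; the deleted code relied on the leading marker bit to keep `x` in range):

    #include <stdint.h>

    /* Propagate an arithmetic-coder carry backwards through the output:
     * trailing 0xff bytes wrap to 0x00 and the first non-0xff byte is
     * incremented. */
    static void propagate_carry(uint8_t *buffer, int pos) {
      int x = pos - 1;
      while (x >= 0 && buffer[x] == 0xff) {
        buffer[x] = 0;
        x--;
      }
      if (x >= 0) buffer[x] += 1;
    }
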
diff --git a/third_party/aom/aom_dsp/intrapred.c b/third_party/aom/aom_dsp/intrapred.c
index 1f0870b647..370d0374b0 100644
--- a/third_party/aom/aom_dsp/intrapred.c
+++ b/third_party/aom/aom_dsp/intrapred.c
@@ -208,33 +208,30 @@ static const int sm_weight_log2_scale = 8;
#if CONFIG_TX64X64
// max(block_size_wide[BLOCK_LARGEST], block_size_high[BLOCK_LARGEST])
#define MAX_BLOCK_DIM 64
-#define NUM_BLOCK_DIMS 6 // log2(MAX_BLOCK_DIM)
#else
#define MAX_BLOCK_DIM 32
-#define NUM_BLOCK_DIMS 5
#endif // CONFIG_TX64X64
-static const uint8_t sm_weight_arrays[NUM_BLOCK_DIMS][MAX_BLOCK_DIM] = {
+static const uint8_t sm_weight_arrays[2 * MAX_BLOCK_DIM] = {
+ // Unused, because we always offset by bs, which is at least 2.
+ 0, 0,
// bs = 2
- { 255, 128 },
+ 255, 128,
// bs = 4
- { 255, 149, 85, 64 },
+ 255, 149, 85, 64,
// bs = 8
- { 255, 197, 146, 105, 73, 50, 37, 32 },
+ 255, 197, 146, 105, 73, 50, 37, 32,
// bs = 16
- { 255, 225, 196, 170, 145, 123, 102, 84, 68, 54, 43, 33, 26, 20, 17, 16 },
+ 255, 225, 196, 170, 145, 123, 102, 84, 68, 54, 43, 33, 26, 20, 17, 16,
// bs = 32
- {
- 255, 240, 225, 210, 196, 182, 169, 157, 145, 133, 122,
- 111, 101, 92, 83, 74, 66, 59, 52, 45, 39, 34,
- 29, 25, 21, 17, 14, 12, 10, 9, 8, 8 },
+ 255, 240, 225, 210, 196, 182, 169, 157, 145, 133, 122, 111, 101, 92, 83, 74,
+ 66, 59, 52, 45, 39, 34, 29, 25, 21, 17, 14, 12, 10, 9, 8, 8,
#if CONFIG_TX64X64
// bs = 64
- { 255, 248, 240, 233, 225, 218, 210, 203, 196, 189, 182, 176, 169,
- 163, 156, 150, 144, 138, 133, 127, 121, 116, 111, 106, 101, 96,
- 91, 86, 82, 77, 73, 69, 65, 61, 57, 54, 50, 47, 44,
- 41, 38, 35, 32, 29, 27, 25, 22, 20, 18, 16, 15, 13,
- 12, 10, 9, 8, 7, 6, 6, 5, 5, 4, 4, 4 },
+ 255, 248, 240, 233, 225, 218, 210, 203, 196, 189, 182, 176, 169, 163, 156,
+ 150, 144, 138, 133, 127, 121, 116, 111, 106, 101, 96, 91, 86, 82, 77, 73, 69,
+ 65, 61, 57, 54, 50, 47, 44, 41, 38, 35, 32, 29, 27, 25, 22, 20, 18, 16, 15,
+ 13, 12, 10, 9, 8, 7, 6, 6, 5, 5, 4, 4, 4,
#endif // CONFIG_TX64X64
};
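
The flattened layout above works because block dimensions are powers of two: the run of weights for size bs occupies offsets [bs, 2*bs), and since 2 + 2 + 4 + ... + bs/2 == bs, consecutive runs tile the array with no gaps or overlap. A sketch of the lookup this enables:

    #include <stdint.h>

    /* Offsets  0..1   unused
     * Offsets  2..3   weights for bs = 2
     * Offsets  4..7   weights for bs = 4
     * Offsets  8..15  weights for bs = 8
     * Offsets 16..31  weights for bs = 16
     * Offsets 32..63  weights for bs = 32
     * Offsets 64..127 weights for bs = 64 (CONFIG_TX64X64 only) */
    static const uint8_t *weights_for(const uint8_t *flat, int bs) {
      return flat + bs;  /* first weight for a bs x bs block */
    }
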
@@ -250,10 +247,7 @@ static INLINE void smooth_predictor(uint8_t *dst, ptrdiff_t stride, int bs,
const uint8_t *above, const uint8_t *left) {
const uint8_t below_pred = left[bs - 1]; // estimated by bottom-left pixel
const uint8_t right_pred = above[bs - 1]; // estimated by top-right pixel
- const int arr_index = get_msb(bs) - 1;
- assert(arr_index >= 0);
- assert(arr_index < NUM_BLOCK_DIMS);
- const uint8_t *const sm_weights = sm_weight_arrays[arr_index];
+ const uint8_t *const sm_weights = sm_weight_arrays + bs;
// scale = 2 * 2^sm_weight_log2_scale
const int log2_scale = 1 + sm_weight_log2_scale;
const uint16_t scale = (1 << sm_weight_log2_scale);
@@ -277,6 +271,64 @@ static INLINE void smooth_predictor(uint8_t *dst, ptrdiff_t stride, int bs,
}
}
+#if CONFIG_SMOOTH_HV
+static INLINE void smooth_v_predictor(uint8_t *dst, ptrdiff_t stride, int bs,
+ const uint8_t *above,
+ const uint8_t *left) {
+ const uint8_t below_pred = left[bs - 1]; // estimated by bottom-left pixel
+ const uint8_t *const sm_weights = sm_weight_arrays + bs;
+ // scale = 2^sm_weight_log2_scale
+ const int log2_scale = sm_weight_log2_scale;
+ const uint16_t scale = (1 << sm_weight_log2_scale);
+ sm_weights_sanity_checks(sm_weights, scale, log2_scale + sizeof(*dst));
+
+ int r;
+ for (r = 0; r < bs; r++) {
+ int c;
+ for (c = 0; c < bs; ++c) {
+ const uint8_t pixels[] = { above[c], below_pred };
+ const uint8_t weights[] = { sm_weights[r], scale - sm_weights[r] };
+ uint32_t this_pred = 0;
+ assert(scale >= sm_weights[r]);
+ int i;
+ for (i = 0; i < 2; ++i) {
+ this_pred += weights[i] * pixels[i];
+ }
+ dst[c] = clip_pixel(divide_round(this_pred, log2_scale));
+ }
+ dst += stride;
+ }
+}
+
+static INLINE void smooth_h_predictor(uint8_t *dst, ptrdiff_t stride, int bs,
+ const uint8_t *above,
+ const uint8_t *left) {
+ const uint8_t right_pred = above[bs - 1]; // estimated by top-right pixel
+ const uint8_t *const sm_weights = sm_weight_arrays + bs;
+ // scale = 2^sm_weight_log2_scale
+ const int log2_scale = sm_weight_log2_scale;
+ const uint16_t scale = (1 << sm_weight_log2_scale);
+ sm_weights_sanity_checks(sm_weights, scale, log2_scale + sizeof(*dst));
+
+ int r;
+ for (r = 0; r < bs; r++) {
+ int c;
+ for (c = 0; c < bs; ++c) {
+ const uint8_t pixels[] = { left[r], right_pred };
+ const uint8_t weights[] = { sm_weights[c], scale - sm_weights[c] };
+ uint32_t this_pred = 0;
+ assert(scale >= sm_weights[c]);
+ int i;
+ for (i = 0; i < 2; ++i) {
+ this_pred += weights[i] * pixels[i];
+ }
+ dst[c] = clip_pixel(divide_round(this_pred, log2_scale));
+ }
+ dst += stride;
+ }
+}
+#endif // CONFIG_SMOOTH_HV
+
#else
static INLINE void tm_predictor(uint8_t *dst, ptrdiff_t stride, int bs,
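
The new predictors added above are a two-tap weighted blend per pixel; a worked example makes the arithmetic concrete. For smooth_v with bs = 4 and r = 1, the weight is sm_weight_arrays[4 + 1] = 149 and scale = 256, so one output pixel reduces to the sketch below (divide_round is assumed to round to nearest, i.e. (v + 2^(bits-1)) >> bits; clip_pixel is a no-op here because a weighted average of two 8-bit pixels stays in range):

    #include <stdint.h>

    static uint8_t smooth_v_pixel_example(uint8_t above_c, uint8_t below_pred) {
      const uint32_t w = 149;  /* sm_weight_arrays[4 + 1]: bs = 4, row 1 */
      const uint32_t pred = w * above_c + (256 - w) * below_pred;
      return (uint8_t)((pred + 128) >> 8);  /* divide_round(pred, 8) */
    }

For above_c = 200 and below_pred = 50 this gives (149*200 + 107*50 + 128) >> 8 = 137, i.e. a pixel weighted toward the above row, as expected for a row near the top of the block.
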
@@ -743,10 +795,7 @@ static INLINE void highbd_smooth_predictor(uint16_t *dst, ptrdiff_t stride,
const uint16_t *left, int bd) {
const uint16_t below_pred = left[bs - 1]; // estimated by bottom-left pixel
const uint16_t right_pred = above[bs - 1]; // estimated by top-right pixel
- const int arr_index = get_msb(bs) - 1;
- assert(arr_index >= 0);
- assert(arr_index < NUM_BLOCK_DIMS);
- const uint8_t *const sm_weights = sm_weight_arrays[arr_index];
+ const uint8_t *const sm_weights = sm_weight_arrays + bs;
// scale = 2 * 2^sm_weight_log2_scale
const int log2_scale = 1 + sm_weight_log2_scale;
const uint16_t scale = (1 << sm_weight_log2_scale);
@@ -770,6 +819,64 @@ static INLINE void highbd_smooth_predictor(uint16_t *dst, ptrdiff_t stride,
}
}
+#if CONFIG_SMOOTH_HV
+static INLINE void highbd_smooth_v_predictor(uint16_t *dst, ptrdiff_t stride,
+ int bs, const uint16_t *above,
+ const uint16_t *left, int bd) {
+ const uint16_t below_pred = left[bs - 1]; // estimated by bottom-left pixel
+ const uint8_t *const sm_weights = sm_weight_arrays + bs;
+ // scale = 2^sm_weight_log2_scale
+ const int log2_scale = sm_weight_log2_scale;
+ const uint16_t scale = (1 << sm_weight_log2_scale);
+ sm_weights_sanity_checks(sm_weights, scale, log2_scale + sizeof(*dst));
+
+ int r;
+ for (r = 0; r < bs; r++) {
+ int c;
+ for (c = 0; c < bs; ++c) {
+ const uint16_t pixels[] = { above[c], below_pred };
+ const uint8_t weights[] = { sm_weights[r], scale - sm_weights[r] };
+ uint32_t this_pred = 0;
+ assert(scale >= sm_weights[r]);
+ int i;
+ for (i = 0; i < 2; ++i) {
+ this_pred += weights[i] * pixels[i];
+ }
+ dst[c] = clip_pixel_highbd(divide_round(this_pred, log2_scale), bd);
+ }
+ dst += stride;
+ }
+}
+
+static INLINE void highbd_smooth_h_predictor(uint16_t *dst, ptrdiff_t stride,
+ int bs, const uint16_t *above,
+ const uint16_t *left, int bd) {
+ const uint16_t right_pred = above[bs - 1]; // estimated by top-right pixel
+ const uint8_t *const sm_weights = sm_weight_arrays + bs;
+ // scale = 2^sm_weight_log2_scale
+ const int log2_scale = sm_weight_log2_scale;
+ const uint16_t scale = (1 << sm_weight_log2_scale);
+ sm_weights_sanity_checks(sm_weights, scale, log2_scale + sizeof(*dst));
+
+ int r;
+ for (r = 0; r < bs; r++) {
+ int c;
+ for (c = 0; c < bs; ++c) {
+ const uint16_t pixels[] = { left[r], right_pred };
+ const uint8_t weights[] = { sm_weights[c], scale - sm_weights[c] };
+ uint32_t this_pred = 0;
+ assert(scale >= sm_weights[c]);
+ int i;
+ for (i = 0; i < 2; ++i) {
+ this_pred += weights[i] * pixels[i];
+ }
+ dst[c] = clip_pixel_highbd(divide_round(this_pred, log2_scale), bd);
+ }
+ dst += stride;
+ }
+}
+#endif
+
#else
static INLINE void highbd_tm_predictor(uint16_t *dst, ptrdiff_t stride, int bs,
const uint16_t *above,
@@ -879,6 +986,7 @@ static INLINE void highbd_dc_predictor(uint16_t *dst, ptrdiff_t stride, int bs,
intra_pred_sized(type, 16) \
intra_pred_sized(type, 32) \
intra_pred_sized(type, 64) \
+ intra_pred_highbd_sized(type, 2) \
intra_pred_highbd_sized(type, 4) \
intra_pred_highbd_sized(type, 8) \
intra_pred_highbd_sized(type, 16) \
@@ -958,8 +1066,12 @@ intra_pred_above_4x4(d153)
intra_pred_allsizes(v)
intra_pred_allsizes(h)
#if CONFIG_ALT_INTRA
-intra_pred_allsizes(paeth)
intra_pred_allsizes(smooth)
+#if CONFIG_SMOOTH_HV
+intra_pred_allsizes(smooth_v)
+intra_pred_allsizes(smooth_h)
+#endif // CONFIG_SMOOTH_HV
+intra_pred_allsizes(paeth)
#else
intra_pred_allsizes(tm)
#endif // CONFIG_ALT_INTRA
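
For reference, each intra_pred_allsizes() line registers one predictor across every block size via token pasting. A hedged reconstruction of what a single instantiation expands to (the exact macro bodies live earlier in this file, so the shape below is illustrative only):

    /* intra_pred_allsizes(smooth_v) generates, among others: */
    void aom_smooth_v_predictor_8x8_c(uint8_t *dst, ptrdiff_t stride,
                                      const uint8_t *above,
                                      const uint8_t *left) {
      smooth_v_predictor(dst, stride, 8, above, left);
    }
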
diff --git a/third_party/aom/aom_dsp/inv_txfm.c b/third_party/aom/aom_dsp/inv_txfm.c
index bb995856ae..6e7d8c9280 100644
--- a/third_party/aom/aom_dsp/inv_txfm.c
+++ b/third_party/aom/aom_dsp/inv_txfm.c
@@ -1442,4 +1442,868 @@ void aom_highbd_idct4x4_1_add_c(const tran_low_t *input, uint8_t *dest8,
}
}
+void aom_highbd_idct8_c(const tran_low_t *input, tran_low_t *output, int bd) {
+ tran_low_t step1[8], step2[8];
+ tran_high_t temp1, temp2;
+ // stage 1
+ step1[0] = input[0];
+ step1[2] = input[4];
+ step1[1] = input[2];
+ step1[3] = input[6];
+ temp1 = input[1] * cospi_28_64 - input[7] * cospi_4_64;
+ temp2 = input[1] * cospi_4_64 + input[7] * cospi_28_64;
+ step1[4] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
+ step1[7] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
+ temp1 = input[5] * cospi_12_64 - input[3] * cospi_20_64;
+ temp2 = input[5] * cospi_20_64 + input[3] * cospi_12_64;
+ step1[5] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
+ step1[6] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
+
+ // stage 2 & stage 3 - even half
+ aom_highbd_idct4_c(step1, step1, bd);
+
+ // stage 2 - odd half
+ step2[4] = HIGHBD_WRAPLOW(step1[4] + step1[5], bd);
+ step2[5] = HIGHBD_WRAPLOW(step1[4] - step1[5], bd);
+ step2[6] = HIGHBD_WRAPLOW(-step1[6] + step1[7], bd);
+ step2[7] = HIGHBD_WRAPLOW(step1[6] + step1[7], bd);
+
+ // stage 3 - odd half
+ step1[4] = step2[4];
+ temp1 = (step2[6] - step2[5]) * cospi_16_64;
+ temp2 = (step2[5] + step2[6]) * cospi_16_64;
+ step1[5] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
+ step1[6] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
+ step1[7] = step2[7];
+
+ // stage 4
+ output[0] = HIGHBD_WRAPLOW(step1[0] + step1[7], bd);
+ output[1] = HIGHBD_WRAPLOW(step1[1] + step1[6], bd);
+ output[2] = HIGHBD_WRAPLOW(step1[2] + step1[5], bd);
+ output[3] = HIGHBD_WRAPLOW(step1[3] + step1[4], bd);
+ output[4] = HIGHBD_WRAPLOW(step1[3] - step1[4], bd);
+ output[5] = HIGHBD_WRAPLOW(step1[2] - step1[5], bd);
+ output[6] = HIGHBD_WRAPLOW(step1[1] - step1[6], bd);
+ output[7] = HIGHBD_WRAPLOW(step1[0] - step1[7], bd);
+}
+
+void aom_highbd_iadst4_c(const tran_low_t *input, tran_low_t *output, int bd) {
+ tran_high_t s0, s1, s2, s3, s4, s5, s6, s7;
+
+ tran_low_t x0 = input[0];
+ tran_low_t x1 = input[1];
+ tran_low_t x2 = input[2];
+ tran_low_t x3 = input[3];
+ (void)bd;
+
+ if (!(x0 | x1 | x2 | x3)) {
+ memset(output, 0, 4 * sizeof(*output));
+ return;
+ }
+
+ s0 = sinpi_1_9 * x0;
+ s1 = sinpi_2_9 * x0;
+ s2 = sinpi_3_9 * x1;
+ s3 = sinpi_4_9 * x2;
+ s4 = sinpi_1_9 * x2;
+ s5 = sinpi_2_9 * x3;
+ s6 = sinpi_4_9 * x3;
+ s7 = (tran_high_t)HIGHBD_WRAPLOW(x0 - x2 + x3, bd);
+
+ s0 = s0 + s3 + s5;
+ s1 = s1 - s4 - s6;
+ s3 = s2;
+ s2 = sinpi_3_9 * s7;
+
+ // 1-D transform scaling factor is sqrt(2).
+ // The overall dynamic range is 14b (input) + 14b (multiplication scaling)
+ // + 1b (addition) = 29b.
+ // Hence the output bit depth is 15b.
+ output[0] = HIGHBD_WRAPLOW(dct_const_round_shift(s0 + s3), bd);
+ output[1] = HIGHBD_WRAPLOW(dct_const_round_shift(s1 + s3), bd);
+ output[2] = HIGHBD_WRAPLOW(dct_const_round_shift(s2), bd);
+ output[3] = HIGHBD_WRAPLOW(dct_const_round_shift(s0 + s1 - s3), bd);
+}
+
+void aom_highbd_iadst8_c(const tran_low_t *input, tran_low_t *output, int bd) {
+ tran_high_t s0, s1, s2, s3, s4, s5, s6, s7;
+
+ tran_low_t x0 = input[7];
+ tran_low_t x1 = input[0];
+ tran_low_t x2 = input[5];
+ tran_low_t x3 = input[2];
+ tran_low_t x4 = input[3];
+ tran_low_t x5 = input[4];
+ tran_low_t x6 = input[1];
+ tran_low_t x7 = input[6];
+ (void)bd;
+
+ if (!(x0 | x1 | x2 | x3 | x4 | x5 | x6 | x7)) {
+ memset(output, 0, 8 * sizeof(*output));
+ return;
+ }
+
+ // stage 1
+ s0 = cospi_2_64 * x0 + cospi_30_64 * x1;
+ s1 = cospi_30_64 * x0 - cospi_2_64 * x1;
+ s2 = cospi_10_64 * x2 + cospi_22_64 * x3;
+ s3 = cospi_22_64 * x2 - cospi_10_64 * x3;
+ s4 = cospi_18_64 * x4 + cospi_14_64 * x5;
+ s5 = cospi_14_64 * x4 - cospi_18_64 * x5;
+ s6 = cospi_26_64 * x6 + cospi_6_64 * x7;
+ s7 = cospi_6_64 * x6 - cospi_26_64 * x7;
+
+ x0 = HIGHBD_WRAPLOW(dct_const_round_shift(s0 + s4), bd);
+ x1 = HIGHBD_WRAPLOW(dct_const_round_shift(s1 + s5), bd);
+ x2 = HIGHBD_WRAPLOW(dct_const_round_shift(s2 + s6), bd);
+ x3 = HIGHBD_WRAPLOW(dct_const_round_shift(s3 + s7), bd);
+ x4 = HIGHBD_WRAPLOW(dct_const_round_shift(s0 - s4), bd);
+ x5 = HIGHBD_WRAPLOW(dct_const_round_shift(s1 - s5), bd);
+ x6 = HIGHBD_WRAPLOW(dct_const_round_shift(s2 - s6), bd);
+ x7 = HIGHBD_WRAPLOW(dct_const_round_shift(s3 - s7), bd);
+
+ // stage 2
+ s0 = x0;
+ s1 = x1;
+ s2 = x2;
+ s3 = x3;
+ s4 = cospi_8_64 * x4 + cospi_24_64 * x5;
+ s5 = cospi_24_64 * x4 - cospi_8_64 * x5;
+ s6 = -cospi_24_64 * x6 + cospi_8_64 * x7;
+ s7 = cospi_8_64 * x6 + cospi_24_64 * x7;
+
+ x0 = HIGHBD_WRAPLOW(s0 + s2, bd);
+ x1 = HIGHBD_WRAPLOW(s1 + s3, bd);
+ x2 = HIGHBD_WRAPLOW(s0 - s2, bd);
+ x3 = HIGHBD_WRAPLOW(s1 - s3, bd);
+ x4 = HIGHBD_WRAPLOW(dct_const_round_shift(s4 + s6), bd);
+ x5 = HIGHBD_WRAPLOW(dct_const_round_shift(s5 + s7), bd);
+ x6 = HIGHBD_WRAPLOW(dct_const_round_shift(s4 - s6), bd);
+ x7 = HIGHBD_WRAPLOW(dct_const_round_shift(s5 - s7), bd);
+
+ // stage 3
+ s2 = cospi_16_64 * (x2 + x3);
+ s3 = cospi_16_64 * (x2 - x3);
+ s6 = cospi_16_64 * (x6 + x7);
+ s7 = cospi_16_64 * (x6 - x7);
+
+ x2 = HIGHBD_WRAPLOW(dct_const_round_shift(s2), bd);
+ x3 = HIGHBD_WRAPLOW(dct_const_round_shift(s3), bd);
+ x6 = HIGHBD_WRAPLOW(dct_const_round_shift(s6), bd);
+ x7 = HIGHBD_WRAPLOW(dct_const_round_shift(s7), bd);
+
+ output[0] = HIGHBD_WRAPLOW(x0, bd);
+ output[1] = HIGHBD_WRAPLOW(-x4, bd);
+ output[2] = HIGHBD_WRAPLOW(x6, bd);
+ output[3] = HIGHBD_WRAPLOW(-x2, bd);
+ output[4] = HIGHBD_WRAPLOW(x3, bd);
+ output[5] = HIGHBD_WRAPLOW(-x7, bd);
+ output[6] = HIGHBD_WRAPLOW(x5, bd);
+ output[7] = HIGHBD_WRAPLOW(-x1, bd);
+}
+
+void aom_highbd_idct16_c(const tran_low_t *input, tran_low_t *output, int bd) {
+ tran_low_t step1[16], step2[16];
+ tran_high_t temp1, temp2;
+ (void)bd;
+
+ // stage 1
+ step1[0] = input[0 / 2];
+ step1[1] = input[16 / 2];
+ step1[2] = input[8 / 2];
+ step1[3] = input[24 / 2];
+ step1[4] = input[4 / 2];
+ step1[5] = input[20 / 2];
+ step1[6] = input[12 / 2];
+ step1[7] = input[28 / 2];
+ step1[8] = input[2 / 2];
+ step1[9] = input[18 / 2];
+ step1[10] = input[10 / 2];
+ step1[11] = input[26 / 2];
+ step1[12] = input[6 / 2];
+ step1[13] = input[22 / 2];
+ step1[14] = input[14 / 2];
+ step1[15] = input[30 / 2];
+
+ // stage 2
+ step2[0] = step1[0];
+ step2[1] = step1[1];
+ step2[2] = step1[2];
+ step2[3] = step1[3];
+ step2[4] = step1[4];
+ step2[5] = step1[5];
+ step2[6] = step1[6];
+ step2[7] = step1[7];
+
+ temp1 = step1[8] * cospi_30_64 - step1[15] * cospi_2_64;
+ temp2 = step1[8] * cospi_2_64 + step1[15] * cospi_30_64;
+ step2[8] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
+ step2[15] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
+
+ temp1 = step1[9] * cospi_14_64 - step1[14] * cospi_18_64;
+ temp2 = step1[9] * cospi_18_64 + step1[14] * cospi_14_64;
+ step2[9] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
+ step2[14] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
+
+ temp1 = step1[10] * cospi_22_64 - step1[13] * cospi_10_64;
+ temp2 = step1[10] * cospi_10_64 + step1[13] * cospi_22_64;
+ step2[10] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
+ step2[13] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
+
+ temp1 = step1[11] * cospi_6_64 - step1[12] * cospi_26_64;
+ temp2 = step1[11] * cospi_26_64 + step1[12] * cospi_6_64;
+ step2[11] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
+ step2[12] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
+
+ // stage 3
+ step1[0] = step2[0];
+ step1[1] = step2[1];
+ step1[2] = step2[2];
+ step1[3] = step2[3];
+
+ temp1 = step2[4] * cospi_28_64 - step2[7] * cospi_4_64;
+ temp2 = step2[4] * cospi_4_64 + step2[7] * cospi_28_64;
+ step1[4] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
+ step1[7] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
+ temp1 = step2[5] * cospi_12_64 - step2[6] * cospi_20_64;
+ temp2 = step2[5] * cospi_20_64 + step2[6] * cospi_12_64;
+ step1[5] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
+ step1[6] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
+
+ step1[8] = HIGHBD_WRAPLOW(step2[8] + step2[9], bd);
+ step1[9] = HIGHBD_WRAPLOW(step2[8] - step2[9], bd);
+ step1[10] = HIGHBD_WRAPLOW(-step2[10] + step2[11], bd);
+ step1[11] = HIGHBD_WRAPLOW(step2[10] + step2[11], bd);
+ step1[12] = HIGHBD_WRAPLOW(step2[12] + step2[13], bd);
+ step1[13] = HIGHBD_WRAPLOW(step2[12] - step2[13], bd);
+ step1[14] = HIGHBD_WRAPLOW(-step2[14] + step2[15], bd);
+ step1[15] = HIGHBD_WRAPLOW(step2[14] + step2[15], bd);
+
+ // stage 4
+ temp1 = (step1[0] + step1[1]) * cospi_16_64;
+ temp2 = (step1[0] - step1[1]) * cospi_16_64;
+ step2[0] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
+ step2[1] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
+ temp1 = step1[2] * cospi_24_64 - step1[3] * cospi_8_64;
+ temp2 = step1[2] * cospi_8_64 + step1[3] * cospi_24_64;
+ step2[2] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
+ step2[3] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
+ step2[4] = HIGHBD_WRAPLOW(step1[4] + step1[5], bd);
+ step2[5] = HIGHBD_WRAPLOW(step1[4] - step1[5], bd);
+ step2[6] = HIGHBD_WRAPLOW(-step1[6] + step1[7], bd);
+ step2[7] = HIGHBD_WRAPLOW(step1[6] + step1[7], bd);
+
+ step2[8] = step1[8];
+ step2[15] = step1[15];
+ temp1 = -step1[9] * cospi_8_64 + step1[14] * cospi_24_64;
+ temp2 = step1[9] * cospi_24_64 + step1[14] * cospi_8_64;
+ step2[9] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
+ step2[14] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
+ temp1 = -step1[10] * cospi_24_64 - step1[13] * cospi_8_64;
+ temp2 = -step1[10] * cospi_8_64 + step1[13] * cospi_24_64;
+ step2[10] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
+ step2[13] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
+ step2[11] = step1[11];
+ step2[12] = step1[12];
+
+ // stage 5
+ step1[0] = HIGHBD_WRAPLOW(step2[0] + step2[3], bd);
+ step1[1] = HIGHBD_WRAPLOW(step2[1] + step2[2], bd);
+ step1[2] = HIGHBD_WRAPLOW(step2[1] - step2[2], bd);
+ step1[3] = HIGHBD_WRAPLOW(step2[0] - step2[3], bd);
+ step1[4] = step2[4];
+ temp1 = (step2[6] - step2[5]) * cospi_16_64;
+ temp2 = (step2[5] + step2[6]) * cospi_16_64;
+ step1[5] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
+ step1[6] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
+ step1[7] = step2[7];
+
+ step1[8] = HIGHBD_WRAPLOW(step2[8] + step2[11], bd);
+ step1[9] = HIGHBD_WRAPLOW(step2[9] + step2[10], bd);
+ step1[10] = HIGHBD_WRAPLOW(step2[9] - step2[10], bd);
+ step1[11] = HIGHBD_WRAPLOW(step2[8] - step2[11], bd);
+ step1[12] = HIGHBD_WRAPLOW(-step2[12] + step2[15], bd);
+ step1[13] = HIGHBD_WRAPLOW(-step2[13] + step2[14], bd);
+ step1[14] = HIGHBD_WRAPLOW(step2[13] + step2[14], bd);
+ step1[15] = HIGHBD_WRAPLOW(step2[12] + step2[15], bd);
+
+ // stage 6
+ step2[0] = HIGHBD_WRAPLOW(step1[0] + step1[7], bd);
+ step2[1] = HIGHBD_WRAPLOW(step1[1] + step1[6], bd);
+ step2[2] = HIGHBD_WRAPLOW(step1[2] + step1[5], bd);
+ step2[3] = HIGHBD_WRAPLOW(step1[3] + step1[4], bd);
+ step2[4] = HIGHBD_WRAPLOW(step1[3] - step1[4], bd);
+ step2[5] = HIGHBD_WRAPLOW(step1[2] - step1[5], bd);
+ step2[6] = HIGHBD_WRAPLOW(step1[1] - step1[6], bd);
+ step2[7] = HIGHBD_WRAPLOW(step1[0] - step1[7], bd);
+ step2[8] = step1[8];
+ step2[9] = step1[9];
+ temp1 = (-step1[10] + step1[13]) * cospi_16_64;
+ temp2 = (step1[10] + step1[13]) * cospi_16_64;
+ step2[10] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
+ step2[13] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
+ temp1 = (-step1[11] + step1[12]) * cospi_16_64;
+ temp2 = (step1[11] + step1[12]) * cospi_16_64;
+ step2[11] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
+ step2[12] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
+ step2[14] = step1[14];
+ step2[15] = step1[15];
+
+ // stage 7
+ output[0] = HIGHBD_WRAPLOW(step2[0] + step2[15], bd);
+ output[1] = HIGHBD_WRAPLOW(step2[1] + step2[14], bd);
+ output[2] = HIGHBD_WRAPLOW(step2[2] + step2[13], bd);
+ output[3] = HIGHBD_WRAPLOW(step2[3] + step2[12], bd);
+ output[4] = HIGHBD_WRAPLOW(step2[4] + step2[11], bd);
+ output[5] = HIGHBD_WRAPLOW(step2[5] + step2[10], bd);
+ output[6] = HIGHBD_WRAPLOW(step2[6] + step2[9], bd);
+ output[7] = HIGHBD_WRAPLOW(step2[7] + step2[8], bd);
+ output[8] = HIGHBD_WRAPLOW(step2[7] - step2[8], bd);
+ output[9] = HIGHBD_WRAPLOW(step2[6] - step2[9], bd);
+ output[10] = HIGHBD_WRAPLOW(step2[5] - step2[10], bd);
+ output[11] = HIGHBD_WRAPLOW(step2[4] - step2[11], bd);
+ output[12] = HIGHBD_WRAPLOW(step2[3] - step2[12], bd);
+ output[13] = HIGHBD_WRAPLOW(step2[2] - step2[13], bd);
+ output[14] = HIGHBD_WRAPLOW(step2[1] - step2[14], bd);
+ output[15] = HIGHBD_WRAPLOW(step2[0] - step2[15], bd);
+}
+
+void aom_highbd_iadst16_c(const tran_low_t *input, tran_low_t *output, int bd) {
+ tran_high_t s0, s1, s2, s3, s4, s5, s6, s7, s8;
+ tran_high_t s9, s10, s11, s12, s13, s14, s15;
+
+ tran_low_t x0 = input[15];
+ tran_low_t x1 = input[0];
+ tran_low_t x2 = input[13];
+ tran_low_t x3 = input[2];
+ tran_low_t x4 = input[11];
+ tran_low_t x5 = input[4];
+ tran_low_t x6 = input[9];
+ tran_low_t x7 = input[6];
+ tran_low_t x8 = input[7];
+ tran_low_t x9 = input[8];
+ tran_low_t x10 = input[5];
+ tran_low_t x11 = input[10];
+ tran_low_t x12 = input[3];
+ tran_low_t x13 = input[12];
+ tran_low_t x14 = input[1];
+ tran_low_t x15 = input[14];
+ (void)bd;
+
+ if (!(x0 | x1 | x2 | x3 | x4 | x5 | x6 | x7 | x8 | x9 | x10 | x11 | x12 |
+ x13 | x14 | x15)) {
+ memset(output, 0, 16 * sizeof(*output));
+ return;
+ }
+
+ // stage 1
+ s0 = x0 * cospi_1_64 + x1 * cospi_31_64;
+ s1 = x0 * cospi_31_64 - x1 * cospi_1_64;
+ s2 = x2 * cospi_5_64 + x3 * cospi_27_64;
+ s3 = x2 * cospi_27_64 - x3 * cospi_5_64;
+ s4 = x4 * cospi_9_64 + x5 * cospi_23_64;
+ s5 = x4 * cospi_23_64 - x5 * cospi_9_64;
+ s6 = x6 * cospi_13_64 + x7 * cospi_19_64;
+ s7 = x6 * cospi_19_64 - x7 * cospi_13_64;
+ s8 = x8 * cospi_17_64 + x9 * cospi_15_64;
+ s9 = x8 * cospi_15_64 - x9 * cospi_17_64;
+ s10 = x10 * cospi_21_64 + x11 * cospi_11_64;
+ s11 = x10 * cospi_11_64 - x11 * cospi_21_64;
+ s12 = x12 * cospi_25_64 + x13 * cospi_7_64;
+ s13 = x12 * cospi_7_64 - x13 * cospi_25_64;
+ s14 = x14 * cospi_29_64 + x15 * cospi_3_64;
+ s15 = x14 * cospi_3_64 - x15 * cospi_29_64;
+
+ x0 = HIGHBD_WRAPLOW(dct_const_round_shift(s0 + s8), bd);
+ x1 = HIGHBD_WRAPLOW(dct_const_round_shift(s1 + s9), bd);
+ x2 = HIGHBD_WRAPLOW(dct_const_round_shift(s2 + s10), bd);
+ x3 = HIGHBD_WRAPLOW(dct_const_round_shift(s3 + s11), bd);
+ x4 = HIGHBD_WRAPLOW(dct_const_round_shift(s4 + s12), bd);
+ x5 = HIGHBD_WRAPLOW(dct_const_round_shift(s5 + s13), bd);
+ x6 = HIGHBD_WRAPLOW(dct_const_round_shift(s6 + s14), bd);
+ x7 = HIGHBD_WRAPLOW(dct_const_round_shift(s7 + s15), bd);
+ x8 = HIGHBD_WRAPLOW(dct_const_round_shift(s0 - s8), bd);
+ x9 = HIGHBD_WRAPLOW(dct_const_round_shift(s1 - s9), bd);
+ x10 = HIGHBD_WRAPLOW(dct_const_round_shift(s2 - s10), bd);
+ x11 = HIGHBD_WRAPLOW(dct_const_round_shift(s3 - s11), bd);
+ x12 = HIGHBD_WRAPLOW(dct_const_round_shift(s4 - s12), bd);
+ x13 = HIGHBD_WRAPLOW(dct_const_round_shift(s5 - s13), bd);
+ x14 = HIGHBD_WRAPLOW(dct_const_round_shift(s6 - s14), bd);
+ x15 = HIGHBD_WRAPLOW(dct_const_round_shift(s7 - s15), bd);
+
+ // stage 2
+ s0 = x0;
+ s1 = x1;
+ s2 = x2;
+ s3 = x3;
+ s4 = x4;
+ s5 = x5;
+ s6 = x6;
+ s7 = x7;
+ s8 = x8 * cospi_4_64 + x9 * cospi_28_64;
+ s9 = x8 * cospi_28_64 - x9 * cospi_4_64;
+ s10 = x10 * cospi_20_64 + x11 * cospi_12_64;
+ s11 = x10 * cospi_12_64 - x11 * cospi_20_64;
+ s12 = -x12 * cospi_28_64 + x13 * cospi_4_64;
+ s13 = x12 * cospi_4_64 + x13 * cospi_28_64;
+ s14 = -x14 * cospi_12_64 + x15 * cospi_20_64;
+ s15 = x14 * cospi_20_64 + x15 * cospi_12_64;
+
+ x0 = HIGHBD_WRAPLOW(s0 + s4, bd);
+ x1 = HIGHBD_WRAPLOW(s1 + s5, bd);
+ x2 = HIGHBD_WRAPLOW(s2 + s6, bd);
+ x3 = HIGHBD_WRAPLOW(s3 + s7, bd);
+ x4 = HIGHBD_WRAPLOW(s0 - s4, bd);
+ x5 = HIGHBD_WRAPLOW(s1 - s5, bd);
+ x6 = HIGHBD_WRAPLOW(s2 - s6, bd);
+ x7 = HIGHBD_WRAPLOW(s3 - s7, bd);
+ x8 = HIGHBD_WRAPLOW(dct_const_round_shift(s8 + s12), bd);
+ x9 = HIGHBD_WRAPLOW(dct_const_round_shift(s9 + s13), bd);
+ x10 = HIGHBD_WRAPLOW(dct_const_round_shift(s10 + s14), bd);
+ x11 = HIGHBD_WRAPLOW(dct_const_round_shift(s11 + s15), bd);
+ x12 = HIGHBD_WRAPLOW(dct_const_round_shift(s8 - s12), bd);
+ x13 = HIGHBD_WRAPLOW(dct_const_round_shift(s9 - s13), bd);
+ x14 = HIGHBD_WRAPLOW(dct_const_round_shift(s10 - s14), bd);
+ x15 = HIGHBD_WRAPLOW(dct_const_round_shift(s11 - s15), bd);
+
+ // stage 3
+ s0 = x0;
+ s1 = x1;
+ s2 = x2;
+ s3 = x3;
+ s4 = x4 * cospi_8_64 + x5 * cospi_24_64;
+ s5 = x4 * cospi_24_64 - x5 * cospi_8_64;
+ s6 = -x6 * cospi_24_64 + x7 * cospi_8_64;
+ s7 = x6 * cospi_8_64 + x7 * cospi_24_64;
+ s8 = x8;
+ s9 = x9;
+ s10 = x10;
+ s11 = x11;
+ s12 = x12 * cospi_8_64 + x13 * cospi_24_64;
+ s13 = x12 * cospi_24_64 - x13 * cospi_8_64;
+ s14 = -x14 * cospi_24_64 + x15 * cospi_8_64;
+ s15 = x14 * cospi_8_64 + x15 * cospi_24_64;
+
+ x0 = HIGHBD_WRAPLOW(s0 + s2, bd);
+ x1 = HIGHBD_WRAPLOW(s1 + s3, bd);
+ x2 = HIGHBD_WRAPLOW(s0 - s2, bd);
+ x3 = HIGHBD_WRAPLOW(s1 - s3, bd);
+ x4 = HIGHBD_WRAPLOW(dct_const_round_shift(s4 + s6), bd);
+ x5 = HIGHBD_WRAPLOW(dct_const_round_shift(s5 + s7), bd);
+ x6 = HIGHBD_WRAPLOW(dct_const_round_shift(s4 - s6), bd);
+ x7 = HIGHBD_WRAPLOW(dct_const_round_shift(s5 - s7), bd);
+ x8 = HIGHBD_WRAPLOW(s8 + s10, bd);
+ x9 = HIGHBD_WRAPLOW(s9 + s11, bd);
+ x10 = HIGHBD_WRAPLOW(s8 - s10, bd);
+ x11 = HIGHBD_WRAPLOW(s9 - s11, bd);
+ x12 = HIGHBD_WRAPLOW(dct_const_round_shift(s12 + s14), bd);
+ x13 = HIGHBD_WRAPLOW(dct_const_round_shift(s13 + s15), bd);
+ x14 = HIGHBD_WRAPLOW(dct_const_round_shift(s12 - s14), bd);
+ x15 = HIGHBD_WRAPLOW(dct_const_round_shift(s13 - s15), bd);
+
+ // stage 4
+ s2 = (-cospi_16_64) * (x2 + x3);
+ s3 = cospi_16_64 * (x2 - x3);
+ s6 = cospi_16_64 * (x6 + x7);
+ s7 = cospi_16_64 * (-x6 + x7);
+ s10 = cospi_16_64 * (x10 + x11);
+ s11 = cospi_16_64 * (-x10 + x11);
+ s14 = (-cospi_16_64) * (x14 + x15);
+ s15 = cospi_16_64 * (x14 - x15);
+
+ x2 = HIGHBD_WRAPLOW(dct_const_round_shift(s2), bd);
+ x3 = HIGHBD_WRAPLOW(dct_const_round_shift(s3), bd);
+ x6 = HIGHBD_WRAPLOW(dct_const_round_shift(s6), bd);
+ x7 = HIGHBD_WRAPLOW(dct_const_round_shift(s7), bd);
+ x10 = HIGHBD_WRAPLOW(dct_const_round_shift(s10), bd);
+ x11 = HIGHBD_WRAPLOW(dct_const_round_shift(s11), bd);
+ x14 = HIGHBD_WRAPLOW(dct_const_round_shift(s14), bd);
+ x15 = HIGHBD_WRAPLOW(dct_const_round_shift(s15), bd);
+
+ output[0] = HIGHBD_WRAPLOW(x0, bd);
+ output[1] = HIGHBD_WRAPLOW(-x8, bd);
+ output[2] = HIGHBD_WRAPLOW(x12, bd);
+ output[3] = HIGHBD_WRAPLOW(-x4, bd);
+ output[4] = HIGHBD_WRAPLOW(x6, bd);
+ output[5] = HIGHBD_WRAPLOW(x14, bd);
+ output[6] = HIGHBD_WRAPLOW(x10, bd);
+ output[7] = HIGHBD_WRAPLOW(x2, bd);
+ output[8] = HIGHBD_WRAPLOW(x3, bd);
+ output[9] = HIGHBD_WRAPLOW(x11, bd);
+ output[10] = HIGHBD_WRAPLOW(x15, bd);
+ output[11] = HIGHBD_WRAPLOW(x7, bd);
+ output[12] = HIGHBD_WRAPLOW(x5, bd);
+ output[13] = HIGHBD_WRAPLOW(-x13, bd);
+ output[14] = HIGHBD_WRAPLOW(x9, bd);
+ output[15] = HIGHBD_WRAPLOW(-x1, bd);
+}
+
+void aom_highbd_idct32_c(const tran_low_t *input, tran_low_t *output, int bd) {
+ tran_low_t step1[32], step2[32];
+ tran_high_t temp1, temp2;
+ (void)bd;
+
+ // stage 1
+ step1[0] = input[0];
+ step1[1] = input[16];
+ step1[2] = input[8];
+ step1[3] = input[24];
+ step1[4] = input[4];
+ step1[5] = input[20];
+ step1[6] = input[12];
+ step1[7] = input[28];
+ step1[8] = input[2];
+ step1[9] = input[18];
+ step1[10] = input[10];
+ step1[11] = input[26];
+ step1[12] = input[6];
+ step1[13] = input[22];
+ step1[14] = input[14];
+ step1[15] = input[30];
+
+ temp1 = input[1] * cospi_31_64 - input[31] * cospi_1_64;
+ temp2 = input[1] * cospi_1_64 + input[31] * cospi_31_64;
+ step1[16] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
+ step1[31] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
+
+ temp1 = input[17] * cospi_15_64 - input[15] * cospi_17_64;
+ temp2 = input[17] * cospi_17_64 + input[15] * cospi_15_64;
+ step1[17] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
+ step1[30] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
+
+ temp1 = input[9] * cospi_23_64 - input[23] * cospi_9_64;
+ temp2 = input[9] * cospi_9_64 + input[23] * cospi_23_64;
+ step1[18] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
+ step1[29] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
+
+ temp1 = input[25] * cospi_7_64 - input[7] * cospi_25_64;
+ temp2 = input[25] * cospi_25_64 + input[7] * cospi_7_64;
+ step1[19] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
+ step1[28] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
+
+ temp1 = input[5] * cospi_27_64 - input[27] * cospi_5_64;
+ temp2 = input[5] * cospi_5_64 + input[27] * cospi_27_64;
+ step1[20] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
+ step1[27] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
+
+ temp1 = input[21] * cospi_11_64 - input[11] * cospi_21_64;
+ temp2 = input[21] * cospi_21_64 + input[11] * cospi_11_64;
+ step1[21] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
+ step1[26] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
+
+ temp1 = input[13] * cospi_19_64 - input[19] * cospi_13_64;
+ temp2 = input[13] * cospi_13_64 + input[19] * cospi_19_64;
+ step1[22] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
+ step1[25] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
+
+ temp1 = input[29] * cospi_3_64 - input[3] * cospi_29_64;
+ temp2 = input[29] * cospi_29_64 + input[3] * cospi_3_64;
+ step1[23] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
+ step1[24] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
+
+ // stage 2
+ step2[0] = step1[0];
+ step2[1] = step1[1];
+ step2[2] = step1[2];
+ step2[3] = step1[3];
+ step2[4] = step1[4];
+ step2[5] = step1[5];
+ step2[6] = step1[6];
+ step2[7] = step1[7];
+
+ temp1 = step1[8] * cospi_30_64 - step1[15] * cospi_2_64;
+ temp2 = step1[8] * cospi_2_64 + step1[15] * cospi_30_64;
+ step2[8] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
+ step2[15] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
+
+ temp1 = step1[9] * cospi_14_64 - step1[14] * cospi_18_64;
+ temp2 = step1[9] * cospi_18_64 + step1[14] * cospi_14_64;
+ step2[9] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
+ step2[14] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
+
+ temp1 = step1[10] * cospi_22_64 - step1[13] * cospi_10_64;
+ temp2 = step1[10] * cospi_10_64 + step1[13] * cospi_22_64;
+ step2[10] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
+ step2[13] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
+
+ temp1 = step1[11] * cospi_6_64 - step1[12] * cospi_26_64;
+ temp2 = step1[11] * cospi_26_64 + step1[12] * cospi_6_64;
+ step2[11] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
+ step2[12] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
+
+ step2[16] = HIGHBD_WRAPLOW(step1[16] + step1[17], bd);
+ step2[17] = HIGHBD_WRAPLOW(step1[16] - step1[17], bd);
+ step2[18] = HIGHBD_WRAPLOW(-step1[18] + step1[19], bd);
+ step2[19] = HIGHBD_WRAPLOW(step1[18] + step1[19], bd);
+ step2[20] = HIGHBD_WRAPLOW(step1[20] + step1[21], bd);
+ step2[21] = HIGHBD_WRAPLOW(step1[20] - step1[21], bd);
+ step2[22] = HIGHBD_WRAPLOW(-step1[22] + step1[23], bd);
+ step2[23] = HIGHBD_WRAPLOW(step1[22] + step1[23], bd);
+ step2[24] = HIGHBD_WRAPLOW(step1[24] + step1[25], bd);
+ step2[25] = HIGHBD_WRAPLOW(step1[24] - step1[25], bd);
+ step2[26] = HIGHBD_WRAPLOW(-step1[26] + step1[27], bd);
+ step2[27] = HIGHBD_WRAPLOW(step1[26] + step1[27], bd);
+ step2[28] = HIGHBD_WRAPLOW(step1[28] + step1[29], bd);
+ step2[29] = HIGHBD_WRAPLOW(step1[28] - step1[29], bd);
+ step2[30] = HIGHBD_WRAPLOW(-step1[30] + step1[31], bd);
+ step2[31] = HIGHBD_WRAPLOW(step1[30] + step1[31], bd);
+
+ // stage 3
+ step1[0] = step2[0];
+ step1[1] = step2[1];
+ step1[2] = step2[2];
+ step1[3] = step2[3];
+
+ temp1 = step2[4] * cospi_28_64 - step2[7] * cospi_4_64;
+ temp2 = step2[4] * cospi_4_64 + step2[7] * cospi_28_64;
+ step1[4] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
+ step1[7] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
+ temp1 = step2[5] * cospi_12_64 - step2[6] * cospi_20_64;
+ temp2 = step2[5] * cospi_20_64 + step2[6] * cospi_12_64;
+ step1[5] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
+ step1[6] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
+
+ step1[8] = HIGHBD_WRAPLOW(step2[8] + step2[9], bd);
+ step1[9] = HIGHBD_WRAPLOW(step2[8] - step2[9], bd);
+ step1[10] = HIGHBD_WRAPLOW(-step2[10] + step2[11], bd);
+ step1[11] = HIGHBD_WRAPLOW(step2[10] + step2[11], bd);
+ step1[12] = HIGHBD_WRAPLOW(step2[12] + step2[13], bd);
+ step1[13] = HIGHBD_WRAPLOW(step2[12] - step2[13], bd);
+ step1[14] = HIGHBD_WRAPLOW(-step2[14] + step2[15], bd);
+ step1[15] = HIGHBD_WRAPLOW(step2[14] + step2[15], bd);
+
+ step1[16] = step2[16];
+ step1[31] = step2[31];
+ temp1 = -step2[17] * cospi_4_64 + step2[30] * cospi_28_64;
+ temp2 = step2[17] * cospi_28_64 + step2[30] * cospi_4_64;
+ step1[17] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
+ step1[30] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
+ temp1 = -step2[18] * cospi_28_64 - step2[29] * cospi_4_64;
+ temp2 = -step2[18] * cospi_4_64 + step2[29] * cospi_28_64;
+ step1[18] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
+ step1[29] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
+ step1[19] = step2[19];
+ step1[20] = step2[20];
+ temp1 = -step2[21] * cospi_20_64 + step2[26] * cospi_12_64;
+ temp2 = step2[21] * cospi_12_64 + step2[26] * cospi_20_64;
+ step1[21] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
+ step1[26] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
+ temp1 = -step2[22] * cospi_12_64 - step2[25] * cospi_20_64;
+ temp2 = -step2[22] * cospi_20_64 + step2[25] * cospi_12_64;
+ step1[22] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
+ step1[25] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
+ step1[23] = step2[23];
+ step1[24] = step2[24];
+ step1[27] = step2[27];
+ step1[28] = step2[28];
+
+ // stage 4
+ temp1 = (step1[0] + step1[1]) * cospi_16_64;
+ temp2 = (step1[0] - step1[1]) * cospi_16_64;
+ step2[0] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
+ step2[1] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
+ temp1 = step1[2] * cospi_24_64 - step1[3] * cospi_8_64;
+ temp2 = step1[2] * cospi_8_64 + step1[3] * cospi_24_64;
+ step2[2] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
+ step2[3] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
+ step2[4] = HIGHBD_WRAPLOW(step1[4] + step1[5], bd);
+ step2[5] = HIGHBD_WRAPLOW(step1[4] - step1[5], bd);
+ step2[6] = HIGHBD_WRAPLOW(-step1[6] + step1[7], bd);
+ step2[7] = HIGHBD_WRAPLOW(step1[6] + step1[7], bd);
+
+ step2[8] = step1[8];
+ step2[15] = step1[15];
+ temp1 = -step1[9] * cospi_8_64 + step1[14] * cospi_24_64;
+ temp2 = step1[9] * cospi_24_64 + step1[14] * cospi_8_64;
+ step2[9] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
+ step2[14] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
+ temp1 = -step1[10] * cospi_24_64 - step1[13] * cospi_8_64;
+ temp2 = -step1[10] * cospi_8_64 + step1[13] * cospi_24_64;
+ step2[10] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
+ step2[13] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
+ step2[11] = step1[11];
+ step2[12] = step1[12];
+
+ step2[16] = HIGHBD_WRAPLOW(step1[16] + step1[19], bd);
+ step2[17] = HIGHBD_WRAPLOW(step1[17] + step1[18], bd);
+ step2[18] = HIGHBD_WRAPLOW(step1[17] - step1[18], bd);
+ step2[19] = HIGHBD_WRAPLOW(step1[16] - step1[19], bd);
+ step2[20] = HIGHBD_WRAPLOW(-step1[20] + step1[23], bd);
+ step2[21] = HIGHBD_WRAPLOW(-step1[21] + step1[22], bd);
+ step2[22] = HIGHBD_WRAPLOW(step1[21] + step1[22], bd);
+ step2[23] = HIGHBD_WRAPLOW(step1[20] + step1[23], bd);
+
+ step2[24] = HIGHBD_WRAPLOW(step1[24] + step1[27], bd);
+ step2[25] = HIGHBD_WRAPLOW(step1[25] + step1[26], bd);
+ step2[26] = HIGHBD_WRAPLOW(step1[25] - step1[26], bd);
+ step2[27] = HIGHBD_WRAPLOW(step1[24] - step1[27], bd);
+ step2[28] = HIGHBD_WRAPLOW(-step1[28] + step1[31], bd);
+ step2[29] = HIGHBD_WRAPLOW(-step1[29] + step1[30], bd);
+ step2[30] = HIGHBD_WRAPLOW(step1[29] + step1[30], bd);
+ step2[31] = HIGHBD_WRAPLOW(step1[28] + step1[31], bd);
+
+ // stage 5
+ step1[0] = HIGHBD_WRAPLOW(step2[0] + step2[3], bd);
+ step1[1] = HIGHBD_WRAPLOW(step2[1] + step2[2], bd);
+ step1[2] = HIGHBD_WRAPLOW(step2[1] - step2[2], bd);
+ step1[3] = HIGHBD_WRAPLOW(step2[0] - step2[3], bd);
+ step1[4] = step2[4];
+ temp1 = (step2[6] - step2[5]) * cospi_16_64;
+ temp2 = (step2[5] + step2[6]) * cospi_16_64;
+ step1[5] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
+ step1[6] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
+ step1[7] = step2[7];
+
+ step1[8] = HIGHBD_WRAPLOW(step2[8] + step2[11], bd);
+ step1[9] = HIGHBD_WRAPLOW(step2[9] + step2[10], bd);
+ step1[10] = HIGHBD_WRAPLOW(step2[9] - step2[10], bd);
+ step1[11] = HIGHBD_WRAPLOW(step2[8] - step2[11], bd);
+ step1[12] = HIGHBD_WRAPLOW(-step2[12] + step2[15], bd);
+ step1[13] = HIGHBD_WRAPLOW(-step2[13] + step2[14], bd);
+ step1[14] = HIGHBD_WRAPLOW(step2[13] + step2[14], bd);
+ step1[15] = HIGHBD_WRAPLOW(step2[12] + step2[15], bd);
+
+ step1[16] = step2[16];
+ step1[17] = step2[17];
+ temp1 = -step2[18] * cospi_8_64 + step2[29] * cospi_24_64;
+ temp2 = step2[18] * cospi_24_64 + step2[29] * cospi_8_64;
+ step1[18] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
+ step1[29] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
+ temp1 = -step2[19] * cospi_8_64 + step2[28] * cospi_24_64;
+ temp2 = step2[19] * cospi_24_64 + step2[28] * cospi_8_64;
+ step1[19] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
+ step1[28] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
+ temp1 = -step2[20] * cospi_24_64 - step2[27] * cospi_8_64;
+ temp2 = -step2[20] * cospi_8_64 + step2[27] * cospi_24_64;
+ step1[20] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
+ step1[27] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
+ temp1 = -step2[21] * cospi_24_64 - step2[26] * cospi_8_64;
+ temp2 = -step2[21] * cospi_8_64 + step2[26] * cospi_24_64;
+ step1[21] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
+ step1[26] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
+ step1[22] = step2[22];
+ step1[23] = step2[23];
+ step1[24] = step2[24];
+ step1[25] = step2[25];
+ step1[30] = step2[30];
+ step1[31] = step2[31];
+
+ // stage 6
+ step2[0] = HIGHBD_WRAPLOW(step1[0] + step1[7], bd);
+ step2[1] = HIGHBD_WRAPLOW(step1[1] + step1[6], bd);
+ step2[2] = HIGHBD_WRAPLOW(step1[2] + step1[5], bd);
+ step2[3] = HIGHBD_WRAPLOW(step1[3] + step1[4], bd);
+ step2[4] = HIGHBD_WRAPLOW(step1[3] - step1[4], bd);
+ step2[5] = HIGHBD_WRAPLOW(step1[2] - step1[5], bd);
+ step2[6] = HIGHBD_WRAPLOW(step1[1] - step1[6], bd);
+ step2[7] = HIGHBD_WRAPLOW(step1[0] - step1[7], bd);
+ step2[8] = step1[8];
+ step2[9] = step1[9];
+ temp1 = (-step1[10] + step1[13]) * cospi_16_64;
+ temp2 = (step1[10] + step1[13]) * cospi_16_64;
+ step2[10] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
+ step2[13] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
+ temp1 = (-step1[11] + step1[12]) * cospi_16_64;
+ temp2 = (step1[11] + step1[12]) * cospi_16_64;
+ step2[11] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
+ step2[12] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
+ step2[14] = step1[14];
+ step2[15] = step1[15];
+
+ step2[16] = HIGHBD_WRAPLOW(step1[16] + step1[23], bd);
+ step2[17] = HIGHBD_WRAPLOW(step1[17] + step1[22], bd);
+ step2[18] = HIGHBD_WRAPLOW(step1[18] + step1[21], bd);
+ step2[19] = HIGHBD_WRAPLOW(step1[19] + step1[20], bd);
+ step2[20] = HIGHBD_WRAPLOW(step1[19] - step1[20], bd);
+ step2[21] = HIGHBD_WRAPLOW(step1[18] - step1[21], bd);
+ step2[22] = HIGHBD_WRAPLOW(step1[17] - step1[22], bd);
+ step2[23] = HIGHBD_WRAPLOW(step1[16] - step1[23], bd);
+
+ step2[24] = HIGHBD_WRAPLOW(-step1[24] + step1[31], bd);
+ step2[25] = HIGHBD_WRAPLOW(-step1[25] + step1[30], bd);
+ step2[26] = HIGHBD_WRAPLOW(-step1[26] + step1[29], bd);
+ step2[27] = HIGHBD_WRAPLOW(-step1[27] + step1[28], bd);
+ step2[28] = HIGHBD_WRAPLOW(step1[27] + step1[28], bd);
+ step2[29] = HIGHBD_WRAPLOW(step1[26] + step1[29], bd);
+ step2[30] = HIGHBD_WRAPLOW(step1[25] + step1[30], bd);
+ step2[31] = HIGHBD_WRAPLOW(step1[24] + step1[31], bd);
+
+ // stage 7
+ step1[0] = HIGHBD_WRAPLOW(step2[0] + step2[15], bd);
+ step1[1] = HIGHBD_WRAPLOW(step2[1] + step2[14], bd);
+ step1[2] = HIGHBD_WRAPLOW(step2[2] + step2[13], bd);
+ step1[3] = HIGHBD_WRAPLOW(step2[3] + step2[12], bd);
+ step1[4] = HIGHBD_WRAPLOW(step2[4] + step2[11], bd);
+ step1[5] = HIGHBD_WRAPLOW(step2[5] + step2[10], bd);
+ step1[6] = HIGHBD_WRAPLOW(step2[6] + step2[9], bd);
+ step1[7] = HIGHBD_WRAPLOW(step2[7] + step2[8], bd);
+ step1[8] = HIGHBD_WRAPLOW(step2[7] - step2[8], bd);
+ step1[9] = HIGHBD_WRAPLOW(step2[6] - step2[9], bd);
+ step1[10] = HIGHBD_WRAPLOW(step2[5] - step2[10], bd);
+ step1[11] = HIGHBD_WRAPLOW(step2[4] - step2[11], bd);
+ step1[12] = HIGHBD_WRAPLOW(step2[3] - step2[12], bd);
+ step1[13] = HIGHBD_WRAPLOW(step2[2] - step2[13], bd);
+ step1[14] = HIGHBD_WRAPLOW(step2[1] - step2[14], bd);
+ step1[15] = HIGHBD_WRAPLOW(step2[0] - step2[15], bd);
+
+ step1[16] = step2[16];
+ step1[17] = step2[17];
+ step1[18] = step2[18];
+ step1[19] = step2[19];
+ temp1 = (-step2[20] + step2[27]) * cospi_16_64;
+ temp2 = (step2[20] + step2[27]) * cospi_16_64;
+ step1[20] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
+ step1[27] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
+ temp1 = (-step2[21] + step2[26]) * cospi_16_64;
+ temp2 = (step2[21] + step2[26]) * cospi_16_64;
+ step1[21] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
+ step1[26] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
+ temp1 = (-step2[22] + step2[25]) * cospi_16_64;
+ temp2 = (step2[22] + step2[25]) * cospi_16_64;
+ step1[22] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
+ step1[25] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
+ temp1 = (-step2[23] + step2[24]) * cospi_16_64;
+ temp2 = (step2[23] + step2[24]) * cospi_16_64;
+ step1[23] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
+ step1[24] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
+ step1[28] = step2[28];
+ step1[29] = step2[29];
+ step1[30] = step2[30];
+ step1[31] = step2[31];
+
+ // final stage
+ output[0] = HIGHBD_WRAPLOW(step1[0] + step1[31], bd);
+ output[1] = HIGHBD_WRAPLOW(step1[1] + step1[30], bd);
+ output[2] = HIGHBD_WRAPLOW(step1[2] + step1[29], bd);
+ output[3] = HIGHBD_WRAPLOW(step1[3] + step1[28], bd);
+ output[4] = HIGHBD_WRAPLOW(step1[4] + step1[27], bd);
+ output[5] = HIGHBD_WRAPLOW(step1[5] + step1[26], bd);
+ output[6] = HIGHBD_WRAPLOW(step1[6] + step1[25], bd);
+ output[7] = HIGHBD_WRAPLOW(step1[7] + step1[24], bd);
+ output[8] = HIGHBD_WRAPLOW(step1[8] + step1[23], bd);
+ output[9] = HIGHBD_WRAPLOW(step1[9] + step1[22], bd);
+ output[10] = HIGHBD_WRAPLOW(step1[10] + step1[21], bd);
+ output[11] = HIGHBD_WRAPLOW(step1[11] + step1[20], bd);
+ output[12] = HIGHBD_WRAPLOW(step1[12] + step1[19], bd);
+ output[13] = HIGHBD_WRAPLOW(step1[13] + step1[18], bd);
+ output[14] = HIGHBD_WRAPLOW(step1[14] + step1[17], bd);
+ output[15] = HIGHBD_WRAPLOW(step1[15] + step1[16], bd);
+ output[16] = HIGHBD_WRAPLOW(step1[15] - step1[16], bd);
+ output[17] = HIGHBD_WRAPLOW(step1[14] - step1[17], bd);
+ output[18] = HIGHBD_WRAPLOW(step1[13] - step1[18], bd);
+ output[19] = HIGHBD_WRAPLOW(step1[12] - step1[19], bd);
+ output[20] = HIGHBD_WRAPLOW(step1[11] - step1[20], bd);
+ output[21] = HIGHBD_WRAPLOW(step1[10] - step1[21], bd);
+ output[22] = HIGHBD_WRAPLOW(step1[9] - step1[22], bd);
+ output[23] = HIGHBD_WRAPLOW(step1[8] - step1[23], bd);
+ output[24] = HIGHBD_WRAPLOW(step1[7] - step1[24], bd);
+ output[25] = HIGHBD_WRAPLOW(step1[6] - step1[25], bd);
+ output[26] = HIGHBD_WRAPLOW(step1[5] - step1[26], bd);
+ output[27] = HIGHBD_WRAPLOW(step1[4] - step1[27], bd);
+ output[28] = HIGHBD_WRAPLOW(step1[3] - step1[28], bd);
+ output[29] = HIGHBD_WRAPLOW(step1[2] - step1[29], bd);
+ output[30] = HIGHBD_WRAPLOW(step1[1] - step1[30], bd);
+ output[31] = HIGHBD_WRAPLOW(step1[0] - step1[31], bd);
+}
+
#endif // CONFIG_HIGHBITDEPTH
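
The stages above repeat two primitives of the inverse-DCT butterfly network: an add/sub pair on mirrored indices, and a rotation by cospi_16_64 followed by dct_const_round_shift. A minimal scalar sketch of both, assuming the usual libaom constants (cospi_16_64 = 11585, DCT_CONST_BITS = 14) and omitting the HIGHBD_WRAPLOW clamping:

static void butterfly(const int32_t *in, int i, int j, int32_t *out) {
  out[i] = in[i] + in[j];  /* e.g. step2[0] = step1[0] + step1[7] above */
  out[j] = in[i] - in[j];  /* e.g. step2[7] = step1[0] - step1[7] above */
}

static void rotate_pi4(int32_t a, int32_t b, int32_t *out_lo, int32_t *out_hi) {
  const int32_t cospi_16_64 = 11585;  /* round(2^14 * cos(pi/4)) */
  const int DCT_CONST_BITS = 14;      /* dct_const_round_shift's shift */
  const int64_t t1 = (int64_t)(-a + b) * cospi_16_64;
  const int64_t t2 = (int64_t)(a + b) * cospi_16_64;
  *out_lo = (int32_t)((t1 + (1 << (DCT_CONST_BITS - 1))) >> DCT_CONST_BITS);
  *out_hi = (int32_t)((t2 + (1 << (DCT_CONST_BITS - 1))) >> DCT_CONST_BITS);
}
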
diff --git a/third_party/aom/aom_dsp/loopfilter.c b/third_party/aom/aom_dsp/loopfilter.c
index e2e8392194..7ea1e6b89b 100644
--- a/third_party/aom/aom_dsp/loopfilter.c
+++ b/third_party/aom/aom_dsp/loopfilter.c
@@ -149,10 +149,15 @@ void aom_lpf_horizontal_4_c(uint8_t *s, int p /* pitch */,
const uint8_t *blimit, const uint8_t *limit,
const uint8_t *thresh) {
int i;
+#if CONFIG_PARALLEL_DEBLOCKING && CONFIG_CB4X4
+ int count = 4;
+#else
+ int count = 8;
+#endif
// loop filter designed to work using chars so that we can make maximum use
// of 8 bit simd instructions.
- for (i = 0; i < 8; ++i) {
+ for (i = 0; i < count; ++i) {
#if !CONFIG_PARALLEL_DEBLOCKING
const uint8_t p3 = s[-4 * p], p2 = s[-3 * p], p1 = s[-2 * p], p0 = s[-p];
const uint8_t q0 = s[0 * p], q1 = s[1 * p], q2 = s[2 * p], q3 = s[3 * p];
@@ -179,10 +184,15 @@ void aom_lpf_horizontal_4_dual_c(uint8_t *s, int p, const uint8_t *blimit0,
void aom_lpf_vertical_4_c(uint8_t *s, int pitch, const uint8_t *blimit,
const uint8_t *limit, const uint8_t *thresh) {
int i;
+#if CONFIG_PARALLEL_DEBLOCKING && CONFIG_CB4X4
+ int count = 4;
+#else
+ int count = 8;
+#endif
// loop filter designed to work using chars so that we can make maximum use
// of 8 bit simd instructions.
- for (i = 0; i < 8; ++i) {
+ for (i = 0; i < count; ++i) {
#if !CONFIG_PARALLEL_DEBLOCKING
const uint8_t p3 = s[-4], p2 = s[-3], p1 = s[-2], p0 = s[-1];
const uint8_t q0 = s[0], q1 = s[1], q2 = s[2], q3 = s[3];
@@ -206,7 +216,7 @@ void aom_lpf_vertical_4_dual_c(uint8_t *s, int pitch, const uint8_t *blimit0,
aom_lpf_vertical_4_c(s + 8 * pitch, pitch, blimit1, limit1, thresh1);
}
-static INLINE void filter8(int8_t mask, uint8_t thresh, uint8_t flat,
+static INLINE void filter8(int8_t mask, uint8_t thresh, int8_t flat,
uint8_t *op3, uint8_t *op2, uint8_t *op1,
uint8_t *op0, uint8_t *oq0, uint8_t *oq1,
uint8_t *oq2, uint8_t *oq3) {
@@ -229,10 +239,15 @@ static INLINE void filter8(int8_t mask, uint8_t thresh, uint8_t flat,
void aom_lpf_horizontal_8_c(uint8_t *s, int p, const uint8_t *blimit,
const uint8_t *limit, const uint8_t *thresh) {
int i;
+#if CONFIG_PARALLEL_DEBLOCKING && CONFIG_CB4X4
+ int count = 4;
+#else
+ int count = 8;
+#endif
// loop filter designed to work using chars so that we can make maximum use
// of 8 bit simd instructions.
- for (i = 0; i < 8; ++i) {
+ for (i = 0; i < count; ++i) {
const uint8_t p3 = s[-4 * p], p2 = s[-3 * p], p1 = s[-2 * p], p0 = s[-p];
const uint8_t q0 = s[0 * p], q1 = s[1 * p], q2 = s[2 * p], q3 = s[3 * p];
@@ -256,8 +271,13 @@ void aom_lpf_horizontal_8_dual_c(uint8_t *s, int p, const uint8_t *blimit0,
void aom_lpf_vertical_8_c(uint8_t *s, int pitch, const uint8_t *blimit,
const uint8_t *limit, const uint8_t *thresh) {
int i;
+#if CONFIG_PARALLEL_DEBLOCKING && CONFIG_CB4X4
+ int count = 4;
+#else
+ int count = 8;
+#endif
- for (i = 0; i < 8; ++i) {
+ for (i = 0; i < count; ++i) {
const uint8_t p3 = s[-4], p2 = s[-3], p1 = s[-2], p0 = s[-1];
const uint8_t q0 = s[0], q1 = s[1], q2 = s[2], q3 = s[3];
const int8_t mask =
@@ -278,8 +298,8 @@ void aom_lpf_vertical_8_dual_c(uint8_t *s, int pitch, const uint8_t *blimit0,
}
#if PARALLEL_DEBLOCKING_11_TAP
-static INLINE void filter12(int8_t mask, uint8_t thresh, uint8_t flat,
- uint8_t flat2, uint8_t *op5, uint8_t *op4,
+static INLINE void filter12(int8_t mask, uint8_t thresh, int8_t flat,
+ int8_t flat2, uint8_t *op5, uint8_t *op4,
uint8_t *op3, uint8_t *op2, uint8_t *op1,
uint8_t *op0, uint8_t *oq0, uint8_t *oq1,
uint8_t *oq2, uint8_t *oq3, uint8_t *oq4,
@@ -308,8 +328,8 @@ static INLINE void filter12(int8_t mask, uint8_t thresh, uint8_t flat,
#endif
#if PARALLEL_DEBLOCKING_9_TAP
-static INLINE void filter10(int8_t mask, uint8_t thresh, uint8_t flat,
- uint8_t flat2, uint8_t *op4, uint8_t *op3,
+static INLINE void filter10(int8_t mask, uint8_t thresh, int8_t flat,
+ int8_t flat2, uint8_t *op4, uint8_t *op3,
uint8_t *op2, uint8_t *op1, uint8_t *op0,
uint8_t *oq0, uint8_t *oq1, uint8_t *oq2,
uint8_t *oq3, uint8_t *oq4) {
@@ -332,8 +352,8 @@ static INLINE void filter10(int8_t mask, uint8_t thresh, uint8_t flat,
}
#endif
-static INLINE void filter16(int8_t mask, uint8_t thresh, uint8_t flat,
- uint8_t flat2, uint8_t *op7, uint8_t *op6,
+static INLINE void filter16(int8_t mask, uint8_t thresh, int8_t flat,
+ int8_t flat2, uint8_t *op7, uint8_t *op6,
uint8_t *op5, uint8_t *op4, uint8_t *op3,
uint8_t *op2, uint8_t *op1, uint8_t *op0,
uint8_t *oq0, uint8_t *oq1, uint8_t *oq2,
@@ -390,10 +410,15 @@ static void mb_lpf_horizontal_edge_w(uint8_t *s, int p, const uint8_t *blimit,
const uint8_t *limit,
const uint8_t *thresh, int count) {
int i;
+#if CONFIG_PARALLEL_DEBLOCKING && CONFIG_CB4X4
+ int step = 4;
+#else
+ int step = 8;
+#endif
// loop filter designed to work using chars so that we can make maximum use
// of 8 bit simd instructions.
- for (i = 0; i < 8 * count; ++i) {
+ for (i = 0; i < step * count; ++i) {
const uint8_t p7 = s[-8 * p], p6 = s[-7 * p], p5 = s[-6 * p],
p4 = s[-5 * p], p3 = s[-4 * p], p2 = s[-3 * p],
p1 = s[-2 * p], p0 = s[-p];
@@ -436,7 +461,11 @@ void aom_lpf_horizontal_edge_8_c(uint8_t *s, int p, const uint8_t *blimit,
void aom_lpf_horizontal_edge_16_c(uint8_t *s, int p, const uint8_t *blimit,
const uint8_t *limit, const uint8_t *thresh) {
+#if CONFIG_PARALLEL_DEBLOCKING && CONFIG_CB4X4
+ mb_lpf_horizontal_edge_w(s, p, blimit, limit, thresh, 1);
+#else
mb_lpf_horizontal_edge_w(s, p, blimit, limit, thresh, 2);
+#endif
}
static void mb_lpf_vertical_edge_w(uint8_t *s, int p, const uint8_t *blimit,
@@ -478,7 +507,11 @@ static void mb_lpf_vertical_edge_w(uint8_t *s, int p, const uint8_t *blimit,
void aom_lpf_vertical_16_c(uint8_t *s, int p, const uint8_t *blimit,
const uint8_t *limit, const uint8_t *thresh) {
+#if CONFIG_PARALLEL_DEBLOCKING && CONFIG_CB4X4
+ mb_lpf_vertical_edge_w(s, p, blimit, limit, thresh, 4);
+#else
mb_lpf_vertical_edge_w(s, p, blimit, limit, thresh, 8);
+#endif
}
void aom_lpf_vertical_16_dual_c(uint8_t *s, int p, const uint8_t *blimit,
@@ -596,10 +629,15 @@ void aom_highbd_lpf_horizontal_4_c(uint16_t *s, int p /* pitch */,
const uint8_t *blimit, const uint8_t *limit,
const uint8_t *thresh, int bd) {
int i;
+#if CONFIG_PARALLEL_DEBLOCKING && CONFIG_CB4X4
+ int count = 4;
+#else
+ int count = 8;
+#endif
// loop filter designed to work using chars so that we can make maximum use
// of 8 bit simd instructions.
- for (i = 0; i < 8; ++i) {
+ for (i = 0; i < count; ++i) {
#if !CONFIG_PARALLEL_DEBLOCKING
const uint16_t p3 = s[-4 * p];
const uint16_t p2 = s[-3 * p];
@@ -636,10 +674,15 @@ void aom_highbd_lpf_vertical_4_c(uint16_t *s, int pitch, const uint8_t *blimit,
const uint8_t *limit, const uint8_t *thresh,
int bd) {
int i;
+#if CONFIG_PARALLEL_DEBLOCKING && CONFIG_CB4X4
+ int count = 4;
+#else
+ int count = 8;
+#endif
// loop filter designed to work using chars so that we can make maximum use
// of 8 bit simd instructions.
- for (i = 0; i < 8; ++i) {
+ for (i = 0; i < count; ++i) {
#if !CONFIG_PARALLEL_DEBLOCKING
const uint16_t p3 = s[-4], p2 = s[-3], p1 = s[-2], p0 = s[-1];
const uint16_t q0 = s[0], q1 = s[1], q2 = s[2], q3 = s[3];
@@ -665,7 +708,7 @@ void aom_highbd_lpf_vertical_4_dual_c(
bd);
}
-static INLINE void highbd_filter8(int8_t mask, uint8_t thresh, uint8_t flat,
+static INLINE void highbd_filter8(int8_t mask, uint8_t thresh, int8_t flat,
uint16_t *op3, uint16_t *op2, uint16_t *op1,
uint16_t *op0, uint16_t *oq0, uint16_t *oq1,
uint16_t *oq2, uint16_t *oq3, int bd) {
@@ -689,10 +732,15 @@ void aom_highbd_lpf_horizontal_8_c(uint16_t *s, int p, const uint8_t *blimit,
const uint8_t *limit, const uint8_t *thresh,
int bd) {
int i;
+#if CONFIG_PARALLEL_DEBLOCKING && CONFIG_CB4X4
+ int count = 4;
+#else
+ int count = 8;
+#endif
// loop filter designed to work using chars so that we can make maximum use
// of 8 bit simd instructions.
- for (i = 0; i < 8; ++i) {
+ for (i = 0; i < count; ++i) {
const uint16_t p3 = s[-4 * p], p2 = s[-3 * p], p1 = s[-2 * p], p0 = s[-p];
const uint16_t q0 = s[0 * p], q1 = s[1 * p], q2 = s[2 * p], q3 = s[3 * p];
@@ -718,8 +766,13 @@ void aom_highbd_lpf_vertical_8_c(uint16_t *s, int pitch, const uint8_t *blimit,
const uint8_t *limit, const uint8_t *thresh,
int bd) {
int i;
+#if CONFIG_PARALLEL_DEBLOCKING && CONFIG_CB4X4
+ int count = 4;
+#else
+ int count = 8;
+#endif
- for (i = 0; i < 8; ++i) {
+ for (i = 0; i < count; ++i) {
const uint16_t p3 = s[-4], p2 = s[-3], p1 = s[-2], p0 = s[-1];
const uint16_t q0 = s[0], q1 = s[1], q2 = s[2], q3 = s[3];
const int8_t mask =
@@ -741,8 +794,8 @@ void aom_highbd_lpf_vertical_8_dual_c(
bd);
}
-static INLINE void highbd_filter16(int8_t mask, uint8_t thresh, uint8_t flat,
- uint8_t flat2, uint16_t *op7, uint16_t *op6,
+static INLINE void highbd_filter16(int8_t mask, uint8_t thresh, int8_t flat,
+ int8_t flat2, uint16_t *op7, uint16_t *op6,
uint16_t *op5, uint16_t *op4, uint16_t *op3,
uint16_t *op2, uint16_t *op1, uint16_t *op0,
uint16_t *oq0, uint16_t *oq1, uint16_t *oq2,
@@ -813,10 +866,15 @@ static void highbd_mb_lpf_horizontal_edge_w(uint16_t *s, int p,
const uint8_t *thresh, int count,
int bd) {
int i;
+#if CONFIG_PARALLEL_DEBLOCKING && CONFIG_CB4X4
+ int step = 4;
+#else
+ int step = 8;
+#endif
// loop filter designed to work using chars so that we can make maximum use
// of 8 bit simd instructions.
- for (i = 0; i < 8 * count; ++i) {
+ for (i = 0; i < step * count; ++i) {
const uint16_t p3 = s[-4 * p];
const uint16_t p2 = s[-3 * p];
const uint16_t p1 = s[-2 * p];
@@ -852,7 +910,11 @@ void aom_highbd_lpf_horizontal_edge_16_c(uint16_t *s, int p,
const uint8_t *blimit,
const uint8_t *limit,
const uint8_t *thresh, int bd) {
+#if CONFIG_PARALLEL_DEBLOCKING && CONFIG_CB4X4
+ highbd_mb_lpf_horizontal_edge_w(s, p, blimit, limit, thresh, 1, bd);
+#else
highbd_mb_lpf_horizontal_edge_w(s, p, blimit, limit, thresh, 2, bd);
+#endif
}
static void highbd_mb_lpf_vertical_edge_w(uint16_t *s, int p,
@@ -888,13 +950,21 @@ static void highbd_mb_lpf_vertical_edge_w(uint16_t *s, int p,
void aom_highbd_lpf_vertical_16_c(uint16_t *s, int p, const uint8_t *blimit,
const uint8_t *limit, const uint8_t *thresh,
int bd) {
+#if CONFIG_PARALLEL_DEBLOCKING && CONFIG_CB4X4
+ highbd_mb_lpf_vertical_edge_w(s, p, blimit, limit, thresh, 4, bd);
+#else
highbd_mb_lpf_vertical_edge_w(s, p, blimit, limit, thresh, 8, bd);
+#endif
}
void aom_highbd_lpf_vertical_16_dual_c(uint16_t *s, int p,
const uint8_t *blimit,
const uint8_t *limit,
const uint8_t *thresh, int bd) {
+#if CONFIG_PARALLEL_DEBLOCKING && CONFIG_CB4X4
+ highbd_mb_lpf_vertical_edge_w(s, p, blimit, limit, thresh, 8, bd);
+#else
highbd_mb_lpf_vertical_edge_w(s, p, blimit, limit, thresh, 16, bd);
+#endif
}
#endif // CONFIG_HIGHBITDEPTH
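
Every loop filter in this file now selects its edge length with the same compile-time switch. The idiom, factored into a helper for clarity (a sketch only — the patch itself keeps the #if inline in each function):

static inline int lf_edge_samples(void) {
#if CONFIG_PARALLEL_DEBLOCKING && CONFIG_CB4X4
  return 4;  /* 4x4 transform units: filter 4 samples along each edge */
#else
  return 8;  /* legacy 8x8 deblocking unit */
#endif
}
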
diff --git a/third_party/aom/aom_dsp/mips/avg_msa.c b/third_party/aom/aom_dsp/mips/avg_msa.c
deleted file mode 100644
index 0e17281553..0000000000
--- a/third_party/aom/aom_dsp/mips/avg_msa.c
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * Copyright (c) 2016, Alliance for Open Media. All rights reserved
- *
- * This source code is subject to the terms of the BSD 2 Clause License and
- * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
- * was not distributed with this source code in the LICENSE file, you can
- * obtain it at www.aomedia.org/license/software. If the Alliance for Open
- * Media Patent License 1.0 was not distributed with this source code in the
- * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
- */
-
-#include "./aom_dsp_rtcd.h"
-#include "aom_dsp/mips/macros_msa.h"
-
-uint32_t aom_avg_8x8_msa(const uint8_t *src, int32_t src_stride) {
- uint32_t sum_out;
- v16u8 src0, src1, src2, src3, src4, src5, src6, src7;
- v8u16 sum0, sum1, sum2, sum3, sum4, sum5, sum6, sum7;
- v4u32 sum = { 0 };
-
- LD_UB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7);
- HADD_UB4_UH(src0, src1, src2, src3, sum0, sum1, sum2, sum3);
- HADD_UB4_UH(src4, src5, src6, src7, sum4, sum5, sum6, sum7);
- ADD4(sum0, sum1, sum2, sum3, sum4, sum5, sum6, sum7, sum0, sum2, sum4, sum6);
- ADD2(sum0, sum2, sum4, sum6, sum0, sum4);
- sum0 += sum4;
-
- sum = __msa_hadd_u_w(sum0, sum0);
- sum0 = (v8u16)__msa_pckev_h((v8i16)sum, (v8i16)sum);
- sum = __msa_hadd_u_w(sum0, sum0);
- sum = (v4u32)__msa_srari_w((v4i32)sum, 6);
- sum_out = __msa_copy_u_w((v4i32)sum, 0);
-
- return sum_out;
-}
-
-uint32_t aom_avg_4x4_msa(const uint8_t *src, int32_t src_stride) {
- uint32_t sum_out;
- uint32_t src0, src1, src2, src3;
- v16u8 vec = { 0 };
- v8u16 sum0;
- v4u32 sum1;
- v2u64 sum2;
-
- LW4(src, src_stride, src0, src1, src2, src3);
- INSERT_W4_UB(src0, src1, src2, src3, vec);
-
- sum0 = __msa_hadd_u_h(vec, vec);
- sum1 = __msa_hadd_u_w(sum0, sum0);
- sum0 = (v8u16)__msa_pckev_h((v8i16)sum1, (v8i16)sum1);
- sum1 = __msa_hadd_u_w(sum0, sum0);
- sum2 = __msa_hadd_u_d(sum1, sum1);
- sum1 = (v4u32)__msa_srari_w((v4i32)sum2, 4);
- sum_out = __msa_copy_u_w((v4i32)sum1, 0);
-
- return sum_out;
-}
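
For reference, the two removed MSA kernels compute the rounded mean of a block; __msa_srari_w(sum, n) is a round-to-nearest right shift. A scalar equivalent (a sketch, not part of the patch):

static uint32_t block_avg_ref(const uint8_t *src, int stride, int n) {
  uint32_t sum = 0;
  for (int r = 0; r < n; ++r)
    for (int c = 0; c < n; ++c) sum += src[r * stride + c];
  const int bits = (n == 8) ? 6 : 4;          /* log2(n * n) */
  return (sum + (1u << (bits - 1))) >> bits;  /* rounded average */
}
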
diff --git a/third_party/aom/aom_dsp/prob.c b/third_party/aom/aom_dsp/prob.c
index c60bfdac5b..eefe7521f0 100644
--- a/third_party/aom/aom_dsp/prob.c
+++ b/third_party/aom/aom_dsp/prob.c
@@ -11,25 +11,10 @@
#include "./aom_config.h"
-#if CONFIG_EC_MULTISYMBOL
#include <string.h>
-#endif
#include "aom_dsp/prob.h"
-const uint8_t aom_norm[256] = {
- 0, 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
- 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
- 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
-};
-
static unsigned int tree_merge_probs_impl(unsigned int i,
const aom_tree_index *tree,
const aom_prob *pre_probs,
@@ -53,7 +38,6 @@ void aom_tree_merge_probs(const aom_tree_index *tree, const aom_prob *pre_probs,
tree_merge_probs_impl(0, tree, pre_probs, counts, probs);
}
-#if CONFIG_EC_MULTISYMBOL
typedef struct tree_node tree_node;
struct tree_node {
@@ -233,4 +217,3 @@ void av1_indices_from_tree(int *ind, int *inv, const aom_tree_index *tree) {
int stack_index = 0;
tree_to_index(&stack_index, ind, inv, tree, 0, 0);
}
-#endif
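
The deleted aom_norm[] table maps a value v in 1..255 to the left shift that normalizes it into 128..255 — equivalently 7 - floor(log2(v)), with entry 0 a placeholder. A scalar sketch of the same mapping:

static int norm_shift(unsigned v) {
  int n = 0;
  while (v < 128) {  /* assumes 1 <= v <= 255 */
    v <<= 1;
    ++n;
  }
  return n;  /* norm_shift(1) == 7, norm_shift(128..255) == 0 */
}
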
diff --git a/third_party/aom/aom_dsp/prob.h b/third_party/aom/aom_dsp/prob.h
index 8085929238..ec6654ab7f 100644
--- a/third_party/aom/aom_dsp/prob.h
+++ b/third_party/aom/aom_dsp/prob.h
@@ -20,7 +20,7 @@
#include "aom_ports/bitops.h"
#include "aom_ports/mem.h"
-#if CONFIG_DAALA_EC
+#if !CONFIG_ANS
#include "aom_dsp/entcode.h"
#endif
@@ -33,14 +33,12 @@ typedef uint8_t aom_prob;
// TODO(negge): Rename this aom_prob once we remove vpxbool.
typedef uint16_t aom_cdf_prob;
-#if CONFIG_EC_MULTISYMBOL
#define CDF_SIZE(x) ((x) + 1)
-#endif
#define CDF_PROB_BITS 15
#define CDF_PROB_TOP (1 << CDF_PROB_BITS)
-#if CONFIG_DAALA_EC
+#if !CONFIG_ANS
#define AOM_ICDF OD_ICDF
#else
#define AOM_ICDF(x) (x)
@@ -117,7 +115,6 @@ static INLINE aom_prob mode_mv_merge_probs(aom_prob pre_prob,
void aom_tree_merge_probs(const aom_tree_index *tree, const aom_prob *pre_probs,
const unsigned int *counts, aom_prob *probs);
-#if CONFIG_EC_MULTISYMBOL
int tree_to_cdf(const aom_tree_index *tree, const aom_prob *probs,
aom_tree_index root, aom_cdf_prob *cdf, aom_tree_index *ind,
int *pth, int *len);
@@ -150,9 +147,6 @@ static INLINE void av1_tree_to_cdf(const aom_tree_index *tree,
} while (0)
void av1_indices_from_tree(int *ind, int *inv, const aom_tree_index *tree);
-#endif
-
-DECLARE_ALIGNED(16, extern const uint8_t, aom_norm[256]);
#if CONFIG_EC_ADAPT
static INLINE void update_cdf(aom_cdf_prob *cdf, int val, int nsymbs) {
@@ -165,7 +159,7 @@ static INLINE void update_cdf(aom_cdf_prob *cdf, int val, int nsymbs) {
tmp = AOM_ICDF(tmp0);
diff = ((CDF_PROB_TOP - (nsymbs << rate2)) >> rate) << rate;
// Single loop (faster)
-#if CONFIG_DAALA_EC && CONFIG_EC_SMALLMUL
+#if !CONFIG_ANS && CONFIG_EC_SMALLMUL
for (i = 0; i < nsymbs - 1; ++i, tmp -= tmp0) {
tmp -= (i == val ? diff : 0);
cdf[i] += ((tmp - cdf[i]) >> rate);
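
At its core, the update_cdf() loop retained above is a per-boundary exponential moving average toward the target distribution implied by the coded symbol: cdf[i] += (target_i - cdf[i]) >> rate. A stripped-down sketch of that recurrence (the real function additionally works in the AOM_ICDF representation and reserves a minimum probability per symbol via diff):

static void cdf_ema_step(uint16_t *cdf, int nsymbs, const int *target,
                         int rate) {
  for (int i = 0; i < nsymbs - 1; ++i) {
    /* relies on arithmetic right shift of negative ints, as the real
     * update does */
    cdf[i] = (uint16_t)(cdf[i] + ((target[i] - cdf[i]) >> rate));
  }
}
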
diff --git a/third_party/aom/aom_dsp/sad.c b/third_party/aom/aom_dsp/sad.c
index 3e10705195..2cc172ba57 100644
--- a/third_party/aom/aom_dsp/sad.c
+++ b/third_party/aom/aom_dsp/sad.c
@@ -16,6 +16,7 @@
#include "aom/aom_integer.h"
#include "aom_ports/mem.h"
+#include "aom_dsp/blend.h"
/* Sum the difference between every corresponding element of the buffers. */
static INLINE unsigned int sad(const uint8_t *a, int a_stride, const uint8_t *b,
@@ -311,15 +312,20 @@ highbd_sadMxNx4D(4, 4)
#if CONFIG_AV1 && CONFIG_EXT_INTER
static INLINE
- unsigned int masked_sad(const uint8_t *a, int a_stride, const uint8_t *b,
+ unsigned int masked_sad(const uint8_t *src, int src_stride,
+ const uint8_t *a, int a_stride, const uint8_t *b,
int b_stride, const uint8_t *m, int m_stride,
int width, int height) {
int y, x;
unsigned int sad = 0;
for (y = 0; y < height; y++) {
- for (x = 0; x < width; x++) sad += m[x] * abs(a[x] - b[x]);
+ for (x = 0; x < width; x++) {
+ const uint8_t pred = AOM_BLEND_A64(m[x], a[x], b[x]);
+ sad += abs(pred - src[x]);
+ }
+ src += src_stride;
a += a_stride;
b += b_stride;
m += m_stride;
@@ -329,12 +335,17 @@ highbd_sadMxNx4D(4, 4)
return sad;
}
-#define MASKSADMxN(m, n) \
- unsigned int aom_masked_sad##m##x##n##_c( \
- const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, \
- const uint8_t *msk, int msk_stride) { \
- return masked_sad(src, src_stride, ref, ref_stride, msk, msk_stride, m, \
- n); \
+#define MASKSADMxN(m, n) \
+ unsigned int aom_masked_sad##m##x##n##_c( \
+ const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, \
+ const uint8_t *second_pred, const uint8_t *msk, int msk_stride, \
+ int invert_mask) { \
+ if (!invert_mask) \
+ return masked_sad(src, src_stride, ref, ref_stride, second_pred, m, msk, \
+ msk_stride, m, n); \
+ else \
+ return masked_sad(src, src_stride, second_pred, m, ref, ref_stride, msk, \
+ msk_stride, m, n); \
}
/* clang-format off */
@@ -360,18 +371,24 @@ MASKSADMxN(4, 4)
#if CONFIG_HIGHBITDEPTH
static INLINE
- unsigned int highbd_masked_sad(const uint8_t *a8, int a_stride,
+ unsigned int highbd_masked_sad(const uint8_t *src8, int src_stride,
+ const uint8_t *a8, int a_stride,
const uint8_t *b8, int b_stride,
const uint8_t *m, int m_stride, int width,
int height) {
int y, x;
unsigned int sad = 0;
+ const uint16_t *src = CONVERT_TO_SHORTPTR(src8);
const uint16_t *a = CONVERT_TO_SHORTPTR(a8);
const uint16_t *b = CONVERT_TO_SHORTPTR(b8);
for (y = 0; y < height; y++) {
- for (x = 0; x < width; x++) sad += m[x] * abs(a[x] - b[x]);
+ for (x = 0; x < width; x++) {
+ const uint16_t pred = AOM_BLEND_A64(m[x], a[x], b[x]);
+ sad += abs(pred - src[x]);
+ }
+ src += src_stride;
a += a_stride;
b += b_stride;
m += m_stride;
@@ -381,12 +398,17 @@ MASKSADMxN(4, 4)
return sad;
}
-#define HIGHBD_MASKSADMXN(m, n) \
- unsigned int aom_highbd_masked_sad##m##x##n##_c( \
- const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, \
- const uint8_t *msk, int msk_stride) { \
- return highbd_masked_sad(src, src_stride, ref, ref_stride, msk, \
- msk_stride, m, n); \
+#define HIGHBD_MASKSADMXN(m, n) \
+ unsigned int aom_highbd_masked_sad##m##x##n##_c( \
+ const uint8_t *src8, int src_stride, const uint8_t *ref8, \
+ int ref_stride, const uint8_t *second_pred8, const uint8_t *msk, \
+ int msk_stride, int invert_mask) { \
+ if (!invert_mask) \
+ return highbd_masked_sad(src8, src_stride, ref8, ref_stride, \
+ second_pred8, m, msk, msk_stride, m, n); \
+ else \
+ return highbd_masked_sad(src8, src_stride, second_pred8, m, ref8, \
+ ref_stride, msk, msk_stride, m, n); \
}
#if CONFIG_EXT_PARTITION
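
AOM_BLEND_A64, which the rewritten masked SAD now depends on, is a 6-bit alpha blend with mask values in 0..64: blend(m, a, b) = (m*a + (64 - m)*b + 32) >> 6. The new code therefore forms the compound prediction first and sums |pred - src|, where the old code summed the mask-weighted difference m * |a - b| without consulting the source at all. A scalar sketch of one row of the new computation:

static unsigned int masked_sad_row_ref(const uint8_t *src, const uint8_t *a,
                                       const uint8_t *b, const uint8_t *m,
                                       int width) {
  unsigned int sad = 0;
  for (int x = 0; x < width; ++x) {
    const int pred = (m[x] * a[x] + (64 - m[x]) * b[x] + 32) >> 6;
    const int d = pred - src[x];
    sad += (unsigned int)(d < 0 ? -d : d);  /* |pred - src| */
  }
  return sad;
}
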
diff --git a/third_party/aom/aom_dsp/simd/v64_intrinsics.h b/third_party/aom/aom_dsp/simd/v64_intrinsics.h
index ee2b683a44..5c0042d8c6 100644
--- a/third_party/aom/aom_dsp/simd/v64_intrinsics.h
+++ b/third_party/aom/aom_dsp/simd/v64_intrinsics.h
@@ -60,7 +60,9 @@ SIMD_INLINE void v64_store_aligned(void *p, v64 a) {
c_v64_store_aligned(p, a);
}
-SIMD_INLINE v64 v64_align(v64 a, v64 b, c) { return c_v64_align(a, b, c); }
+SIMD_INLINE v64 v64_align(v64 a, v64 b, unsigned int c) {
+ return c_v64_align(a, b, c);
+}
SIMD_INLINE v64 v64_zero() { return c_v64_zero(); }
SIMD_INLINE v64 v64_dup_8(uint8_t x) { return c_v64_dup_8(x); }
diff --git a/third_party/aom/aom_dsp/variance.c b/third_party/aom/aom_dsp/variance.c
index 9fc0db783f..79677c92f9 100644
--- a/third_party/aom/aom_dsp/variance.c
+++ b/third_party/aom/aom_dsp/variance.c
@@ -18,6 +18,7 @@
#include "aom_dsp/variance.h"
#include "aom_dsp/aom_filter.h"
+#include "aom_dsp/blend.h"
uint32_t aom_get4x4sse_cs_c(const uint8_t *a, int a_stride, const uint8_t *b,
int b_stride) {
@@ -672,297 +673,215 @@ void aom_highbd_comp_avg_upsampled_pred_c(uint16_t *comp_pred,
#endif // CONFIG_HIGHBITDEPTH
#if CONFIG_AV1 && CONFIG_EXT_INTER
-void masked_variance(const uint8_t *a, int a_stride, const uint8_t *b,
- int b_stride, const uint8_t *m, int m_stride, int w, int h,
- unsigned int *sse, int *sum) {
+void aom_comp_mask_pred_c(uint8_t *comp_pred, const uint8_t *pred, int width,
+ int height, const uint8_t *ref, int ref_stride,
+ const uint8_t *mask, int mask_stride,
+ int invert_mask) {
int i, j;
- int64_t sum64 = 0;
- uint64_t sse64 = 0;
-
- for (i = 0; i < h; i++) {
- for (j = 0; j < w; j++) {
- const int diff = (a[j] - b[j]) * (m[j]);
- sum64 += diff;
- sse64 += diff * diff;
+ for (i = 0; i < height; ++i) {
+ for (j = 0; j < width; ++j) {
+ if (!invert_mask)
+ comp_pred[j] = AOM_BLEND_A64(mask[j], ref[j], pred[j]);
+ else
+ comp_pred[j] = AOM_BLEND_A64(mask[j], pred[j], ref[j]);
}
-
- a += a_stride;
- b += b_stride;
- m += m_stride;
+ comp_pred += width;
+ pred += width;
+ ref += ref_stride;
+ mask += mask_stride;
}
- sum64 = (sum64 >= 0) ? sum64 : -sum64;
- *sum = (int)ROUND_POWER_OF_TWO(sum64, 6);
- *sse = (uint32_t)ROUND_POWER_OF_TWO(sse64, 12);
}
-#define MASK_VAR(W, H) \
- unsigned int aom_masked_variance##W##x##H##_c( \
- const uint8_t *a, int a_stride, const uint8_t *b, int b_stride, \
- const uint8_t *m, int m_stride, unsigned int *sse) { \
- int sum; \
- masked_variance(a, a_stride, b, b_stride, m, m_stride, W, H, sse, &sum); \
- return *sse - (unsigned int)(((int64_t)sum * sum) / (W * H)); \
+void aom_comp_mask_upsampled_pred_c(uint8_t *comp_pred, const uint8_t *pred,
+ int width, int height, const uint8_t *ref,
+ int ref_stride, const uint8_t *mask,
+ int mask_stride, int invert_mask) {
+ int i, j;
+ int stride = ref_stride << 3;
+
+ for (i = 0; i < height; i++) {
+ for (j = 0; j < width; j++) {
+ if (!invert_mask)
+ comp_pred[j] = AOM_BLEND_A64(mask[j], ref[(j << 3)], pred[j]);
+ else
+ comp_pred[j] = AOM_BLEND_A64(mask[j], pred[j], ref[(j << 3)]);
+ }
+ comp_pred += width;
+ pred += width;
+ ref += stride;
+ mask += mask_stride;
}
+}
#define MASK_SUBPIX_VAR(W, H) \
unsigned int aom_masked_sub_pixel_variance##W##x##H##_c( \
const uint8_t *src, int src_stride, int xoffset, int yoffset, \
- const uint8_t *dst, int dst_stride, const uint8_t *msk, int msk_stride, \
+ const uint8_t *ref, int ref_stride, const uint8_t *second_pred, \
+ const uint8_t *msk, int msk_stride, int invert_mask, \
unsigned int *sse) { \
uint16_t fdata3[(H + 1) * W]; \
uint8_t temp2[H * W]; \
+ DECLARE_ALIGNED(16, uint8_t, temp3[H * W]); \
\
var_filter_block2d_bil_first_pass(src, fdata3, src_stride, 1, H + 1, W, \
bilinear_filters_2t[xoffset]); \
var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W, \
bilinear_filters_2t[yoffset]); \
\
- return aom_masked_variance##W##x##H##_c(temp2, W, dst, dst_stride, msk, \
- msk_stride, sse); \
+ aom_comp_mask_pred_c(temp3, second_pred, W, H, temp2, W, msk, msk_stride, \
+ invert_mask); \
+ return aom_variance##W##x##H##_c(temp3, W, ref, ref_stride, sse); \
}
-MASK_VAR(4, 4)
MASK_SUBPIX_VAR(4, 4)
-
-MASK_VAR(4, 8)
MASK_SUBPIX_VAR(4, 8)
-
-MASK_VAR(8, 4)
MASK_SUBPIX_VAR(8, 4)
-
-MASK_VAR(8, 8)
MASK_SUBPIX_VAR(8, 8)
-
-MASK_VAR(8, 16)
MASK_SUBPIX_VAR(8, 16)
-
-MASK_VAR(16, 8)
MASK_SUBPIX_VAR(16, 8)
-
-MASK_VAR(16, 16)
MASK_SUBPIX_VAR(16, 16)
-
-MASK_VAR(16, 32)
MASK_SUBPIX_VAR(16, 32)
-
-MASK_VAR(32, 16)
MASK_SUBPIX_VAR(32, 16)
-
-MASK_VAR(32, 32)
MASK_SUBPIX_VAR(32, 32)
-
-MASK_VAR(32, 64)
MASK_SUBPIX_VAR(32, 64)
-
-MASK_VAR(64, 32)
MASK_SUBPIX_VAR(64, 32)
-
-MASK_VAR(64, 64)
MASK_SUBPIX_VAR(64, 64)
-
#if CONFIG_EXT_PARTITION
-MASK_VAR(64, 128)
MASK_SUBPIX_VAR(64, 128)
-
-MASK_VAR(128, 64)
MASK_SUBPIX_VAR(128, 64)
-
-MASK_VAR(128, 128)
MASK_SUBPIX_VAR(128, 128)
#endif // CONFIG_EXT_PARTITION
#if CONFIG_HIGHBITDEPTH
-void highbd_masked_variance64(const uint8_t *a8, int a_stride,
- const uint8_t *b8, int b_stride, const uint8_t *m,
- int m_stride, int w, int h, uint64_t *sse,
- int64_t *sum) {
+void aom_highbd_comp_mask_pred_c(uint16_t *comp_pred, const uint8_t *pred8,
+ int width, int height, const uint8_t *ref8,
+ int ref_stride, const uint8_t *mask,
+ int mask_stride, int invert_mask) {
int i, j;
- uint16_t *a = CONVERT_TO_SHORTPTR(a8);
- uint16_t *b = CONVERT_TO_SHORTPTR(b8);
-
- *sum = 0;
- *sse = 0;
-
- for (i = 0; i < h; i++) {
- for (j = 0; j < w; j++) {
- const int diff = (a[j] - b[j]) * (m[j]);
- *sum += (int64_t)diff;
- *sse += (int64_t)diff * diff;
+ uint16_t *pred = CONVERT_TO_SHORTPTR(pred8);
+ uint16_t *ref = CONVERT_TO_SHORTPTR(ref8);
+ for (i = 0; i < height; ++i) {
+ for (j = 0; j < width; ++j) {
+ if (!invert_mask)
+ comp_pred[j] = AOM_BLEND_A64(mask[j], ref[j], pred[j]);
+ else
+ comp_pred[j] = AOM_BLEND_A64(mask[j], pred[j], ref[j]);
}
-
- a += a_stride;
- b += b_stride;
- m += m_stride;
+ comp_pred += width;
+ pred += width;
+ ref += ref_stride;
+ mask += mask_stride;
}
- *sum = (*sum >= 0) ? *sum : -*sum;
- *sum = ROUND_POWER_OF_TWO(*sum, 6);
- *sse = ROUND_POWER_OF_TWO(*sse, 12);
}
-void highbd_masked_variance(const uint8_t *a8, int a_stride, const uint8_t *b8,
- int b_stride, const uint8_t *m, int m_stride, int w,
- int h, unsigned int *sse, int *sum) {
- int64_t sum64;
- uint64_t sse64;
- highbd_masked_variance64(a8, a_stride, b8, b_stride, m, m_stride, w, h,
- &sse64, &sum64);
- *sum = (int)sum64;
- *sse = (unsigned int)sse64;
-}
-
-void highbd_10_masked_variance(const uint8_t *a8, int a_stride,
- const uint8_t *b8, int b_stride,
- const uint8_t *m, int m_stride, int w, int h,
- unsigned int *sse, int *sum) {
- int64_t sum64;
- uint64_t sse64;
- highbd_masked_variance64(a8, a_stride, b8, b_stride, m, m_stride, w, h,
- &sse64, &sum64);
- *sum = (int)ROUND_POWER_OF_TWO(sum64, 2);
- *sse = (unsigned int)ROUND_POWER_OF_TWO(sse64, 4);
-}
-
-void highbd_12_masked_variance(const uint8_t *a8, int a_stride,
- const uint8_t *b8, int b_stride,
- const uint8_t *m, int m_stride, int w, int h,
- unsigned int *sse, int *sum) {
- int64_t sum64;
- uint64_t sse64;
- highbd_masked_variance64(a8, a_stride, b8, b_stride, m, m_stride, w, h,
- &sse64, &sum64);
- *sum = (int)ROUND_POWER_OF_TWO(sum64, 4);
- *sse = (unsigned int)ROUND_POWER_OF_TWO(sse64, 8);
-}
+void aom_highbd_comp_mask_upsampled_pred_c(uint16_t *comp_pred,
+ const uint8_t *pred8, int width,
+ int height, const uint8_t *ref8,
+ int ref_stride, const uint8_t *mask,
+ int mask_stride, int invert_mask) {
+ int i, j;
+ int stride = ref_stride << 3;
-#define HIGHBD_MASK_VAR(W, H) \
- unsigned int aom_highbd_masked_variance##W##x##H##_c( \
- const uint8_t *a, int a_stride, const uint8_t *b, int b_stride, \
- const uint8_t *m, int m_stride, unsigned int *sse) { \
- int sum; \
- highbd_masked_variance(a, a_stride, b, b_stride, m, m_stride, W, H, sse, \
- &sum); \
- return *sse - (unsigned int)(((int64_t)sum * sum) / (W * H)); \
- } \
- \
- unsigned int aom_highbd_10_masked_variance##W##x##H##_c( \
- const uint8_t *a, int a_stride, const uint8_t *b, int b_stride, \
- const uint8_t *m, int m_stride, unsigned int *sse) { \
- int sum; \
- int64_t var; \
- highbd_10_masked_variance(a, a_stride, b, b_stride, m, m_stride, W, H, \
- sse, &sum); \
- var = (int64_t)(*sse) - (((int64_t)sum * sum) / (W * H)); \
- return (var >= 0) ? (uint32_t)var : 0; \
- } \
- \
- unsigned int aom_highbd_12_masked_variance##W##x##H##_c( \
- const uint8_t *a, int a_stride, const uint8_t *b, int b_stride, \
- const uint8_t *m, int m_stride, unsigned int *sse) { \
- int sum; \
- int64_t var; \
- highbd_12_masked_variance(a, a_stride, b, b_stride, m, m_stride, W, H, \
- sse, &sum); \
- var = (int64_t)(*sse) - (((int64_t)sum * sum) / (W * H)); \
- return (var >= 0) ? (uint32_t)var : 0; \
+ uint16_t *pred = CONVERT_TO_SHORTPTR(pred8);
+ uint16_t *ref = CONVERT_TO_SHORTPTR(ref8);
+ for (i = 0; i < height; ++i) {
+ for (j = 0; j < width; ++j) {
+ if (!invert_mask)
+ comp_pred[j] = AOM_BLEND_A64(mask[j], ref[j << 3], pred[j]);
+ else
+ comp_pred[j] = AOM_BLEND_A64(mask[j], pred[j], ref[j << 3]);
+ }
+ comp_pred += width;
+ pred += width;
+ ref += stride;
+ mask += mask_stride;
}
+}
-#define HIGHBD_MASK_SUBPIX_VAR(W, H) \
- unsigned int aom_highbd_masked_sub_pixel_variance##W##x##H##_c( \
- const uint8_t *src, int src_stride, int xoffset, int yoffset, \
- const uint8_t *dst, int dst_stride, const uint8_t *msk, int msk_stride, \
- unsigned int *sse) { \
- uint16_t fdata3[(H + 1) * W]; \
- uint16_t temp2[H * W]; \
- \
- aom_highbd_var_filter_block2d_bil_first_pass( \
- src, fdata3, src_stride, 1, H + 1, W, bilinear_filters_2t[xoffset]); \
- aom_highbd_var_filter_block2d_bil_second_pass( \
- fdata3, temp2, W, W, H, W, bilinear_filters_2t[yoffset]); \
- \
- return aom_highbd_masked_variance##W##x##H##_c( \
- CONVERT_TO_BYTEPTR(temp2), W, dst, dst_stride, msk, msk_stride, sse); \
- } \
- \
- unsigned int aom_highbd_10_masked_sub_pixel_variance##W##x##H##_c( \
- const uint8_t *src, int src_stride, int xoffset, int yoffset, \
- const uint8_t *dst, int dst_stride, const uint8_t *msk, int msk_stride, \
- unsigned int *sse) { \
- uint16_t fdata3[(H + 1) * W]; \
- uint16_t temp2[H * W]; \
- \
- aom_highbd_var_filter_block2d_bil_first_pass( \
- src, fdata3, src_stride, 1, H + 1, W, bilinear_filters_2t[xoffset]); \
- aom_highbd_var_filter_block2d_bil_second_pass( \
- fdata3, temp2, W, W, H, W, bilinear_filters_2t[yoffset]); \
- \
- return aom_highbd_10_masked_variance##W##x##H##_c( \
- CONVERT_TO_BYTEPTR(temp2), W, dst, dst_stride, msk, msk_stride, sse); \
- } \
- \
- unsigned int aom_highbd_12_masked_sub_pixel_variance##W##x##H##_c( \
- const uint8_t *src, int src_stride, int xoffset, int yoffset, \
- const uint8_t *dst, int dst_stride, const uint8_t *msk, int msk_stride, \
- unsigned int *sse) { \
- uint16_t fdata3[(H + 1) * W]; \
- uint16_t temp2[H * W]; \
- \
- aom_highbd_var_filter_block2d_bil_first_pass( \
- src, fdata3, src_stride, 1, H + 1, W, bilinear_filters_2t[xoffset]); \
- aom_highbd_var_filter_block2d_bil_second_pass( \
- fdata3, temp2, W, W, H, W, bilinear_filters_2t[yoffset]); \
- \
- return aom_highbd_12_masked_variance##W##x##H##_c( \
- CONVERT_TO_BYTEPTR(temp2), W, dst, dst_stride, msk, msk_stride, sse); \
+#define HIGHBD_MASK_SUBPIX_VAR(W, H) \
+ unsigned int aom_highbd_8_masked_sub_pixel_variance##W##x##H##_c( \
+ const uint8_t *src, int src_stride, int xoffset, int yoffset, \
+ const uint8_t *ref, int ref_stride, const uint8_t *second_pred, \
+ const uint8_t *msk, int msk_stride, int invert_mask, \
+ unsigned int *sse) { \
+ uint16_t fdata3[(H + 1) * W]; \
+ uint16_t temp2[H * W]; \
+ DECLARE_ALIGNED(16, uint16_t, temp3[H * W]); \
+ \
+ aom_highbd_var_filter_block2d_bil_first_pass( \
+ src, fdata3, src_stride, 1, H + 1, W, bilinear_filters_2t[xoffset]); \
+ aom_highbd_var_filter_block2d_bil_second_pass( \
+ fdata3, temp2, W, W, H, W, bilinear_filters_2t[yoffset]); \
+ \
+ aom_highbd_comp_mask_pred_c(temp3, second_pred, W, H, \
+ CONVERT_TO_BYTEPTR(temp2), W, msk, msk_stride, \
+ invert_mask); \
+ \
+ return aom_highbd_8_variance##W##x##H##_c(CONVERT_TO_BYTEPTR(temp3), W, \
+ ref, ref_stride, sse); \
+ } \
+ \
+ unsigned int aom_highbd_10_masked_sub_pixel_variance##W##x##H##_c( \
+ const uint8_t *src, int src_stride, int xoffset, int yoffset, \
+ const uint8_t *ref, int ref_stride, const uint8_t *second_pred, \
+ const uint8_t *msk, int msk_stride, int invert_mask, \
+ unsigned int *sse) { \
+ uint16_t fdata3[(H + 1) * W]; \
+ uint16_t temp2[H * W]; \
+ DECLARE_ALIGNED(16, uint16_t, temp3[H * W]); \
+ \
+ aom_highbd_var_filter_block2d_bil_first_pass( \
+ src, fdata3, src_stride, 1, H + 1, W, bilinear_filters_2t[xoffset]); \
+ aom_highbd_var_filter_block2d_bil_second_pass( \
+ fdata3, temp2, W, W, H, W, bilinear_filters_2t[yoffset]); \
+ \
+ aom_highbd_comp_mask_pred_c(temp3, second_pred, W, H, \
+ CONVERT_TO_BYTEPTR(temp2), W, msk, msk_stride, \
+ invert_mask); \
+ \
+ return aom_highbd_10_variance##W##x##H##_c(CONVERT_TO_BYTEPTR(temp3), W, \
+ ref, ref_stride, sse); \
+ } \
+ \
+ unsigned int aom_highbd_12_masked_sub_pixel_variance##W##x##H##_c( \
+ const uint8_t *src, int src_stride, int xoffset, int yoffset, \
+ const uint8_t *ref, int ref_stride, const uint8_t *second_pred, \
+ const uint8_t *msk, int msk_stride, int invert_mask, \
+ unsigned int *sse) { \
+ uint16_t fdata3[(H + 1) * W]; \
+ uint16_t temp2[H * W]; \
+ DECLARE_ALIGNED(16, uint16_t, temp3[H * W]); \
+ \
+ aom_highbd_var_filter_block2d_bil_first_pass( \
+ src, fdata3, src_stride, 1, H + 1, W, bilinear_filters_2t[xoffset]); \
+ aom_highbd_var_filter_block2d_bil_second_pass( \
+ fdata3, temp2, W, W, H, W, bilinear_filters_2t[yoffset]); \
+ \
+ aom_highbd_comp_mask_pred_c(temp3, second_pred, W, H, \
+ CONVERT_TO_BYTEPTR(temp2), W, msk, msk_stride, \
+ invert_mask); \
+ \
+ return aom_highbd_12_variance##W##x##H##_c(CONVERT_TO_BYTEPTR(temp3), W, \
+ ref, ref_stride, sse); \
}
-HIGHBD_MASK_VAR(4, 4)
HIGHBD_MASK_SUBPIX_VAR(4, 4)
-
-HIGHBD_MASK_VAR(4, 8)
HIGHBD_MASK_SUBPIX_VAR(4, 8)
-
-HIGHBD_MASK_VAR(8, 4)
HIGHBD_MASK_SUBPIX_VAR(8, 4)
-
-HIGHBD_MASK_VAR(8, 8)
HIGHBD_MASK_SUBPIX_VAR(8, 8)
-
-HIGHBD_MASK_VAR(8, 16)
HIGHBD_MASK_SUBPIX_VAR(8, 16)
-
-HIGHBD_MASK_VAR(16, 8)
HIGHBD_MASK_SUBPIX_VAR(16, 8)
-
-HIGHBD_MASK_VAR(16, 16)
HIGHBD_MASK_SUBPIX_VAR(16, 16)
-
-HIGHBD_MASK_VAR(16, 32)
HIGHBD_MASK_SUBPIX_VAR(16, 32)
-
-HIGHBD_MASK_VAR(32, 16)
HIGHBD_MASK_SUBPIX_VAR(32, 16)
-
-HIGHBD_MASK_VAR(32, 32)
HIGHBD_MASK_SUBPIX_VAR(32, 32)
-
-HIGHBD_MASK_VAR(32, 64)
HIGHBD_MASK_SUBPIX_VAR(32, 64)
-
-HIGHBD_MASK_VAR(64, 32)
HIGHBD_MASK_SUBPIX_VAR(64, 32)
-
-HIGHBD_MASK_VAR(64, 64)
HIGHBD_MASK_SUBPIX_VAR(64, 64)
-
#if CONFIG_EXT_PARTITION
-HIGHBD_MASK_VAR(64, 128)
HIGHBD_MASK_SUBPIX_VAR(64, 128)
-
-HIGHBD_MASK_VAR(128, 64)
HIGHBD_MASK_SUBPIX_VAR(128, 64)
-
-HIGHBD_MASK_VAR(128, 128)
HIGHBD_MASK_SUBPIX_VAR(128, 128)
#endif // CONFIG_EXT_PARTITION
#endif // CONFIG_HIGHBITDEPTH
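
With the per-size MASK_VAR kernels gone, the variance step that remains is the standard one-pass identity over the prediction/reference difference, variance = SSE - SUM^2 / (W*H). A scalar sketch of that final stage, mirroring the structure of the aom_variance##W##x##H##_c kernels the macros now call:

static unsigned int variance_ref(const uint8_t *a, int a_stride,
                                 const uint8_t *b, int b_stride, int w, int h,
                                 unsigned int *sse) {
  int64_t sum = 0;
  uint64_t sq = 0;
  for (int y = 0; y < h; ++y) {
    for (int x = 0; x < w; ++x) {
      const int d = a[x] - b[x];
      sum += d;                /* accumulates SUM */
      sq += (uint64_t)(d * d); /* accumulates SSE */
    }
    a += a_stride;
    b += b_stride;
  }
  *sse = (unsigned int)sq;
  return (unsigned int)(sq - (uint64_t)(sum * sum) / (w * h));
}
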
diff --git a/third_party/aom/aom_dsp/variance.h b/third_party/aom/aom_dsp/variance.h
index 7c925cfac0..20f0895cbe 100644
--- a/third_party/aom/aom_dsp/variance.h
+++ b/third_party/aom/aom_dsp/variance.h
@@ -57,15 +57,13 @@ typedef unsigned int (*aom_subp_avg_variance_fn_t)(
#if CONFIG_AV1 && CONFIG_EXT_INTER
typedef unsigned int (*aom_masked_sad_fn_t)(const uint8_t *src, int src_stride,
const uint8_t *ref, int ref_stride,
- const uint8_t *msk_ptr,
- int msk_stride);
-typedef unsigned int (*aom_masked_variance_fn_t)(
- const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride,
- const uint8_t *msk, int msk_stride, unsigned int *sse);
+ const uint8_t *second_pred,
+ const uint8_t *msk, int msk_stride,
+ int invert_mask);
typedef unsigned int (*aom_masked_subpixvariance_fn_t)(
const uint8_t *src, int src_stride, int xoffset, int yoffset,
- const uint8_t *ref, int ref_stride, const uint8_t *msk, int msk_stride,
- unsigned int *sse);
+ const uint8_t *ref, int ref_stride, const uint8_t *second_pred,
+ const uint8_t *msk, int msk_stride, int invert_mask, unsigned int *sse);
#endif // CONFIG_AV1 && CONFIG_EXT_INTER
#if CONFIG_AV1 && CONFIG_MOTION_VAR
@@ -94,7 +92,6 @@ typedef struct aom_variance_vtable {
aom_sad_multi_d_fn_t sdx4df;
#if CONFIG_EXT_INTER
aom_masked_sad_fn_t msdf;
- aom_masked_variance_fn_t mvf;
aom_masked_subpixvariance_fn_t msvf;
#endif // CONFIG_EXT_INTER
#if CONFIG_MOTION_VAR
diff --git a/third_party/aom/aom_dsp/x86/aom_convolve_hip_sse2.c b/third_party/aom/aom_dsp/x86/aom_convolve_hip_sse2.c
new file mode 100644
index 0000000000..14352895d8
--- /dev/null
+++ b/third_party/aom/aom_dsp/x86/aom_convolve_hip_sse2.c
@@ -0,0 +1,195 @@
+/*
+ * Copyright (c) 2016, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#include <emmintrin.h>
+#include <assert.h>
+
+#include "./aom_dsp_rtcd.h"
+#include "aom_dsp/aom_convolve.h"
+#include "aom_dsp/aom_dsp_common.h"
+#include "aom_dsp/aom_filter.h"
+
+void aom_convolve8_add_src_hip_sse2(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x, int x_step_q4,
+ const int16_t *filter_y, int y_step_q4,
+ int w, int h) {
+ const int bd = 8;
+ assert(x_step_q4 == 16 && y_step_q4 == 16);
+ assert(!(w & 7));
+ (void)x_step_q4;
+ (void)y_step_q4;
+
+ uint16_t temp[(MAX_SB_SIZE + SUBPEL_TAPS - 1) * MAX_SB_SIZE];
+ int intermediate_height = h + SUBPEL_TAPS - 1;
+ int i, j;
+ const int center_tap = ((SUBPEL_TAPS - 1) / 2);
+ const uint8_t *const src_ptr = src - center_tap * src_stride - center_tap;
+
+ const __m128i zero = _mm_setzero_si128();
+ // Add an offset to account for the "add_src" part of the convolve function.
+ const __m128i offset = _mm_insert_epi16(zero, 1 << FILTER_BITS, 3);
+
+ /* Horizontal filter */
+ {
+ const __m128i coeffs_x =
+ _mm_add_epi16(_mm_loadu_si128((__m128i *)filter_x), offset);
+
+ // coeffs 0 1 0 1 2 3 2 3
+ const __m128i tmp_0 = _mm_unpacklo_epi32(coeffs_x, coeffs_x);
+ // coeffs 4 5 4 5 6 7 6 7
+ const __m128i tmp_1 = _mm_unpackhi_epi32(coeffs_x, coeffs_x);
+
+ // coeffs 0 1 0 1 0 1 0 1
+ const __m128i coeff_01 = _mm_unpacklo_epi64(tmp_0, tmp_0);
+ // coeffs 2 3 2 3 2 3 2 3
+ const __m128i coeff_23 = _mm_unpackhi_epi64(tmp_0, tmp_0);
+ // coeffs 4 5 4 5 4 5 4 5
+ const __m128i coeff_45 = _mm_unpacklo_epi64(tmp_1, tmp_1);
+ // coeffs 6 7 6 7 6 7 6 7
+ const __m128i coeff_67 = _mm_unpackhi_epi64(tmp_1, tmp_1);
+
+ const __m128i round_const =
+ _mm_set1_epi32((1 << (FILTER_BITS - EXTRAPREC_BITS - 1)) +
+ (1 << (bd + FILTER_BITS - 1)));
+
+ for (i = 0; i < intermediate_height; ++i) {
+ for (j = 0; j < w; j += 8) {
+ const __m128i data =
+ _mm_loadu_si128((__m128i *)&src_ptr[i * src_stride + j]);
+
+ // Filter even-index pixels
+ const __m128i src_0 = _mm_unpacklo_epi8(data, zero);
+ const __m128i res_0 = _mm_madd_epi16(src_0, coeff_01);
+ const __m128i src_2 = _mm_unpacklo_epi8(_mm_srli_si128(data, 2), zero);
+ const __m128i res_2 = _mm_madd_epi16(src_2, coeff_23);
+ const __m128i src_4 = _mm_unpacklo_epi8(_mm_srli_si128(data, 4), zero);
+ const __m128i res_4 = _mm_madd_epi16(src_4, coeff_45);
+ const __m128i src_6 = _mm_unpacklo_epi8(_mm_srli_si128(data, 6), zero);
+ const __m128i res_6 = _mm_madd_epi16(src_6, coeff_67);
+
+ __m128i res_even = _mm_add_epi32(_mm_add_epi32(res_0, res_4),
+ _mm_add_epi32(res_2, res_6));
+ res_even = _mm_srai_epi32(_mm_add_epi32(res_even, round_const),
+ FILTER_BITS - EXTRAPREC_BITS);
+
+ // Filter odd-index pixels
+ const __m128i src_1 = _mm_unpacklo_epi8(_mm_srli_si128(data, 1), zero);
+ const __m128i res_1 = _mm_madd_epi16(src_1, coeff_01);
+ const __m128i src_3 = _mm_unpacklo_epi8(_mm_srli_si128(data, 3), zero);
+ const __m128i res_3 = _mm_madd_epi16(src_3, coeff_23);
+ const __m128i src_5 = _mm_unpacklo_epi8(_mm_srli_si128(data, 5), zero);
+ const __m128i res_5 = _mm_madd_epi16(src_5, coeff_45);
+ const __m128i src_7 = _mm_unpacklo_epi8(_mm_srli_si128(data, 7), zero);
+ const __m128i res_7 = _mm_madd_epi16(src_7, coeff_67);
+
+ __m128i res_odd = _mm_add_epi32(_mm_add_epi32(res_1, res_5),
+ _mm_add_epi32(res_3, res_7));
+ res_odd = _mm_srai_epi32(_mm_add_epi32(res_odd, round_const),
+ FILTER_BITS - EXTRAPREC_BITS);
+
+ // Pack in the column order 0, 2, 4, 6, 1, 3, 5, 7
+ __m128i res = _mm_packs_epi32(res_even, res_odd);
+ res = _mm_min_epi16(_mm_max_epi16(res, zero),
+ _mm_set1_epi16(EXTRAPREC_CLAMP_LIMIT(bd) - 1));
+ _mm_storeu_si128((__m128i *)&temp[i * MAX_SB_SIZE + j], res);
+ }
+ }
+ }
+
+ /* Vertical filter */
+ {
+ const __m128i coeffs_y =
+ _mm_add_epi16(_mm_loadu_si128((__m128i *)filter_y), offset);
+
+ // coeffs 0 1 0 1 2 3 2 3
+ const __m128i tmp_0 = _mm_unpacklo_epi32(coeffs_y, coeffs_y);
+ // coeffs 4 5 4 5 6 7 6 7
+ const __m128i tmp_1 = _mm_unpackhi_epi32(coeffs_y, coeffs_y);
+
+ // coeffs 0 1 0 1 0 1 0 1
+ const __m128i coeff_01 = _mm_unpacklo_epi64(tmp_0, tmp_0);
+ // coeffs 2 3 2 3 2 3 2 3
+ const __m128i coeff_23 = _mm_unpackhi_epi64(tmp_0, tmp_0);
+ // coeffs 4 5 4 5 4 5 4 5
+ const __m128i coeff_45 = _mm_unpacklo_epi64(tmp_1, tmp_1);
+ // coeffs 6 7 6 7 6 7 6 7
+ const __m128i coeff_67 = _mm_unpackhi_epi64(tmp_1, tmp_1);
+
+ const __m128i round_const =
+ _mm_set1_epi32((1 << (FILTER_BITS + EXTRAPREC_BITS - 1)) -
+ (1 << (bd + FILTER_BITS + EXTRAPREC_BITS - 1)));
+
+ for (i = 0; i < h; ++i) {
+ for (j = 0; j < w; j += 8) {
+ // Filter even-index pixels
+ const uint16_t *data = &temp[i * MAX_SB_SIZE + j];
+ const __m128i src_0 =
+ _mm_unpacklo_epi16(*(__m128i *)(data + 0 * MAX_SB_SIZE),
+ *(__m128i *)(data + 1 * MAX_SB_SIZE));
+ const __m128i src_2 =
+ _mm_unpacklo_epi16(*(__m128i *)(data + 2 * MAX_SB_SIZE),
+ *(__m128i *)(data + 3 * MAX_SB_SIZE));
+ const __m128i src_4 =
+ _mm_unpacklo_epi16(*(__m128i *)(data + 4 * MAX_SB_SIZE),
+ *(__m128i *)(data + 5 * MAX_SB_SIZE));
+ const __m128i src_6 =
+ _mm_unpacklo_epi16(*(__m128i *)(data + 6 * MAX_SB_SIZE),
+ *(__m128i *)(data + 7 * MAX_SB_SIZE));
+
+ const __m128i res_0 = _mm_madd_epi16(src_0, coeff_01);
+ const __m128i res_2 = _mm_madd_epi16(src_2, coeff_23);
+ const __m128i res_4 = _mm_madd_epi16(src_4, coeff_45);
+ const __m128i res_6 = _mm_madd_epi16(src_6, coeff_67);
+
+ const __m128i res_even = _mm_add_epi32(_mm_add_epi32(res_0, res_2),
+ _mm_add_epi32(res_4, res_6));
+
+ // Filter odd-index pixels
+ const __m128i src_1 =
+ _mm_unpackhi_epi16(*(__m128i *)(data + 0 * MAX_SB_SIZE),
+ *(__m128i *)(data + 1 * MAX_SB_SIZE));
+ const __m128i src_3 =
+ _mm_unpackhi_epi16(*(__m128i *)(data + 2 * MAX_SB_SIZE),
+ *(__m128i *)(data + 3 * MAX_SB_SIZE));
+ const __m128i src_5 =
+ _mm_unpackhi_epi16(*(__m128i *)(data + 4 * MAX_SB_SIZE),
+ *(__m128i *)(data + 5 * MAX_SB_SIZE));
+ const __m128i src_7 =
+ _mm_unpackhi_epi16(*(__m128i *)(data + 6 * MAX_SB_SIZE),
+ *(__m128i *)(data + 7 * MAX_SB_SIZE));
+
+ const __m128i res_1 = _mm_madd_epi16(src_1, coeff_01);
+ const __m128i res_3 = _mm_madd_epi16(src_3, coeff_23);
+ const __m128i res_5 = _mm_madd_epi16(src_5, coeff_45);
+ const __m128i res_7 = _mm_madd_epi16(src_7, coeff_67);
+
+ const __m128i res_odd = _mm_add_epi32(_mm_add_epi32(res_1, res_3),
+ _mm_add_epi32(res_5, res_7));
+
+ // Rearrange pixels back into the order 0 ... 7
+ const __m128i res_lo = _mm_unpacklo_epi32(res_even, res_odd);
+ const __m128i res_hi = _mm_unpackhi_epi32(res_even, res_odd);
+
+ const __m128i res_lo_round = _mm_srai_epi32(
+ _mm_add_epi32(res_lo, round_const), FILTER_BITS + EXTRAPREC_BITS);
+ const __m128i res_hi_round = _mm_srai_epi32(
+ _mm_add_epi32(res_hi, round_const), FILTER_BITS + EXTRAPREC_BITS);
+
+ const __m128i res_16bit = _mm_packs_epi32(res_lo_round, res_hi_round);
+ __m128i res_8bit = _mm_packus_epi16(res_16bit, res_16bit);
+
+ __m128i *const p = (__m128i *)&dst[i * dst_stride + j];
+ _mm_storel_epi64(p, res_8bit);
+ }
+ }
+ }
+}
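
Precision bookkeeping in the new "hip" (high intermediate precision) path: the horizontal pass rounds by FILTER_BITS - EXTRAPREC_BITS, keeping EXTRAPREC_BITS extra fractional bits in temp[], and the vertical pass rounds by FILTER_BITS + EXTRAPREC_BITS, so the net scaling over both passes is the usual 2*FILTER_BITS. A scalar sketch of one output pixel under that scheme (the bd-dependent offset that keeps the add-src intermediates unsigned is folded into round_const above and omitted here):

static int hip_two_pass_pixel(const uint8_t *src, int stride,
                              const int16_t *fx, const int16_t *fy,
                              int filter_bits, int extraprec_bits) {
  int32_t mid[8];  /* one column of horizontal results, extra bits kept */
  const int sh1 = filter_bits - extraprec_bits;
  const int sh2 = filter_bits + extraprec_bits;
  for (int r = 0; r < 8; ++r) {
    int32_t h = 0;
    for (int k = 0; k < 8; ++k) h += fx[k] * src[r * stride + k];
    mid[r] = (h + (1 << (sh1 - 1))) >> sh1;  /* partial round only */
  }
  int32_t v = 0;
  for (int r = 0; r < 8; ++r) v += fy[r] * mid[r];
  return (v + (1 << (sh2 - 1))) >> sh2;  /* net shift: 2 * filter_bits */
}
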
diff --git a/third_party/aom/aom_dsp/x86/aom_highbd_convolve_hip_ssse3.c b/third_party/aom/aom_dsp/x86/aom_highbd_convolve_hip_ssse3.c
new file mode 100644
index 0000000000..74ce80e50b
--- /dev/null
+++ b/third_party/aom/aom_dsp/x86/aom_highbd_convolve_hip_ssse3.c
@@ -0,0 +1,203 @@
+/*
+ * Copyright (c) 2016, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#include <tmmintrin.h>
+#include <assert.h>
+
+#include "./aom_dsp_rtcd.h"
+#include "aom_dsp/aom_convolve.h"
+#include "aom_dsp/aom_dsp_common.h"
+#include "aom_dsp/aom_filter.h"
+
+#if EXTRAPREC_BITS > 2
+#error "Highbd high-prec convolve filter only supports EXTRAPREC_BITS <= 2"
+#error "(need to use 32-bit intermediates for EXTRAPREC_BITS > 2)"
+#endif
+
+void aom_highbd_convolve8_add_src_hip_ssse3(
+ const uint8_t *src8, ptrdiff_t src_stride, uint8_t *dst8,
+ ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4,
+ const int16_t *filter_y, int y_step_q4, int w, int h, int bd) {
+ assert(x_step_q4 == 16 && y_step_q4 == 16);
+ assert(!(w & 7));
+ (void)x_step_q4;
+ (void)y_step_q4;
+
+ const uint16_t *const src = CONVERT_TO_SHORTPTR(src8);
+ uint16_t *const dst = CONVERT_TO_SHORTPTR(dst8);
+
+ uint16_t temp[(MAX_SB_SIZE + SUBPEL_TAPS - 1) * MAX_SB_SIZE];
+ int intermediate_height = h + SUBPEL_TAPS - 1;
+ int i, j;
+ const int center_tap = ((SUBPEL_TAPS - 1) / 2);
+ const uint16_t *const src_ptr = src - center_tap * src_stride - center_tap;
+
+ const __m128i zero = _mm_setzero_si128();
+ // Add an offset to account for the "add_src" part of the convolve function.
+ const __m128i offset = _mm_insert_epi16(zero, 1 << FILTER_BITS, 3);
+
+ /* Horizontal filter */
+ {
+ const __m128i coeffs_x =
+ _mm_add_epi16(_mm_loadu_si128((__m128i *)filter_x), offset);
+
+ // coeffs 0 1 0 1 2 3 2 3
+ const __m128i tmp_0 = _mm_unpacklo_epi32(coeffs_x, coeffs_x);
+ // coeffs 4 5 4 5 6 7 6 7
+ const __m128i tmp_1 = _mm_unpackhi_epi32(coeffs_x, coeffs_x);
+
+ // coeffs 0 1 0 1 0 1 0 1
+ const __m128i coeff_01 = _mm_unpacklo_epi64(tmp_0, tmp_0);
+ // coeffs 2 3 2 3 2 3 2 3
+ const __m128i coeff_23 = _mm_unpackhi_epi64(tmp_0, tmp_0);
+ // coeffs 4 5 4 5 4 5 4 5
+ const __m128i coeff_45 = _mm_unpacklo_epi64(tmp_1, tmp_1);
+ // coeffs 6 7 6 7 6 7 6 7
+ const __m128i coeff_67 = _mm_unpackhi_epi64(tmp_1, tmp_1);
+
+ const __m128i round_const =
+ _mm_set1_epi32((1 << (FILTER_BITS - EXTRAPREC_BITS - 1)) +
+ (1 << (bd + FILTER_BITS - 1)));
+
+ for (i = 0; i < intermediate_height; ++i) {
+ for (j = 0; j < w; j += 8) {
+ const __m128i data =
+ _mm_loadu_si128((__m128i *)&src_ptr[i * src_stride + j]);
+ const __m128i data2 =
+ _mm_loadu_si128((__m128i *)&src_ptr[i * src_stride + j + 8]);
+
+ // Filter even-index pixels
+ const __m128i res_0 = _mm_madd_epi16(data, coeff_01);
+ const __m128i res_2 =
+ _mm_madd_epi16(_mm_alignr_epi8(data2, data, 4), coeff_23);
+ const __m128i res_4 =
+ _mm_madd_epi16(_mm_alignr_epi8(data2, data, 8), coeff_45);
+ const __m128i res_6 =
+ _mm_madd_epi16(_mm_alignr_epi8(data2, data, 12), coeff_67);
+
+ __m128i res_even = _mm_add_epi32(_mm_add_epi32(res_0, res_4),
+ _mm_add_epi32(res_2, res_6));
+ res_even = _mm_srai_epi32(_mm_add_epi32(res_even, round_const),
+ FILTER_BITS - EXTRAPREC_BITS);
+
+ // Filter odd-index pixels
+ const __m128i res_1 =
+ _mm_madd_epi16(_mm_alignr_epi8(data2, data, 2), coeff_01);
+ const __m128i res_3 =
+ _mm_madd_epi16(_mm_alignr_epi8(data2, data, 6), coeff_23);
+ const __m128i res_5 =
+ _mm_madd_epi16(_mm_alignr_epi8(data2, data, 10), coeff_45);
+ const __m128i res_7 =
+ _mm_madd_epi16(_mm_alignr_epi8(data2, data, 14), coeff_67);
+
+ __m128i res_odd = _mm_add_epi32(_mm_add_epi32(res_1, res_5),
+ _mm_add_epi32(res_3, res_7));
+ res_odd = _mm_srai_epi32(_mm_add_epi32(res_odd, round_const),
+ FILTER_BITS - EXTRAPREC_BITS);
+
+ // Pack in the column order 0, 2, 4, 6, 1, 3, 5, 7
+ const __m128i maxval = _mm_set1_epi16((EXTRAPREC_CLAMP_LIMIT(bd)) - 1);
+ __m128i res = _mm_packs_epi32(res_even, res_odd);
+ res = _mm_min_epi16(_mm_max_epi16(res, zero), maxval);
+ _mm_storeu_si128((__m128i *)&temp[i * MAX_SB_SIZE + j], res);
+ }
+ }
+ }
+
+ /* Vertical filter */
+ {
+ const __m128i coeffs_y =
+ _mm_add_epi16(_mm_loadu_si128((__m128i *)filter_y), offset);
+
+ // coeffs 0 1 0 1 2 3 2 3
+ const __m128i tmp_0 = _mm_unpacklo_epi32(coeffs_y, coeffs_y);
+ // coeffs 4 5 4 5 6 7 6 7
+ const __m128i tmp_1 = _mm_unpackhi_epi32(coeffs_y, coeffs_y);
+
+ // coeffs 0 1 0 1 0 1 0 1
+ const __m128i coeff_01 = _mm_unpacklo_epi64(tmp_0, tmp_0);
+ // coeffs 2 3 2 3 2 3 2 3
+ const __m128i coeff_23 = _mm_unpackhi_epi64(tmp_0, tmp_0);
+ // coeffs 4 5 4 5 4 5 4 5
+ const __m128i coeff_45 = _mm_unpacklo_epi64(tmp_1, tmp_1);
+ // coeffs 6 7 6 7 6 7 6 7
+ const __m128i coeff_67 = _mm_unpackhi_epi64(tmp_1, tmp_1);
+
+ const __m128i round_const =
+ _mm_set1_epi32((1 << (FILTER_BITS + EXTRAPREC_BITS - 1)) -
+ (1 << (bd + FILTER_BITS + EXTRAPREC_BITS - 1)));
+
+ for (i = 0; i < h; ++i) {
+ for (j = 0; j < w; j += 8) {
+ // Filter even-index pixels
+ const uint16_t *data = &temp[i * MAX_SB_SIZE + j];
+ const __m128i src_0 =
+ _mm_unpacklo_epi16(*(__m128i *)(data + 0 * MAX_SB_SIZE),
+ *(__m128i *)(data + 1 * MAX_SB_SIZE));
+ const __m128i src_2 =
+ _mm_unpacklo_epi16(*(__m128i *)(data + 2 * MAX_SB_SIZE),
+ *(__m128i *)(data + 3 * MAX_SB_SIZE));
+ const __m128i src_4 =
+ _mm_unpacklo_epi16(*(__m128i *)(data + 4 * MAX_SB_SIZE),
+ *(__m128i *)(data + 5 * MAX_SB_SIZE));
+ const __m128i src_6 =
+ _mm_unpacklo_epi16(*(__m128i *)(data + 6 * MAX_SB_SIZE),
+ *(__m128i *)(data + 7 * MAX_SB_SIZE));
+
+ const __m128i res_0 = _mm_madd_epi16(src_0, coeff_01);
+ const __m128i res_2 = _mm_madd_epi16(src_2, coeff_23);
+ const __m128i res_4 = _mm_madd_epi16(src_4, coeff_45);
+ const __m128i res_6 = _mm_madd_epi16(src_6, coeff_67);
+
+ const __m128i res_even = _mm_add_epi32(_mm_add_epi32(res_0, res_2),
+ _mm_add_epi32(res_4, res_6));
+
+ // Filter odd-index pixels
+ const __m128i src_1 =
+ _mm_unpackhi_epi16(*(__m128i *)(data + 0 * MAX_SB_SIZE),
+ *(__m128i *)(data + 1 * MAX_SB_SIZE));
+ const __m128i src_3 =
+ _mm_unpackhi_epi16(*(__m128i *)(data + 2 * MAX_SB_SIZE),
+ *(__m128i *)(data + 3 * MAX_SB_SIZE));
+ const __m128i src_5 =
+ _mm_unpackhi_epi16(*(__m128i *)(data + 4 * MAX_SB_SIZE),
+ *(__m128i *)(data + 5 * MAX_SB_SIZE));
+ const __m128i src_7 =
+ _mm_unpackhi_epi16(*(__m128i *)(data + 6 * MAX_SB_SIZE),
+ *(__m128i *)(data + 7 * MAX_SB_SIZE));
+
+ const __m128i res_1 = _mm_madd_epi16(src_1, coeff_01);
+ const __m128i res_3 = _mm_madd_epi16(src_3, coeff_23);
+ const __m128i res_5 = _mm_madd_epi16(src_5, coeff_45);
+ const __m128i res_7 = _mm_madd_epi16(src_7, coeff_67);
+
+ const __m128i res_odd = _mm_add_epi32(_mm_add_epi32(res_1, res_3),
+ _mm_add_epi32(res_5, res_7));
+
+ // Rearrange pixels back into the order 0 ... 7
+ const __m128i res_lo = _mm_unpacklo_epi32(res_even, res_odd);
+ const __m128i res_hi = _mm_unpackhi_epi32(res_even, res_odd);
+
+ const __m128i res_lo_round = _mm_srai_epi32(
+ _mm_add_epi32(res_lo, round_const), FILTER_BITS + EXTRAPREC_BITS);
+ const __m128i res_hi_round = _mm_srai_epi32(
+ _mm_add_epi32(res_hi, round_const), FILTER_BITS + EXTRAPREC_BITS);
+
+ const __m128i maxval = _mm_set1_epi16((1 << bd) - 1);
+ __m128i res_16bit = _mm_packs_epi32(res_lo_round, res_hi_round);
+ res_16bit = _mm_min_epi16(_mm_max_epi16(res_16bit, zero), maxval);
+
+ __m128i *const p = (__m128i *)&dst[i * dst_stride + j];
+ _mm_storeu_si128(p, res_16bit);
+ }
+ }
+ }
+}
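
Both convolve paths share the same coefficient shuffle: from an 8-tap filter f[0..7], four vectors are built, each repeating one coefficient pair across every 32-bit lane, so that _mm_madd_epi16 can accumulate two taps per lane. The layout the unpacklo/unpackhi sequence produces, stated as plain C (a sketch of the data layout, not of the intrinsics):

static void build_pair_lanes(const int16_t f[8], int16_t lanes[4][8]) {
  for (int p = 0; p < 4; ++p)      /* pairs (0,1) (2,3) (4,5) (6,7) */
    for (int l = 0; l < 4; ++l) {  /* repeated in all four 32-bit lanes */
      lanes[p][2 * l] = f[2 * p];
      lanes[p][2 * l + 1] = f[2 * p + 1];
    }
  /* lanes[0] == { f0,f1, f0,f1, f0,f1, f0,f1 }, i.e. coeff_01 above */
}
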
diff --git a/third_party/aom/aom_dsp/x86/avg_intrin_sse2.c b/third_party/aom/aom_dsp/x86/avg_intrin_sse2.c
index bcdc20f638..1a6457402c 100644
--- a/third_party/aom/aom_dsp/x86/avg_intrin_sse2.c
+++ b/third_party/aom/aom_dsp/x86/avg_intrin_sse2.c
@@ -94,52 +94,6 @@ void aom_minmax_8x8_sse2(const uint8_t *s, int p, const uint8_t *d, int dp,
*min = _mm_extract_epi16(minabsdiff, 0);
}
-unsigned int aom_avg_8x8_sse2(const uint8_t *s, int p) {
- __m128i s0, s1, u0;
- unsigned int avg = 0;
- u0 = _mm_setzero_si128();
- s0 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(s)), u0);
- s1 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(s + p)), u0);
- s0 = _mm_adds_epu16(s0, s1);
- s1 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(s + 2 * p)), u0);
- s0 = _mm_adds_epu16(s0, s1);
- s1 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(s + 3 * p)), u0);
- s0 = _mm_adds_epu16(s0, s1);
- s1 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(s + 4 * p)), u0);
- s0 = _mm_adds_epu16(s0, s1);
- s1 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(s + 5 * p)), u0);
- s0 = _mm_adds_epu16(s0, s1);
- s1 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(s + 6 * p)), u0);
- s0 = _mm_adds_epu16(s0, s1);
- s1 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(s + 7 * p)), u0);
- s0 = _mm_adds_epu16(s0, s1);
-
- s0 = _mm_adds_epu16(s0, _mm_srli_si128(s0, 8));
- s0 = _mm_adds_epu16(s0, _mm_srli_epi64(s0, 32));
- s0 = _mm_adds_epu16(s0, _mm_srli_epi64(s0, 16));
- avg = _mm_extract_epi16(s0, 0);
- return (avg + 32) >> 6;
-}
-
-unsigned int aom_avg_4x4_sse2(const uint8_t *s, int p) {
- __m128i s0, s1, u0;
- unsigned int avg = 0;
-
- u0 = _mm_setzero_si128();
- s0 = _mm_unpacklo_epi8(xx_loadl_32(s), u0);
- s1 = _mm_unpacklo_epi8(xx_loadl_32(s + p), u0);
- s0 = _mm_adds_epu16(s0, s1);
- s1 = _mm_unpacklo_epi8(xx_loadl_32(s + 2 * p), u0);
- s0 = _mm_adds_epu16(s0, s1);
- s1 = _mm_unpacklo_epi8(xx_loadl_32(s + 3 * p), u0);
- s0 = _mm_adds_epu16(s0, s1);
-
- s0 = _mm_adds_epu16(s0, _mm_srli_si128(s0, 4));
- s0 = _mm_adds_epu16(s0, _mm_srli_epi64(s0, 16));
- avg = _mm_extract_epi16(s0, 0);
- return (avg + 8) >> 4;
-}
-
static void hadamard_col8_sse2(__m128i *in, int iter) {
__m128i a0 = in[0];
__m128i a1 = in[1];
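
The deleted SSE2 kernels above (the x86 twins of the MSA routines removed earlier) finish with a log-step horizontal reduction: each _mm_srli_si128/_mm_srli_epi64 plus add halves the number of live partial sums, 8 -> 4 -> 2 -> 1. A scalar sketch of that reduction:

static unsigned hsum8_ref(const uint16_t v[8]) {
  unsigned s[8];
  for (int i = 0; i < 8; ++i) s[i] = v[i];
  for (int step = 4; step >= 1; step >>= 1)  /* 8 -> 4 -> 2 -> 1 */
    for (int i = 0; i < step; ++i) s[i] += s[i + step];
  return s[0];  /* total of all eight partial sums */
}
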
diff --git a/third_party/aom/aom_dsp/x86/highbd_convolve_avx2.c b/third_party/aom/aom_dsp/x86/highbd_convolve_avx2.c
index 7d96e26ae4..133640eb77 100644
--- a/third_party/aom/aom_dsp/x86/highbd_convolve_avx2.c
+++ b/third_party/aom/aom_dsp/x86/highbd_convolve_avx2.c
@@ -14,30 +14,6 @@
#include "./aom_dsp_rtcd.h"
#include "aom_dsp/x86/convolve.h"
-#define CONV8_ROUNDING_BITS (7)
-
-static const uint8_t signal_pattern_0[32] = { 0, 1, 2, 3, 2, 3, 4, 5, 4, 5, 6,
- 7, 6, 7, 8, 9, 0, 1, 2, 3, 2, 3,
- 4, 5, 4, 5, 6, 7, 6, 7, 8, 9 };
-
-static const uint8_t signal_pattern_1[32] = { 4, 5, 6, 7, 6, 7, 8, 9,
- 8, 9, 10, 11, 10, 11, 12, 13,
- 4, 5, 6, 7, 6, 7, 8, 9,
- 8, 9, 10, 11, 10, 11, 12, 13 };
-
-static const uint8_t signal_pattern_2[32] = { 6, 7, 8, 9, 8, 9, 10, 11,
- 10, 11, 12, 13, 12, 13, 14, 15,
- 6, 7, 8, 9, 8, 9, 10, 11,
- 10, 11, 12, 13, 12, 13, 14, 15 };
-
-static const uint32_t signal_index[8] = { 2, 3, 4, 5, 2, 3, 4, 5 };
-
-typedef enum { PACK_8x1, PACK_8x2, PACK_16x1 } PixelPackFormat;
-
-typedef void (*WritePixels)(const __m256i *y0, const __m256i *y1,
- const __m256i *mask, uint16_t *dst,
- ptrdiff_t pitch);
-
// -----------------------------------------------------------------------------
// Copy and average
@@ -217,6 +193,27 @@ void aom_highbd_convolve_avg_avx2(const uint8_t *src8, ptrdiff_t src_stride,
}
// -----------------------------------------------------------------------------
+// Horizontal and vertical filtering
+
+#define CONV8_ROUNDING_BITS (7)
+
+static const uint8_t signal_pattern_0[32] = { 0, 1, 2, 3, 2, 3, 4, 5, 4, 5, 6,
+ 7, 6, 7, 8, 9, 0, 1, 2, 3, 2, 3,
+ 4, 5, 4, 5, 6, 7, 6, 7, 8, 9 };
+
+static const uint8_t signal_pattern_1[32] = { 4, 5, 6, 7, 6, 7, 8, 9,
+ 8, 9, 10, 11, 10, 11, 12, 13,
+ 4, 5, 6, 7, 6, 7, 8, 9,
+ 8, 9, 10, 11, 10, 11, 12, 13 };
+
+static const uint8_t signal_pattern_2[32] = { 6, 7, 8, 9, 8, 9, 10, 11,
+ 10, 11, 12, 13, 12, 13, 14, 15,
+ 6, 7, 8, 9, 8, 9, 10, 11,
+ 10, 11, 12, 13, 12, 13, 14, 15 };
+
+static const uint32_t signal_index[8] = { 2, 3, 4, 5, 2, 3, 4, 5 };
+
+// -----------------------------------------------------------------------------
// Horizontal Filtering
static INLINE void pack_pixels(const __m256i *s, __m256i *p /*p[4]*/) {
@@ -248,52 +245,30 @@ static INLINE void pack_16_pixels(const __m256i *s0, const __m256i *s1,
x[7] = _mm256_permute2x128_si256(pp[1], pp[5], 0x31);
}
-static INLINE void pack_pixels_with_format(const uint16_t *src,
- PixelPackFormat fmt,
- ptrdiff_t stride, __m256i *x) {
- switch (fmt) {
- case PACK_8x1: {
- __m256i pp[8];
- __m256i s0;
- s0 = _mm256_loadu_si256((const __m256i *)src);
- pack_pixels(&s0, pp);
- x[0] = _mm256_permute2x128_si256(pp[0], pp[2], 0x30);
- x[1] = _mm256_permute2x128_si256(pp[1], pp[3], 0x30);
- x[2] = _mm256_permute2x128_si256(pp[2], pp[0], 0x30);
- x[3] = _mm256_permute2x128_si256(pp[3], pp[1], 0x30);
- break;
- }
- case PACK_8x2: {
- __m256i s0, s1;
- s0 = _mm256_loadu_si256((const __m256i *)src);
- s1 = _mm256_loadu_si256((const __m256i *)(src + stride));
- pack_16_pixels(&s0, &s1, x);
- break;
- }
- case PACK_16x1: {
- __m256i s0, s1;
- s0 = _mm256_loadu_si256((const __m256i *)src);
- s1 = _mm256_loadu_si256((const __m256i *)(src + 8));
- pack_16_pixels(&s0, &s1, x);
- break;
- }
- default: { assert(0); }
- }
-}
-
-static INLINE void pack_8x1_pixels(const uint16_t *src, const ptrdiff_t pitch,
- __m256i *x /*x[4]*/) {
- pack_pixels_with_format(src, PACK_8x1, pitch, x);
+static INLINE void pack_8x1_pixels(const uint16_t *src, __m256i *x) {
+ __m256i pp[8];
+ __m256i s0;
+ s0 = _mm256_loadu_si256((const __m256i *)src);
+ pack_pixels(&s0, pp);
+ x[0] = _mm256_permute2x128_si256(pp[0], pp[2], 0x30);
+ x[1] = _mm256_permute2x128_si256(pp[1], pp[3], 0x30);
+ x[2] = _mm256_permute2x128_si256(pp[2], pp[0], 0x30);
+ x[3] = _mm256_permute2x128_si256(pp[3], pp[1], 0x30);
}
-static INLINE void pack_8x2_pixels(const uint16_t *src, const ptrdiff_t pitch,
- __m256i *x /*x[8]*/) {
- pack_pixels_with_format(src, PACK_8x2, pitch, x);
+static INLINE void pack_8x2_pixels(const uint16_t *src, ptrdiff_t stride,
+ __m256i *x) {
+ __m256i s0, s1;
+ s0 = _mm256_loadu_si256((const __m256i *)src);
+ s1 = _mm256_loadu_si256((const __m256i *)(src + stride));
+ pack_16_pixels(&s0, &s1, x);
}
-static INLINE void pack_16x1_pixels(const uint16_t *src, const ptrdiff_t pitch,
- __m256i *x /*x[8]*/) {
- pack_pixels_with_format(src, PACK_16x1, pitch, x);
+static INLINE void pack_16x1_pixels(const uint16_t *src, __m256i *x) {
+ __m256i s0, s1;
+ s0 = _mm256_loadu_si256((const __m256i *)src);
+ s1 = _mm256_loadu_si256((const __m256i *)(src + 8));
+ pack_16_pixels(&s0, &s1, x);
}
// Note:
@@ -323,51 +298,49 @@ static INLINE void filter_8x1_pixels(const __m256i *sig /*sig[4]*/,
a0 = _mm256_madd_epi16(fil[1], sig[1]);
a1 = _mm256_madd_epi16(fil[2], sig[2]);
- const __m256i min = _mm256_min_epi32(a0, a1);
- a = _mm256_add_epi32(a, min);
-
- const __m256i max = _mm256_max_epi32(a0, a1);
- a = _mm256_add_epi32(a, max);
-
- const __m256i rounding = _mm256_set1_epi32(1 << (CONV8_ROUNDING_BITS - 1));
- a = _mm256_add_epi32(a, rounding);
- *y = _mm256_srai_epi32(a, CONV8_ROUNDING_BITS);
+ {
+ const __m256i min = _mm256_min_epi32(a0, a1);
+ a = _mm256_add_epi32(a, min);
+ }
+ {
+ const __m256i max = _mm256_max_epi32(a0, a1);
+ a = _mm256_add_epi32(a, max);
+ }
+ {
+ const __m256i rounding = _mm256_set1_epi32(1 << (CONV8_ROUNDING_BITS - 1));
+ a = _mm256_add_epi32(a, rounding);
+ *y = _mm256_srai_epi32(a, CONV8_ROUNDING_BITS);
+ }
}
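The min/max pair above simply adds a0 and a1 to the accumulator in a fixed order (a + min + max equals a + a0 + a1), and the final add/shift rounds off CONV8_ROUNDING_BITS fractional bits. Per output pixel the arithmetic reduces to this scalar sketch:

#include <stdint.h>

/* Scalar form of the 8-tap filter in filter_8x1_pixels: taps f[0..7],
 * source pixels x[0..7], CONV8_ROUNDING_BITS == 7 as defined above. */
static int filter_8tap(const int16_t *x, const int16_t *f) {
  int sum = 0;
  for (int k = 0; k < 8; ++k) sum += x[k] * f[k];
  return (sum + (1 << 6)) >> 7; /* round to nearest, drop 7 bits */
}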
-static void write_8x1_pixels(const __m256i *y, const __m256i *z,
- const __m256i *mask, uint16_t *dst,
- ptrdiff_t pitch) {
+static INLINE void store_8x1_pixels(const __m256i *y, const __m256i *mask,
+ uint16_t *dst) {
const __m128i a0 = _mm256_castsi256_si128(*y);
const __m128i a1 = _mm256_extractf128_si256(*y, 1);
__m128i res = _mm_packus_epi32(a0, a1);
- (void)z;
- (void)pitch;
res = _mm_min_epi16(res, _mm256_castsi256_si128(*mask));
_mm_storeu_si128((__m128i *)dst, res);
}
-static void write_8x2_pixels(const __m256i *y0, const __m256i *y1,
- const __m256i *mask, uint16_t *dst,
- ptrdiff_t pitch) {
+static INLINE void store_8x2_pixels(const __m256i *y0, const __m256i *y1,
+ const __m256i *mask, uint16_t *dst,
+ ptrdiff_t pitch) {
__m256i a = _mm256_packus_epi32(*y0, *y1);
a = _mm256_min_epi16(a, *mask);
_mm_storeu_si128((__m128i *)dst, _mm256_castsi256_si128(a));
_mm_storeu_si128((__m128i *)(dst + pitch), _mm256_extractf128_si256(a, 1));
}
-static void write_16x1_pixels(const __m256i *y0, const __m256i *y1,
- const __m256i *mask, uint16_t *dst,
- ptrdiff_t dst_pitch) {
- (void)dst_pitch;
+static INLINE void store_16x1_pixels(const __m256i *y0, const __m256i *y1,
+ const __m256i *mask, uint16_t *dst) {
__m256i a = _mm256_packus_epi32(*y0, *y1);
a = _mm256_min_epi16(a, *mask);
_mm256_storeu_si256((__m256i *)dst, a);
}
-static void filter_block_width8_horiz(
- const uint16_t *src_ptr, ptrdiff_t src_pitch, const WritePixels write_8x1,
- const WritePixels write_8x2, uint16_t *dst_ptr, ptrdiff_t dst_pitch,
- uint32_t height, const int16_t *filter, int bd) {
+static void aom_highbd_filter_block1d8_h8_avx2(
+ const uint16_t *src_ptr, ptrdiff_t src_pitch, uint16_t *dst_ptr,
+ ptrdiff_t dst_pitch, uint32_t height, const int16_t *filter, int bd) {
__m256i signal[8], res0, res1;
const __m256i max = _mm256_set1_epi16((1 << bd) - 1);
@@ -379,32 +352,22 @@ static void filter_block_width8_horiz(
pack_8x2_pixels(src_ptr, src_pitch, signal);
filter_8x1_pixels(signal, ff, &res0);
filter_8x1_pixels(&signal[4], ff, &res1);
- write_8x2(&res0, &res1, &max, dst_ptr, dst_pitch);
+ store_8x2_pixels(&res0, &res1, &max, dst_ptr, dst_pitch);
height -= 2;
src_ptr += src_pitch << 1;
dst_ptr += dst_pitch << 1;
} while (height > 1);
if (height > 0) {
- pack_8x1_pixels(src_ptr, src_pitch, signal);
+ pack_8x1_pixels(src_ptr, signal);
filter_8x1_pixels(signal, ff, &res0);
- write_8x1(&res0, &res1, &max, dst_ptr, dst_pitch);
+ store_8x1_pixels(&res0, &max, dst_ptr);
}
}
-static void aom_highbd_filter_block1d8_h8_avx2(
- const uint16_t *src, ptrdiff_t src_pitch, uint16_t *dst,
+static void aom_highbd_filter_block1d16_h8_avx2(
+ const uint16_t *src_ptr, ptrdiff_t src_pitch, uint16_t *dst_ptr,
ptrdiff_t dst_pitch, uint32_t height, const int16_t *filter, int bd) {
- filter_block_width8_horiz(src, src_pitch, write_8x1_pixels, write_8x2_pixels,
- dst, dst_pitch, height, filter, bd);
-}
-
-static void filter_block_width16_horiz(const uint16_t *src_ptr,
- ptrdiff_t src_pitch,
- const WritePixels write_16x1,
- uint16_t *dst_ptr, ptrdiff_t dst_pitch,
- uint32_t height, const int16_t *filter,
- int bd) {
__m256i signal[8], res0, res1;
const __m256i max = _mm256_set1_epi16((1 << bd) - 1);
@@ -413,23 +376,17 @@ static void filter_block_width16_horiz(const uint16_t *src_ptr,
src_ptr -= 3;
do {
- pack_16x1_pixels(src_ptr, src_pitch, signal);
+ pack_16x1_pixels(src_ptr, signal);
filter_8x1_pixels(signal, ff, &res0);
filter_8x1_pixels(&signal[4], ff, &res1);
- write_16x1(&res0, &res1, &max, dst_ptr, dst_pitch);
+ store_16x1_pixels(&res0, &res1, &max, dst_ptr);
height -= 1;
src_ptr += src_pitch;
dst_ptr += dst_pitch;
} while (height > 0);
}
-static void aom_highbd_filter_block1d16_h8_avx2(
- const uint16_t *src, ptrdiff_t src_pitch, uint16_t *dst,
- ptrdiff_t dst_pitch, uint32_t height, const int16_t *filter, int bd) {
- filter_block_width16_horiz(src, src_pitch, write_16x1_pixels, dst, dst_pitch,
- height, filter, bd);
-}
-
+// -----------------------------------------------------------------------------
// 2-tap horizontal filtering
static INLINE void pack_2t_filter(const int16_t *filter, __m256i *f) {
@@ -493,16 +450,6 @@ static INLINE void filter_16_2t_pixels(const __m256i *sig, const __m256i *f,
*y1 = _mm256_srai_epi32(x1, CONV8_ROUNDING_BITS);
}
-static INLINE void filter_8x2_2t_pixels(const __m256i *sig, const __m256i *f,
- __m256i *y0, __m256i *y1) {
- filter_16_2t_pixels(sig, f, y0, y1);
-}
-
-static INLINE void filter_16x1_2t_pixels(const __m256i *sig, const __m256i *f,
- __m256i *y0, __m256i *y1) {
- filter_16_2t_pixels(sig, f, y0, y1);
-}
-
static INLINE void filter_8x1_2t_pixels(const __m256i *sig, const __m256i *f,
__m256i *y0) {
const __m256i rounding = _mm256_set1_epi32(1 << (CONV8_ROUNDING_BITS - 1));
@@ -511,10 +458,9 @@ static INLINE void filter_8x1_2t_pixels(const __m256i *sig, const __m256i *f,
*y0 = _mm256_srai_epi32(x0, CONV8_ROUNDING_BITS);
}
-static void filter_block_width8_2t_horiz(
- const uint16_t *src_ptr, ptrdiff_t src_pitch, const WritePixels write_8x1,
- const WritePixels write_8x2, uint16_t *dst_ptr, ptrdiff_t dst_pitch,
- uint32_t height, const int16_t *filter, int bd) {
+static void aom_highbd_filter_block1d8_h2_avx2(
+ const uint16_t *src_ptr, ptrdiff_t src_pitch, uint16_t *dst_ptr,
+ ptrdiff_t dst_pitch, uint32_t height, const int16_t *filter, int bd) {
__m256i signal[2], res0, res1;
const __m256i max = _mm256_set1_epi16((1 << bd) - 1);
@@ -524,8 +470,8 @@ static void filter_block_width8_2t_horiz(
src_ptr -= 3;
do {
pack_8x2_2t_pixels(src_ptr, src_pitch, signal);
- filter_8x2_2t_pixels(signal, &ff, &res0, &res1);
- write_8x2(&res0, &res1, &max, dst_ptr, dst_pitch);
+ filter_16_2t_pixels(signal, &ff, &res0, &res1);
+ store_8x2_pixels(&res0, &res1, &max, dst_ptr, dst_pitch);
height -= 2;
src_ptr += src_pitch << 1;
dst_ptr += dst_pitch << 1;
@@ -534,24 +480,13 @@ static void filter_block_width8_2t_horiz(
if (height > 0) {
pack_8x1_2t_pixels(src_ptr, signal);
filter_8x1_2t_pixels(signal, &ff, &res0);
- write_8x1(&res0, &res1, &max, dst_ptr, dst_pitch);
+ store_8x1_pixels(&res0, &max, dst_ptr);
}
}
-static void aom_highbd_filter_block1d8_h2_avx2(
- const uint16_t *src, ptrdiff_t src_pitch, uint16_t *dst,
+static void aom_highbd_filter_block1d16_h2_avx2(
+ const uint16_t *src_ptr, ptrdiff_t src_pitch, uint16_t *dst_ptr,
ptrdiff_t dst_pitch, uint32_t height, const int16_t *filter, int bd) {
- filter_block_width8_2t_horiz(src, src_pitch, write_8x1_pixels,
- write_8x2_pixels, dst, dst_pitch, height, filter,
- bd);
-}
-
-static void filter_block_width16_2t_horiz(const uint16_t *src_ptr,
- ptrdiff_t src_pitch,
- const WritePixels write_16x1,
- uint16_t *dst_ptr,
- ptrdiff_t dst_pitch, uint32_t height,
- const int16_t *filter, int bd) {
__m256i signal[2], res0, res1;
const __m256i max = _mm256_set1_epi16((1 << bd) - 1);
@@ -561,21 +496,15 @@ static void filter_block_width16_2t_horiz(const uint16_t *src_ptr,
src_ptr -= 3;
do {
pack_16x1_2t_pixels(src_ptr, signal);
- filter_16x1_2t_pixels(signal, &ff, &res0, &res1);
- write_16x1(&res0, &res1, &max, dst_ptr, dst_pitch);
+ filter_16_2t_pixels(signal, &ff, &res0, &res1);
+ store_16x1_pixels(&res0, &res1, &max, dst_ptr);
height -= 1;
src_ptr += src_pitch;
dst_ptr += dst_pitch;
} while (height > 0);
}
-static void aom_highbd_filter_block1d16_h2_avx2(
- const uint16_t *src, ptrdiff_t src_pitch, uint16_t *dst,
- ptrdiff_t dst_pitch, uint32_t height, const int16_t *filter, int bd) {
- filter_block_width16_2t_horiz(src, src_pitch, write_16x1_pixels, dst,
- dst_pitch, height, filter, bd);
-}
-
+// -----------------------------------------------------------------------------
// Vertical Filtering
static void pack_8x9_init(const uint16_t *src, ptrdiff_t pitch, __m256i *sig) {
@@ -638,22 +567,9 @@ static INLINE void update_pixels(__m256i *sig) {
}
}
-static INLINE void write_8x1_pixels_ver(const __m256i *y0, const __m256i *y1,
- const __m256i *mask, uint16_t *dst,
- ptrdiff_t pitch) {
- (void)pitch;
- const __m128i v0 = _mm256_castsi256_si128(*y0);
- const __m128i v1 = _mm256_castsi256_si128(*y1);
- __m128i p = _mm_packus_epi32(v0, v1);
- p = _mm_min_epi16(p, _mm256_castsi256_si128(*mask));
- _mm_storeu_si128((__m128i *)dst, p);
-}
-
-static void filter_block_width8_vert(const uint16_t *src_ptr,
- ptrdiff_t src_pitch, WritePixels write_8x1,
- WritePixels write_8x2, uint16_t *dst_ptr,
- ptrdiff_t dst_pitch, uint32_t height,
- const int16_t *filter, int bd) {
+static void aom_highbd_filter_block1d8_v8_avx2(
+ const uint16_t *src_ptr, ptrdiff_t src_pitch, uint16_t *dst_ptr,
+ ptrdiff_t dst_pitch, uint32_t height, const int16_t *filter, int bd) {
__m256i signal[9], res0, res1;
const __m256i max = _mm256_set1_epi16((1 << bd) - 1);
@@ -666,27 +582,13 @@ static void filter_block_width8_vert(const uint16_t *src_ptr,
pack_8x9_pixels(src_ptr, src_pitch, signal);
filter_8x9_pixels(signal, ff, &res0, &res1);
- write_8x2(&res0, &res1, &max, dst_ptr, dst_pitch);
+ store_8x2_pixels(&res0, &res1, &max, dst_ptr, dst_pitch);
update_pixels(signal);
src_ptr += src_pitch << 1;
dst_ptr += dst_pitch << 1;
height -= 2;
- } while (height > 1);
-
- if (height > 0) {
- pack_8x9_pixels(src_ptr, src_pitch, signal);
- filter_8x9_pixels(signal, ff, &res0, &res1);
- write_8x1(&res0, &res1, &max, dst_ptr, dst_pitch);
- }
-}
-
-static void aom_highbd_filter_block1d8_v8_avx2(
- const uint16_t *src, ptrdiff_t src_pitch, uint16_t *dst,
- ptrdiff_t dst_pitch, uint32_t height, const int16_t *filter, int bd) {
- filter_block_width8_vert(src, src_pitch, write_8x1_pixels_ver,
- write_8x2_pixels, dst, dst_pitch, height, filter,
- bd);
+ } while (height > 0);
}
static void pack_16x9_init(const uint16_t *src, ptrdiff_t pitch, __m256i *sig) {
@@ -770,13 +672,15 @@ static INLINE void filter_16x9_pixels(const __m256i *sig, const __m256i *f,
filter_8x1_pixels(&sig[i << 2], f, &res[i]);
}
- const __m256i l0l1 = _mm256_packus_epi32(res[0], res[1]);
- const __m256i h0h1 = _mm256_packus_epi32(res[2], res[3]);
- *y0 = _mm256_permute2x128_si256(l0l1, h0h1, 0x20);
- *y1 = _mm256_permute2x128_si256(l0l1, h0h1, 0x31);
+ {
+ const __m256i l0l1 = _mm256_packus_epi32(res[0], res[1]);
+ const __m256i h0h1 = _mm256_packus_epi32(res[2], res[3]);
+ *y0 = _mm256_permute2x128_si256(l0l1, h0h1, 0x20);
+ *y1 = _mm256_permute2x128_si256(l0l1, h0h1, 0x31);
+ }
}
-static INLINE void write_16x2_pixels(const __m256i *y0, const __m256i *y1,
+static INLINE void store_16x2_pixels(const __m256i *y0, const __m256i *y1,
const __m256i *mask, uint16_t *dst,
ptrdiff_t pitch) {
__m256i p = _mm256_min_epi16(*y0, *mask);
@@ -785,26 +689,14 @@ static INLINE void write_16x2_pixels(const __m256i *y0, const __m256i *y1,
_mm256_storeu_si256((__m256i *)(dst + pitch), p);
}
-static INLINE void write_16x1_pixels_ver(const __m256i *y0, const __m256i *y1,
- const __m256i *mask, uint16_t *dst,
- ptrdiff_t pitch) {
- (void)y1;
- (void)pitch;
- const __m256i p = _mm256_min_epi16(*y0, *mask);
- _mm256_storeu_si256((__m256i *)dst, p);
-}
-
static void update_16x9_pixels(__m256i *sig) {
update_pixels(&sig[0]);
update_pixels(&sig[8]);
}
-static void filter_block_width16_vert(const uint16_t *src_ptr,
- ptrdiff_t src_pitch,
- WritePixels write_16x1,
- WritePixels write_16x2, uint16_t *dst_ptr,
- ptrdiff_t dst_pitch, uint32_t height,
- const int16_t *filter, int bd) {
+static void aom_highbd_filter_block1d16_v8_avx2(
+ const uint16_t *src_ptr, ptrdiff_t src_pitch, uint16_t *dst_ptr,
+ ptrdiff_t dst_pitch, uint32_t height, const int16_t *filter, int bd) {
__m256i signal[17], res0, res1;
const __m256i max = _mm256_set1_epi16((1 << bd) - 1);
@@ -816,29 +708,16 @@ static void filter_block_width16_vert(const uint16_t *src_ptr,
do {
pack_16x9_pixels(src_ptr, src_pitch, signal);
filter_16x9_pixels(signal, ff, &res0, &res1);
- write_16x2(&res0, &res1, &max, dst_ptr, dst_pitch);
+ store_16x2_pixels(&res0, &res1, &max, dst_ptr, dst_pitch);
update_16x9_pixels(signal);
src_ptr += src_pitch << 1;
dst_ptr += dst_pitch << 1;
height -= 2;
- } while (height > 1);
-
- if (height > 0) {
- pack_16x9_pixels(src_ptr, src_pitch, signal);
- filter_16x9_pixels(signal, ff, &res0, &res1);
- write_16x1(&res0, &res1, &max, dst_ptr, dst_pitch);
- }
-}
-
-static void aom_highbd_filter_block1d16_v8_avx2(
- const uint16_t *src, ptrdiff_t src_pitch, uint16_t *dst,
- ptrdiff_t dst_pitch, uint32_t height, const int16_t *filter, int bd) {
- filter_block_width16_vert(src, src_pitch, write_16x1_pixels_ver,
- write_16x2_pixels, dst, dst_pitch, height, filter,
- bd);
+ } while (height > 0);
}
+// -----------------------------------------------------------------------------
// 2-tap vertical filtering
static void pack_16x2_init(const uint16_t *src, __m256i *sig) {
@@ -859,12 +738,9 @@ static INLINE void filter_16x2_2t_pixels(const __m256i *sig, const __m256i *f,
filter_16_2t_pixels(sig, f, y0, y1);
}
-static void filter_block_width16_2t_vert(const uint16_t *src_ptr,
- ptrdiff_t src_pitch,
- WritePixels write_16x1,
- uint16_t *dst_ptr, ptrdiff_t dst_pitch,
- uint32_t height, const int16_t *filter,
- int bd) {
+static void aom_highbd_filter_block1d16_v2_avx2(
+ const uint16_t *src_ptr, ptrdiff_t src_pitch, uint16_t *dst_ptr,
+ ptrdiff_t dst_pitch, uint32_t height, const int16_t *filter, int bd) {
__m256i signal[3], res0, res1;
const __m256i max = _mm256_set1_epi16((1 << bd) - 1);
__m256i ff;
@@ -875,7 +751,7 @@ static void filter_block_width16_2t_vert(const uint16_t *src_ptr,
do {
pack_16x2_2t_pixels(src_ptr, src_pitch, signal);
filter_16x2_2t_pixels(signal, &ff, &res0, &res1);
- write_16x1(&res0, &res1, &max, dst_ptr, dst_pitch);
+ store_16x1_pixels(&res0, &res1, &max, dst_ptr);
src_ptr += src_pitch;
dst_ptr += dst_pitch;
@@ -883,13 +759,6 @@ static void filter_block_width16_2t_vert(const uint16_t *src_ptr,
} while (height > 0);
}
-static void aom_highbd_filter_block1d16_v2_avx2(
- const uint16_t *src, ptrdiff_t src_pitch, uint16_t *dst,
- ptrdiff_t dst_pitch, uint32_t height, const int16_t *filter, int bd) {
- filter_block_width16_2t_vert(src, src_pitch, write_16x1_pixels, dst,
- dst_pitch, height, filter, bd);
-}
-
static INLINE void pack_8x1_2t_filter(const int16_t *filter, __m128i *f) {
const __m128i h = _mm_loadu_si128((const __m128i *)filter);
const __m128i p = _mm_set1_epi32(0x09080706);
@@ -920,22 +789,16 @@ static INLINE void filter_8_2t_pixels(const __m128i *sig, const __m128i *f,
*y1 = _mm_srai_epi32(x1, CONV8_ROUNDING_BITS);
}
-static void write_8x1_2t_pixels_ver(const __m128i *y0, const __m128i *y1,
- const __m128i *mask, uint16_t *dst) {
+static INLINE void store_8x1_2t_pixels_ver(const __m128i *y0, const __m128i *y1,
+ const __m128i *mask, uint16_t *dst) {
__m128i res = _mm_packus_epi32(*y0, *y1);
res = _mm_min_epi16(res, *mask);
_mm_storeu_si128((__m128i *)dst, res);
}
-typedef void (*Write8Pixels)(const __m128i *y0, const __m128i *y1,
- const __m128i *mask, uint16_t *dst);
-
-static void filter_block_width8_2t_vert(const uint16_t *src_ptr,
- ptrdiff_t src_pitch,
- Write8Pixels write_8x1,
- uint16_t *dst_ptr, ptrdiff_t dst_pitch,
- uint32_t height, const int16_t *filter,
- int bd) {
+static void aom_highbd_filter_block1d8_v2_avx2(
+ const uint16_t *src_ptr, ptrdiff_t src_pitch, uint16_t *dst_ptr,
+ ptrdiff_t dst_pitch, uint32_t height, const int16_t *filter, int bd) {
__m128i signal[3], res0, res1;
const __m128i max = _mm_set1_epi16((1 << bd) - 1);
__m128i ff;
@@ -946,7 +809,7 @@ static void filter_block_width8_2t_vert(const uint16_t *src_ptr,
do {
pack_8x2_2t_pixels_ver(src_ptr, src_pitch, signal);
filter_8_2t_pixels(signal, &ff, &res0, &res1);
- write_8x1(&res0, &res1, &max, dst_ptr);
+ store_8x1_2t_pixels_ver(&res0, &res1, &max, dst_ptr);
src_ptr += src_pitch;
dst_ptr += dst_pitch;
@@ -954,20 +817,10 @@ static void filter_block_width8_2t_vert(const uint16_t *src_ptr,
} while (height > 0);
}
-static void aom_highbd_filter_block1d8_v2_avx2(
- const uint16_t *src, ptrdiff_t src_pitch, uint16_t *dst,
- ptrdiff_t dst_pitch, uint32_t height, const int16_t *filter, int bd) {
- filter_block_width8_2t_vert(src, src_pitch, write_8x1_2t_pixels_ver, dst,
- dst_pitch, height, filter, bd);
-}
-
// Calculation with averaging of the input pixels
-static void write_8x1_avg_pixels(const __m256i *y0, const __m256i *y1,
- const __m256i *mask, uint16_t *dst,
- ptrdiff_t pitch) {
- (void)y1;
- (void)pitch;
+static INLINE void store_8x1_avg_pixels(const __m256i *y0, const __m256i *mask,
+ uint16_t *dst) {
const __m128i a0 = _mm256_castsi256_si128(*y0);
const __m128i a1 = _mm256_extractf128_si256(*y0, 1);
__m128i res = _mm_packus_epi32(a0, a1);
@@ -977,9 +830,9 @@ static void write_8x1_avg_pixels(const __m256i *y0, const __m256i *y1,
_mm_storeu_si128((__m128i *)dst, res);
}
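The *_avg_* store variants blend the filtered result with the pixels already in dst using the hardware rounding average; per lane, _mm_avg_epu16 and _mm256_avg_epu16 compute:

#include <stdint.h>

/* Per-lane model of _mm_avg_epu16 / _mm256_avg_epu16: unsigned average,
 * rounded up, computed without overflow via a 32-bit intermediate. */
static uint16_t avg_round(uint16_t a, uint16_t b) {
  return (uint16_t)(((uint32_t)a + b + 1) >> 1);
}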
-static void write_8x2_avg_pixels(const __m256i *y0, const __m256i *y1,
- const __m256i *mask, uint16_t *dst,
- ptrdiff_t pitch) {
+static INLINE void store_8x2_avg_pixels(const __m256i *y0, const __m256i *y1,
+ const __m256i *mask, uint16_t *dst,
+ ptrdiff_t pitch) {
__m256i a = _mm256_packus_epi32(*y0, *y1);
const __m128i pix0 = _mm_loadu_si128((const __m128i *)dst);
const __m128i pix1 = _mm_loadu_si128((const __m128i *)(dst + pitch));
@@ -991,10 +844,8 @@ static void write_8x2_avg_pixels(const __m256i *y0, const __m256i *y1,
_mm_storeu_si128((__m128i *)(dst + pitch), _mm256_extractf128_si256(a, 1));
}
-static void write_16x1_avg_pixels(const __m256i *y0, const __m256i *y1,
- const __m256i *mask, uint16_t *dst,
- ptrdiff_t pitch) {
- (void)pitch;
+static INLINE void store_16x1_avg_pixels(const __m256i *y0, const __m256i *y1,
+ const __m256i *mask, uint16_t *dst) {
__m256i a = _mm256_packus_epi32(*y0, *y1);
const __m256i pix = _mm256_loadu_si256((const __m256i *)dst);
a = _mm256_min_epi16(a, *mask);
@@ -1002,21 +853,7 @@ static void write_16x1_avg_pixels(const __m256i *y0, const __m256i *y1,
_mm256_storeu_si256((__m256i *)dst, a);
}
-static INLINE void write_8x1_avg_pixels_ver(const __m256i *y0,
- const __m256i *y1,
- const __m256i *mask, uint16_t *dst,
- ptrdiff_t pitch) {
- (void)pitch;
- const __m128i v0 = _mm256_castsi256_si128(*y0);
- const __m128i v1 = _mm256_castsi256_si128(*y1);
- __m128i p = _mm_packus_epi32(v0, v1);
- const __m128i pix = _mm_loadu_si128((const __m128i *)dst);
- p = _mm_min_epi16(p, _mm256_castsi256_si128(*mask));
- p = _mm_avg_epu16(p, pix);
- _mm_storeu_si128((__m128i *)dst, p);
-}
-
-static INLINE void write_16x2_avg_pixels(const __m256i *y0, const __m256i *y1,
+static INLINE void store_16x2_avg_pixels(const __m256i *y0, const __m256i *y1,
const __m256i *mask, uint16_t *dst,
ptrdiff_t pitch) {
const __m256i pix0 = _mm256_loadu_si256((const __m256i *)dst);
@@ -1030,20 +867,10 @@ static INLINE void write_16x2_avg_pixels(const __m256i *y0, const __m256i *y1,
_mm256_storeu_si256((__m256i *)(dst + pitch), p);
}
-static INLINE void write_16x1_avg_pixels_ver(const __m256i *y0,
- const __m256i *y1,
- const __m256i *mask, uint16_t *dst,
- ptrdiff_t pitch) {
- (void)y1;
- (void)pitch;
- __m256i p = _mm256_min_epi16(*y0, *mask);
- const __m256i pix = _mm256_loadu_si256((const __m256i *)dst);
- p = _mm256_avg_epu16(p, pix);
- _mm256_storeu_si256((__m256i *)dst, p);
-}
-
-static void write_8x1_2t_avg_pixels_ver(const __m128i *y0, const __m128i *y1,
- const __m128i *mask, uint16_t *dst) {
+static INLINE void store_8x1_2t_avg_pixels_ver(const __m128i *y0,
+ const __m128i *y1,
+ const __m128i *mask,
+ uint16_t *dst) {
__m128i res = _mm_packus_epi32(*y0, *y1);
const __m128i pix = _mm_loadu_si128((const __m128i *)dst);
res = _mm_min_epi16(res, *mask);
@@ -1052,96 +879,229 @@ static void write_8x1_2t_avg_pixels_ver(const __m128i *y0, const __m128i *y1,
}
static void aom_highbd_filter_block1d8_h8_avg_avx2(
- const uint16_t *src, ptrdiff_t src_pitch, uint16_t *dst,
+ const uint16_t *src_ptr, ptrdiff_t src_pitch, uint16_t *dst_ptr,
ptrdiff_t dst_pitch, uint32_t height, const int16_t *filter, int bd) {
- filter_block_width8_horiz(src, src_pitch, write_8x1_avg_pixels,
- write_8x2_avg_pixels, dst, dst_pitch, height,
- filter, bd);
+ __m256i signal[8], res0, res1;
+ const __m256i max = _mm256_set1_epi16((1 << bd) - 1);
+
+ __m256i ff[4];
+ pack_filters(filter, ff);
+
+ src_ptr -= 3;
+ do {
+ pack_8x2_pixels(src_ptr, src_pitch, signal);
+ filter_8x1_pixels(signal, ff, &res0);
+ filter_8x1_pixels(&signal[4], ff, &res1);
+ store_8x2_avg_pixels(&res0, &res1, &max, dst_ptr, dst_pitch);
+ height -= 2;
+ src_ptr += src_pitch << 1;
+ dst_ptr += dst_pitch << 1;
+ } while (height > 1);
+
+ if (height > 0) {
+ pack_8x1_pixels(src_ptr, signal);
+ filter_8x1_pixels(signal, ff, &res0);
+ store_8x1_avg_pixels(&res0, &max, dst_ptr);
+ }
}
static void aom_highbd_filter_block1d16_h8_avg_avx2(
- const uint16_t *src, ptrdiff_t src_pitch, uint16_t *dst,
+ const uint16_t *src_ptr, ptrdiff_t src_pitch, uint16_t *dst_ptr,
ptrdiff_t dst_pitch, uint32_t height, const int16_t *filter, int bd) {
- filter_block_width16_horiz(src, src_pitch, write_16x1_avg_pixels, dst,
- dst_pitch, height, filter, bd);
+ __m256i signal[8], res0, res1;
+ const __m256i max = _mm256_set1_epi16((1 << bd) - 1);
+
+ __m256i ff[4];
+ pack_filters(filter, ff);
+
+ src_ptr -= 3;
+ do {
+ pack_16x1_pixels(src_ptr, signal);
+ filter_8x1_pixels(signal, ff, &res0);
+ filter_8x1_pixels(&signal[4], ff, &res1);
+ store_16x1_avg_pixels(&res0, &res1, &max, dst_ptr);
+ height -= 1;
+ src_ptr += src_pitch;
+ dst_ptr += dst_pitch;
+ } while (height > 0);
}
static void aom_highbd_filter_block1d8_v8_avg_avx2(
- const uint16_t *src, ptrdiff_t src_pitch, uint16_t *dst,
+ const uint16_t *src_ptr, ptrdiff_t src_pitch, uint16_t *dst_ptr,
ptrdiff_t dst_pitch, uint32_t height, const int16_t *filter, int bd) {
- filter_block_width8_vert(src, src_pitch, write_8x1_avg_pixels_ver,
- write_8x2_avg_pixels, dst, dst_pitch, height, filter,
- bd);
+ __m256i signal[9], res0, res1;
+ const __m256i max = _mm256_set1_epi16((1 << bd) - 1);
+
+ __m256i ff[4];
+ pack_filters(filter, ff);
+
+ pack_8x9_init(src_ptr, src_pitch, signal);
+
+ do {
+ pack_8x9_pixels(src_ptr, src_pitch, signal);
+
+ filter_8x9_pixels(signal, ff, &res0, &res1);
+ store_8x2_avg_pixels(&res0, &res1, &max, dst_ptr, dst_pitch);
+ update_pixels(signal);
+
+ src_ptr += src_pitch << 1;
+ dst_ptr += dst_pitch << 1;
+ height -= 2;
+ } while (height > 0);
}
static void aom_highbd_filter_block1d16_v8_avg_avx2(
- const uint16_t *src, ptrdiff_t src_pitch, uint16_t *dst,
+ const uint16_t *src_ptr, ptrdiff_t src_pitch, uint16_t *dst_ptr,
ptrdiff_t dst_pitch, uint32_t height, const int16_t *filter, int bd) {
- filter_block_width16_vert(src, src_pitch, write_16x1_avg_pixels_ver,
- write_16x2_avg_pixels, dst, dst_pitch, height,
- filter, bd);
-}
+ __m256i signal[17], res0, res1;
+ const __m256i max = _mm256_set1_epi16((1 << bd) - 1);
+
+ __m256i ff[4];
+ pack_filters(filter, ff);
+
+ pack_16x9_init(src_ptr, src_pitch, signal);
+
+ do {
+ pack_16x9_pixels(src_ptr, src_pitch, signal);
+ filter_16x9_pixels(signal, ff, &res0, &res1);
+ store_16x2_avg_pixels(&res0, &res1, &max, dst_ptr, dst_pitch);
+ update_16x9_pixels(signal);
-// 2-tap averaging
+ src_ptr += src_pitch << 1;
+ dst_ptr += dst_pitch << 1;
+ height -= 2;
+ } while (height > 0);
+}
static void aom_highbd_filter_block1d8_h2_avg_avx2(
- const uint16_t *src, ptrdiff_t src_pitch, uint16_t *dst,
+ const uint16_t *src_ptr, ptrdiff_t src_pitch, uint16_t *dst_ptr,
ptrdiff_t dst_pitch, uint32_t height, const int16_t *filter, int bd) {
- filter_block_width8_2t_horiz(src, src_pitch, write_8x1_avg_pixels,
- write_8x2_avg_pixels, dst, dst_pitch, height,
- filter, bd);
+ __m256i signal[2], res0, res1;
+ const __m256i max = _mm256_set1_epi16((1 << bd) - 1);
+
+ __m256i ff;
+ pack_2t_filter(filter, &ff);
+
+ src_ptr -= 3;
+ do {
+ pack_8x2_2t_pixels(src_ptr, src_pitch, signal);
+ filter_16_2t_pixels(signal, &ff, &res0, &res1);
+ store_8x2_avg_pixels(&res0, &res1, &max, dst_ptr, dst_pitch);
+ height -= 2;
+ src_ptr += src_pitch << 1;
+ dst_ptr += dst_pitch << 1;
+ } while (height > 1);
+
+ if (height > 0) {
+ pack_8x1_2t_pixels(src_ptr, signal);
+ filter_8x1_2t_pixels(signal, &ff, &res0);
+ store_8x1_avg_pixels(&res0, &max, dst_ptr);
+ }
}
static void aom_highbd_filter_block1d16_h2_avg_avx2(
- const uint16_t *src, ptrdiff_t src_pitch, uint16_t *dst,
+ const uint16_t *src_ptr, ptrdiff_t src_pitch, uint16_t *dst_ptr,
ptrdiff_t dst_pitch, uint32_t height, const int16_t *filter, int bd) {
- filter_block_width16_2t_horiz(src, src_pitch, write_16x1_avg_pixels, dst,
- dst_pitch, height, filter, bd);
+ __m256i signal[2], res0, res1;
+ const __m256i max = _mm256_set1_epi16((1 << bd) - 1);
+
+ __m256i ff;
+ pack_2t_filter(filter, &ff);
+
+ src_ptr -= 3;
+ do {
+ pack_16x1_2t_pixels(src_ptr, signal);
+ filter_16_2t_pixels(signal, &ff, &res0, &res1);
+ store_16x1_avg_pixels(&res0, &res1, &max, dst_ptr);
+ height -= 1;
+ src_ptr += src_pitch;
+ dst_ptr += dst_pitch;
+ } while (height > 0);
}
static void aom_highbd_filter_block1d16_v2_avg_avx2(
- const uint16_t *src, ptrdiff_t src_pitch, uint16_t *dst,
+ const uint16_t *src_ptr, ptrdiff_t src_pitch, uint16_t *dst_ptr,
ptrdiff_t dst_pitch, uint32_t height, const int16_t *filter, int bd) {
- filter_block_width16_2t_vert(src, src_pitch, write_16x1_avg_pixels, dst,
- dst_pitch, height, filter, bd);
+ __m256i signal[3], res0, res1;
+ const __m256i max = _mm256_set1_epi16((1 << bd) - 1);
+ __m256i ff;
+
+ pack_2t_filter(filter, &ff);
+ pack_16x2_init(src_ptr, signal);
+
+ do {
+ pack_16x2_2t_pixels(src_ptr, src_pitch, signal);
+ filter_16x2_2t_pixels(signal, &ff, &res0, &res1);
+ store_16x1_avg_pixels(&res0, &res1, &max, dst_ptr);
+
+ src_ptr += src_pitch;
+ dst_ptr += dst_pitch;
+ height -= 1;
+ } while (height > 0);
}
static void aom_highbd_filter_block1d8_v2_avg_avx2(
- const uint16_t *src, ptrdiff_t src_pitch, uint16_t *dst,
+ const uint16_t *src_ptr, ptrdiff_t src_pitch, uint16_t *dst_ptr,
ptrdiff_t dst_pitch, uint32_t height, const int16_t *filter, int bd) {
- filter_block_width8_2t_vert(src, src_pitch, write_8x1_2t_avg_pixels_ver, dst,
- dst_pitch, height, filter, bd);
-}
+ __m128i signal[3], res0, res1;
+ const __m128i max = _mm_set1_epi16((1 << bd) - 1);
+ __m128i ff;
-typedef void HbdFilter1dFunc(const uint16_t *, ptrdiff_t, uint16_t *, ptrdiff_t,
- uint32_t, const int16_t *, int);
+ pack_8x1_2t_filter(filter, &ff);
+ pack_8x2_init(src_ptr, signal);
-#define HIGHBD_FUNC(width, dir, avg, opt) \
- aom_highbd_filter_block1d##width##_##dir##_##avg##opt
+ do {
+ pack_8x2_2t_pixels_ver(src_ptr, src_pitch, signal);
+ filter_8_2t_pixels(signal, &ff, &res0, &res1);
+ store_8x1_2t_avg_pixels_ver(&res0, &res1, &max, dst_ptr);
-HbdFilter1dFunc HIGHBD_FUNC(4, h8, , sse2);
-HbdFilter1dFunc HIGHBD_FUNC(4, h2, , sse2);
-HbdFilter1dFunc HIGHBD_FUNC(4, v8, , sse2);
-HbdFilter1dFunc HIGHBD_FUNC(4, v2, , sse2);
+ src_ptr += src_pitch;
+ dst_ptr += dst_pitch;
+ height -= 1;
+ } while (height > 0);
+}
-#define aom_highbd_filter_block1d4_h8_avx2 HIGHBD_FUNC(4, h8, , sse2)
-#define aom_highbd_filter_block1d4_h2_avx2 HIGHBD_FUNC(4, h2, , sse2)
-#define aom_highbd_filter_block1d4_v8_avx2 HIGHBD_FUNC(4, v8, , sse2)
-#define aom_highbd_filter_block1d4_v2_avx2 HIGHBD_FUNC(4, v2, , sse2)
+void aom_highbd_filter_block1d4_h8_sse2(const uint16_t *, ptrdiff_t, uint16_t *,
+ ptrdiff_t, uint32_t, const int16_t *,
+ int);
+void aom_highbd_filter_block1d4_h2_sse2(const uint16_t *, ptrdiff_t, uint16_t *,
+ ptrdiff_t, uint32_t, const int16_t *,
+ int);
+void aom_highbd_filter_block1d4_v8_sse2(const uint16_t *, ptrdiff_t, uint16_t *,
+ ptrdiff_t, uint32_t, const int16_t *,
+ int);
+void aom_highbd_filter_block1d4_v2_sse2(const uint16_t *, ptrdiff_t, uint16_t *,
+ ptrdiff_t, uint32_t, const int16_t *,
+ int);
+#define aom_highbd_filter_block1d4_h8_avx2 aom_highbd_filter_block1d4_h8_sse2
+#define aom_highbd_filter_block1d4_h2_avx2 aom_highbd_filter_block1d4_h2_sse2
+#define aom_highbd_filter_block1d4_v8_avx2 aom_highbd_filter_block1d4_v8_sse2
+#define aom_highbd_filter_block1d4_v2_avx2 aom_highbd_filter_block1d4_v2_sse2
HIGH_FUN_CONV_1D(horiz, x_step_q4, filter_x, h, src, , avx2);
HIGH_FUN_CONV_1D(vert, y_step_q4, filter_y, v, src - src_stride * 3, , avx2);
HIGH_FUN_CONV_2D(, avx2);
-HbdFilter1dFunc HIGHBD_FUNC(4, h8, avg_, sse2);
-HbdFilter1dFunc HIGHBD_FUNC(4, h2, avg_, sse2);
-HbdFilter1dFunc HIGHBD_FUNC(4, v8, avg_, sse2);
-HbdFilter1dFunc HIGHBD_FUNC(4, v2, avg_, sse2);
-
-#define aom_highbd_filter_block1d4_h8_avg_avx2 HIGHBD_FUNC(4, h8, avg_, sse2)
-#define aom_highbd_filter_block1d4_h2_avg_avx2 HIGHBD_FUNC(4, h2, avg_, sse2)
-#define aom_highbd_filter_block1d4_v8_avg_avx2 HIGHBD_FUNC(4, v8, avg_, sse2)
-#define aom_highbd_filter_block1d4_v2_avg_avx2 HIGHBD_FUNC(4, v2, avg_, sse2)
+void aom_highbd_filter_block1d4_h8_avg_sse2(const uint16_t *, ptrdiff_t,
+ uint16_t *, ptrdiff_t, uint32_t,
+ const int16_t *, int);
+void aom_highbd_filter_block1d4_h2_avg_sse2(const uint16_t *, ptrdiff_t,
+ uint16_t *, ptrdiff_t, uint32_t,
+ const int16_t *, int);
+void aom_highbd_filter_block1d4_v8_avg_sse2(const uint16_t *, ptrdiff_t,
+ uint16_t *, ptrdiff_t, uint32_t,
+ const int16_t *, int);
+void aom_highbd_filter_block1d4_v2_avg_sse2(const uint16_t *, ptrdiff_t,
+ uint16_t *, ptrdiff_t, uint32_t,
+ const int16_t *, int);
+#define aom_highbd_filter_block1d4_h8_avg_avx2 \
+ aom_highbd_filter_block1d4_h8_avg_sse2
+#define aom_highbd_filter_block1d4_h2_avg_avx2 \
+ aom_highbd_filter_block1d4_h2_avg_sse2
+#define aom_highbd_filter_block1d4_v8_avg_avx2 \
+ aom_highbd_filter_block1d4_v8_avg_sse2
+#define aom_highbd_filter_block1d4_v2_avg_avx2 \
+ aom_highbd_filter_block1d4_v2_avg_sse2
HIGH_FUN_CONV_1D(avg_horiz, x_step_q4, filter_x, h, src, avg_, avx2);
HIGH_FUN_CONV_1D(avg_vert, y_step_q4, filter_y, v, src - src_stride * 3, avg_,
diff --git a/third_party/aom/aom_dsp/x86/inv_txfm_avx2.c b/third_party/aom/aom_dsp/x86/inv_txfm_avx2.c
new file mode 100644
index 0000000000..a9d6a127ca
--- /dev/null
+++ b/third_party/aom/aom_dsp/x86/inv_txfm_avx2.c
@@ -0,0 +1,1238 @@
+/*
+ * Copyright (c) 2017, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#include <immintrin.h>
+
+#include "./aom_dsp_rtcd.h"
+#include "aom_dsp/inv_txfm.h"
+#include "aom_dsp/x86/inv_txfm_common_avx2.h"
+#include "aom_dsp/x86/txfm_common_avx2.h"
+
+void aom_idct16x16_256_add_avx2(const tran_low_t *input, uint8_t *dest,
+ int stride) {
+ __m256i in[16];
+ load_buffer_16x16(input, in);
+ mm256_transpose_16x16(in, in);
+ av1_idct16_avx2(in);
+ mm256_transpose_16x16(in, in);
+ av1_idct16_avx2(in);
+ store_buffer_16xN(in, stride, dest, 16);
+}
+
+static INLINE void transpose_col_to_row_nz4x4(__m256i *in /*in[4]*/) {
+ const __m256i u0 = _mm256_unpacklo_epi16(in[0], in[1]);
+ const __m256i u1 = _mm256_unpacklo_epi16(in[2], in[3]);
+ const __m256i v0 = _mm256_unpacklo_epi32(u0, u1);
+ const __m256i v1 = _mm256_unpackhi_epi32(u0, u1);
+ in[0] = _mm256_permute4x64_epi64(v0, 0xA8);
+ in[1] = _mm256_permute4x64_epi64(v0, 0xA9);
+ in[2] = _mm256_permute4x64_epi64(v1, 0xA8);
+ in[3] = _mm256_permute4x64_epi64(v1, 0xA9);
+}
+
+#define MM256_SHUFFLE_EPI64(x0, x1, imm8) \
+ _mm256_castpd_si256(_mm256_shuffle_pd(_mm256_castsi256_pd(x0), \
+ _mm256_castsi256_pd(x1), imm8))
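MM256_SHUFFLE_EPI64 borrows the double-precision shuffle to move 64-bit integer elements. Viewed as four 64-bit output slots, even slots come from x0, odd slots from x1, and bit k of imm8 selects the high (1) or low (0) element of the corresponding 128-bit lane; a sketch following the _mm256_shuffle_pd selection pattern:

#include <stdint.h>

/* Model of MM256_SHUFFLE_EPI64(a, b, imm8) on four 64-bit elements. */
static void shuffle_epi64_model(const uint64_t a[4], const uint64_t b[4],
                                int imm8, uint64_t out[4]) {
  out[0] = (imm8 & 1) ? a[1] : a[0];
  out[1] = (imm8 & 2) ? b[1] : b[0];
  out[2] = (imm8 & 4) ? a[3] : a[2];
  out[3] = (imm8 & 8) ? b[3] : b[2];
}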
+
+static INLINE void transpose_col_to_row_nz4x16(__m256i *in /*in[16]*/) {
+ int i;
+ for (i = 0; i < 16; i += 4) {
+ transpose_col_to_row_nz4x4(&in[i]);
+ }
+
+ for (i = 0; i < 4; ++i) {
+ in[i] = MM256_SHUFFLE_EPI64(in[i], in[i + 4], 0);
+ in[i + 8] = MM256_SHUFFLE_EPI64(in[i + 8], in[i + 12], 0);
+ }
+
+ for (i = 0; i < 4; ++i) {
+ in[i] = _mm256_permute2x128_si256(in[i], in[i + 8], 0x20);
+ }
+}
+
+// Coefficients 0-7 before the final butterfly
+static INLINE void idct16_10_first_half(const __m256i *in, __m256i *out) {
+ const __m256i c2p28 = pair256_set_epi16(2 * cospi_28_64, 2 * cospi_28_64);
+ const __m256i c2p04 = pair256_set_epi16(2 * cospi_4_64, 2 * cospi_4_64);
+ const __m256i v4 = _mm256_mulhrs_epi16(in[2], c2p28);
+ const __m256i v7 = _mm256_mulhrs_epi16(in[2], c2p04);
+
+ const __m256i c2p16 = pair256_set_epi16(2 * cospi_16_64, 2 * cospi_16_64);
+ const __m256i v0 = _mm256_mulhrs_epi16(in[0], c2p16);
+ const __m256i v1 = v0;
+
+ const __m256i cospi_p16_p16 = _mm256_set1_epi16((int16_t)cospi_16_64);
+ const __m256i cospi_p16_m16 = pair256_set_epi16(cospi_16_64, -cospi_16_64);
+ __m256i v5, v6;
+ unpack_butter_fly(&v7, &v4, &cospi_p16_m16, &cospi_p16_p16, &v5, &v6);
+
+ out[0] = _mm256_add_epi16(v0, v7);
+ out[1] = _mm256_add_epi16(v1, v6);
+ out[2] = _mm256_add_epi16(v1, v5);
+ out[3] = _mm256_add_epi16(v0, v4);
+ out[4] = _mm256_sub_epi16(v0, v4);
+ out[5] = _mm256_sub_epi16(v1, v5);
+ out[6] = _mm256_sub_epi16(v1, v6);
+ out[7] = _mm256_sub_epi16(v0, v7);
+}
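This is why the cosine constants above are doubled: _mm256_mulhrs_epi16(x, k) computes (x * k + (1 << 14)) >> 15 per lane, so passing k == 2 * cospi_N_64 yields (x * cospi_N_64 + (1 << 13)) >> 14, i.e. dct_const_round_shift, assuming DCT_CONST_BITS == 14:

#include <stdint.h>

/* Per-lane model of _mm256_mulhrs_epi16: high half of the 2x-scaled
 * product, rounded to nearest. */
static int16_t mulhrs_model(int16_t x, int16_t k) {
  return (int16_t)(((int32_t)x * k + (1 << 14)) >> 15);
}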
+
+// Coefficients 8-15 before the final butterfly
+static INLINE void idct16_10_second_half(const __m256i *in, __m256i *out) {
+ const __m256i c2p30 = pair256_set_epi16(2 * cospi_30_64, 2 * cospi_30_64);
+ const __m256i c2p02 = pair256_set_epi16(2 * cospi_2_64, 2 * cospi_2_64);
+ const __m256i t0 = _mm256_mulhrs_epi16(in[1], c2p30);
+ const __m256i t7 = _mm256_mulhrs_epi16(in[1], c2p02);
+
+ const __m256i c2m26 = pair256_set_epi16(-2 * cospi_26_64, -2 * cospi_26_64);
+ const __m256i c2p06 = pair256_set_epi16(2 * cospi_6_64, 2 * cospi_6_64);
+ const __m256i t3 = _mm256_mulhrs_epi16(in[3], c2m26);
+ const __m256i t4 = _mm256_mulhrs_epi16(in[3], c2p06);
+
+ const __m256i cospi_m08_p24 = pair256_set_epi16(-cospi_8_64, cospi_24_64);
+ const __m256i cospi_p24_p08 = pair256_set_epi16(cospi_24_64, cospi_8_64);
+ const __m256i cospi_m24_m08 = pair256_set_epi16(-cospi_24_64, -cospi_8_64);
+
+ __m256i t1, t2, t5, t6;
+ unpack_butter_fly(&t0, &t7, &cospi_m08_p24, &cospi_p24_p08, &t1, &t6);
+ unpack_butter_fly(&t3, &t4, &cospi_m24_m08, &cospi_m08_p24, &t2, &t5);
+
+ out[0] = _mm256_add_epi16(t0, t3);
+ out[1] = _mm256_add_epi16(t1, t2);
+ out[6] = _mm256_add_epi16(t6, t5);
+ out[7] = _mm256_add_epi16(t7, t4);
+
+ const __m256i v2 = _mm256_sub_epi16(t1, t2);
+ const __m256i v3 = _mm256_sub_epi16(t0, t3);
+ const __m256i v4 = _mm256_sub_epi16(t7, t4);
+ const __m256i v5 = _mm256_sub_epi16(t6, t5);
+ const __m256i cospi_p16_p16 = _mm256_set1_epi16((int16_t)cospi_16_64);
+ const __m256i cospi_p16_m16 = pair256_set_epi16(cospi_16_64, -cospi_16_64);
+ unpack_butter_fly(&v5, &v2, &cospi_p16_m16, &cospi_p16_p16, &out[2], &out[5]);
+ unpack_butter_fly(&v4, &v3, &cospi_p16_m16, &cospi_p16_p16, &out[3], &out[4]);
+}
+
+static INLINE void add_sub_butterfly(const __m256i *in, __m256i *out,
+ int size) {
+ int i = 0;
+ const int num = size >> 1;
+ const int bound = size - 1;
+ while (i < num) {
+ out[i] = _mm256_add_epi16(in[i], in[bound - i]);
+ out[bound - i] = _mm256_sub_epi16(in[i], in[bound - i]);
+ i++;
+ }
+}
+
+static INLINE void idct16_10(__m256i *in /*in[16]*/) {
+ __m256i out[16];
+ idct16_10_first_half(in, out);
+ idct16_10_second_half(in, &out[8]);
+ add_sub_butterfly(out, in, 16);
+}
+
+void aom_idct16x16_10_add_avx2(const tran_low_t *input, uint8_t *dest,
+ int stride) {
+ __m256i in[16];
+
+ load_coeff(input, &in[0]);
+ load_coeff(input + 16, &in[1]);
+ load_coeff(input + 32, &in[2]);
+ load_coeff(input + 48, &in[3]);
+
+ transpose_col_to_row_nz4x4(in);
+ idct16_10(in);
+
+ transpose_col_to_row_nz4x16(in);
+ idct16_10(in);
+
+ store_buffer_16xN(in, stride, dest, 16);
+}
+
+// Note:
+// For a 16x16 int16_t matrix, transpose the first 8 columns into the
+// first 8 rows.
+// Since only the upper-left 8x8 block is non-zero, the input is the first
+// 8 rows (in[8]). After transposing, the 8 row vectors are in in[8].
+void transpose_col_to_row_nz8x8(__m256i *in /*in[8]*/) {
+ __m256i u0 = _mm256_unpacklo_epi16(in[0], in[1]);
+ __m256i u1 = _mm256_unpackhi_epi16(in[0], in[1]);
+ __m256i u2 = _mm256_unpacklo_epi16(in[2], in[3]);
+ __m256i u3 = _mm256_unpackhi_epi16(in[2], in[3]);
+
+ const __m256i v0 = _mm256_unpacklo_epi32(u0, u2);
+ const __m256i v1 = _mm256_unpackhi_epi32(u0, u2);
+ const __m256i v2 = _mm256_unpacklo_epi32(u1, u3);
+ const __m256i v3 = _mm256_unpackhi_epi32(u1, u3);
+
+ u0 = _mm256_unpacklo_epi16(in[4], in[5]);
+ u1 = _mm256_unpackhi_epi16(in[4], in[5]);
+ u2 = _mm256_unpacklo_epi16(in[6], in[7]);
+ u3 = _mm256_unpackhi_epi16(in[6], in[7]);
+
+ const __m256i v4 = _mm256_unpacklo_epi32(u0, u2);
+ const __m256i v5 = _mm256_unpackhi_epi32(u0, u2);
+ const __m256i v6 = _mm256_unpacklo_epi32(u1, u3);
+ const __m256i v7 = _mm256_unpackhi_epi32(u1, u3);
+
+ in[0] = MM256_SHUFFLE_EPI64(v0, v4, 0);
+ in[1] = MM256_SHUFFLE_EPI64(v0, v4, 3);
+ in[2] = MM256_SHUFFLE_EPI64(v1, v5, 0);
+ in[3] = MM256_SHUFFLE_EPI64(v1, v5, 3);
+ in[4] = MM256_SHUFFLE_EPI64(v2, v6, 0);
+ in[5] = MM256_SHUFFLE_EPI64(v2, v6, 3);
+ in[6] = MM256_SHUFFLE_EPI64(v3, v7, 0);
+ in[7] = MM256_SHUFFLE_EPI64(v3, v7, 3);
+}
+
+// Note:
+// For a 16x16 int16_t matrix, transpose the first 8 columns into the
+// first 8 rows.
+// Since only the left 8x16 half of the matrix is non-zero, the input is
+// all 16 rows (in[16]).
+// After transposing, the 8 row vectors are in in[8]; all others are zero.
+static INLINE void transpose_col_to_row_nz8x16(__m256i *in /*in[16]*/) {
+ transpose_col_to_row_nz8x8(in);
+ transpose_col_to_row_nz8x8(&in[8]);
+
+ int i;
+ for (i = 0; i < 8; ++i) {
+ in[i] = _mm256_permute2x128_si256(in[i], in[i + 8], 0x20);
+ }
+}
+
+static INLINE void idct16_38_first_half(const __m256i *in, __m256i *out) {
+ const __m256i c2p28 = pair256_set_epi16(2 * cospi_28_64, 2 * cospi_28_64);
+ const __m256i c2p04 = pair256_set_epi16(2 * cospi_4_64, 2 * cospi_4_64);
+ __m256i t4 = _mm256_mulhrs_epi16(in[2], c2p28);
+ __m256i t7 = _mm256_mulhrs_epi16(in[2], c2p04);
+
+ const __m256i c2m20 = pair256_set_epi16(-2 * cospi_20_64, -2 * cospi_20_64);
+ const __m256i c2p12 = pair256_set_epi16(2 * cospi_12_64, 2 * cospi_12_64);
+ __m256i t5 = _mm256_mulhrs_epi16(in[6], c2m20);
+ __m256i t6 = _mm256_mulhrs_epi16(in[6], c2p12);
+
+ const __m256i c2p16 = pair256_set_epi16(2 * cospi_16_64, 2 * cospi_16_64);
+ const __m256i c2p24 = pair256_set_epi16(2 * cospi_24_64, 2 * cospi_24_64);
+ const __m256i c2p08 = pair256_set_epi16(2 * cospi_8_64, 2 * cospi_8_64);
+ const __m256i u0 = _mm256_mulhrs_epi16(in[0], c2p16);
+ const __m256i u1 = _mm256_mulhrs_epi16(in[0], c2p16);
+ const __m256i u2 = _mm256_mulhrs_epi16(in[4], c2p24);
+ const __m256i u3 = _mm256_mulhrs_epi16(in[4], c2p08);
+
+ const __m256i u4 = _mm256_add_epi16(t4, t5);
+ const __m256i u5 = _mm256_sub_epi16(t4, t5);
+ const __m256i u6 = _mm256_sub_epi16(t7, t6);
+ const __m256i u7 = _mm256_add_epi16(t7, t6);
+
+ const __m256i t0 = _mm256_add_epi16(u0, u3);
+ const __m256i t1 = _mm256_add_epi16(u1, u2);
+ const __m256i t2 = _mm256_sub_epi16(u1, u2);
+ const __m256i t3 = _mm256_sub_epi16(u0, u3);
+
+ t4 = u4;
+ t7 = u7;
+
+ const __m256i cospi_p16_p16 = _mm256_set1_epi16((int16_t)cospi_16_64);
+ const __m256i cospi_p16_m16 = pair256_set_epi16(cospi_16_64, -cospi_16_64);
+ unpack_butter_fly(&u6, &u5, &cospi_p16_m16, &cospi_p16_p16, &t5, &t6);
+
+ out[0] = _mm256_add_epi16(t0, t7);
+ out[1] = _mm256_add_epi16(t1, t6);
+ out[2] = _mm256_add_epi16(t2, t5);
+ out[3] = _mm256_add_epi16(t3, t4);
+ out[4] = _mm256_sub_epi16(t3, t4);
+ out[5] = _mm256_sub_epi16(t2, t5);
+ out[6] = _mm256_sub_epi16(t1, t6);
+ out[7] = _mm256_sub_epi16(t0, t7);
+}
+
+static INLINE void idct16_38_second_half(const __m256i *in, __m256i *out) {
+ const __m256i c2p30 = pair256_set_epi16(2 * cospi_30_64, 2 * cospi_30_64);
+ const __m256i c2p02 = pair256_set_epi16(2 * cospi_2_64, 2 * cospi_2_64);
+ __m256i t0 = _mm256_mulhrs_epi16(in[1], c2p30);
+ __m256i t7 = _mm256_mulhrs_epi16(in[1], c2p02);
+
+ const __m256i c2m18 = pair256_set_epi16(-2 * cospi_18_64, -2 * cospi_18_64);
+ const __m256i c2p14 = pair256_set_epi16(2 * cospi_14_64, 2 * cospi_14_64);
+ __m256i t1 = _mm256_mulhrs_epi16(in[7], c2m18);
+ __m256i t6 = _mm256_mulhrs_epi16(in[7], c2p14);
+
+ const __m256i c2p22 = pair256_set_epi16(2 * cospi_22_64, 2 * cospi_22_64);
+ const __m256i c2p10 = pair256_set_epi16(2 * cospi_10_64, 2 * cospi_10_64);
+ __m256i t2 = _mm256_mulhrs_epi16(in[5], c2p22);
+ __m256i t5 = _mm256_mulhrs_epi16(in[5], c2p10);
+
+ const __m256i c2m26 = pair256_set_epi16(-2 * cospi_26_64, -2 * cospi_26_64);
+ const __m256i c2p06 = pair256_set_epi16(2 * cospi_6_64, 2 * cospi_6_64);
+ __m256i t3 = _mm256_mulhrs_epi16(in[3], c2m26);
+ __m256i t4 = _mm256_mulhrs_epi16(in[3], c2p06);
+
+ __m256i v0, v1, v2, v3, v4, v5, v6, v7;
+ v0 = _mm256_add_epi16(t0, t1);
+ v1 = _mm256_sub_epi16(t0, t1);
+ v2 = _mm256_sub_epi16(t3, t2);
+ v3 = _mm256_add_epi16(t2, t3);
+ v4 = _mm256_add_epi16(t4, t5);
+ v5 = _mm256_sub_epi16(t4, t5);
+ v6 = _mm256_sub_epi16(t7, t6);
+ v7 = _mm256_add_epi16(t6, t7);
+
+ t0 = v0;
+ t7 = v7;
+ t3 = v3;
+ t4 = v4;
+ const __m256i cospi_m08_p24 = pair256_set_epi16(-cospi_8_64, cospi_24_64);
+ const __m256i cospi_p24_p08 = pair256_set_epi16(cospi_24_64, cospi_8_64);
+ const __m256i cospi_m24_m08 = pair256_set_epi16(-cospi_24_64, -cospi_8_64);
+ unpack_butter_fly(&v1, &v6, &cospi_m08_p24, &cospi_p24_p08, &t1, &t6);
+ unpack_butter_fly(&v2, &v5, &cospi_m24_m08, &cospi_m08_p24, &t2, &t5);
+
+ v0 = _mm256_add_epi16(t0, t3);
+ v1 = _mm256_add_epi16(t1, t2);
+ v2 = _mm256_sub_epi16(t1, t2);
+ v3 = _mm256_sub_epi16(t0, t3);
+ v4 = _mm256_sub_epi16(t7, t4);
+ v5 = _mm256_sub_epi16(t6, t5);
+ v6 = _mm256_add_epi16(t6, t5);
+ v7 = _mm256_add_epi16(t7, t4);
+
+ // stage 6, (8-15)
+ out[0] = v0;
+ out[1] = v1;
+ out[6] = v6;
+ out[7] = v7;
+ const __m256i cospi_p16_p16 = _mm256_set1_epi16((int16_t)cospi_16_64);
+ const __m256i cospi_p16_m16 = pair256_set_epi16(cospi_16_64, -cospi_16_64);
+ unpack_butter_fly(&v5, &v2, &cospi_p16_m16, &cospi_p16_p16, &out[2], &out[5]);
+ unpack_butter_fly(&v4, &v3, &cospi_p16_m16, &cospi_p16_p16, &out[3], &out[4]);
+}
+
+static INLINE void idct16_38(__m256i *in /*in[16]*/) {
+ __m256i out[16];
+ idct16_38_first_half(in, out);
+ idct16_38_second_half(in, &out[8]);
+ add_sub_butterfly(out, in, 16);
+}
+
+void aom_idct16x16_38_add_avx2(const tran_low_t *input, uint8_t *dest,
+ int stride) {
+ __m256i in[16];
+
+ int i;
+ for (i = 0; i < 8; ++i) {
+ load_coeff(input + (i << 4), &in[i]);
+ }
+
+ transpose_col_to_row_nz8x8(in);
+ idct16_38(in);
+
+ transpose_col_to_row_nz8x16(in);
+ idct16_38(in);
+
+ store_buffer_16xN(in, stride, dest, 16);
+}
+
+static INLINE int calculate_dc(const tran_low_t *input) {
+ int dc = (int)dct_const_round_shift(input[0] * cospi_16_64);
+ dc = (int)dct_const_round_shift(dc * cospi_16_64);
+ dc = ROUND_POWER_OF_TWO(dc, IDCT_ROUNDING_POS);
+ return dc;
+}
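calculate_dc folds the DC-only inverse transform into two fixed-point multiplies by cospi_16_64 followed by the output rounding. Assuming DCT_CONST_BITS == 14 (the libaom convention, so dct_const_round_shift(x) == (x + (1 << 13)) >> 14), it expands to this scalar sketch:

/* Scalar expansion of calculate_dc; `pos` stands for IDCT_ROUNDING_POS. */
static int calculate_dc_model(int in0, int pos) {
  int a = (in0 * cospi_16_64 + (1 << 13)) >> 14;
  int dc = (a * cospi_16_64 + (1 << 13)) >> 14;
  return (dc + (1 << (pos - 1))) >> pos; /* ROUND_POWER_OF_TWO(dc, pos) */
}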
+
+void aom_idct16x16_1_add_avx2(const tran_low_t *input, uint8_t *dest,
+ int stride) {
+ const int dc = calculate_dc(input);
+ if (dc == 0) return;
+
+ const __m256i dc_value = _mm256_set1_epi16(dc);
+
+ int i;
+ for (i = 0; i < 16; ++i) {
+ recon_and_store(&dc_value, dest);
+ dest += stride;
+ }
+}
+
+// -----------------------------------------------------------------------------
+// 32x32 partial IDCT
+
+void aom_idct32x32_1_add_avx2(const tran_low_t *input, uint8_t *dest,
+ int stride) {
+ const int dc = calculate_dc(input);
+ if (dc == 0) return;
+
+ const __m256i dc_value = _mm256_set1_epi16(dc);
+
+ int i;
+ for (i = 0; i < 32; ++i) {
+ recon_and_store(&dc_value, dest);
+ recon_and_store(&dc_value, dest + 16);
+ dest += stride;
+ }
+}
+
+static void load_buffer_32x16(const tran_low_t *input, __m256i *in /*in[32]*/) {
+ int i;
+ for (i = 0; i < 16; ++i) {
+ load_coeff(input, &in[i]);
+ load_coeff(input + 16, &in[i + 16]);
+ input += 32;
+ }
+}
+
+// Note:
+// We extend the SSSE3 operations to AVX2. Instead of operating on __m128i
+// vectors, we operate on __m256i vectors of coefficients, so each
+// instruction processes twice as many elements.
+#define BUTTERFLY_PAIR(x0, x1, co0, co1) \
+ do { \
+ tmp0 = _mm256_madd_epi16(x0, co0); \
+ tmp1 = _mm256_madd_epi16(x1, co0); \
+ tmp2 = _mm256_madd_epi16(x0, co1); \
+ tmp3 = _mm256_madd_epi16(x1, co1); \
+ tmp0 = _mm256_add_epi32(tmp0, rounding); \
+ tmp1 = _mm256_add_epi32(tmp1, rounding); \
+ tmp2 = _mm256_add_epi32(tmp2, rounding); \
+ tmp3 = _mm256_add_epi32(tmp3, rounding); \
+ tmp0 = _mm256_srai_epi32(tmp0, DCT_CONST_BITS); \
+ tmp1 = _mm256_srai_epi32(tmp1, DCT_CONST_BITS); \
+ tmp2 = _mm256_srai_epi32(tmp2, DCT_CONST_BITS); \
+ tmp3 = _mm256_srai_epi32(tmp3, DCT_CONST_BITS); \
+ } while (0)
+
+static INLINE void butterfly(const __m256i *x0, const __m256i *x1,
+ const __m256i *c0, const __m256i *c1, __m256i *y0,
+ __m256i *y1) {
+ __m256i tmp0, tmp1, tmp2, tmp3, u0, u1;
+ const __m256i rounding = _mm256_set1_epi32(DCT_CONST_ROUNDING);
+
+ u0 = _mm256_unpacklo_epi16(*x0, *x1);
+ u1 = _mm256_unpackhi_epi16(*x0, *x1);
+ BUTTERFLY_PAIR(u0, u1, *c0, *c1);
+ *y0 = _mm256_packs_epi32(tmp0, tmp1);
+ *y1 = _mm256_packs_epi32(tmp2, tmp3);
+}
+
+static INLINE void butterfly_self(__m256i *x0, __m256i *x1, const __m256i *c0,
+ const __m256i *c1) {
+ __m256i tmp0, tmp1, tmp2, tmp3, u0, u1;
+ const __m256i rounding = _mm256_set1_epi32(DCT_CONST_ROUNDING);
+
+ u0 = _mm256_unpacklo_epi16(*x0, *x1);
+ u1 = _mm256_unpackhi_epi16(*x0, *x1);
+ BUTTERFLY_PAIR(u0, u1, *c0, *c1);
+ *x0 = _mm256_packs_epi32(tmp0, tmp1);
+ *x1 = _mm256_packs_epi32(tmp2, tmp3);
+}
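With c0 = pair256_set_epi16(a, b) and c1 = pair256_set_epi16(c, d), the unpack/madd sequence in butterfly() computes, for each 16-bit lane i, y0[i] = round_shift(a * x0[i] + b * x1[i]) and y1[i] = round_shift(c * x0[i] + d * x1[i]), saturating to int16 on the final pack. A scalar model of one rotated output, again assuming DCT_CONST_BITS == 14:

#include <stdint.h>

/* One lane of butterfly(): multiply-accumulate two inputs against a
 * constant pair, then apply DCT_CONST_BITS rounding. */
static int16_t rotate_lane(int16_t x0, int16_t x1, int16_t ca, int16_t cb) {
  int32_t sum = (int32_t)x0 * ca + (int32_t)x1 * cb;
  return (int16_t)((sum + (1 << 13)) >> 14);
}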
+
+// For each 16x32 block __m256i in[32],
+// the inputs are the coefficients with indices 2, 6, 10, 14, 18, 22, 26, 30
+// and the outputs are pixels 8-15 of __m256i in[32].
+static void idct32_full_16x32_quarter_2(const __m256i *in /*in[32]*/,
+ __m256i *out /*out[16]*/) {
+ __m256i u8, u9, u10, u11, u12, u13, u14, u15; // stp2_
+ __m256i v8, v9, v10, v11, v12, v13, v14, v15; // stp1_
+
+ {
+ const __m256i stg2_0 = pair256_set_epi16(cospi_30_64, -cospi_2_64);
+ const __m256i stg2_1 = pair256_set_epi16(cospi_2_64, cospi_30_64);
+ const __m256i stg2_2 = pair256_set_epi16(cospi_14_64, -cospi_18_64);
+ const __m256i stg2_3 = pair256_set_epi16(cospi_18_64, cospi_14_64);
+ butterfly(&in[2], &in[30], &stg2_0, &stg2_1, &u8, &u15);
+ butterfly(&in[18], &in[14], &stg2_2, &stg2_3, &u9, &u14);
+ }
+
+ v8 = _mm256_add_epi16(u8, u9);
+ v9 = _mm256_sub_epi16(u8, u9);
+ v14 = _mm256_sub_epi16(u15, u14);
+ v15 = _mm256_add_epi16(u15, u14);
+
+ {
+ const __m256i stg2_4 = pair256_set_epi16(cospi_22_64, -cospi_10_64);
+ const __m256i stg2_5 = pair256_set_epi16(cospi_10_64, cospi_22_64);
+ const __m256i stg2_6 = pair256_set_epi16(cospi_6_64, -cospi_26_64);
+ const __m256i stg2_7 = pair256_set_epi16(cospi_26_64, cospi_6_64);
+ butterfly(&in[10], &in[22], &stg2_4, &stg2_5, &u10, &u13);
+ butterfly(&in[26], &in[6], &stg2_6, &stg2_7, &u11, &u12);
+ }
+
+ v10 = _mm256_sub_epi16(u11, u10);
+ v11 = _mm256_add_epi16(u11, u10);
+ v12 = _mm256_add_epi16(u12, u13);
+ v13 = _mm256_sub_epi16(u12, u13);
+
+ {
+ const __m256i stg4_4 = pair256_set_epi16(-cospi_8_64, cospi_24_64);
+ const __m256i stg4_5 = pair256_set_epi16(cospi_24_64, cospi_8_64);
+ const __m256i stg4_6 = pair256_set_epi16(-cospi_24_64, -cospi_8_64);
+ butterfly_self(&v9, &v14, &stg4_4, &stg4_5);
+ butterfly_self(&v10, &v13, &stg4_6, &stg4_4);
+ }
+
+ out[0] = _mm256_add_epi16(v8, v11);
+ out[1] = _mm256_add_epi16(v9, v10);
+ out[6] = _mm256_add_epi16(v14, v13);
+ out[7] = _mm256_add_epi16(v15, v12);
+
+ out[2] = _mm256_sub_epi16(v9, v10);
+ out[3] = _mm256_sub_epi16(v8, v11);
+ out[4] = _mm256_sub_epi16(v15, v12);
+ out[5] = _mm256_sub_epi16(v14, v13);
+
+ {
+ const __m256i stg4_0 = pair256_set_epi16(cospi_16_64, cospi_16_64);
+ const __m256i stg6_0 = pair256_set_epi16(-cospi_16_64, cospi_16_64);
+ butterfly_self(&out[2], &out[5], &stg6_0, &stg4_0);
+ butterfly_self(&out[3], &out[4], &stg6_0, &stg4_0);
+ }
+}
+
+// For each 8x32 block __m256i in[32],
+// the inputs are the coefficients with indices 0, 4, 8, 12, 16, 20, 24, 28
+// and the outputs are pixels 0-7 of __m256i in[32].
+static void idct32_full_16x32_quarter_1(const __m256i *in /*in[32]*/,
+ __m256i *out /*out[8]*/) {
+ __m256i u0, u1, u2, u3, u4, u5, u6, u7; // stp1_
+ __m256i v0, v1, v2, v3, v4, v5, v6, v7; // stp2_
+
+ {
+ const __m256i stg3_0 = pair256_set_epi16(cospi_28_64, -cospi_4_64);
+ const __m256i stg3_1 = pair256_set_epi16(cospi_4_64, cospi_28_64);
+ const __m256i stg3_2 = pair256_set_epi16(cospi_12_64, -cospi_20_64);
+ const __m256i stg3_3 = pair256_set_epi16(cospi_20_64, cospi_12_64);
+ butterfly(&in[4], &in[28], &stg3_0, &stg3_1, &u4, &u7);
+ butterfly(&in[20], &in[12], &stg3_2, &stg3_3, &u5, &u6);
+ }
+
+ v4 = _mm256_add_epi16(u4, u5);
+ v5 = _mm256_sub_epi16(u4, u5);
+ v6 = _mm256_sub_epi16(u7, u6);
+ v7 = _mm256_add_epi16(u7, u6);
+
+ {
+ const __m256i stg4_0 = pair256_set_epi16(cospi_16_64, cospi_16_64);
+ const __m256i stg4_1 = pair256_set_epi16(cospi_16_64, -cospi_16_64);
+ const __m256i stg4_2 = pair256_set_epi16(cospi_24_64, -cospi_8_64);
+ const __m256i stg4_3 = pair256_set_epi16(cospi_8_64, cospi_24_64);
+ butterfly(&v6, &v5, &stg4_1, &stg4_0, &v5, &v6);
+
+ butterfly(&in[0], &in[16], &stg4_0, &stg4_1, &u0, &u1);
+ butterfly(&in[8], &in[24], &stg4_2, &stg4_3, &u2, &u3);
+ }
+
+ v0 = _mm256_add_epi16(u0, u3);
+ v1 = _mm256_add_epi16(u1, u2);
+ v2 = _mm256_sub_epi16(u1, u2);
+ v3 = _mm256_sub_epi16(u0, u3);
+
+ out[0] = _mm256_add_epi16(v0, v7);
+ out[1] = _mm256_add_epi16(v1, v6);
+ out[2] = _mm256_add_epi16(v2, v5);
+ out[3] = _mm256_add_epi16(v3, v4);
+ out[4] = _mm256_sub_epi16(v3, v4);
+ out[5] = _mm256_sub_epi16(v2, v5);
+ out[6] = _mm256_sub_epi16(v1, v6);
+ out[7] = _mm256_sub_epi16(v0, v7);
+}
+
+// For each 8x32 block __m256i in[32],
+// the inputs are the coefficients with odd indices,
+// 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31,
+// and the outputs are pixels 16-23 and 24-31 of __m256i in[32].
+// To avoid hiding an offset of 16 inside this function, we write outputs
+// 0-15 into the array out[16] instead.
+static void idct32_full_16x32_quarter_3_4(const __m256i *in /*in[32]*/,
+ __m256i *out /*out[16]*/) {
+ __m256i v16, v17, v18, v19, v20, v21, v22, v23;
+ __m256i v24, v25, v26, v27, v28, v29, v30, v31;
+ __m256i u16, u17, u18, u19, u20, u21, u22, u23;
+ __m256i u24, u25, u26, u27, u28, u29, u30, u31;
+
+ {
+ const __m256i stg1_0 = pair256_set_epi16(cospi_31_64, -cospi_1_64);
+ const __m256i stg1_1 = pair256_set_epi16(cospi_1_64, cospi_31_64);
+ const __m256i stg1_2 = pair256_set_epi16(cospi_15_64, -cospi_17_64);
+ const __m256i stg1_3 = pair256_set_epi16(cospi_17_64, cospi_15_64);
+ const __m256i stg1_4 = pair256_set_epi16(cospi_23_64, -cospi_9_64);
+ const __m256i stg1_5 = pair256_set_epi16(cospi_9_64, cospi_23_64);
+ const __m256i stg1_6 = pair256_set_epi16(cospi_7_64, -cospi_25_64);
+ const __m256i stg1_7 = pair256_set_epi16(cospi_25_64, cospi_7_64);
+ const __m256i stg1_8 = pair256_set_epi16(cospi_27_64, -cospi_5_64);
+ const __m256i stg1_9 = pair256_set_epi16(cospi_5_64, cospi_27_64);
+ const __m256i stg1_10 = pair256_set_epi16(cospi_11_64, -cospi_21_64);
+ const __m256i stg1_11 = pair256_set_epi16(cospi_21_64, cospi_11_64);
+ const __m256i stg1_12 = pair256_set_epi16(cospi_19_64, -cospi_13_64);
+ const __m256i stg1_13 = pair256_set_epi16(cospi_13_64, cospi_19_64);
+ const __m256i stg1_14 = pair256_set_epi16(cospi_3_64, -cospi_29_64);
+ const __m256i stg1_15 = pair256_set_epi16(cospi_29_64, cospi_3_64);
+ butterfly(&in[1], &in[31], &stg1_0, &stg1_1, &u16, &u31);
+ butterfly(&in[17], &in[15], &stg1_2, &stg1_3, &u17, &u30);
+ butterfly(&in[9], &in[23], &stg1_4, &stg1_5, &u18, &u29);
+ butterfly(&in[25], &in[7], &stg1_6, &stg1_7, &u19, &u28);
+
+ butterfly(&in[5], &in[27], &stg1_8, &stg1_9, &u20, &u27);
+ butterfly(&in[21], &in[11], &stg1_10, &stg1_11, &u21, &u26);
+
+ butterfly(&in[13], &in[19], &stg1_12, &stg1_13, &u22, &u25);
+ butterfly(&in[29], &in[3], &stg1_14, &stg1_15, &u23, &u24);
+ }
+
+ v16 = _mm256_add_epi16(u16, u17);
+ v17 = _mm256_sub_epi16(u16, u17);
+ v18 = _mm256_sub_epi16(u19, u18);
+ v19 = _mm256_add_epi16(u19, u18);
+
+ v20 = _mm256_add_epi16(u20, u21);
+ v21 = _mm256_sub_epi16(u20, u21);
+ v22 = _mm256_sub_epi16(u23, u22);
+ v23 = _mm256_add_epi16(u23, u22);
+
+ v24 = _mm256_add_epi16(u24, u25);
+ v25 = _mm256_sub_epi16(u24, u25);
+ v26 = _mm256_sub_epi16(u27, u26);
+ v27 = _mm256_add_epi16(u27, u26);
+
+ v28 = _mm256_add_epi16(u28, u29);
+ v29 = _mm256_sub_epi16(u28, u29);
+ v30 = _mm256_sub_epi16(u31, u30);
+ v31 = _mm256_add_epi16(u31, u30);
+
+ {
+ const __m256i stg3_4 = pair256_set_epi16(-cospi_4_64, cospi_28_64);
+ const __m256i stg3_5 = pair256_set_epi16(cospi_28_64, cospi_4_64);
+ const __m256i stg3_6 = pair256_set_epi16(-cospi_28_64, -cospi_4_64);
+ const __m256i stg3_8 = pair256_set_epi16(-cospi_20_64, cospi_12_64);
+ const __m256i stg3_9 = pair256_set_epi16(cospi_12_64, cospi_20_64);
+ const __m256i stg3_10 = pair256_set_epi16(-cospi_12_64, -cospi_20_64);
+ butterfly_self(&v17, &v30, &stg3_4, &stg3_5);
+ butterfly_self(&v18, &v29, &stg3_6, &stg3_4);
+ butterfly_self(&v21, &v26, &stg3_8, &stg3_9);
+ butterfly_self(&v22, &v25, &stg3_10, &stg3_8);
+ }
+
+ u16 = _mm256_add_epi16(v16, v19);
+ u17 = _mm256_add_epi16(v17, v18);
+ u18 = _mm256_sub_epi16(v17, v18);
+ u19 = _mm256_sub_epi16(v16, v19);
+ u20 = _mm256_sub_epi16(v23, v20);
+ u21 = _mm256_sub_epi16(v22, v21);
+ u22 = _mm256_add_epi16(v22, v21);
+ u23 = _mm256_add_epi16(v23, v20);
+
+ u24 = _mm256_add_epi16(v24, v27);
+ u25 = _mm256_add_epi16(v25, v26);
+ u26 = _mm256_sub_epi16(v25, v26);
+ u27 = _mm256_sub_epi16(v24, v27);
+
+ u28 = _mm256_sub_epi16(v31, v28);
+ u29 = _mm256_sub_epi16(v30, v29);
+ u30 = _mm256_add_epi16(v29, v30);
+ u31 = _mm256_add_epi16(v28, v31);
+
+ {
+ const __m256i stg4_4 = pair256_set_epi16(-cospi_8_64, cospi_24_64);
+ const __m256i stg4_5 = pair256_set_epi16(cospi_24_64, cospi_8_64);
+ const __m256i stg4_6 = pair256_set_epi16(-cospi_24_64, -cospi_8_64);
+ butterfly_self(&u18, &u29, &stg4_4, &stg4_5);
+ butterfly_self(&u19, &u28, &stg4_4, &stg4_5);
+ butterfly_self(&u20, &u27, &stg4_6, &stg4_4);
+ butterfly_self(&u21, &u26, &stg4_6, &stg4_4);
+ }
+
+ out[0] = _mm256_add_epi16(u16, u23);
+ out[1] = _mm256_add_epi16(u17, u22);
+ out[2] = _mm256_add_epi16(u18, u21);
+ out[3] = _mm256_add_epi16(u19, u20);
+ out[4] = _mm256_sub_epi16(u19, u20);
+ out[5] = _mm256_sub_epi16(u18, u21);
+ out[6] = _mm256_sub_epi16(u17, u22);
+ out[7] = _mm256_sub_epi16(u16, u23);
+
+ out[8] = _mm256_sub_epi16(u31, u24);
+ out[9] = _mm256_sub_epi16(u30, u25);
+ out[10] = _mm256_sub_epi16(u29, u26);
+ out[11] = _mm256_sub_epi16(u28, u27);
+ out[12] = _mm256_add_epi16(u27, u28);
+ out[13] = _mm256_add_epi16(u26, u29);
+ out[14] = _mm256_add_epi16(u25, u30);
+ out[15] = _mm256_add_epi16(u24, u31);
+
+ {
+ const __m256i stg4_0 = pair256_set_epi16(cospi_16_64, cospi_16_64);
+ const __m256i stg6_0 = pair256_set_epi16(-cospi_16_64, cospi_16_64);
+ butterfly_self(&out[4], &out[11], &stg6_0, &stg4_0);
+ butterfly_self(&out[5], &out[10], &stg6_0, &stg4_0);
+ butterfly_self(&out[6], &out[9], &stg6_0, &stg4_0);
+ butterfly_self(&out[7], &out[8], &stg6_0, &stg4_0);
+ }
+}
+
+static void idct32_full_16x32_quarter_1_2(const __m256i *in /*in[32]*/,
+ __m256i *out /*out[32]*/) {
+ __m256i temp[16];
+ idct32_full_16x32_quarter_1(in, temp);
+ idct32_full_16x32_quarter_2(in, &temp[8]);
+ add_sub_butterfly(temp, out, 16);
+}
+
+static void idct32_16x32(const __m256i *in /*in[32]*/,
+ __m256i *out /*out[32]*/) {
+ __m256i temp[32];
+ idct32_full_16x32_quarter_1_2(in, temp);
+ idct32_full_16x32_quarter_3_4(in, &temp[16]);
+ add_sub_butterfly(temp, out, 32);
+}
+
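/* Editor's aside -- a hedged sketch, not part of the patch. The
 * add_sub_butterfly(in, out, size) helper called above is defined earlier
 * in this file; assuming it implements the standard final IDCT stage, it
 * combines the two half-size results along these lines: */
static void add_sub_butterfly_sketch(const __m256i *in, __m256i *out,
                                     int size) {
  int i;
  const int num = size >> 1;
  for (i = 0; i < num; ++i) {
    /* out[i] and out[size-1-i] hold the sum and difference of the
     * mirrored pair, exactly as in the scalar idct final stage. */
    out[i] = _mm256_add_epi16(in[i], in[size - 1 - i]);
    out[size - 1 - i] = _mm256_sub_epi16(in[i], in[size - 1 - i]);
  }
}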
+void aom_idct32x32_1024_add_avx2(const tran_low_t *input, uint8_t *dest,
+ int stride) {
+ __m256i col[64], in[32];
+ int i;
+
+ for (i = 0; i < 2; ++i) {
+ load_buffer_32x16(input, in);
+ input += 32 << 4;
+
+ mm256_transpose_16x16(in, in);
+ mm256_transpose_16x16(&in[16], &in[16]);
+ idct32_16x32(in, col + (i << 5));
+ }
+
+ for (i = 0; i < 2; ++i) {
+ int j = i << 4;
+ mm256_transpose_16x16(col + j, in);
+ mm256_transpose_16x16(col + j + 32, &in[16]);
+ idct32_16x32(in, in);
+ store_buffer_16xN(in, stride, dest, 32);
+ dest += 16;
+ }
+}
+
+// Group the coefficient calculations into smaller functions
+// to prevent register spills to the stack:
+// quarter_1: 0-7
+// quarter_2: 8-15
+// quarter_3_4: 16-23, 24-31
+static void idct32_16x32_135_quarter_1(const __m256i *in /*in[16]*/,
+ __m256i *out /*out[8]*/) {
+ __m256i u0, u1, u2, u3, u4, u5, u6, u7;
+ __m256i v0, v1, v2, v3, v4, v5, v6, v7;
+
+ {
+ const __m256i stk4_0 = pair256_set_epi16(2 * cospi_16_64, 2 * cospi_16_64);
+ const __m256i stk4_2 = pair256_set_epi16(2 * cospi_24_64, 2 * cospi_24_64);
+ const __m256i stk4_3 = pair256_set_epi16(2 * cospi_8_64, 2 * cospi_8_64);
+ u0 = _mm256_mulhrs_epi16(in[0], stk4_0);
+ u2 = _mm256_mulhrs_epi16(in[8], stk4_2);
+ u3 = _mm256_mulhrs_epi16(in[8], stk4_3);
+ u1 = u0;
+ }
+
+ v0 = _mm256_add_epi16(u0, u3);
+ v1 = _mm256_add_epi16(u1, u2);
+ v2 = _mm256_sub_epi16(u1, u2);
+ v3 = _mm256_sub_epi16(u0, u3);
+
+ {
+ const __m256i stk3_0 = pair256_set_epi16(2 * cospi_28_64, 2 * cospi_28_64);
+ const __m256i stk3_1 = pair256_set_epi16(2 * cospi_4_64, 2 * cospi_4_64);
+ const __m256i stk3_2 =
+ pair256_set_epi16(-2 * cospi_20_64, -2 * cospi_20_64);
+ const __m256i stk3_3 = pair256_set_epi16(2 * cospi_12_64, 2 * cospi_12_64);
+ u4 = _mm256_mulhrs_epi16(in[4], stk3_0);
+ u7 = _mm256_mulhrs_epi16(in[4], stk3_1);
+ u5 = _mm256_mulhrs_epi16(in[12], stk3_2);
+ u6 = _mm256_mulhrs_epi16(in[12], stk3_3);
+ }
+
+ v4 = _mm256_add_epi16(u4, u5);
+ v5 = _mm256_sub_epi16(u4, u5);
+ v6 = _mm256_sub_epi16(u7, u6);
+ v7 = _mm256_add_epi16(u7, u6);
+
+ {
+ const __m256i stg4_0 = pair256_set_epi16(cospi_16_64, cospi_16_64);
+ const __m256i stg4_1 = pair256_set_epi16(cospi_16_64, -cospi_16_64);
+ butterfly(&v6, &v5, &stg4_1, &stg4_0, &v5, &v6);
+ }
+
+ out[0] = _mm256_add_epi16(v0, v7);
+ out[1] = _mm256_add_epi16(v1, v6);
+ out[2] = _mm256_add_epi16(v2, v5);
+ out[3] = _mm256_add_epi16(v3, v4);
+ out[4] = _mm256_sub_epi16(v3, v4);
+ out[5] = _mm256_sub_epi16(v2, v5);
+ out[6] = _mm256_sub_epi16(v1, v6);
+ out[7] = _mm256_sub_epi16(v0, v7);
+}
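/* Editor's aside -- illustration only, not part of the patch. The
 * _mm256_mulhrs_epi16(x, 2 * cospi_k_64) calls above replace a full
 * butterfly whose partner input is known to be zero. Per 16-bit lane,
 * mulhrs(a, b) computes (a * b + (1 << 14)) >> 15, so passing
 * b = 2 * cospi_k_64 yields ROUND_POWER_OF_TWO(a * cospi_k_64, 14),
 * the same DCT_CONST_BITS rounding the two-input butterfly applies: */
static INLINE int16_t mulhrs_model(int16_t a, int16_t two_cospi) {
  return (int16_t)(((int32_t)a * two_cospi + (1 << 14)) >> 15);
}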
+
+static void idct32_16x32_135_quarter_2(const __m256i *in /*in[16]*/,
+ __m256i *out /*out[8]*/) {
+ __m256i u8, u9, u10, u11, u12, u13, u14, u15;
+ __m256i v8, v9, v10, v11, v12, v13, v14, v15;
+
+ {
+ const __m256i stk2_0 = pair256_set_epi16(2 * cospi_30_64, 2 * cospi_30_64);
+ const __m256i stk2_1 = pair256_set_epi16(2 * cospi_2_64, 2 * cospi_2_64);
+ const __m256i stk2_2 =
+ pair256_set_epi16(-2 * cospi_18_64, -2 * cospi_18_64);
+ const __m256i stk2_3 = pair256_set_epi16(2 * cospi_14_64, 2 * cospi_14_64);
+ const __m256i stk2_4 = pair256_set_epi16(2 * cospi_22_64, 2 * cospi_22_64);
+ const __m256i stk2_5 = pair256_set_epi16(2 * cospi_10_64, 2 * cospi_10_64);
+ const __m256i stk2_6 =
+ pair256_set_epi16(-2 * cospi_26_64, -2 * cospi_26_64);
+ const __m256i stk2_7 = pair256_set_epi16(2 * cospi_6_64, 2 * cospi_6_64);
+ u8 = _mm256_mulhrs_epi16(in[2], stk2_0);
+ u15 = _mm256_mulhrs_epi16(in[2], stk2_1);
+ u9 = _mm256_mulhrs_epi16(in[14], stk2_2);
+ u14 = _mm256_mulhrs_epi16(in[14], stk2_3);
+ u10 = _mm256_mulhrs_epi16(in[10], stk2_4);
+ u13 = _mm256_mulhrs_epi16(in[10], stk2_5);
+ u11 = _mm256_mulhrs_epi16(in[6], stk2_6);
+ u12 = _mm256_mulhrs_epi16(in[6], stk2_7);
+ }
+
+ v8 = _mm256_add_epi16(u8, u9);
+ v9 = _mm256_sub_epi16(u8, u9);
+ v10 = _mm256_sub_epi16(u11, u10);
+ v11 = _mm256_add_epi16(u11, u10);
+ v12 = _mm256_add_epi16(u12, u13);
+ v13 = _mm256_sub_epi16(u12, u13);
+ v14 = _mm256_sub_epi16(u15, u14);
+ v15 = _mm256_add_epi16(u15, u14);
+
+ {
+ const __m256i stg4_4 = pair256_set_epi16(-cospi_8_64, cospi_24_64);
+ const __m256i stg4_5 = pair256_set_epi16(cospi_24_64, cospi_8_64);
+ const __m256i stg4_6 = pair256_set_epi16(-cospi_24_64, -cospi_8_64);
+ butterfly_self(&v9, &v14, &stg4_4, &stg4_5);
+ butterfly_self(&v10, &v13, &stg4_6, &stg4_4);
+ }
+
+ out[0] = _mm256_add_epi16(v8, v11);
+ out[1] = _mm256_add_epi16(v9, v10);
+ out[2] = _mm256_sub_epi16(v9, v10);
+ out[3] = _mm256_sub_epi16(v8, v11);
+ out[4] = _mm256_sub_epi16(v15, v12);
+ out[5] = _mm256_sub_epi16(v14, v13);
+ out[6] = _mm256_add_epi16(v14, v13);
+ out[7] = _mm256_add_epi16(v15, v12);
+
+ {
+ const __m256i stg4_0 = pair256_set_epi16(cospi_16_64, cospi_16_64);
+ const __m256i stg6_0 = pair256_set_epi16(-cospi_16_64, cospi_16_64);
+ butterfly_self(&out[2], &out[5], &stg6_0, &stg4_0);
+ butterfly_self(&out[3], &out[4], &stg6_0, &stg4_0);
+ }
+}
+
+// For each 8x32 block, take the 8 even-indexed entries of in[16]
+// and compute the first half, 16 values, of out[32].
+static void idct32_16x32_quarter_1_2(const __m256i *in /*in[16]*/,
+ __m256i *out /*out[32]*/) {
+ __m256i temp[16];
+ idct32_16x32_135_quarter_1(in, temp);
+ idct32_16x32_135_quarter_2(in, &temp[8]);
+ add_sub_butterfly(temp, out, 16);
+}
+
+// For each 8x32 block, take the 8 odd-indexed entries of in[16]
+// and compute the second half, 16 values, of out[32].
+static void idct32_16x32_quarter_3_4(const __m256i *in /*in[16]*/,
+ __m256i *out /*out[32]*/) {
+ __m256i v16, v17, v18, v19, v20, v21, v22, v23;
+ __m256i v24, v25, v26, v27, v28, v29, v30, v31;
+ __m256i u16, u17, u18, u19, u20, u21, u22, u23;
+ __m256i u24, u25, u26, u27, u28, u29, u30, u31;
+
+ {
+ const __m256i stk1_0 = pair256_set_epi16(2 * cospi_31_64, 2 * cospi_31_64);
+ const __m256i stk1_1 = pair256_set_epi16(2 * cospi_1_64, 2 * cospi_1_64);
+ const __m256i stk1_2 =
+ pair256_set_epi16(-2 * cospi_17_64, -2 * cospi_17_64);
+ const __m256i stk1_3 = pair256_set_epi16(2 * cospi_15_64, 2 * cospi_15_64);
+
+ const __m256i stk1_4 = pair256_set_epi16(2 * cospi_23_64, 2 * cospi_23_64);
+ const __m256i stk1_5 = pair256_set_epi16(2 * cospi_9_64, 2 * cospi_9_64);
+ const __m256i stk1_6 =
+ pair256_set_epi16(-2 * cospi_25_64, -2 * cospi_25_64);
+ const __m256i stk1_7 = pair256_set_epi16(2 * cospi_7_64, 2 * cospi_7_64);
+ const __m256i stk1_8 = pair256_set_epi16(2 * cospi_27_64, 2 * cospi_27_64);
+ const __m256i stk1_9 = pair256_set_epi16(2 * cospi_5_64, 2 * cospi_5_64);
+ const __m256i stk1_10 =
+ pair256_set_epi16(-2 * cospi_21_64, -2 * cospi_21_64);
+ const __m256i stk1_11 = pair256_set_epi16(2 * cospi_11_64, 2 * cospi_11_64);
+
+ const __m256i stk1_12 = pair256_set_epi16(2 * cospi_19_64, 2 * cospi_19_64);
+ const __m256i stk1_13 = pair256_set_epi16(2 * cospi_13_64, 2 * cospi_13_64);
+ const __m256i stk1_14 =
+ pair256_set_epi16(-2 * cospi_29_64, -2 * cospi_29_64);
+ const __m256i stk1_15 = pair256_set_epi16(2 * cospi_3_64, 2 * cospi_3_64);
+ u16 = _mm256_mulhrs_epi16(in[1], stk1_0);
+ u31 = _mm256_mulhrs_epi16(in[1], stk1_1);
+ u17 = _mm256_mulhrs_epi16(in[15], stk1_2);
+ u30 = _mm256_mulhrs_epi16(in[15], stk1_3);
+
+ u18 = _mm256_mulhrs_epi16(in[9], stk1_4);
+ u29 = _mm256_mulhrs_epi16(in[9], stk1_5);
+ u19 = _mm256_mulhrs_epi16(in[7], stk1_6);
+ u28 = _mm256_mulhrs_epi16(in[7], stk1_7);
+
+ u20 = _mm256_mulhrs_epi16(in[5], stk1_8);
+ u27 = _mm256_mulhrs_epi16(in[5], stk1_9);
+ u21 = _mm256_mulhrs_epi16(in[11], stk1_10);
+ u26 = _mm256_mulhrs_epi16(in[11], stk1_11);
+
+ u22 = _mm256_mulhrs_epi16(in[13], stk1_12);
+ u25 = _mm256_mulhrs_epi16(in[13], stk1_13);
+ u23 = _mm256_mulhrs_epi16(in[3], stk1_14);
+ u24 = _mm256_mulhrs_epi16(in[3], stk1_15);
+ }
+
+ v16 = _mm256_add_epi16(u16, u17);
+ v17 = _mm256_sub_epi16(u16, u17);
+ v18 = _mm256_sub_epi16(u19, u18);
+ v19 = _mm256_add_epi16(u19, u18);
+
+ v20 = _mm256_add_epi16(u20, u21);
+ v21 = _mm256_sub_epi16(u20, u21);
+ v22 = _mm256_sub_epi16(u23, u22);
+ v23 = _mm256_add_epi16(u23, u22);
+
+ v24 = _mm256_add_epi16(u24, u25);
+ v25 = _mm256_sub_epi16(u24, u25);
+ v26 = _mm256_sub_epi16(u27, u26);
+ v27 = _mm256_add_epi16(u27, u26);
+
+ v28 = _mm256_add_epi16(u28, u29);
+ v29 = _mm256_sub_epi16(u28, u29);
+ v30 = _mm256_sub_epi16(u31, u30);
+ v31 = _mm256_add_epi16(u31, u30);
+
+ {
+ const __m256i stg3_4 = pair256_set_epi16(-cospi_4_64, cospi_28_64);
+ const __m256i stg3_5 = pair256_set_epi16(cospi_28_64, cospi_4_64);
+ const __m256i stg3_6 = pair256_set_epi16(-cospi_28_64, -cospi_4_64);
+ const __m256i stg3_8 = pair256_set_epi16(-cospi_20_64, cospi_12_64);
+ const __m256i stg3_9 = pair256_set_epi16(cospi_12_64, cospi_20_64);
+ const __m256i stg3_10 = pair256_set_epi16(-cospi_12_64, -cospi_20_64);
+
+ butterfly_self(&v17, &v30, &stg3_4, &stg3_5);
+ butterfly_self(&v18, &v29, &stg3_6, &stg3_4);
+ butterfly_self(&v21, &v26, &stg3_8, &stg3_9);
+ butterfly_self(&v22, &v25, &stg3_10, &stg3_8);
+ }
+
+ u16 = _mm256_add_epi16(v16, v19);
+ u17 = _mm256_add_epi16(v17, v18);
+ u18 = _mm256_sub_epi16(v17, v18);
+ u19 = _mm256_sub_epi16(v16, v19);
+ u20 = _mm256_sub_epi16(v23, v20);
+ u21 = _mm256_sub_epi16(v22, v21);
+ u22 = _mm256_add_epi16(v22, v21);
+ u23 = _mm256_add_epi16(v23, v20);
+
+ u24 = _mm256_add_epi16(v24, v27);
+ u25 = _mm256_add_epi16(v25, v26);
+ u26 = _mm256_sub_epi16(v25, v26);
+ u27 = _mm256_sub_epi16(v24, v27);
+ u28 = _mm256_sub_epi16(v31, v28);
+ u29 = _mm256_sub_epi16(v30, v29);
+ u30 = _mm256_add_epi16(v29, v30);
+ u31 = _mm256_add_epi16(v28, v31);
+
+ {
+ const __m256i stg4_4 = pair256_set_epi16(-cospi_8_64, cospi_24_64);
+ const __m256i stg4_5 = pair256_set_epi16(cospi_24_64, cospi_8_64);
+ const __m256i stg4_6 = pair256_set_epi16(-cospi_24_64, -cospi_8_64);
+ butterfly_self(&u18, &u29, &stg4_4, &stg4_5);
+ butterfly_self(&u19, &u28, &stg4_4, &stg4_5);
+ butterfly_self(&u20, &u27, &stg4_6, &stg4_4);
+ butterfly_self(&u21, &u26, &stg4_6, &stg4_4);
+ }
+
+ out[0] = _mm256_add_epi16(u16, u23);
+ out[1] = _mm256_add_epi16(u17, u22);
+ out[2] = _mm256_add_epi16(u18, u21);
+ out[3] = _mm256_add_epi16(u19, u20);
+ v20 = _mm256_sub_epi16(u19, u20);
+ v21 = _mm256_sub_epi16(u18, u21);
+ v22 = _mm256_sub_epi16(u17, u22);
+ v23 = _mm256_sub_epi16(u16, u23);
+
+ v24 = _mm256_sub_epi16(u31, u24);
+ v25 = _mm256_sub_epi16(u30, u25);
+ v26 = _mm256_sub_epi16(u29, u26);
+ v27 = _mm256_sub_epi16(u28, u27);
+ out[12] = _mm256_add_epi16(u27, u28);
+ out[13] = _mm256_add_epi16(u26, u29);
+ out[14] = _mm256_add_epi16(u25, u30);
+ out[15] = _mm256_add_epi16(u24, u31);
+
+ {
+ const __m256i stg4_0 = pair256_set_epi16(cospi_16_64, cospi_16_64);
+ const __m256i stg6_0 = pair256_set_epi16(-cospi_16_64, cospi_16_64);
+ butterfly(&v20, &v27, &stg6_0, &stg4_0, &out[4], &out[11]);
+ butterfly(&v21, &v26, &stg6_0, &stg4_0, &out[5], &out[10]);
+ butterfly(&v22, &v25, &stg6_0, &stg4_0, &out[6], &out[9]);
+ butterfly(&v23, &v24, &stg6_0, &stg4_0, &out[7], &out[8]);
+ }
+}
+
+// Input: a 16x16 coefficient block held in __m256i in[32];
+// output: the 16x32 result, written back into in[32].
+static void idct32_16x32_135(__m256i *in /*in[32]*/) {
+ __m256i out[32];
+ idct32_16x32_quarter_1_2(in, out);
+ idct32_16x32_quarter_3_4(in, &out[16]);
+ add_sub_butterfly(out, in, 32);
+}
+
+static INLINE void load_buffer_from_32x32(const tran_low_t *coeff, __m256i *in,
+ int size) {
+ int i = 0;
+ while (i < size) {
+ load_coeff(coeff + (i << 5), &in[i]);
+ i += 1;
+ }
+}
+
+static INLINE void zero_buffer(__m256i *in, int num) {
+ int i;
+ for (i = 0; i < num; ++i) {
+ in[i] = _mm256_setzero_si256();
+ }
+}
+
+// Only the upper-left 16x16 block has non-zero coeffs
+void aom_idct32x32_135_add_avx2(const tran_low_t *input, uint8_t *dest,
+ int stride) {
+ __m256i in[32];
+ zero_buffer(in, 32);
+ load_buffer_from_32x32(input, in, 16);
+ mm256_transpose_16x16(in, in);
+ idct32_16x32_135(in);
+
+ __m256i out[32];
+ mm256_transpose_16x16(in, out);
+ idct32_16x32_135(out);
+ store_buffer_16xN(out, stride, dest, 32);
+ mm256_transpose_16x16(&in[16], in);
+ idct32_16x32_135(in);
+ store_buffer_16xN(in, stride, dest + 16, 32);
+}
+
+static void idct32_34_first_half(const __m256i *in, __m256i *stp1) {
+ const __m256i stk2_0 = pair256_set_epi16(2 * cospi_30_64, 2 * cospi_30_64);
+ const __m256i stk2_1 = pair256_set_epi16(2 * cospi_2_64, 2 * cospi_2_64);
+ const __m256i stk2_6 = pair256_set_epi16(-2 * cospi_26_64, -2 * cospi_26_64);
+ const __m256i stk2_7 = pair256_set_epi16(2 * cospi_6_64, 2 * cospi_6_64);
+
+ const __m256i stk3_0 = pair256_set_epi16(2 * cospi_28_64, 2 * cospi_28_64);
+ const __m256i stk3_1 = pair256_set_epi16(2 * cospi_4_64, 2 * cospi_4_64);
+
+ const __m256i stg4_0 = pair256_set_epi16(cospi_16_64, cospi_16_64);
+ const __m256i stk4_0 = pair256_set_epi16(2 * cospi_16_64, 2 * cospi_16_64);
+ const __m256i stg4_1 = pair256_set_epi16(cospi_16_64, -cospi_16_64);
+ const __m256i stg4_4 = pair256_set_epi16(-cospi_8_64, cospi_24_64);
+ const __m256i stg4_5 = pair256_set_epi16(cospi_24_64, cospi_8_64);
+ const __m256i stg4_6 = pair256_set_epi16(-cospi_24_64, -cospi_8_64);
+
+ const __m256i stg6_0 = pair256_set_epi16(-cospi_16_64, cospi_16_64);
+ __m256i u0, u1, u2, u3, u4, u5, u6, u7;
+ __m256i x0, x1, x4, x5, x6, x7;
+ __m256i v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15;
+
+ // phase 1
+
+ // 0, 15
+ u2 = _mm256_mulhrs_epi16(in[2], stk2_1); // stp2_15
+ u3 = _mm256_mulhrs_epi16(in[6], stk2_7); // stp2_12
+ v15 = _mm256_add_epi16(u2, u3);
+ // in[0], in[4]
+ x0 = _mm256_mulhrs_epi16(in[0], stk4_0); // stp1[0]
+ x7 = _mm256_mulhrs_epi16(in[4], stk3_1); // stp1[7]
+ v0 = _mm256_add_epi16(x0, x7); // stp2_0
+ stp1[0] = _mm256_add_epi16(v0, v15);
+ stp1[15] = _mm256_sub_epi16(v0, v15);
+
+ // in[2], in[6]
+ u0 = _mm256_mulhrs_epi16(in[2], stk2_0); // stp2_8
+ u1 = _mm256_mulhrs_epi16(in[6], stk2_6); // stp2_11
+ butterfly(&u0, &u2, &stg4_4, &stg4_5, &u4, &u5); // stp2_9, stp2_14
+ butterfly(&u1, &u3, &stg4_6, &stg4_4, &u6, &u7); // stp2_10, stp2_13
+
+ v8 = _mm256_add_epi16(u0, u1);
+ v9 = _mm256_add_epi16(u4, u6);
+ v10 = _mm256_sub_epi16(u4, u6);
+ v11 = _mm256_sub_epi16(u0, u1);
+ v12 = _mm256_sub_epi16(u2, u3);
+ v13 = _mm256_sub_epi16(u5, u7);
+ v14 = _mm256_add_epi16(u5, u7);
+
+ butterfly_self(&v10, &v13, &stg6_0, &stg4_0);
+ butterfly_self(&v11, &v12, &stg6_0, &stg4_0);
+
+ // 1, 14
+ x1 = _mm256_mulhrs_epi16(in[0], stk4_0); // stp1[1], stk4_1 = stk4_0
+ // stp1[2] = stp1[0], stp1[3] = stp1[1]
+ x4 = _mm256_mulhrs_epi16(in[4], stk3_0); // stp1[4]
+ butterfly(&x7, &x4, &stg4_1, &stg4_0, &x5, &x6);
+ v1 = _mm256_add_epi16(x1, x6); // stp2_1
+ v2 = _mm256_add_epi16(x0, x5); // stp2_2
+ stp1[1] = _mm256_add_epi16(v1, v14);
+ stp1[14] = _mm256_sub_epi16(v1, v14);
+
+ stp1[2] = _mm256_add_epi16(v2, v13);
+ stp1[13] = _mm256_sub_epi16(v2, v13);
+
+ v3 = _mm256_add_epi16(x1, x4); // stp2_3
+ v4 = _mm256_sub_epi16(x1, x4); // stp2_4
+
+ v5 = _mm256_sub_epi16(x0, x5); // stp2_5
+
+ v6 = _mm256_sub_epi16(x1, x6); // stp2_6
+ v7 = _mm256_sub_epi16(x0, x7); // stp2_7
+ stp1[3] = _mm256_add_epi16(v3, v12);
+ stp1[12] = _mm256_sub_epi16(v3, v12);
+
+ stp1[6] = _mm256_add_epi16(v6, v9);
+ stp1[9] = _mm256_sub_epi16(v6, v9);
+
+ stp1[7] = _mm256_add_epi16(v7, v8);
+ stp1[8] = _mm256_sub_epi16(v7, v8);
+
+ stp1[4] = _mm256_add_epi16(v4, v11);
+ stp1[11] = _mm256_sub_epi16(v4, v11);
+
+ stp1[5] = _mm256_add_epi16(v5, v10);
+ stp1[10] = _mm256_sub_epi16(v5, v10);
+}
+
+static void idct32_34_second_half(const __m256i *in, __m256i *stp1) {
+ const __m256i stk1_0 = pair256_set_epi16(2 * cospi_31_64, 2 * cospi_31_64);
+ const __m256i stk1_1 = pair256_set_epi16(2 * cospi_1_64, 2 * cospi_1_64);
+ const __m256i stk1_6 = pair256_set_epi16(-2 * cospi_25_64, -2 * cospi_25_64);
+ const __m256i stk1_7 = pair256_set_epi16(2 * cospi_7_64, 2 * cospi_7_64);
+ const __m256i stk1_8 = pair256_set_epi16(2 * cospi_27_64, 2 * cospi_27_64);
+ const __m256i stk1_9 = pair256_set_epi16(2 * cospi_5_64, 2 * cospi_5_64);
+ const __m256i stk1_14 = pair256_set_epi16(-2 * cospi_29_64, -2 * cospi_29_64);
+ const __m256i stk1_15 = pair256_set_epi16(2 * cospi_3_64, 2 * cospi_3_64);
+ const __m256i stg3_4 = pair256_set_epi16(-cospi_4_64, cospi_28_64);
+ const __m256i stg3_5 = pair256_set_epi16(cospi_28_64, cospi_4_64);
+ const __m256i stg3_6 = pair256_set_epi16(-cospi_28_64, -cospi_4_64);
+ const __m256i stg3_8 = pair256_set_epi16(-cospi_20_64, cospi_12_64);
+ const __m256i stg3_9 = pair256_set_epi16(cospi_12_64, cospi_20_64);
+ const __m256i stg3_10 = pair256_set_epi16(-cospi_12_64, -cospi_20_64);
+
+ const __m256i stg4_0 = pair256_set_epi16(cospi_16_64, cospi_16_64);
+ const __m256i stg4_4 = pair256_set_epi16(-cospi_8_64, cospi_24_64);
+ const __m256i stg4_5 = pair256_set_epi16(cospi_24_64, cospi_8_64);
+ const __m256i stg4_6 = pair256_set_epi16(-cospi_24_64, -cospi_8_64);
+
+ const __m256i stg6_0 = pair256_set_epi16(-cospi_16_64, cospi_16_64);
+ __m256i v16, v17, v18, v19, v20, v21, v22, v23;
+ __m256i v24, v25, v26, v27, v28, v29, v30, v31;
+ __m256i u16, u17, u18, u19, u20, u21, u22, u23;
+ __m256i u24, u25, u26, u27, u28, u29, u30, u31;
+
+ v16 = _mm256_mulhrs_epi16(in[1], stk1_0);
+ v31 = _mm256_mulhrs_epi16(in[1], stk1_1);
+
+ v19 = _mm256_mulhrs_epi16(in[7], stk1_6);
+ v28 = _mm256_mulhrs_epi16(in[7], stk1_7);
+
+ v20 = _mm256_mulhrs_epi16(in[5], stk1_8);
+ v27 = _mm256_mulhrs_epi16(in[5], stk1_9);
+
+ v23 = _mm256_mulhrs_epi16(in[3], stk1_14);
+ v24 = _mm256_mulhrs_epi16(in[3], stk1_15);
+
+ butterfly(&v16, &v31, &stg3_4, &stg3_5, &v17, &v30);
+ butterfly(&v19, &v28, &stg3_6, &stg3_4, &v18, &v29);
+ butterfly(&v20, &v27, &stg3_8, &stg3_9, &v21, &v26);
+ butterfly(&v23, &v24, &stg3_10, &stg3_8, &v22, &v25);
+
+ u16 = _mm256_add_epi16(v16, v19);
+ u17 = _mm256_add_epi16(v17, v18);
+ u18 = _mm256_sub_epi16(v17, v18);
+ u19 = _mm256_sub_epi16(v16, v19);
+ u20 = _mm256_sub_epi16(v23, v20);
+ u21 = _mm256_sub_epi16(v22, v21);
+ u22 = _mm256_add_epi16(v22, v21);
+ u23 = _mm256_add_epi16(v23, v20);
+ u24 = _mm256_add_epi16(v24, v27);
+ u27 = _mm256_sub_epi16(v24, v27);
+ u25 = _mm256_add_epi16(v25, v26);
+ u26 = _mm256_sub_epi16(v25, v26);
+ u28 = _mm256_sub_epi16(v31, v28);
+ u31 = _mm256_add_epi16(v28, v31);
+ u29 = _mm256_sub_epi16(v30, v29);
+ u30 = _mm256_add_epi16(v29, v30);
+
+ butterfly_self(&u18, &u29, &stg4_4, &stg4_5);
+ butterfly_self(&u19, &u28, &stg4_4, &stg4_5);
+ butterfly_self(&u20, &u27, &stg4_6, &stg4_4);
+ butterfly_self(&u21, &u26, &stg4_6, &stg4_4);
+
+ stp1[0] = _mm256_add_epi16(u16, u23);
+ stp1[7] = _mm256_sub_epi16(u16, u23);
+
+ stp1[1] = _mm256_add_epi16(u17, u22);
+ stp1[6] = _mm256_sub_epi16(u17, u22);
+
+ stp1[2] = _mm256_add_epi16(u18, u21);
+ stp1[5] = _mm256_sub_epi16(u18, u21);
+
+ stp1[3] = _mm256_add_epi16(u19, u20);
+ stp1[4] = _mm256_sub_epi16(u19, u20);
+
+ stp1[8] = _mm256_sub_epi16(u31, u24);
+ stp1[15] = _mm256_add_epi16(u24, u31);
+
+ stp1[9] = _mm256_sub_epi16(u30, u25);
+ stp1[14] = _mm256_add_epi16(u25, u30);
+
+ stp1[10] = _mm256_sub_epi16(u29, u26);
+ stp1[13] = _mm256_add_epi16(u26, u29);
+
+ stp1[11] = _mm256_sub_epi16(u28, u27);
+ stp1[12] = _mm256_add_epi16(u27, u28);
+
+ butterfly_self(&stp1[4], &stp1[11], &stg6_0, &stg4_0);
+ butterfly_self(&stp1[5], &stp1[10], &stg6_0, &stg4_0);
+ butterfly_self(&stp1[6], &stp1[9], &stg6_0, &stg4_0);
+ butterfly_self(&stp1[7], &stp1[8], &stg6_0, &stg4_0);
+}
+
+// Input: a 16x16 coefficient block held in __m256i in[32];
+// output: the 16x32 result, written back into in[32].
+static void idct32_16x32_34(__m256i *in /*in[32]*/) {
+ __m256i out[32];
+ idct32_34_first_half(in, out);
+ idct32_34_second_half(in, &out[16]);
+ add_sub_butterfly(out, in, 32);
+}
+
+// Only the upper-left 8x8 block has non-zero coeffs
+void aom_idct32x32_34_add_avx2(const tran_low_t *input, uint8_t *dest,
+ int stride) {
+ __m256i in[32];
+ zero_buffer(in, 32);
+ load_buffer_from_32x32(input, in, 8);
+ mm256_transpose_16x16(in, in);
+ idct32_16x32_34(in);
+
+ __m256i out[32];
+ mm256_transpose_16x16(in, out);
+ idct32_16x32_34(out);
+ store_buffer_16xN(out, stride, dest, 32);
+ mm256_transpose_16x16(&in[16], in);
+ idct32_16x32_34(in);
+ store_buffer_16xN(in, stride, dest + 16, 32);
+}
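/* Editor's aside on the idct32 code above -- a hedged scalar model, not
 * part of the patch. Each butterfly(&a0, &a1, &c0, &c1, &b0, &b1) call
 * interleaves two 16-bit vectors and multiply-accumulates against cosine
 * pairs built with pair256_set_epi16(w0, w1); per lane it is assumed to
 * reduce to the following, with DCT_CONST_BITS == 14 in this tree: */
static INLINE int16_t butterfly_lane_model(int16_t a0, int16_t a1,
                                           int16_t w0, int16_t w1) {
  const int32_t v = (int32_t)a0 * w0 + (int32_t)a1 * w1;
  return (int16_t)((v + (1 << 13)) >> 14); /* round, then drop 14 bits */
}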
diff --git a/third_party/aom/aom_dsp/x86/inv_txfm_common_avx2.h b/third_party/aom/aom_dsp/x86/inv_txfm_common_avx2.h
new file mode 100644
index 0000000000..4238e651b0
--- /dev/null
+++ b/third_party/aom/aom_dsp/x86/inv_txfm_common_avx2.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2017, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#ifndef AOM_DSP_X86_INV_TXFM_COMMON_AVX2_H
+#define AOM_DSP_X86_INV_TXFM_COMMON_AVX2_H
+
+#include <immintrin.h>
+
+#include "aom_dsp/txfm_common.h"
+#include "aom_dsp/x86/txfm_common_avx2.h"
+
+static INLINE void load_coeff(const tran_low_t *coeff, __m256i *in) {
+#if CONFIG_HIGHBITDEPTH
+ *in = _mm256_setr_epi16(
+ (int16_t)coeff[0], (int16_t)coeff[1], (int16_t)coeff[2],
+ (int16_t)coeff[3], (int16_t)coeff[4], (int16_t)coeff[5],
+ (int16_t)coeff[6], (int16_t)coeff[7], (int16_t)coeff[8],
+ (int16_t)coeff[9], (int16_t)coeff[10], (int16_t)coeff[11],
+ (int16_t)coeff[12], (int16_t)coeff[13], (int16_t)coeff[14],
+ (int16_t)coeff[15]);
+#else
+ *in = _mm256_loadu_si256((const __m256i *)coeff);
+#endif
+}
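/* Editor's aside -- hedged, not part of the patch. With CONFIG_HIGHBITDEPTH,
 * tran_low_t is a 32-bit type, so load_coeff above narrows each coefficient
 * to 16 bits lane by lane; otherwise the coefficients are already 16-bit
 * and one unaligned 256-bit load suffices. Scalar model of the narrowing: */
static INLINE void load_coeff_model(const tran_low_t *coeff, int16_t *out16) {
  int i;
  for (i = 0; i < 16; ++i) out16[i] = (int16_t)coeff[i]; /* truncating cast */
}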
+
+static INLINE void load_buffer_16x16(const tran_low_t *coeff, __m256i *in) {
+ int i = 0;
+ while (i < 16) {
+ load_coeff(coeff + (i << 4), &in[i]);
+ i += 1;
+ }
+}
+
+static INLINE void recon_and_store(const __m256i *res, uint8_t *output) {
+ const __m128i zero = _mm_setzero_si128();
+ __m128i x = _mm_loadu_si128((__m128i const *)output);
+ __m128i p0 = _mm_unpacklo_epi8(x, zero);
+ __m128i p1 = _mm_unpackhi_epi8(x, zero);
+
+ p0 = _mm_add_epi16(p0, _mm256_castsi256_si128(*res));
+ p1 = _mm_add_epi16(p1, _mm256_extractf128_si256(*res, 1));
+ x = _mm_packus_epi16(p0, p1);
+ _mm_storeu_si128((__m128i *)output, x);
+}
+
+#define IDCT_ROUNDING_POS (6)
+static INLINE void store_buffer_16xN(__m256i *in, const int stride,
+ uint8_t *output, int num) {
+ const __m256i rounding = _mm256_set1_epi16(1 << (IDCT_ROUNDING_POS - 1));
+ int i = 0;
+
+ while (i < num) {
+ in[i] = _mm256_adds_epi16(in[i], rounding);
+ in[i] = _mm256_srai_epi16(in[i], IDCT_ROUNDING_POS);
+ recon_and_store(&in[i], output + i * stride);
+ i += 1;
+ }
+}
+
+static INLINE void unpack_butter_fly(const __m256i *a0, const __m256i *a1,
+ const __m256i *c0, const __m256i *c1,
+ __m256i *b0, __m256i *b1) {
+ __m256i x0, x1;
+ x0 = _mm256_unpacklo_epi16(*a0, *a1);
+ x1 = _mm256_unpackhi_epi16(*a0, *a1);
+ *b0 = butter_fly(&x0, &x1, c0);
+ *b1 = butter_fly(&x0, &x1, c1);
+}
+
+void av1_idct16_avx2(__m256i *in);
+
+#endif // AOM_DSP_X86_INV_TXFM_COMMON_AVX2_H
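/* Editor's aside -- a hedged scalar model of store_buffer_16xN plus
 * recon_and_store above (illustration, not part of the patch): round the
 * residual by IDCT_ROUNDING_POS, add the predictor read from 'output', and
 * saturate to [0, 255] the way _mm_packus_epi16 does: */
static INLINE uint8_t recon_pixel_model(int16_t residual, uint8_t pred) {
  const int r =
      (residual + (1 << (IDCT_ROUNDING_POS - 1))) >> IDCT_ROUNDING_POS;
  const int v = pred + r;
  return (uint8_t)(v < 0 ? 0 : (v > 255 ? 255 : v)); /* packus saturation */
}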
diff --git a/third_party/aom/aom_dsp/x86/inv_txfm_sse2.c b/third_party/aom/aom_dsp/x86/inv_txfm_sse2.c
index 5795a1845c..be200df4c9 100644
--- a/third_party/aom/aom_dsp/x86/inv_txfm_sse2.c
+++ b/third_party/aom/aom_dsp/x86/inv_txfm_sse2.c
@@ -3628,4 +3628,107 @@ void aom_highbd_idct4x4_16_add_sse2(const tran_low_t *input, uint8_t *dest8,
}
}
+void aom_highbd_idct8x8_10_add_sse2(const tran_low_t *input, uint8_t *dest8,
+ int stride, int bd) {
+ tran_low_t out[8 * 8] = { 0 };
+ tran_low_t *outptr = out;
+ int i, j, test;
+ __m128i inptr[8];
+ __m128i min_input, max_input, temp1, temp2, sign_bits;
+ uint16_t *dest = CONVERT_TO_SHORTPTR(dest8);
+ const __m128i zero = _mm_set1_epi16(0);
+ const __m128i sixteen = _mm_set1_epi16(16);
+ const __m128i max = _mm_set1_epi16(6201);
+ const __m128i min = _mm_set1_epi16(-6201);
+ int optimised_cols = 0;
+
+ // Load input into __m128i & pack to 16 bits
+ for (i = 0; i < 8; i++) {
+ temp1 = _mm_loadu_si128((const __m128i *)(input + 8 * i));
+ temp2 = _mm_loadu_si128((const __m128i *)(input + 8 * i + 4));
+ inptr[i] = _mm_packs_epi32(temp1, temp2);
+ }
+
+ // Find the min & max for the row transform
+ // Only the first 4 rows have non-zero coeffs
+ max_input = _mm_max_epi16(inptr[0], inptr[1]);
+ min_input = _mm_min_epi16(inptr[0], inptr[1]);
+ for (i = 2; i < 4; i++) {
+ max_input = _mm_max_epi16(max_input, inptr[i]);
+ min_input = _mm_min_epi16(min_input, inptr[i]);
+ }
+ max_input = _mm_cmpgt_epi16(max_input, max);
+ min_input = _mm_cmplt_epi16(min_input, min);
+ temp1 = _mm_or_si128(max_input, min_input);
+ test = _mm_movemask_epi8(temp1);
+
+ if (!test) {
+ // Do the row transform
+ aom_idct8_sse2(inptr);
+
+ // Find the min & max for the column transform
+ // N.B. Only the first 4 cols contain non-zero coeffs
+ max_input = _mm_max_epi16(inptr[0], inptr[1]);
+ min_input = _mm_min_epi16(inptr[0], inptr[1]);
+ for (i = 2; i < 8; i++) {
+ max_input = _mm_max_epi16(max_input, inptr[i]);
+ min_input = _mm_min_epi16(min_input, inptr[i]);
+ }
+ max_input = _mm_cmpgt_epi16(max_input, max);
+ min_input = _mm_cmplt_epi16(min_input, min);
+ temp1 = _mm_or_si128(max_input, min_input);
+ test = _mm_movemask_epi8(temp1);
+
+ if (test) {
+ // Use the fact that only the first 4 rows contain non-zero coeffs
+ array_transpose_4X8(inptr, inptr);
+ for (i = 0; i < 4; i++) {
+ sign_bits = _mm_cmplt_epi16(inptr[i], zero);
+ temp1 = _mm_unpackhi_epi16(inptr[i], sign_bits);
+ temp2 = _mm_unpacklo_epi16(inptr[i], sign_bits);
+ _mm_storeu_si128((__m128i *)(outptr + 4 * (2 * i + 1)), temp1);
+ _mm_storeu_si128((__m128i *)(outptr + 4 * (2 * i)), temp2);
+ }
+ } else {
+ // Set to use the optimised transform for the column
+ optimised_cols = 1;
+ }
+ } else {
+ // Run the un-optimised row transform
+ for (i = 0; i < 4; ++i) {
+ aom_highbd_idct8_c(input, outptr, bd);
+ input += 8;
+ outptr += 8;
+ }
+ }
+
+ if (optimised_cols) {
+ aom_idct8_sse2(inptr);
+
+ // Final rounding & shift, then reconstruction and store
+ {
+ __m128i d[8];
+ for (i = 0; i < 8; i++) {
+ inptr[i] = _mm_add_epi16(inptr[i], sixteen);
+ d[i] = _mm_loadu_si128((const __m128i *)(dest + stride * i));
+ inptr[i] = _mm_srai_epi16(inptr[i], 5);
+ d[i] = clamp_high_sse2(_mm_adds_epi16(d[i], inptr[i]), bd);
+ // Store
+ _mm_storeu_si128((__m128i *)(dest + stride * i), d[i]);
+ }
+ }
+ } else {
+ // Run the un-optimised column transform
+ tran_low_t temp_in[8], temp_out[8];
+ for (i = 0; i < 8; ++i) {
+ for (j = 0; j < 8; ++j) temp_in[j] = out[j * 8 + i];
+ aom_highbd_idct8_c(temp_in, temp_out, bd);
+ for (j = 0; j < 8; ++j) {
+ dest[j * stride + i] = highbd_clip_pixel_add(
+ dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 5), bd);
+ }
+ }
+ }
+}
+
#endif // CONFIG_HIGHBITDEPTH
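/* Editor's aside on aom_highbd_idct8x8_10_add_sse2 above -- a hedged
 * sketch, not part of the patch. The min/max test decides whether the
 * packed 16-bit fast path is safe; +/-6201 is presumably the precomputed
 * bound below which no intermediate of the 8x8 transform can overflow
 * int16. Scalar equivalent of the guard: */
static int fits_16bit_path(const int16_t *coeffs, int n, int16_t bound) {
  int i;
  for (i = 0; i < n; ++i)
    if (coeffs[i] > bound || coeffs[i] < -bound) return 0; /* fall back to C */
  return 1; /* safe to run the packed 16-bit idct */
}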
diff --git a/third_party/aom/aom_dsp/x86/masked_sad_intrin_ssse3.c b/third_party/aom/aom_dsp/x86/masked_sad_intrin_ssse3.c
index 5166e9e0af..9d16a3e841 100644
--- a/third_party/aom/aom_dsp/x86/masked_sad_intrin_ssse3.c
+++ b/third_party/aom/aom_dsp/x86/masked_sad_intrin_ssse3.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, Alliance for Open Media. All rights reserved
+ * Copyright (c) 2017, Alliance for Open Media. All rights reserved
*
* This source code is subject to the terms of the BSD 2 Clause License and
* the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
@@ -9,49 +9,70 @@
* PATENTS file, you can obtain it at www.aomedia.org/license/patent.
*/
-#include <stdlib.h>
-#include <emmintrin.h>
+#include <stdio.h>
#include <tmmintrin.h>
-#include "aom_ports/mem.h"
#include "./aom_config.h"
+#include "./aom_dsp_rtcd.h"
+#include "aom_dsp/blend.h"
#include "aom/aom_integer.h"
+#include "aom_dsp/x86/synonyms.h"
-static INLINE __m128i width8_load_2rows(const uint8_t *ptr, int stride) {
- __m128i temp1 = _mm_loadl_epi64((const __m128i *)ptr);
- __m128i temp2 = _mm_loadl_epi64((const __m128i *)(ptr + stride));
- return _mm_unpacklo_epi64(temp1, temp2);
-}
-
-static INLINE __m128i width4_load_4rows(const uint8_t *ptr, int stride) {
- __m128i temp1 = _mm_cvtsi32_si128(*(const uint32_t *)ptr);
- __m128i temp2 = _mm_cvtsi32_si128(*(const uint32_t *)(ptr + stride));
- __m128i temp3 = _mm_unpacklo_epi32(temp1, temp2);
- temp1 = _mm_cvtsi32_si128(*(const uint32_t *)(ptr + stride * 2));
- temp2 = _mm_cvtsi32_si128(*(const uint32_t *)(ptr + stride * 3));
- temp1 = _mm_unpacklo_epi32(temp1, temp2);
- return _mm_unpacklo_epi64(temp3, temp1);
-}
-
-static INLINE unsigned int masked_sad_ssse3(const uint8_t *a_ptr, int a_stride,
+// For width a multiple of 16
+static INLINE unsigned int masked_sad_ssse3(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t *a_ptr, int a_stride,
const uint8_t *b_ptr, int b_stride,
const uint8_t *m_ptr, int m_stride,
int width, int height);
static INLINE unsigned int masked_sad8xh_ssse3(
- const uint8_t *a_ptr, int a_stride, const uint8_t *b_ptr, int b_stride,
- const uint8_t *m_ptr, int m_stride, int height);
+ const uint8_t *src_ptr, int src_stride, const uint8_t *a_ptr, int a_stride,
+ const uint8_t *b_ptr, int b_stride, const uint8_t *m_ptr, int m_stride,
+ int height);
static INLINE unsigned int masked_sad4xh_ssse3(
- const uint8_t *a_ptr, int a_stride, const uint8_t *b_ptr, int b_stride,
- const uint8_t *m_ptr, int m_stride, int height);
-
-#define MASKSADMXN_SSSE3(m, n) \
- unsigned int aom_masked_sad##m##x##n##_ssse3( \
- const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, \
- const uint8_t *msk, int msk_stride) { \
- return masked_sad_ssse3(src, src_stride, ref, ref_stride, msk, msk_stride, \
- m, n); \
+ const uint8_t *src_ptr, int src_stride, const uint8_t *a_ptr, int a_stride,
+ const uint8_t *b_ptr, int b_stride, const uint8_t *m_ptr, int m_stride,
+ int height);
+
+#define MASKSADMXN_SSSE3(m, n) \
+ unsigned int aom_masked_sad##m##x##n##_ssse3( \
+ const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, \
+ const uint8_t *second_pred, const uint8_t *msk, int msk_stride, \
+ int invert_mask) { \
+ if (!invert_mask) \
+ return masked_sad_ssse3(src, src_stride, ref, ref_stride, second_pred, \
+ m, msk, msk_stride, m, n); \
+ else \
+ return masked_sad_ssse3(src, src_stride, second_pred, m, ref, \
+ ref_stride, msk, msk_stride, m, n); \
+ }
+
+#define MASKSAD8XN_SSSE3(n) \
+ unsigned int aom_masked_sad8x##n##_ssse3( \
+ const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, \
+ const uint8_t *second_pred, const uint8_t *msk, int msk_stride, \
+ int invert_mask) { \
+ if (!invert_mask) \
+ return masked_sad8xh_ssse3(src, src_stride, ref, ref_stride, \
+ second_pred, 8, msk, msk_stride, n); \
+ else \
+ return masked_sad8xh_ssse3(src, src_stride, second_pred, 8, ref, \
+ ref_stride, msk, msk_stride, n); \
+ }
+
+#define MASKSAD4XN_SSSE3(n) \
+ unsigned int aom_masked_sad4x##n##_ssse3( \
+ const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, \
+ const uint8_t *second_pred, const uint8_t *msk, int msk_stride, \
+ int invert_mask) { \
+ if (!invert_mask) \
+ return masked_sad4xh_ssse3(src, src_stride, ref, ref_stride, \
+ second_pred, 4, msk, msk_stride, n); \
+ else \
+ return masked_sad4xh_ssse3(src, src_stride, second_pred, 4, ref, \
+ ref_stride, msk, msk_stride, n); \
}
#if CONFIG_EXT_PARTITION
@@ -67,165 +88,181 @@ MASKSADMXN_SSSE3(32, 16)
MASKSADMXN_SSSE3(16, 32)
MASKSADMXN_SSSE3(16, 16)
MASKSADMXN_SSSE3(16, 8)
-
-#define MASKSAD8XN_SSSE3(n) \
- unsigned int aom_masked_sad8x##n##_ssse3( \
- const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, \
- const uint8_t *msk, int msk_stride) { \
- return masked_sad8xh_ssse3(src, src_stride, ref, ref_stride, msk, \
- msk_stride, n); \
- }
-
MASKSAD8XN_SSSE3(16)
MASKSAD8XN_SSSE3(8)
MASKSAD8XN_SSSE3(4)
-
-#define MASKSAD4XN_SSSE3(n) \
- unsigned int aom_masked_sad4x##n##_ssse3( \
- const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, \
- const uint8_t *msk, int msk_stride) { \
- return masked_sad4xh_ssse3(src, src_stride, ref, ref_stride, msk, \
- msk_stride, n); \
- }
-
MASKSAD4XN_SSSE3(8)
MASKSAD4XN_SSSE3(4)
-// For width a multiple of 16
-// Assumes values in m are <=64
-static INLINE unsigned int masked_sad_ssse3(const uint8_t *a_ptr, int a_stride,
+static INLINE unsigned int masked_sad_ssse3(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t *a_ptr, int a_stride,
const uint8_t *b_ptr, int b_stride,
const uint8_t *m_ptr, int m_stride,
int width, int height) {
- int y, x;
- __m128i a, b, m, temp1, temp2;
+ int x, y;
__m128i res = _mm_setzero_si128();
- __m128i one = _mm_set1_epi16(1);
- // For each row
+ const __m128i mask_max = _mm_set1_epi8((1 << AOM_BLEND_A64_ROUND_BITS));
+
for (y = 0; y < height; y++) {
- // Covering the full width
for (x = 0; x < width; x += 16) {
- // Load a, b, m in xmm registers
- a = _mm_loadu_si128((const __m128i *)(a_ptr + x));
- b = _mm_loadu_si128((const __m128i *)(b_ptr + x));
- m = _mm_loadu_si128((const __m128i *)(m_ptr + x));
-
- // Calculate the difference between a & b
- temp1 = _mm_subs_epu8(a, b);
- temp2 = _mm_subs_epu8(b, a);
- temp1 = _mm_or_si128(temp1, temp2);
-
- // Multiply by m and add together
- temp2 = _mm_maddubs_epi16(temp1, m);
- // Pad out row result to 32 bit integers & add to running total
- res = _mm_add_epi32(res, _mm_madd_epi16(temp2, one));
+ const __m128i src = _mm_loadu_si128((const __m128i *)&src_ptr[x]);
+ const __m128i a = _mm_loadu_si128((const __m128i *)&a_ptr[x]);
+ const __m128i b = _mm_loadu_si128((const __m128i *)&b_ptr[x]);
+ const __m128i m = _mm_loadu_si128((const __m128i *)&m_ptr[x]);
+ const __m128i m_inv = _mm_sub_epi8(mask_max, m);
+
+ // Calculate 16 predicted pixels.
+ // Note that the maximum value of any entry of 'pred_l' or 'pred_r'
+ // is 64 * 255, so we have plenty of space to add rounding constants.
+ const __m128i data_l = _mm_unpacklo_epi8(a, b);
+ const __m128i mask_l = _mm_unpacklo_epi8(m, m_inv);
+ __m128i pred_l = _mm_maddubs_epi16(data_l, mask_l);
+ pred_l = xx_roundn_epu16(pred_l, AOM_BLEND_A64_ROUND_BITS);
+
+ const __m128i data_r = _mm_unpackhi_epi8(a, b);
+ const __m128i mask_r = _mm_unpackhi_epi8(m, m_inv);
+ __m128i pred_r = _mm_maddubs_epi16(data_r, mask_r);
+ pred_r = xx_roundn_epu16(pred_r, AOM_BLEND_A64_ROUND_BITS);
+
+ const __m128i pred = _mm_packus_epi16(pred_l, pred_r);
+ res = _mm_add_epi32(res, _mm_sad_epu8(pred, src));
}
- // Move onto the next row
+
+ src_ptr += src_stride;
a_ptr += a_stride;
b_ptr += b_stride;
m_ptr += m_stride;
}
- res = _mm_hadd_epi32(res, _mm_setzero_si128());
- res = _mm_hadd_epi32(res, _mm_setzero_si128());
- // sad = (sad + 31) >> 6;
- return (_mm_cvtsi128_si32(res) + 31) >> 6;
+ // At this point, we have two 32-bit partial SADs in lanes 0 and 2 of 'res'.
+ int32_t sad =
+ _mm_cvtsi128_si32(res) + _mm_cvtsi128_si32(_mm_srli_si128(res, 8));
+ return (sad + 31) >> 6;
}
static INLINE unsigned int masked_sad8xh_ssse3(
- const uint8_t *a_ptr, int a_stride, const uint8_t *b_ptr, int b_stride,
- const uint8_t *m_ptr, int m_stride, int height) {
+ const uint8_t *src_ptr, int src_stride, const uint8_t *a_ptr, int a_stride,
+ const uint8_t *b_ptr, int b_stride, const uint8_t *m_ptr, int m_stride,
+ int height) {
int y;
- __m128i a, b, m, temp1, temp2, row_res;
__m128i res = _mm_setzero_si128();
- __m128i one = _mm_set1_epi16(1);
- // Add the masked SAD for 2 rows at a time
- for (y = 0; y < height; y += 2) {
- // Load a, b, m in xmm registers
- a = width8_load_2rows(a_ptr, a_stride);
- b = width8_load_2rows(b_ptr, b_stride);
- m = width8_load_2rows(m_ptr, m_stride);
-
- // Calculate the difference between a & b
- temp1 = _mm_subs_epu8(a, b);
- temp2 = _mm_subs_epu8(b, a);
- temp1 = _mm_or_si128(temp1, temp2);
-
- // Multiply by m and add together
- row_res = _mm_maddubs_epi16(temp1, m);
-
- // Pad out row result to 32 bit integers & add to running total
- res = _mm_add_epi32(res, _mm_madd_epi16(row_res, one));
+ const __m128i mask_max = _mm_set1_epi8((1 << AOM_BLEND_A64_ROUND_BITS));
- // Move onto the next rows
+ for (y = 0; y < height; y += 2) {
+ const __m128i src = _mm_unpacklo_epi64(
+ _mm_loadl_epi64((const __m128i *)src_ptr),
+ _mm_loadl_epi64((const __m128i *)&src_ptr[src_stride]));
+ const __m128i a0 = _mm_loadl_epi64((const __m128i *)a_ptr);
+ const __m128i a1 = _mm_loadl_epi64((const __m128i *)&a_ptr[a_stride]);
+ const __m128i b0 = _mm_loadl_epi64((const __m128i *)b_ptr);
+ const __m128i b1 = _mm_loadl_epi64((const __m128i *)&b_ptr[b_stride]);
+ const __m128i m =
+ _mm_unpacklo_epi64(_mm_loadl_epi64((const __m128i *)m_ptr),
+ _mm_loadl_epi64((const __m128i *)&m_ptr[m_stride]));
+ const __m128i m_inv = _mm_sub_epi8(mask_max, m);
+
+ const __m128i data_l = _mm_unpacklo_epi8(a0, b0);
+ const __m128i mask_l = _mm_unpacklo_epi8(m, m_inv);
+ __m128i pred_l = _mm_maddubs_epi16(data_l, mask_l);
+ pred_l = xx_roundn_epu16(pred_l, AOM_BLEND_A64_ROUND_BITS);
+
+ const __m128i data_r = _mm_unpacklo_epi8(a1, b1);
+ const __m128i mask_r = _mm_unpackhi_epi8(m, m_inv);
+ __m128i pred_r = _mm_maddubs_epi16(data_r, mask_r);
+ pred_r = xx_roundn_epu16(pred_r, AOM_BLEND_A64_ROUND_BITS);
+
+ const __m128i pred = _mm_packus_epi16(pred_l, pred_r);
+ res = _mm_add_epi32(res, _mm_sad_epu8(pred, src));
+
+ src_ptr += src_stride * 2;
a_ptr += a_stride * 2;
b_ptr += b_stride * 2;
m_ptr += m_stride * 2;
}
- res = _mm_hadd_epi32(res, _mm_setzero_si128());
- res = _mm_hadd_epi32(res, _mm_setzero_si128());
- // sad = (sad + 31) >> 6;
- return (_mm_cvtsi128_si32(res) + 31) >> 6;
+ int32_t sad =
+ _mm_cvtsi128_si32(res) + _mm_cvtsi128_si32(_mm_srli_si128(res, 8));
+ return (sad + 31) >> 6;
}
static INLINE unsigned int masked_sad4xh_ssse3(
- const uint8_t *a_ptr, int a_stride, const uint8_t *b_ptr, int b_stride,
- const uint8_t *m_ptr, int m_stride, int height) {
+ const uint8_t *src_ptr, int src_stride, const uint8_t *a_ptr, int a_stride,
+ const uint8_t *b_ptr, int b_stride, const uint8_t *m_ptr, int m_stride,
+ int height) {
int y;
- __m128i a, b, m, temp1, temp2, row_res;
__m128i res = _mm_setzero_si128();
- __m128i one = _mm_set1_epi16(1);
- // Add the masked SAD for 4 rows at a time
- for (y = 0; y < height; y += 4) {
- // Load a, b, m in xmm registers
- a = width4_load_4rows(a_ptr, a_stride);
- b = width4_load_4rows(b_ptr, b_stride);
- m = width4_load_4rows(m_ptr, m_stride);
-
- // Calculate the difference between a & b
- temp1 = _mm_subs_epu8(a, b);
- temp2 = _mm_subs_epu8(b, a);
- temp1 = _mm_or_si128(temp1, temp2);
-
- // Multiply by m and add together
- row_res = _mm_maddubs_epi16(temp1, m);
-
- // Pad out row result to 32 bit integers & add to running total
- res = _mm_add_epi32(res, _mm_madd_epi16(row_res, one));
-
- // Move onto the next rows
- a_ptr += a_stride * 4;
- b_ptr += b_stride * 4;
- m_ptr += m_stride * 4;
+ const __m128i mask_max = _mm_set1_epi8((1 << AOM_BLEND_A64_ROUND_BITS));
+
+ for (y = 0; y < height; y += 2) {
+ // Load two rows at a time; this seems to be a bit faster
+ // than four rows at a time in this case.
+ const __m128i src = _mm_unpacklo_epi32(
+ _mm_cvtsi32_si128(*(uint32_t *)src_ptr),
+ _mm_cvtsi32_si128(*(uint32_t *)&src_ptr[src_stride]));
+ const __m128i a =
+ _mm_unpacklo_epi32(_mm_cvtsi32_si128(*(uint32_t *)a_ptr),
+ _mm_cvtsi32_si128(*(uint32_t *)&a_ptr[a_stride]));
+ const __m128i b =
+ _mm_unpacklo_epi32(_mm_cvtsi32_si128(*(uint32_t *)b_ptr),
+ _mm_cvtsi32_si128(*(uint32_t *)&b_ptr[b_stride]));
+ const __m128i m =
+ _mm_unpacklo_epi32(_mm_cvtsi32_si128(*(uint32_t *)m_ptr),
+ _mm_cvtsi32_si128(*(uint32_t *)&m_ptr[m_stride]));
+ const __m128i m_inv = _mm_sub_epi8(mask_max, m);
+
+ const __m128i data = _mm_unpacklo_epi8(a, b);
+ const __m128i mask = _mm_unpacklo_epi8(m, m_inv);
+ __m128i pred_16bit = _mm_maddubs_epi16(data, mask);
+ pred_16bit = xx_roundn_epu16(pred_16bit, AOM_BLEND_A64_ROUND_BITS);
+
+ const __m128i pred = _mm_packus_epi16(pred_16bit, _mm_setzero_si128());
+ res = _mm_add_epi32(res, _mm_sad_epu8(pred, src));
+
+ src_ptr += src_stride * 2;
+ a_ptr += a_stride * 2;
+ b_ptr += b_stride * 2;
+ m_ptr += m_stride * 2;
}
- // Pad out row result to 32 bit integers & add to running total
- res = _mm_hadd_epi32(res, _mm_setzero_si128());
- res = _mm_hadd_epi32(res, _mm_setzero_si128());
- // sad = (sad + 31) >> 6;
- return (_mm_cvtsi128_si32(res) + 31) >> 6;
+ // At this point, the SAD is stored in lane 0 of 'res'
+ int32_t sad = _mm_cvtsi128_si32(res);
+ return (sad + 31) >> 6;
}
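/* Editor's aside -- a hedged scalar model of the three masked SAD kernels
 * above (illustration, not part of the patch). AOM_BLEND_A64_ROUND_BITS is
 * 6, so the mask blends the two predictors in 1/64 steps before the SAD: */
static unsigned int masked_sad_model(const uint8_t *src, int src_stride,
                                     const uint8_t *a, int a_stride,
                                     const uint8_t *b, int b_stride,
                                     const uint8_t *m, int m_stride,
                                     int w, int h) {
  unsigned int sad = 0;
  int x, y;
  for (y = 0; y < h; ++y) {
    for (x = 0; x < w; ++x) {
      const int pred = (a[x] * m[x] + b[x] * (64 - m[x]) + 32) >> 6;
      const int diff = pred - src[x];
      sad += (unsigned int)(diff >= 0 ? diff : -diff);
    }
    src += src_stride;
    a += a_stride;
    b += b_stride;
    m += m_stride;
  }
  return (sad + 31) >> 6; /* final scaling kept to match the SIMD code above */
}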
#if CONFIG_HIGHBITDEPTH
-static INLINE __m128i highbd_width4_load_2rows(const uint16_t *ptr,
- int stride) {
- __m128i temp1 = _mm_loadl_epi64((const __m128i *)ptr);
- __m128i temp2 = _mm_loadl_epi64((const __m128i *)(ptr + stride));
- return _mm_unpacklo_epi64(temp1, temp2);
-}
-
+// For width a multiple of 8
static INLINE unsigned int highbd_masked_sad_ssse3(
- const uint8_t *a8_ptr, int a_stride, const uint8_t *b8_ptr, int b_stride,
- const uint8_t *m_ptr, int m_stride, int width, int height);
+ const uint8_t *src8, int src_stride, const uint8_t *a8, int a_stride,
+ const uint8_t *b8, int b_stride, const uint8_t *m_ptr, int m_stride,
+ int width, int height);
static INLINE unsigned int highbd_masked_sad4xh_ssse3(
- const uint8_t *a8_ptr, int a_stride, const uint8_t *b8_ptr, int b_stride,
- const uint8_t *m_ptr, int m_stride, int height);
+ const uint8_t *src8, int src_stride, const uint8_t *a8, int a_stride,
+ const uint8_t *b8, int b_stride, const uint8_t *m_ptr, int m_stride,
+ int height);
#define HIGHBD_MASKSADMXN_SSSE3(m, n) \
unsigned int aom_highbd_masked_sad##m##x##n##_ssse3( \
- const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, \
- const uint8_t *msk, int msk_stride) { \
- return highbd_masked_sad_ssse3(src, src_stride, ref, ref_stride, msk, \
- msk_stride, m, n); \
+ const uint8_t *src8, int src_stride, const uint8_t *ref8, \
+ int ref_stride, const uint8_t *second_pred8, const uint8_t *msk, \
+ int msk_stride, int invert_mask) { \
+ if (!invert_mask) \
+ return highbd_masked_sad_ssse3(src8, src_stride, ref8, ref_stride, \
+ second_pred8, m, msk, msk_stride, m, n); \
+ else \
+ return highbd_masked_sad_ssse3(src8, src_stride, second_pred8, m, ref8, \
+ ref_stride, msk, msk_stride, m, n); \
+ }
+
+#define HIGHBD_MASKSAD4XN_SSSE3(n) \
+ unsigned int aom_highbd_masked_sad4x##n##_ssse3( \
+ const uint8_t *src8, int src_stride, const uint8_t *ref8, \
+ int ref_stride, const uint8_t *second_pred8, const uint8_t *msk, \
+ int msk_stride, int invert_mask) { \
+ if (!invert_mask) \
+ return highbd_masked_sad4xh_ssse3(src8, src_stride, ref8, ref_stride, \
+ second_pred8, 4, msk, msk_stride, n); \
+ else \
+ return highbd_masked_sad4xh_ssse3(src8, src_stride, second_pred8, 4, \
+ ref8, ref_stride, msk, msk_stride, n); \
}
#if CONFIG_EXT_PARTITION
@@ -244,91 +281,124 @@ HIGHBD_MASKSADMXN_SSSE3(16, 8)
HIGHBD_MASKSADMXN_SSSE3(8, 16)
HIGHBD_MASKSADMXN_SSSE3(8, 8)
HIGHBD_MASKSADMXN_SSSE3(8, 4)
-
-#define HIGHBD_MASKSAD4XN_SSSE3(n) \
- unsigned int aom_highbd_masked_sad4x##n##_ssse3( \
- const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, \
- const uint8_t *msk, int msk_stride) { \
- return highbd_masked_sad4xh_ssse3(src, src_stride, ref, ref_stride, msk, \
- msk_stride, n); \
- }
-
HIGHBD_MASKSAD4XN_SSSE3(8)
HIGHBD_MASKSAD4XN_SSSE3(4)
-// For width a multiple of 8
-// Assumes values in m are <=64
static INLINE unsigned int highbd_masked_sad_ssse3(
- const uint8_t *a8_ptr, int a_stride, const uint8_t *b8_ptr, int b_stride,
- const uint8_t *m_ptr, int m_stride, int width, int height) {
- int y, x;
- __m128i a, b, m, temp1, temp2;
- const uint16_t *a_ptr = CONVERT_TO_SHORTPTR(a8_ptr);
- const uint16_t *b_ptr = CONVERT_TO_SHORTPTR(b8_ptr);
+ const uint8_t *src8, int src_stride, const uint8_t *a8, int a_stride,
+ const uint8_t *b8, int b_stride, const uint8_t *m_ptr, int m_stride,
+ int width, int height) {
+ const uint16_t *src_ptr = CONVERT_TO_SHORTPTR(src8);
+ const uint16_t *a_ptr = CONVERT_TO_SHORTPTR(a8);
+ const uint16_t *b_ptr = CONVERT_TO_SHORTPTR(b8);
+ int x, y;
__m128i res = _mm_setzero_si128();
- // For each row
+ const __m128i mask_max = _mm_set1_epi16((1 << AOM_BLEND_A64_ROUND_BITS));
+ const __m128i round_const =
+ _mm_set1_epi32((1 << AOM_BLEND_A64_ROUND_BITS) >> 1);
+ const __m128i one = _mm_set1_epi16(1);
+
for (y = 0; y < height; y++) {
- // Covering the full width
for (x = 0; x < width; x += 8) {
- // Load a, b, m in xmm registers
- a = _mm_loadu_si128((const __m128i *)(a_ptr + x));
- b = _mm_loadu_si128((const __m128i *)(b_ptr + x));
- m = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(m_ptr + x)),
- _mm_setzero_si128());
-
- // Calculate the difference between a & b
- temp1 = _mm_subs_epu16(a, b);
- temp2 = _mm_subs_epu16(b, a);
- temp1 = _mm_or_si128(temp1, temp2);
-
- // Add result of multiplying by m and add pairs together to running total
- res = _mm_add_epi32(res, _mm_madd_epi16(temp1, m));
+ const __m128i src = _mm_loadu_si128((const __m128i *)&src_ptr[x]);
+ const __m128i a = _mm_loadu_si128((const __m128i *)&a_ptr[x]);
+ const __m128i b = _mm_loadu_si128((const __m128i *)&b_ptr[x]);
+ // Zero-extend mask to 16 bits
+ const __m128i m = _mm_unpacklo_epi8(
+ _mm_loadl_epi64((const __m128i *)&m_ptr[x]), _mm_setzero_si128());
+ const __m128i m_inv = _mm_sub_epi16(mask_max, m);
+
+ const __m128i data_l = _mm_unpacklo_epi16(a, b);
+ const __m128i mask_l = _mm_unpacklo_epi16(m, m_inv);
+ __m128i pred_l = _mm_madd_epi16(data_l, mask_l);
+ pred_l = _mm_srai_epi32(_mm_add_epi32(pred_l, round_const),
+ AOM_BLEND_A64_ROUND_BITS);
+
+ const __m128i data_r = _mm_unpackhi_epi16(a, b);
+ const __m128i mask_r = _mm_unpackhi_epi16(m, m_inv);
+ __m128i pred_r = _mm_madd_epi16(data_r, mask_r);
+ pred_r = _mm_srai_epi32(_mm_add_epi32(pred_r, round_const),
+ AOM_BLEND_A64_ROUND_BITS);
+
+ // Note: the maximum value in pred_l/r is (2^bd)-1 < 2^15,
+ // so it is safe to do signed saturation here.
+ const __m128i pred = _mm_packs_epi32(pred_l, pred_r);
+ // There is no 16-bit SAD instruction, so we have to synthesize
+ // an 8-element SAD. We do this by keeping 4 32-bit partial SADs
+ // and accumulating them at the end.
+ const __m128i diff = _mm_abs_epi16(_mm_sub_epi16(pred, src));
+ res = _mm_add_epi32(res, _mm_madd_epi16(diff, one));
}
- // Move onto the next row
+
+ src_ptr += src_stride;
a_ptr += a_stride;
b_ptr += b_stride;
m_ptr += m_stride;
}
- res = _mm_hadd_epi32(res, _mm_setzero_si128());
- res = _mm_hadd_epi32(res, _mm_setzero_si128());
- // sad = (sad + 31) >> 6;
- return (_mm_cvtsi128_si32(res) + 31) >> 6;
+ // At this point, we have four 32-bit partial SADs stored in 'res'.
+ res = _mm_hadd_epi32(res, res);
+ res = _mm_hadd_epi32(res, res);
+ int sad = _mm_cvtsi128_si32(res);
+ return (sad + 31) >> 6;
}
static INLINE unsigned int highbd_masked_sad4xh_ssse3(
- const uint8_t *a8_ptr, int a_stride, const uint8_t *b8_ptr, int b_stride,
- const uint8_t *m_ptr, int m_stride, int height) {
+ const uint8_t *src8, int src_stride, const uint8_t *a8, int a_stride,
+ const uint8_t *b8, int b_stride, const uint8_t *m_ptr, int m_stride,
+ int height) {
+ const uint16_t *src_ptr = CONVERT_TO_SHORTPTR(src8);
+ const uint16_t *a_ptr = CONVERT_TO_SHORTPTR(a8);
+ const uint16_t *b_ptr = CONVERT_TO_SHORTPTR(b8);
int y;
- __m128i a, b, m, temp1, temp2;
- const uint16_t *a_ptr = CONVERT_TO_SHORTPTR(a8_ptr);
- const uint16_t *b_ptr = CONVERT_TO_SHORTPTR(b8_ptr);
__m128i res = _mm_setzero_si128();
- // Add the masked SAD for 2 rows at a time
+ const __m128i mask_max = _mm_set1_epi16((1 << AOM_BLEND_A64_ROUND_BITS));
+ const __m128i round_const =
+ _mm_set1_epi32((1 << AOM_BLEND_A64_ROUND_BITS) >> 1);
+ const __m128i one = _mm_set1_epi16(1);
+
for (y = 0; y < height; y += 2) {
- // Load a, b, m in xmm registers
- a = highbd_width4_load_2rows(a_ptr, a_stride);
- b = highbd_width4_load_2rows(b_ptr, b_stride);
- temp1 = _mm_loadl_epi64((const __m128i *)m_ptr);
- temp2 = _mm_loadl_epi64((const __m128i *)(m_ptr + m_stride));
- m = _mm_unpacklo_epi8(_mm_unpacklo_epi32(temp1, temp2),
- _mm_setzero_si128());
-
- // Calculate the difference between a & b
- temp1 = _mm_subs_epu16(a, b);
- temp2 = _mm_subs_epu16(b, a);
- temp1 = _mm_or_si128(temp1, temp2);
-
- // Multiply by m and add together
- res = _mm_add_epi32(res, _mm_madd_epi16(temp1, m));
-
- // Move onto the next rows
+ const __m128i src = _mm_unpacklo_epi64(
+ _mm_loadl_epi64((const __m128i *)src_ptr),
+ _mm_loadl_epi64((const __m128i *)&src_ptr[src_stride]));
+ const __m128i a =
+ _mm_unpacklo_epi64(_mm_loadl_epi64((const __m128i *)a_ptr),
+ _mm_loadl_epi64((const __m128i *)&a_ptr[a_stride]));
+ const __m128i b =
+ _mm_unpacklo_epi64(_mm_loadl_epi64((const __m128i *)b_ptr),
+ _mm_loadl_epi64((const __m128i *)&b_ptr[b_stride]));
+ // Zero-extend mask to 16 bits
+ const __m128i m = _mm_unpacklo_epi8(
+ _mm_unpacklo_epi32(
+ _mm_cvtsi32_si128(*(const uint32_t *)m_ptr),
+ _mm_cvtsi32_si128(*(const uint32_t *)&m_ptr[m_stride])),
+ _mm_setzero_si128());
+ const __m128i m_inv = _mm_sub_epi16(mask_max, m);
+
+ const __m128i data_l = _mm_unpacklo_epi16(a, b);
+ const __m128i mask_l = _mm_unpacklo_epi16(m, m_inv);
+ __m128i pred_l = _mm_madd_epi16(data_l, mask_l);
+ pred_l = _mm_srai_epi32(_mm_add_epi32(pred_l, round_const),
+ AOM_BLEND_A64_ROUND_BITS);
+
+ const __m128i data_r = _mm_unpackhi_epi16(a, b);
+ const __m128i mask_r = _mm_unpackhi_epi16(m, m_inv);
+ __m128i pred_r = _mm_madd_epi16(data_r, mask_r);
+ pred_r = _mm_srai_epi32(_mm_add_epi32(pred_r, round_const),
+ AOM_BLEND_A64_ROUND_BITS);
+
+ const __m128i pred = _mm_packs_epi32(pred_l, pred_r);
+ const __m128i diff = _mm_abs_epi16(_mm_sub_epi16(pred, src));
+ res = _mm_add_epi32(res, _mm_madd_epi16(diff, one));
+
+ src_ptr += src_stride * 2;
a_ptr += a_stride * 2;
b_ptr += b_stride * 2;
m_ptr += m_stride * 2;
}
- res = _mm_hadd_epi32(res, _mm_setzero_si128());
- res = _mm_hadd_epi32(res, _mm_setzero_si128());
- // sad = (sad + 31) >> 6;
- return (_mm_cvtsi128_si32(res) + 31) >> 6;
+ res = _mm_hadd_epi32(res, res);
+ res = _mm_hadd_epi32(res, res);
+ int sad = _mm_cvtsi128_si32(res);
+ return (sad + 31) >> 6;
}
-#endif // CONFIG_HIGHBITDEPTH
+
+#endif
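/* Editor's aside -- illustration only, not part of the patch. As the
 * comment in highbd_masked_sad_ssse3 notes, there is no 16-bit SAD
 * instruction, so the code multiplies the absolute differences by a vector
 * of ones with _mm_madd_epi16: madd sums adjacent lane products, folding
 * eight 16-bit values into four 32-bit partials, which the final pair of
 * _mm_hadd_epi32 calls then reduces to a single sum. Scalar model: */
static INLINE int32_t madd_ones_model(const int16_t absdiff[8]) {
  int32_t partial[4];
  int i, total = 0;
  for (i = 0; i < 4; ++i) /* one partial sum per 32-bit lane */
    partial[i] = (int32_t)absdiff[2 * i] + (int32_t)absdiff[2 * i + 1];
  for (i = 0; i < 4; ++i) total += partial[i]; /* the hadd_epi32 steps */
  return total;
}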
diff --git a/third_party/aom/aom_dsp/x86/masked_variance_intrin_ssse3.c b/third_party/aom/aom_dsp/x86/masked_variance_intrin_ssse3.c
index fe14597f64..be9d437d25 100644
--- a/third_party/aom/aom_dsp/x86/masked_variance_intrin_ssse3.c
+++ b/third_party/aom/aom_dsp/x86/masked_variance_intrin_ssse3.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, Alliance for Open Media. All rights reserved
+ * Copyright (c) 2017, Alliance for Open Media. All rights reserved
*
* This source code is subject to the terms of the BSD 2 Clause License and
* the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
@@ -9,1940 +9,1003 @@
* PATENTS file, you can obtain it at www.aomedia.org/license/patent.
*/
-#include <assert.h>
#include <stdlib.h>
-#include <emmintrin.h>
+#include <string.h>
#include <tmmintrin.h>
#include "./aom_config.h"
+#include "./aom_dsp_rtcd.h"
+#include "aom_dsp/blend.h"
#include "aom/aom_integer.h"
#include "aom_ports/mem.h"
#include "aom_dsp/aom_filter.h"
-
-// Half pixel shift
-#define HALF_PIXEL_OFFSET (BIL_SUBPEL_SHIFTS / 2)
-
-/*****************************************************************************
- * Horizontal additions
- *****************************************************************************/
-
-static INLINE int32_t hsum_epi32_si32(__m128i v_d) {
- v_d = _mm_hadd_epi32(v_d, v_d);
- v_d = _mm_hadd_epi32(v_d, v_d);
- return _mm_cvtsi128_si32(v_d);
-}
-
-static INLINE int64_t hsum_epi64_si64(__m128i v_q) {
- v_q = _mm_add_epi64(v_q, _mm_srli_si128(v_q, 8));
-#if ARCH_X86_64
- return _mm_cvtsi128_si64(v_q);
-#else
- {
- int64_t tmp;
- _mm_storel_epi64((__m128i *)&tmp, v_q);
- return tmp;
- }
-#endif
-}
-
-#if CONFIG_HIGHBITDEPTH
-static INLINE int64_t hsum_epi32_si64(__m128i v_d) {
- const __m128i v_sign_d = _mm_cmplt_epi32(v_d, _mm_setzero_si128());
- const __m128i v_0_q = _mm_unpacklo_epi32(v_d, v_sign_d);
- const __m128i v_1_q = _mm_unpackhi_epi32(v_d, v_sign_d);
- return hsum_epi64_si64(_mm_add_epi64(v_0_q, v_1_q));
-}
-#endif // CONFIG_HIGHBITDEPTH
-
-static INLINE uint32_t calc_masked_variance(__m128i v_sum_d, __m128i v_sse_q,
- uint32_t *sse, int w, int h) {
- int64_t sum64;
- uint64_t sse64;
-
- // Horizontal sum
- sum64 = hsum_epi32_si32(v_sum_d);
- sse64 = hsum_epi64_si64(v_sse_q);
-
- sum64 = (sum64 >= 0) ? sum64 : -sum64;
-
- // Round
- sum64 = ROUND_POWER_OF_TWO(sum64, 6);
- sse64 = ROUND_POWER_OF_TWO(sse64, 12);
-
- // Store the SSE
- *sse = (uint32_t)sse64;
- // Compute the variance
- return *sse - (uint32_t)((sum64 * sum64) / (w * h));
-}
-
-/*****************************************************************************
- * n*16 Wide versions
- *****************************************************************************/
-
-static INLINE unsigned int masked_variancewxh_ssse3(
- const uint8_t *a, int a_stride, const uint8_t *b, int b_stride,
- const uint8_t *m, int m_stride, int w, int h, unsigned int *sse) {
- int ii, jj;
-
- const __m128i v_zero = _mm_setzero_si128();
-
- __m128i v_sum_d = _mm_setzero_si128();
- __m128i v_sse_q = _mm_setzero_si128();
-
- assert((w % 16) == 0);
-
- for (ii = 0; ii < h; ii++) {
- for (jj = 0; jj < w; jj += 16) {
- // Load inputs - 8 bits
- const __m128i v_a_b = _mm_loadu_si128((const __m128i *)(a + jj));
- const __m128i v_b_b = _mm_loadu_si128((const __m128i *)(b + jj));
- const __m128i v_m_b = _mm_loadu_si128((const __m128i *)(m + jj));
-
- // Unpack to 16 bits - still containing max 8 bits
- const __m128i v_a0_w = _mm_unpacklo_epi8(v_a_b, v_zero);
- const __m128i v_b0_w = _mm_unpacklo_epi8(v_b_b, v_zero);
- const __m128i v_m0_w = _mm_unpacklo_epi8(v_m_b, v_zero);
- const __m128i v_a1_w = _mm_unpackhi_epi8(v_a_b, v_zero);
- const __m128i v_b1_w = _mm_unpackhi_epi8(v_b_b, v_zero);
- const __m128i v_m1_w = _mm_unpackhi_epi8(v_m_b, v_zero);
-
- // Difference: [-255, 255]
- const __m128i v_d0_w = _mm_sub_epi16(v_a0_w, v_b0_w);
- const __m128i v_d1_w = _mm_sub_epi16(v_a1_w, v_b1_w);
-
- // Error - [-255, 255] * [0, 64] = [0xc040, 0x3fc0] => fits in 15 bits
- const __m128i v_e0_w = _mm_mullo_epi16(v_d0_w, v_m0_w);
- const __m128i v_e0_d = _mm_madd_epi16(v_d0_w, v_m0_w);
- const __m128i v_e1_w = _mm_mullo_epi16(v_d1_w, v_m1_w);
- const __m128i v_e1_d = _mm_madd_epi16(v_d1_w, v_m1_w);
-
- // Squared error - with madd, the max is (15 bits * 15 bits) * 2 = 31 bits
- const __m128i v_se0_d = _mm_madd_epi16(v_e0_w, v_e0_w);
- const __m128i v_se1_d = _mm_madd_epi16(v_e1_w, v_e1_w);
-
- // Sum of v_se{0,1}_d - 31 bits + 31 bits = 32 bits
- const __m128i v_se_d = _mm_add_epi32(v_se0_d, v_se1_d);
-
- // Unpack Squared error to 64 bits
- const __m128i v_se_lo_q = _mm_unpacklo_epi32(v_se_d, v_zero);
- const __m128i v_se_hi_q = _mm_unpackhi_epi32(v_se_d, v_zero);
-
- // Accumulate
- v_sum_d = _mm_add_epi32(v_sum_d, v_e0_d);
- v_sum_d = _mm_add_epi32(v_sum_d, v_e1_d);
- v_sse_q = _mm_add_epi64(v_sse_q, v_se_lo_q);
- v_sse_q = _mm_add_epi64(v_sse_q, v_se_hi_q);
- }
-
- // Move on to next row
- a += a_stride;
- b += b_stride;
- m += m_stride;
- }
-
- return calc_masked_variance(v_sum_d, v_sse_q, sse, w, h);
-}
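To spell out the bit-width comments in the loop above: the largest masked error is 255 * 64 = 16320, which fits in 15 signed bits, and a madd of two such products stays well inside 32 signed bits:

\[ 255 \cdot 64 = 16320 < 2^{14}, \qquad 2 \cdot 16320^2 = 532{,}684{,}800 < 2^{31}. \]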
-
-#define MASKED_VARWXH(W, H) \
- unsigned int aom_masked_variance##W##x##H##_ssse3( \
- const uint8_t *a, int a_stride, const uint8_t *b, int b_stride, \
- const uint8_t *m, int m_stride, unsigned int *sse) { \
- return masked_variancewxh_ssse3(a, a_stride, b, b_stride, m, m_stride, W, \
- H, sse); \
+#include "aom_dsp/x86/synonyms.h"
+
+// For width a multiple of 16
+static void bilinear_filter(const uint8_t *src, int src_stride, int xoffset,
+ int yoffset, uint8_t *dst, int w, int h);
+
+static void bilinear_filter8xh(const uint8_t *src, int src_stride, int xoffset,
+ int yoffset, uint8_t *dst, int h);
+
+static void bilinear_filter4xh(const uint8_t *src, int src_stride, int xoffset,
+ int yoffset, uint8_t *dst, int h);
+
+// For width a multiple of 16
+static void masked_variance(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *a_ptr, int a_stride,
+ const uint8_t *b_ptr, int b_stride,
+ const uint8_t *m_ptr, int m_stride, int width,
+ int height, unsigned int *sse, int *sum_);
+
+static void masked_variance8xh(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *a_ptr, const uint8_t *b_ptr,
+ const uint8_t *m_ptr, int m_stride, int height,
+ unsigned int *sse, int *sum_);
+
+static void masked_variance4xh(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *a_ptr, const uint8_t *b_ptr,
+ const uint8_t *m_ptr, int m_stride, int height,
+ unsigned int *sse, int *sum_);
+
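The masked_variance helpers declared above blend the two predictors with a 6-bit mask before measuring the error against the reference. A minimal scalar sketch of the intended arithmetic, assuming the AOM_BLEND_A64 rounding from aom_dsp/blend.h (mask values in [0, 64]); the function name is the editor's, not part of the patch:

#include <stdint.h>

// Editor's sketch, not part of the patch: scalar model of the masked
// sum/SSE accumulation. Mask values are assumed to lie in [0, 64].
static void masked_variance_ref(const uint8_t *src, int src_stride,
                                const uint8_t *a, int a_stride,
                                const uint8_t *b, int b_stride,
                                const uint8_t *m, int m_stride,
                                int w, int h, unsigned int *sse, int *sum) {
  *sse = 0;
  *sum = 0;
  for (int i = 0; i < h; i++) {
    for (int j = 0; j < w; j++) {
      // Blend the two predictors: round((m * a + (64 - m) * b) / 64)
      const int mask = m[i * m_stride + j];
      const int pred = (mask * a[i * a_stride + j] +
                        (64 - mask) * b[i * b_stride + j] + 32) >> 6;
      const int diff = src[i * src_stride + j] - pred;
      *sum += diff;
      *sse += diff * diff;
    }
  }
}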
+#define MASK_SUBPIX_VAR_SSSE3(W, H) \
+ unsigned int aom_masked_sub_pixel_variance##W##x##H##_ssse3( \
+ const uint8_t *src, int src_stride, int xoffset, int yoffset, \
+ const uint8_t *ref, int ref_stride, const uint8_t *second_pred, \
+ const uint8_t *msk, int msk_stride, int invert_mask, \
+ unsigned int *sse) { \
+ int sum; \
+ uint8_t temp[(H + 1) * W]; \
+ \
+ bilinear_filter(src, src_stride, xoffset, yoffset, temp, W, H); \
+ \
+ if (!invert_mask) \
+ masked_variance(ref, ref_stride, temp, W, second_pred, W, msk, \
+ msk_stride, W, H, sse, &sum); \
+ else \
+ masked_variance(ref, ref_stride, second_pred, W, temp, W, msk, \
+ msk_stride, W, H, sse, &sum); \
+ return *sse - (uint32_t)(((int64_t)sum * sum) / (W * H)); \
+ }
+
+#define MASK_SUBPIX_VAR8XH_SSSE3(H) \
+ unsigned int aom_masked_sub_pixel_variance8x##H##_ssse3( \
+ const uint8_t *src, int src_stride, int xoffset, int yoffset, \
+ const uint8_t *ref, int ref_stride, const uint8_t *second_pred, \
+ const uint8_t *msk, int msk_stride, int invert_mask, \
+ unsigned int *sse) { \
+ int sum; \
+ uint8_t temp[(H + 1) * 8]; \
+ \
+ bilinear_filter8xh(src, src_stride, xoffset, yoffset, temp, H); \
+ \
+ if (!invert_mask) \
+ masked_variance8xh(ref, ref_stride, temp, second_pred, msk, msk_stride, \
+ H, sse, &sum); \
+ else \
+ masked_variance8xh(ref, ref_stride, second_pred, temp, msk, msk_stride, \
+ H, sse, &sum); \
+ return *sse - (uint32_t)(((int64_t)sum * sum) / (8 * H)); \
+ }
+
+#define MASK_SUBPIX_VAR4XH_SSSE3(H) \
+ unsigned int aom_masked_sub_pixel_variance4x##H##_ssse3( \
+ const uint8_t *src, int src_stride, int xoffset, int yoffset, \
+ const uint8_t *ref, int ref_stride, const uint8_t *second_pred, \
+ const uint8_t *msk, int msk_stride, int invert_mask, \
+ unsigned int *sse) { \
+ int sum; \
+ uint8_t temp[(H + 1) * 4]; \
+ \
+ bilinear_filter4xh(src, src_stride, xoffset, yoffset, temp, H); \
+ \
+ if (!invert_mask) \
+ masked_variance4xh(ref, ref_stride, temp, second_pred, msk, msk_stride, \
+ H, sse, &sum); \
+ else \
+ masked_variance4xh(ref, ref_stride, second_pred, temp, msk, msk_stride, \
+ H, sse, &sum); \
+ return *sse - (uint32_t)(((int64_t)sum * sum) / (4 * H)); \
}
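Each wrapper above then applies the usual sum/SSE identity, with the int64_t cast keeping sum * sum out of 32-bit overflow on the largest blocks:

\[ \text{var} = \sum_i d_i^2 \;-\; \frac{\big(\sum_i d_i\big)^2}{W \cdot H} \]

where d_i is the per-pixel difference reported by the masked_variance helper.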
-MASKED_VARWXH(16, 8)
-MASKED_VARWXH(16, 16)
-MASKED_VARWXH(16, 32)
-MASKED_VARWXH(32, 16)
-MASKED_VARWXH(32, 32)
-MASKED_VARWXH(32, 64)
-MASKED_VARWXH(64, 32)
-MASKED_VARWXH(64, 64)
#if CONFIG_EXT_PARTITION
-MASKED_VARWXH(64, 128)
-MASKED_VARWXH(128, 64)
-MASKED_VARWXH(128, 128)
-#endif // CONFIG_EXT_PARTITION
-
-/*****************************************************************************
- * 8 Wide versions
- *****************************************************************************/
-
-static INLINE unsigned int masked_variance8xh_ssse3(
- const uint8_t *a, int a_stride, const uint8_t *b, int b_stride,
- const uint8_t *m, int m_stride, int h, unsigned int *sse) {
- int ii;
-
- const __m128i v_zero = _mm_setzero_si128();
-
- __m128i v_sum_d = _mm_setzero_si128();
- __m128i v_sse_q = _mm_setzero_si128();
-
- for (ii = 0; ii < h; ii++) {
- // Load inputs - 8 bits
- const __m128i v_a_b = _mm_loadl_epi64((const __m128i *)a);
- const __m128i v_b_b = _mm_loadl_epi64((const __m128i *)b);
- const __m128i v_m_b = _mm_loadl_epi64((const __m128i *)m);
-
- // Unpack to 16 bits - still containing max 8 bits
- const __m128i v_a_w = _mm_unpacklo_epi8(v_a_b, v_zero);
- const __m128i v_b_w = _mm_unpacklo_epi8(v_b_b, v_zero);
- const __m128i v_m_w = _mm_unpacklo_epi8(v_m_b, v_zero);
-
- // Difference: [-255, 255]
- const __m128i v_d_w = _mm_sub_epi16(v_a_w, v_b_w);
-
- // Error - [-255, 255] * [0, 64] = [0xc040, 0x3fc0] => fits in 15 bits
- const __m128i v_e_w = _mm_mullo_epi16(v_d_w, v_m_w);
- const __m128i v_e_d = _mm_madd_epi16(v_d_w, v_m_w);
-
- // Squared error - with madd, the max is (15 bits * 15 bits) * 2 = 31 bits
- const __m128i v_se_d = _mm_madd_epi16(v_e_w, v_e_w);
-
- // Unpack Squared error to 64 bits
- const __m128i v_se_lo_q = _mm_unpacklo_epi32(v_se_d, v_zero);
- const __m128i v_se_hi_q = _mm_unpackhi_epi32(v_se_d, v_zero);
-
- // Accumulate
- v_sum_d = _mm_add_epi32(v_sum_d, v_e_d);
- v_sse_q = _mm_add_epi64(v_sse_q, v_se_lo_q);
- v_sse_q = _mm_add_epi64(v_sse_q, v_se_hi_q);
-
- // Move on to next row
- a += a_stride;
- b += b_stride;
- m += m_stride;
- }
-
- return calc_masked_variance(v_sum_d, v_sse_q, sse, 8, h);
-}
-
-#define MASKED_VAR8XH(H) \
- unsigned int aom_masked_variance8x##H##_ssse3( \
- const uint8_t *a, int a_stride, const uint8_t *b, int b_stride, \
- const uint8_t *m, int m_stride, unsigned int *sse) { \
- return masked_variance8xh_ssse3(a, a_stride, b, b_stride, m, m_stride, H, \
- sse); \
- }
-
-MASKED_VAR8XH(4)
-MASKED_VAR8XH(8)
-MASKED_VAR8XH(16)
-
-/*****************************************************************************
- * 4 Wide versions
- *****************************************************************************/
-
-static INLINE unsigned int masked_variance4xh_ssse3(
- const uint8_t *a, int a_stride, const uint8_t *b, int b_stride,
- const uint8_t *m, int m_stride, int h, unsigned int *sse) {
- int ii;
-
- const __m128i v_zero = _mm_setzero_si128();
-
- __m128i v_sum_d = _mm_setzero_si128();
- __m128i v_sse_q = _mm_setzero_si128();
-
- assert((h % 2) == 0);
-
- for (ii = 0; ii < h / 2; ii++) {
- // Load 2 input rows - 8 bits
- const __m128i v_a0_b = _mm_cvtsi32_si128(*(const uint32_t *)a);
- const __m128i v_b0_b = _mm_cvtsi32_si128(*(const uint32_t *)b);
- const __m128i v_m0_b = _mm_cvtsi32_si128(*(const uint32_t *)m);
- const __m128i v_a1_b = _mm_cvtsi32_si128(*(const uint32_t *)(a + a_stride));
- const __m128i v_b1_b = _mm_cvtsi32_si128(*(const uint32_t *)(b + b_stride));
- const __m128i v_m1_b = _mm_cvtsi32_si128(*(const uint32_t *)(m + m_stride));
-
- // Interleave 2 rows into a single register
- const __m128i v_a_b = _mm_unpacklo_epi32(v_a0_b, v_a1_b);
- const __m128i v_b_b = _mm_unpacklo_epi32(v_b0_b, v_b1_b);
- const __m128i v_m_b = _mm_unpacklo_epi32(v_m0_b, v_m1_b);
-
- // Unpack to 16 bits - still containing max 8 bits
- const __m128i v_a_w = _mm_unpacklo_epi8(v_a_b, v_zero);
- const __m128i v_b_w = _mm_unpacklo_epi8(v_b_b, v_zero);
- const __m128i v_m_w = _mm_unpacklo_epi8(v_m_b, v_zero);
-
- // Difference: [-255, 255]
- const __m128i v_d_w = _mm_sub_epi16(v_a_w, v_b_w);
-
- // Error - [-255, 255] * [0, 64] = [0xc040, 0x3fc0] => fits in 15 bits
- const __m128i v_e_w = _mm_mullo_epi16(v_d_w, v_m_w);
- const __m128i v_e_d = _mm_madd_epi16(v_d_w, v_m_w);
-
- // Squared error - with madd, the max is (15 bits * 15 bits) * 2 = 31 bits
- const __m128i v_se_d = _mm_madd_epi16(v_e_w, v_e_w);
-
- // Unpack Squared error to 64 bits
- const __m128i v_se_lo_q = _mm_unpacklo_epi32(v_se_d, v_zero);
- const __m128i v_se_hi_q = _mm_unpackhi_epi32(v_se_d, v_zero);
-
- // Accumulate
- v_sum_d = _mm_add_epi32(v_sum_d, v_e_d);
- v_sse_q = _mm_add_epi64(v_sse_q, v_se_lo_q);
- v_sse_q = _mm_add_epi64(v_sse_q, v_se_hi_q);
-
- // Move on to the next 2 rows
- a += a_stride * 2;
- b += b_stride * 2;
- m += m_stride * 2;
- }
-
- return calc_masked_variance(v_sum_d, v_sse_q, sse, 4, h);
-}
-
-#define MASKED_VAR4XH(H) \
- unsigned int aom_masked_variance4x##H##_ssse3( \
- const uint8_t *a, int a_stride, const uint8_t *b, int b_stride, \
- const uint8_t *m, int m_stride, unsigned int *sse) { \
- return masked_variance4xh_ssse3(a, a_stride, b, b_stride, m, m_stride, H, \
- sse); \
- }
-
-MASKED_VAR4XH(4)
-MASKED_VAR4XH(8)
-
-#if CONFIG_HIGHBITDEPTH
-
-// Main calculation for n*8 wide blocks
-static INLINE void highbd_masked_variance64_ssse3(
- const uint16_t *a, int a_stride, const uint16_t *b, int b_stride,
- const uint8_t *m, int m_stride, int w, int h, int64_t *sum, uint64_t *sse) {
- int ii, jj;
-
- const __m128i v_zero = _mm_setzero_si128();
-
- __m128i v_sum_d = _mm_setzero_si128();
- __m128i v_sse_q = _mm_setzero_si128();
-
- assert((w % 8) == 0);
-
- for (ii = 0; ii < h; ii++) {
- for (jj = 0; jj < w; jj += 8) {
- // Load inputs - 8 bits
- const __m128i v_a_w = _mm_loadu_si128((const __m128i *)(a + jj));
- const __m128i v_b_w = _mm_loadu_si128((const __m128i *)(b + jj));
- const __m128i v_m_b = _mm_loadl_epi64((const __m128i *)(m + jj));
-
- // Unpack m to 16 bits - still containing max 8 bits
- const __m128i v_m_w = _mm_unpacklo_epi8(v_m_b, v_zero);
-
- // Difference: [-4095, 4095]
- const __m128i v_d_w = _mm_sub_epi16(v_a_w, v_b_w);
-
- // Error - [-4095, 4095] * [0, 64] => sum of 2 of these fits in 19 bits
- const __m128i v_e_d = _mm_madd_epi16(v_d_w, v_m_w);
-
- // Squared error - max (18 bits * 18 bits) = 36 bits (no sign bit)
- const __m128i v_absd_w = _mm_abs_epi16(v_d_w);
- const __m128i v_dlo_d = _mm_unpacklo_epi16(v_absd_w, v_zero);
- const __m128i v_mlo_d = _mm_unpacklo_epi16(v_m_w, v_zero);
- const __m128i v_elo_d = _mm_madd_epi16(v_dlo_d, v_mlo_d);
- const __m128i v_dhi_d = _mm_unpackhi_epi16(v_absd_w, v_zero);
- const __m128i v_mhi_d = _mm_unpackhi_epi16(v_m_w, v_zero);
- const __m128i v_ehi_d = _mm_madd_epi16(v_dhi_d, v_mhi_d);
- // Square and sum the errors -> 36 bits * 4 = 38 bits
- __m128i v_se0_q, v_se1_q, v_se2_q, v_se3_q, v_se_q, v_elo1_d, v_ehi3_d;
- v_se0_q = _mm_mul_epu32(v_elo_d, v_elo_d);
- v_elo1_d = _mm_srli_si128(v_elo_d, 4);
- v_se1_q = _mm_mul_epu32(v_elo1_d, v_elo1_d);
- v_se0_q = _mm_add_epi64(v_se0_q, v_se1_q);
- v_se2_q = _mm_mul_epu32(v_ehi_d, v_ehi_d);
- v_ehi3_d = _mm_srli_si128(v_ehi_d, 4);
- v_se3_q = _mm_mul_epu32(v_ehi3_d, v_ehi3_d);
- v_se1_q = _mm_add_epi64(v_se2_q, v_se3_q);
- v_se_q = _mm_add_epi64(v_se0_q, v_se1_q);
-
- // Accumulate
- v_sum_d = _mm_add_epi32(v_sum_d, v_e_d);
- v_sse_q = _mm_add_epi64(v_sse_q, v_se_q);
- }
-
- // Move on to next row
- a += a_stride;
- b += b_stride;
- m += m_stride;
- }
-
- // Horizontal sum
- *sum = hsum_epi32_si64(v_sum_d);
- *sse = hsum_epi64_si64(v_sse_q);
-
- // Round
- *sum = (*sum >= 0) ? *sum : -*sum;
- *sum = ROUND_POWER_OF_TWO(*sum, 6);
- *sse = ROUND_POWER_OF_TWO(*sse, 12);
-}
-
-// Main calculation for 4 wide blocks
-static INLINE void highbd_masked_variance64_4wide_ssse3(
- const uint16_t *a, int a_stride, const uint16_t *b, int b_stride,
- const uint8_t *m, int m_stride, int h, int64_t *sum, uint64_t *sse) {
- int ii;
-
- const __m128i v_zero = _mm_setzero_si128();
-
- __m128i v_sum_d = _mm_setzero_si128();
- __m128i v_sse_q = _mm_setzero_si128();
-
- assert((h % 2) == 0);
-
- for (ii = 0; ii < h / 2; ii++) {
- // Load 2 input rows - 8 bits
- const __m128i v_a0_w = _mm_loadl_epi64((const __m128i *)a);
- const __m128i v_b0_w = _mm_loadl_epi64((const __m128i *)b);
- const __m128i v_m0_b = _mm_cvtsi32_si128(*(const uint32_t *)m);
- const __m128i v_a1_w = _mm_loadl_epi64((const __m128i *)(a + a_stride));
- const __m128i v_b1_w = _mm_loadl_epi64((const __m128i *)(b + b_stride));
- const __m128i v_m1_b = _mm_cvtsi32_si128(*(const uint32_t *)(m + m_stride));
-
- // Interleave 2 rows into a single register
- const __m128i v_a_w = _mm_unpacklo_epi64(v_a0_w, v_a1_w);
- const __m128i v_b_w = _mm_unpacklo_epi64(v_b0_w, v_b1_w);
- const __m128i v_m_b = _mm_unpacklo_epi32(v_m0_b, v_m1_b);
-
- // Unpack to 16 bits - still containing max 8 bits
- const __m128i v_m_w = _mm_unpacklo_epi8(v_m_b, v_zero);
-
- // Difference: [-4095, 4095]
- const __m128i v_d_w = _mm_sub_epi16(v_a_w, v_b_w);
-
- // Error - [-4095, 4095] * [0, 64] => fits in 19 bits (incl. sign bit)
- const __m128i v_e_d = _mm_madd_epi16(v_d_w, v_m_w);
-
- // Squared error - max (18 bits * 18 bits) = 36 bits (no sign bit)
- const __m128i v_absd_w = _mm_abs_epi16(v_d_w);
- const __m128i v_dlo_d = _mm_unpacklo_epi16(v_absd_w, v_zero);
- const __m128i v_mlo_d = _mm_unpacklo_epi16(v_m_w, v_zero);
- const __m128i v_elo_d = _mm_madd_epi16(v_dlo_d, v_mlo_d);
- const __m128i v_dhi_d = _mm_unpackhi_epi16(v_absd_w, v_zero);
- const __m128i v_mhi_d = _mm_unpackhi_epi16(v_m_w, v_zero);
- const __m128i v_ehi_d = _mm_madd_epi16(v_dhi_d, v_mhi_d);
- // Square and sum the errors -> 36 bits * 4 = 38 bits
- __m128i v_se0_q, v_se1_q, v_se2_q, v_se3_q, v_se_q, v_elo1_d, v_ehi3_d;
- v_se0_q = _mm_mul_epu32(v_elo_d, v_elo_d);
- v_elo1_d = _mm_srli_si128(v_elo_d, 4);
- v_se1_q = _mm_mul_epu32(v_elo1_d, v_elo1_d);
- v_se0_q = _mm_add_epi64(v_se0_q, v_se1_q);
- v_se2_q = _mm_mul_epu32(v_ehi_d, v_ehi_d);
- v_ehi3_d = _mm_srli_si128(v_ehi_d, 4);
- v_se3_q = _mm_mul_epu32(v_ehi3_d, v_ehi3_d);
- v_se1_q = _mm_add_epi64(v_se2_q, v_se3_q);
- v_se_q = _mm_add_epi64(v_se0_q, v_se1_q);
-
- // Accumulate
- v_sum_d = _mm_add_epi32(v_sum_d, v_e_d);
- v_sse_q = _mm_add_epi64(v_sse_q, v_se_q);
-
- // Move on to next row
- a += a_stride * 2;
- b += b_stride * 2;
- m += m_stride * 2;
- }
-
- // Horizontal sum
- *sum = hsum_epi32_si32(v_sum_d);
- *sse = hsum_epi64_si64(v_sse_q);
-
- // Round
- *sum = (*sum >= 0) ? *sum : -*sum;
- *sum = ROUND_POWER_OF_TWO(*sum, 6);
- *sse = ROUND_POWER_OF_TWO(*sse, 12);
-}
-
-static INLINE unsigned int highbd_masked_variancewxh_ssse3(
- const uint16_t *a, int a_stride, const uint16_t *b, int b_stride,
- const uint8_t *m, int m_stride, int w, int h, unsigned int *sse) {
- uint64_t sse64;
- int64_t sum64;
-
- if (w == 4)
- highbd_masked_variance64_4wide_ssse3(a, a_stride, b, b_stride, m, m_stride,
- h, &sum64, &sse64);
- else
- highbd_masked_variance64_ssse3(a, a_stride, b, b_stride, m, m_stride, w, h,
- &sum64, &sse64);
-
- // Store the SSE
- *sse = (uint32_t)sse64;
- // Compute and return variance
- return *sse - (uint32_t)((sum64 * sum64) / (w * h));
-}
-
-static INLINE unsigned int highbd_10_masked_variancewxh_ssse3(
- const uint16_t *a, int a_stride, const uint16_t *b, int b_stride,
- const uint8_t *m, int m_stride, int w, int h, unsigned int *sse) {
- uint64_t sse64;
- int64_t sum64;
-
- if (w == 4)
- highbd_masked_variance64_4wide_ssse3(a, a_stride, b, b_stride, m, m_stride,
- h, &sum64, &sse64);
- else
- highbd_masked_variance64_ssse3(a, a_stride, b, b_stride, m, m_stride, w, h,
- &sum64, &sse64);
-
- // Normalise
- sum64 = ROUND_POWER_OF_TWO(sum64, 2);
- sse64 = ROUND_POWER_OF_TWO(sse64, 4);
-
- // Store the SSE
- *sse = (uint32_t)sse64;
- // Compute and return variance
- return *sse - (uint32_t)((sum64 * sum64) / (w * h));
-}
-
-static INLINE unsigned int highbd_12_masked_variancewxh_ssse3(
- const uint16_t *a, int a_stride, const uint16_t *b, int b_stride,
- const uint8_t *m, int m_stride, int w, int h, unsigned int *sse) {
- uint64_t sse64;
- int64_t sum64;
-
- if (w == 4)
- highbd_masked_variance64_4wide_ssse3(a, a_stride, b, b_stride, m, m_stride,
- h, &sum64, &sse64);
- else
- highbd_masked_variance64_ssse3(a, a_stride, b, b_stride, m, m_stride, w, h,
- &sum64, &sse64);
-
- sum64 = ROUND_POWER_OF_TWO(sum64, 4);
- sse64 = ROUND_POWER_OF_TWO(sse64, 8);
-
- // Store the SSE
- *sse = (uint32_t)sse64;
- // Compute and return variance
- return *sse - (uint32_t)((sum64 * sum64) / (w * h));
-}
-
-#define HIGHBD_MASKED_VARWXH(W, H) \
- unsigned int aom_highbd_masked_variance##W##x##H##_ssse3( \
- const uint8_t *a8, int a_stride, const uint8_t *b8, int b_stride, \
- const uint8_t *m, int m_stride, unsigned int *sse) { \
- uint16_t *a = CONVERT_TO_SHORTPTR(a8); \
- uint16_t *b = CONVERT_TO_SHORTPTR(b8); \
- return highbd_masked_variancewxh_ssse3(a, a_stride, b, b_stride, m, \
- m_stride, W, H, sse); \
- } \
- \
- unsigned int aom_highbd_10_masked_variance##W##x##H##_ssse3( \
- const uint8_t *a8, int a_stride, const uint8_t *b8, int b_stride, \
- const uint8_t *m, int m_stride, unsigned int *sse) { \
- uint16_t *a = CONVERT_TO_SHORTPTR(a8); \
- uint16_t *b = CONVERT_TO_SHORTPTR(b8); \
- return highbd_10_masked_variancewxh_ssse3(a, a_stride, b, b_stride, m, \
- m_stride, W, H, sse); \
- } \
- \
- unsigned int aom_highbd_12_masked_variance##W##x##H##_ssse3( \
- const uint8_t *a8, int a_stride, const uint8_t *b8, int b_stride, \
- const uint8_t *m, int m_stride, unsigned int *sse) { \
- uint16_t *a = CONVERT_TO_SHORTPTR(a8); \
- uint16_t *b = CONVERT_TO_SHORTPTR(b8); \
- return highbd_12_masked_variancewxh_ssse3(a, a_stride, b, b_stride, m, \
- m_stride, W, H, sse); \
- }
-
-HIGHBD_MASKED_VARWXH(4, 4)
-HIGHBD_MASKED_VARWXH(4, 8)
-HIGHBD_MASKED_VARWXH(8, 4)
-HIGHBD_MASKED_VARWXH(8, 8)
-HIGHBD_MASKED_VARWXH(8, 16)
-HIGHBD_MASKED_VARWXH(16, 8)
-HIGHBD_MASKED_VARWXH(16, 16)
-HIGHBD_MASKED_VARWXH(16, 32)
-HIGHBD_MASKED_VARWXH(32, 16)
-HIGHBD_MASKED_VARWXH(32, 32)
-HIGHBD_MASKED_VARWXH(32, 64)
-HIGHBD_MASKED_VARWXH(64, 32)
-HIGHBD_MASKED_VARWXH(64, 64)
-#if CONFIG_EXT_PARTITION
-HIGHBD_MASKED_VARWXH(64, 128)
-HIGHBD_MASKED_VARWXH(128, 64)
-HIGHBD_MASKED_VARWXH(128, 128)
-#endif // CONFIG_EXT_PARTITION
-
+MASK_SUBPIX_VAR_SSSE3(128, 128)
+MASK_SUBPIX_VAR_SSSE3(128, 64)
+MASK_SUBPIX_VAR_SSSE3(64, 128)
#endif
-
-//////////////////////////////////////////////////////////////////////////////
-// Sub pixel versions
-//////////////////////////////////////////////////////////////////////////////
-
-typedef __m128i (*filter_fn_t)(__m128i v_a_b, __m128i v_b_b,
- __m128i v_filter_b);
-
-static INLINE __m128i apply_filter_avg(const __m128i v_a_b, const __m128i v_b_b,
- const __m128i v_filter_b) {
- (void)v_filter_b;
- return _mm_avg_epu8(v_a_b, v_b_b);
-}
-
-static INLINE __m128i apply_filter(const __m128i v_a_b, const __m128i v_b_b,
- const __m128i v_filter_b) {
- const __m128i v_rounding_w = _mm_set1_epi16(1 << (FILTER_BITS - 1));
- __m128i v_input_lo_b = _mm_unpacklo_epi8(v_a_b, v_b_b);
- __m128i v_input_hi_b = _mm_unpackhi_epi8(v_a_b, v_b_b);
- __m128i v_temp0_w = _mm_maddubs_epi16(v_input_lo_b, v_filter_b);
- __m128i v_temp1_w = _mm_maddubs_epi16(v_input_hi_b, v_filter_b);
- __m128i v_res_lo_w =
- _mm_srai_epi16(_mm_add_epi16(v_temp0_w, v_rounding_w), FILTER_BITS);
- __m128i v_res_hi_w =
- _mm_srai_epi16(_mm_add_epi16(v_temp1_w, v_rounding_w), FILTER_BITS);
- return _mm_packus_epi16(v_res_lo_w, v_res_hi_w);
-}
-
-// Apply the filter to the contents of the lower half of a and b
-static INLINE void apply_filter_lo(const __m128i v_a_lo_b,
- const __m128i v_b_lo_b,
- const __m128i v_filter_b, __m128i *v_res_w) {
- const __m128i v_rounding_w = _mm_set1_epi16(1 << (FILTER_BITS - 1));
- __m128i v_input_b = _mm_unpacklo_epi8(v_a_lo_b, v_b_lo_b);
- __m128i v_temp0_w = _mm_maddubs_epi16(v_input_b, v_filter_b);
- *v_res_w =
- _mm_srai_epi16(_mm_add_epi16(v_temp0_w, v_rounding_w), FILTER_BITS);
-}
-
-static void sum_and_sse(const __m128i v_a_b, const __m128i v_b_b,
- const __m128i v_m_b, __m128i *v_sum_d,
- __m128i *v_sse_q) {
- const __m128i v_zero = _mm_setzero_si128();
- // Unpack to 16 bits - still containing max 8 bits
- const __m128i v_a0_w = _mm_unpacklo_epi8(v_a_b, v_zero);
- const __m128i v_b0_w = _mm_unpacklo_epi8(v_b_b, v_zero);
- const __m128i v_m0_w = _mm_unpacklo_epi8(v_m_b, v_zero);
- const __m128i v_a1_w = _mm_unpackhi_epi8(v_a_b, v_zero);
- const __m128i v_b1_w = _mm_unpackhi_epi8(v_b_b, v_zero);
- const __m128i v_m1_w = _mm_unpackhi_epi8(v_m_b, v_zero);
-
- // Difference: [-255, 255]
- const __m128i v_d0_w = _mm_sub_epi16(v_a0_w, v_b0_w);
- const __m128i v_d1_w = _mm_sub_epi16(v_a1_w, v_b1_w);
-
- // Error - [-255, 255] * [0, 64] = [0xc040, 0x3fc0] => fits in 15 bits
- const __m128i v_e0_w = _mm_mullo_epi16(v_d0_w, v_m0_w);
- const __m128i v_e0_d = _mm_madd_epi16(v_d0_w, v_m0_w);
- const __m128i v_e1_w = _mm_mullo_epi16(v_d1_w, v_m1_w);
- const __m128i v_e1_d = _mm_madd_epi16(v_d1_w, v_m1_w);
-
- // Squared error - with madd, the max is (15 bits * 15 bits) * 2 = 31 bits
- const __m128i v_se0_d = _mm_madd_epi16(v_e0_w, v_e0_w);
- const __m128i v_se1_d = _mm_madd_epi16(v_e1_w, v_e1_w);
-
- // Sum of v_se{0,1}_d - 31 bits + 31 bits = 32 bits
- const __m128i v_se_d = _mm_add_epi32(v_se0_d, v_se1_d);
-
- // Unpack Squared error to 64 bits
- const __m128i v_se_lo_q = _mm_unpacklo_epi32(v_se_d, v_zero);
- const __m128i v_se_hi_q = _mm_unpackhi_epi32(v_se_d, v_zero);
-
- // Accumulate
- *v_sum_d = _mm_add_epi32(*v_sum_d, v_e0_d);
- *v_sum_d = _mm_add_epi32(*v_sum_d, v_e1_d);
- *v_sse_q = _mm_add_epi64(*v_sse_q, v_se_lo_q);
- *v_sse_q = _mm_add_epi64(*v_sse_q, v_se_hi_q);
-}
-
-// Functions for width (W) >= 16
-unsigned int aom_masked_subpel_varWxH_xzero(const uint8_t *src, int src_stride,
- int yoffset, const uint8_t *dst,
- int dst_stride, const uint8_t *msk,
- int msk_stride, unsigned int *sse,
- int w, int h,
- filter_fn_t filter_fn) {
+MASK_SUBPIX_VAR_SSSE3(64, 64)
+MASK_SUBPIX_VAR_SSSE3(64, 32)
+MASK_SUBPIX_VAR_SSSE3(32, 64)
+MASK_SUBPIX_VAR_SSSE3(32, 32)
+MASK_SUBPIX_VAR_SSSE3(32, 16)
+MASK_SUBPIX_VAR_SSSE3(16, 32)
+MASK_SUBPIX_VAR_SSSE3(16, 16)
+MASK_SUBPIX_VAR_SSSE3(16, 8)
+MASK_SUBPIX_VAR8XH_SSSE3(16)
+MASK_SUBPIX_VAR8XH_SSSE3(8)
+MASK_SUBPIX_VAR8XH_SSSE3(4)
+MASK_SUBPIX_VAR4XH_SSSE3(8)
+MASK_SUBPIX_VAR4XH_SSSE3(4)
+
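For reference, a call site for one of the instantiations above looks like the following; the buffers and strides here are hypothetical, and xoffset/yoffset are sub-pel positions (0 to BIL_SUBPEL_SHIFTS - 1, with 4 being the half-pel fast path seen below):

unsigned int sse;
// Editor's sketch of a call; src/ref/second_pred/msk are hypothetical buffers.
const unsigned int var = aom_masked_sub_pixel_variance16x16_ssse3(
    src, src_stride, /*xoffset=*/2, /*yoffset=*/5, ref, ref_stride,
    second_pred, msk, msk_stride, /*invert_mask=*/0, &sse);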
+static INLINE __m128i filter_block(const __m128i a, const __m128i b,
+ const __m128i filter) {
+ __m128i v0 = _mm_unpacklo_epi8(a, b);
+ v0 = _mm_maddubs_epi16(v0, filter);
+ v0 = xx_roundn_epu16(v0, FILTER_BITS);
+
+ __m128i v1 = _mm_unpackhi_epi8(a, b);
+ v1 = _mm_maddubs_epi16(v1, filter);
+ v1 = xx_roundn_epu16(v1, FILTER_BITS);
+
+ return _mm_packus_epi16(v0, v1);
+}
+
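A scalar model of filter_block, assuming 2-tap coefficients that sum to 1 << FILTER_BITS (as the bilinear_filters_2t table provides; FILTER_BITS comes from aom_dsp/aom_filter.h, already included above). The helper name is the editor's:

// Editor's sketch: each output byte of filter_block is the rounded 2-tap
// combination of the corresponding bytes of 'a' and 'b'.
static uint8_t filter_pixel_ref(uint8_t a, uint8_t b, const uint8_t *filter) {
  const int sum = a * filter[0] + b * filter[1];
  return (uint8_t)((sum + (1 << (FILTER_BITS - 1))) >> FILTER_BITS);
}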
+static void bilinear_filter(const uint8_t *src, int src_stride, int xoffset,
+ int yoffset, uint8_t *dst, int w, int h) {
int i, j;
- __m128i v_src0_b, v_src1_b, v_res_b, v_dst_b, v_msk_b;
- __m128i v_sum_d = _mm_setzero_si128();
- __m128i v_sse_q = _mm_setzero_si128();
- const __m128i v_filter_b = _mm_set1_epi16(
- (bilinear_filters_2t[yoffset][1] << 8) + bilinear_filters_2t[yoffset][0]);
- assert(yoffset < BIL_SUBPEL_SHIFTS);
- for (j = 0; j < w; j += 16) {
- // Load the first row ready
- v_src0_b = _mm_loadu_si128((const __m128i *)(src + j));
- // Process 2 rows at a time
- for (i = 0; i < h; i += 2) {
- // Load the next row and apply the filter
- v_src1_b = _mm_loadu_si128((const __m128i *)(src + j + src_stride));
- v_res_b = filter_fn(v_src0_b, v_src1_b, v_filter_b);
- // Load the dst and msk for the variance calculation
- v_dst_b = _mm_loadu_si128((const __m128i *)(dst + j));
- v_msk_b = _mm_loadu_si128((const __m128i *)(msk + j));
- sum_and_sse(v_res_b, v_dst_b, v_msk_b, &v_sum_d, &v_sse_q);
-
- // Load the next row and apply the filter
- v_src0_b = _mm_loadu_si128((const __m128i *)(src + j + src_stride * 2));
- v_res_b = filter_fn(v_src1_b, v_src0_b, v_filter_b);
- // Load the dst and msk for the variance calculation
- v_dst_b = _mm_loadu_si128((const __m128i *)(dst + j + dst_stride));
- v_msk_b = _mm_loadu_si128((const __m128i *)(msk + j + msk_stride));
- sum_and_sse(v_res_b, v_dst_b, v_msk_b, &v_sum_d, &v_sse_q);
- // Move onto the next block of rows
- src += src_stride * 2;
- dst += dst_stride * 2;
- msk += msk_stride * 2;
+ // Horizontal filter
+ if (xoffset == 0) {
+ uint8_t *b = dst;
+ for (i = 0; i < h + 1; ++i) {
+ for (j = 0; j < w; j += 16) {
+ __m128i x = _mm_loadu_si128((__m128i *)&src[j]);
+ _mm_storeu_si128((__m128i *)&b[j], x);
+ }
+ src += src_stride;
+ b += w;
}
- // Reset to the top of the block
- src -= src_stride * h;
- dst -= dst_stride * h;
- msk -= msk_stride * h;
- }
- return calc_masked_variance(v_sum_d, v_sse_q, sse, w, h);
-}
-unsigned int aom_masked_subpel_varWxH_yzero(const uint8_t *src, int src_stride,
- int xoffset, const uint8_t *dst,
- int dst_stride, const uint8_t *msk,
- int msk_stride, unsigned int *sse,
- int w, int h,
- filter_fn_t filter_fn) {
- int i, j;
- __m128i v_src0_b, v_src1_b, v_res_b, v_dst_b, v_msk_b;
- __m128i v_sum_d = _mm_setzero_si128();
- __m128i v_sse_q = _mm_setzero_si128();
- const __m128i v_filter_b = _mm_set1_epi16(
- (bilinear_filters_2t[xoffset][1] << 8) + bilinear_filters_2t[xoffset][0]);
- assert(xoffset < BIL_SUBPEL_SHIFTS);
- for (i = 0; i < h; i++) {
- for (j = 0; j < w; j += 16) {
- // Load this row and one below & apply the filter to them
- v_src0_b = _mm_loadu_si128((const __m128i *)(src + j));
- v_src1_b = _mm_loadu_si128((const __m128i *)(src + j + 1));
- v_res_b = filter_fn(v_src0_b, v_src1_b, v_filter_b);
-
- // Load the dst and msk for the variance calculation
- v_dst_b = _mm_loadu_si128((const __m128i *)(dst + j));
- v_msk_b = _mm_loadu_si128((const __m128i *)(msk + j));
- sum_and_sse(v_res_b, v_dst_b, v_msk_b, &v_sum_d, &v_sse_q);
+ } else if (xoffset == 4) {
+ uint8_t *b = dst;
+ for (i = 0; i < h + 1; ++i) {
+ for (j = 0; j < w; j += 16) {
+ __m128i x = _mm_loadu_si128((__m128i *)&src[j]);
+ __m128i y = _mm_loadu_si128((__m128i *)&src[j + 16]);
+ __m128i z = _mm_alignr_epi8(y, x, 1);
+ _mm_storeu_si128((__m128i *)&b[j], _mm_avg_epu8(x, z));
+ }
+ src += src_stride;
+ b += w;
}
- src += src_stride;
- dst += dst_stride;
- msk += msk_stride;
- }
- return calc_masked_variance(v_sum_d, v_sse_q, sse, w, h);
-}
-unsigned int aom_masked_subpel_varWxH_xnonzero_ynonzero(
- const uint8_t *src, int src_stride, int xoffset, int yoffset,
- const uint8_t *dst, int dst_stride, const uint8_t *msk, int msk_stride,
- unsigned int *sse, int w, int h, filter_fn_t xfilter_fn,
- filter_fn_t yfilter_fn) {
- int i, j;
- __m128i v_src0_b, v_src1_b, v_src2_b, v_src3_b;
- __m128i v_filtered0_b, v_filtered1_b, v_res_b, v_dst_b, v_msk_b;
- __m128i v_sum_d = _mm_setzero_si128();
- __m128i v_sse_q = _mm_setzero_si128();
- const __m128i v_filterx_b = _mm_set1_epi16(
- (bilinear_filters_2t[xoffset][1] << 8) + bilinear_filters_2t[xoffset][0]);
- const __m128i v_filtery_b = _mm_set1_epi16(
- (bilinear_filters_2t[yoffset][1] << 8) + bilinear_filters_2t[yoffset][0]);
- assert(yoffset < BIL_SUBPEL_SHIFTS);
- assert(xoffset < BIL_SUBPEL_SHIFTS);
- for (j = 0; j < w; j += 16) {
- // Load the first row ready
- v_src0_b = _mm_loadu_si128((const __m128i *)(src + j));
- v_src1_b = _mm_loadu_si128((const __m128i *)(src + j + 1));
- v_filtered0_b = xfilter_fn(v_src0_b, v_src1_b, v_filterx_b);
- // Process 2 rows at a time
- for (i = 0; i < h; i += 2) {
- // Load the next row & apply the filter
- v_src2_b = _mm_loadu_si128((const __m128i *)(src + src_stride + j));
- v_src3_b = _mm_loadu_si128((const __m128i *)(src + src_stride + j + 1));
- v_filtered1_b = xfilter_fn(v_src2_b, v_src3_b, v_filterx_b);
- // Load the dst and msk for the variance calculation
- v_dst_b = _mm_loadu_si128((const __m128i *)(dst + j));
- v_msk_b = _mm_loadu_si128((const __m128i *)(msk + j));
- // Complete the calculation for this row and add it to the running total
- v_res_b = yfilter_fn(v_filtered0_b, v_filtered1_b, v_filtery_b);
- sum_and_sse(v_res_b, v_dst_b, v_msk_b, &v_sum_d, &v_sse_q);
-
- // Load the next row & apply the filter
- v_src0_b = _mm_loadu_si128((const __m128i *)(src + src_stride * 2 + j));
- v_src1_b =
- _mm_loadu_si128((const __m128i *)(src + src_stride * 2 + j + 1));
- v_filtered0_b = xfilter_fn(v_src0_b, v_src1_b, v_filterx_b);
- // Load the dst and msk for the variance calculation
- v_dst_b = _mm_loadu_si128((const __m128i *)(dst + dst_stride + j));
- v_msk_b = _mm_loadu_si128((const __m128i *)(msk + msk_stride + j));
- // Complete the calculation for this row and add it to the running total
- v_res_b = yfilter_fn(v_filtered1_b, v_filtered0_b, v_filtery_b);
- sum_and_sse(v_res_b, v_dst_b, v_msk_b, &v_sum_d, &v_sse_q);
- // Move onto the next block of rows
- src += src_stride * 2;
- dst += dst_stride * 2;
- msk += msk_stride * 2;
+ } else {
+ uint8_t *b = dst;
+ const uint8_t *hfilter = bilinear_filters_2t[xoffset];
+ const __m128i hfilter_vec = _mm_set1_epi16(hfilter[0] | (hfilter[1] << 8));
+ for (i = 0; i < h + 1; ++i) {
+ for (j = 0; j < w; j += 16) {
+ const __m128i x = _mm_loadu_si128((__m128i *)&src[j]);
+ const __m128i y = _mm_loadu_si128((__m128i *)&src[j + 16]);
+ const __m128i z = _mm_alignr_epi8(y, x, 1);
+ const __m128i res = filter_block(x, z, hfilter_vec);
+ _mm_storeu_si128((__m128i *)&b[j], res);
+ }
+
+ src += src_stride;
+ b += w;
}
- // Reset to the top of the block
- src -= src_stride * h;
- dst -= dst_stride * h;
- msk -= msk_stride * h;
}
- return calc_masked_variance(v_sum_d, v_sse_q, sse, w, h);
-}
-// Note the order in which rows are loaded: xmm[127:96] = row 1,
-// xmm[95:64] = row 2, xmm[63:32] = row 3, xmm[31:0] = row 4
-unsigned int aom_masked_subpel_var4xH_xzero(const uint8_t *src, int src_stride,
- int yoffset, const uint8_t *dst,
- int dst_stride, const uint8_t *msk,
- int msk_stride, unsigned int *sse,
- int h) {
- int i;
- __m128i v_src0_b, v_src1_b, v_src2_b, v_src3_b, v_filtered1_w, v_filtered2_w;
- __m128i v_dst0_b, v_dst1_b, v_dst2_b, v_dst3_b;
- __m128i v_msk0_b, v_msk1_b, v_msk2_b, v_msk3_b, v_res_b;
- __m128i v_sum_d = _mm_setzero_si128();
- __m128i v_sse_q = _mm_setzero_si128();
- __m128i v_filter_b = _mm_set1_epi16((bilinear_filters_2t[yoffset][1] << 8) +
- bilinear_filters_2t[yoffset][0]);
- assert(yoffset < BIL_SUBPEL_SHIFTS);
- // Load the first row of src data ready
- v_src0_b = _mm_loadl_epi64((const __m128i *)src);
- for (i = 0; i < h; i += 4) {
- // Load the rest of the source data for these rows
- v_src1_b = _mm_loadl_epi64((const __m128i *)(src + src_stride * 1));
- v_src1_b = _mm_unpacklo_epi32(v_src1_b, v_src0_b);
- v_src2_b = _mm_loadl_epi64((const __m128i *)(src + src_stride * 2));
- v_src3_b = _mm_loadl_epi64((const __m128i *)(src + src_stride * 3));
- v_src3_b = _mm_unpacklo_epi32(v_src3_b, v_src2_b);
- v_src0_b = _mm_loadl_epi64((const __m128i *)(src + src_stride * 4));
- // Load the dst data
- v_dst0_b = _mm_cvtsi32_si128(*(const uint32_t *)(dst + dst_stride * 0));
- v_dst1_b = _mm_cvtsi32_si128(*(const uint32_t *)(dst + dst_stride * 1));
- v_dst0_b = _mm_unpacklo_epi32(v_dst1_b, v_dst0_b);
- v_dst2_b = _mm_cvtsi32_si128(*(const uint32_t *)(dst + dst_stride * 2));
- v_dst3_b = _mm_cvtsi32_si128(*(const uint32_t *)(dst + dst_stride * 3));
- v_dst2_b = _mm_unpacklo_epi32(v_dst3_b, v_dst2_b);
- v_dst0_b = _mm_unpacklo_epi64(v_dst2_b, v_dst0_b);
- // Load the mask data
- v_msk0_b = _mm_cvtsi32_si128(*(const uint32_t *)(msk + msk_stride * 0));
- v_msk1_b = _mm_cvtsi32_si128(*(const uint32_t *)(msk + msk_stride * 1));
- v_msk0_b = _mm_unpacklo_epi32(v_msk1_b, v_msk0_b);
- v_msk2_b = _mm_cvtsi32_si128(*(const uint32_t *)(msk + msk_stride * 2));
- v_msk3_b = _mm_cvtsi32_si128(*(const uint32_t *)(msk + msk_stride * 3));
- v_msk2_b = _mm_unpacklo_epi32(v_msk3_b, v_msk2_b);
- v_msk0_b = _mm_unpacklo_epi64(v_msk2_b, v_msk0_b);
- // Apply the y filter
- if (yoffset == HALF_PIXEL_OFFSET) {
- v_src1_b = _mm_unpacklo_epi64(v_src3_b, v_src1_b);
- v_src2_b =
- _mm_or_si128(_mm_slli_si128(v_src1_b, 4),
- _mm_and_si128(v_src0_b, _mm_setr_epi32(-1, 0, 0, 0)));
- v_res_b = _mm_avg_epu8(v_src1_b, v_src2_b);
- } else {
- v_src2_b =
- _mm_or_si128(_mm_slli_si128(v_src1_b, 4),
- _mm_and_si128(v_src2_b, _mm_setr_epi32(-1, 0, 0, 0)));
- apply_filter_lo(v_src1_b, v_src2_b, v_filter_b, &v_filtered1_w);
- v_src2_b =
- _mm_or_si128(_mm_slli_si128(v_src3_b, 4),
- _mm_and_si128(v_src0_b, _mm_setr_epi32(-1, 0, 0, 0)));
- apply_filter_lo(v_src3_b, v_src2_b, v_filter_b, &v_filtered2_w);
- v_res_b = _mm_packus_epi16(v_filtered2_w, v_filtered1_w);
+ // Vertical filter
+ if (yoffset == 0) {
+ // The data is already in 'dst', so no need to filter
+ } else if (yoffset == 4) {
+ for (i = 0; i < h; ++i) {
+ for (j = 0; j < w; j += 16) {
+ __m128i x = _mm_loadu_si128((__m128i *)&dst[j]);
+ __m128i y = _mm_loadu_si128((__m128i *)&dst[j + w]);
+ _mm_storeu_si128((__m128i *)&dst[j], _mm_avg_epu8(x, y));
+ }
+ dst += w;
}
- // Compute the sum and SSE
- sum_and_sse(v_res_b, v_dst0_b, v_msk0_b, &v_sum_d, &v_sse_q);
- // Move onto the next set of rows
- src += src_stride * 4;
- dst += dst_stride * 4;
- msk += msk_stride * 4;
- }
- return calc_masked_variance(v_sum_d, v_sse_q, sse, 4, h);
-}
-
-// Note the order in which rows are loaded: xmm[127:64] = row 1, xmm[63:0] = row 2
-unsigned int aom_masked_subpel_var8xH_xzero(const uint8_t *src, int src_stride,
- int yoffset, const uint8_t *dst,
- int dst_stride, const uint8_t *msk,
- int msk_stride, unsigned int *sse,
- int h) {
- int i;
- __m128i v_src0_b, v_src1_b, v_filtered0_w, v_filtered1_w, v_res_b;
- __m128i v_dst_b = _mm_setzero_si128();
- __m128i v_msk_b = _mm_setzero_si128();
- __m128i v_sum_d = _mm_setzero_si128();
- __m128i v_sse_q = _mm_setzero_si128();
- __m128i v_filter_b = _mm_set1_epi16((bilinear_filters_2t[yoffset][1] << 8) +
- bilinear_filters_2t[yoffset][0]);
- assert(yoffset < BIL_SUBPEL_SHIFTS);
- // Load the first row of src data ready
- v_src0_b = _mm_loadl_epi64((const __m128i *)src);
- for (i = 0; i < h; i += 2) {
- if (yoffset == HALF_PIXEL_OFFSET) {
- // Load the rest of the source data for these rows
- v_src1_b = _mm_or_si128(
- _mm_slli_si128(v_src0_b, 8),
- _mm_loadl_epi64((const __m128i *)(src + src_stride * 1)));
- v_src0_b = _mm_or_si128(
- _mm_slli_si128(v_src1_b, 8),
- _mm_loadl_epi64((const __m128i *)(src + src_stride * 2)));
- // Apply the y filter
- v_res_b = _mm_avg_epu8(v_src1_b, v_src0_b);
- } else {
- // Load the data and apply the y filter
- v_src1_b = _mm_loadl_epi64((const __m128i *)(src + src_stride * 1));
- apply_filter_lo(v_src0_b, v_src1_b, v_filter_b, &v_filtered0_w);
- v_src0_b = _mm_loadl_epi64((const __m128i *)(src + src_stride * 2));
- apply_filter_lo(v_src1_b, v_src0_b, v_filter_b, &v_filtered1_w);
- v_res_b = _mm_packus_epi16(v_filtered1_w, v_filtered0_w);
+ } else {
+ const uint8_t *vfilter = bilinear_filters_2t[yoffset];
+ const __m128i vfilter_vec = _mm_set1_epi16(vfilter[0] | (vfilter[1] << 8));
+ for (i = 0; i < h; ++i) {
+ for (j = 0; j < w; j += 16) {
+ const __m128i x = _mm_loadu_si128((__m128i *)&dst[j]);
+ const __m128i y = _mm_loadu_si128((__m128i *)&dst[j + w]);
+ const __m128i res = filter_block(x, y, vfilter_vec);
+ _mm_storeu_si128((__m128i *)&dst[j], res);
+ }
+
+ dst += w;
}
- // Load the dst data
- v_dst_b = _mm_unpacklo_epi64(
- _mm_loadl_epi64((const __m128i *)(dst + dst_stride * 1)),
- _mm_loadl_epi64((const __m128i *)(dst + dst_stride * 0)));
- // Load the mask data
- v_msk_b = _mm_unpacklo_epi64(
- _mm_loadl_epi64((const __m128i *)(msk + msk_stride * 1)),
- _mm_loadl_epi64((const __m128i *)(msk + msk_stride * 0)));
- // Compute the sum and SSE
- sum_and_sse(v_res_b, v_dst_b, v_msk_b, &v_sum_d, &v_sse_q);
- // Move onto the next set of rows
- src += src_stride * 2;
- dst += dst_stride * 2;
- msk += msk_stride * 2;
}
- return calc_masked_variance(v_sum_d, v_sse_q, sse, 8, h);
}
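Taken together, the two passes above form a separable bilinear interpolation. A scalar model of the routine, reusing the filter_pixel_ref sketch from earlier (editor's names; dst must hold (h + 1) * w bytes):

// Editor's sketch: the horizontal pass writes h + 1 rows of width w, then
// the vertical pass blends row i with row i + 1 in place.
static void bilinear_filter_ref(const uint8_t *src, int src_stride,
                                const uint8_t *hf, const uint8_t *vf,
                                uint8_t *dst, int w, int h) {
  for (int i = 0; i < h + 1; ++i)
    for (int j = 0; j < w; ++j)
      dst[i * w + j] = filter_pixel_ref(src[i * src_stride + j],
                                        src[i * src_stride + j + 1], hf);
  for (int i = 0; i < h; ++i)
    for (int j = 0; j < w; ++j)
      dst[i * w + j] = filter_pixel_ref(dst[i * w + j],
                                        dst[(i + 1) * w + j], vf);
}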
-// Note the order in which rows are loaded: xmm[127:96] = row 1,
-// xmm[95:64] = row 2, xmm[63:32] = row 3, xmm[31:0] = row 4
-unsigned int aom_masked_subpel_var4xH_yzero(const uint8_t *src, int src_stride,
- int xoffset, const uint8_t *dst,
- int dst_stride, const uint8_t *msk,
- int msk_stride, unsigned int *sse,
- int h) {
- int i;
- __m128i v_src0_b, v_src1_b, v_src2_b, v_src3_b, v_filtered0_w, v_filtered2_w;
- __m128i v_src0_shift_b, v_src1_shift_b, v_src2_shift_b, v_src3_shift_b;
- __m128i v_dst0_b, v_dst1_b, v_dst2_b, v_dst3_b;
- __m128i v_msk0_b, v_msk1_b, v_msk2_b, v_msk3_b, v_res_b;
- __m128i v_sum_d = _mm_setzero_si128();
- __m128i v_sse_q = _mm_setzero_si128();
- __m128i v_filter_b = _mm_set1_epi16((bilinear_filters_2t[xoffset][1] << 8) +
- bilinear_filters_2t[xoffset][0]);
- assert(xoffset < BIL_SUBPEL_SHIFTS);
- for (i = 0; i < h; i += 4) {
- // Load the src data
- v_src0_b = _mm_loadl_epi64((const __m128i *)src);
- v_src0_shift_b = _mm_srli_si128(v_src0_b, 1);
- v_src1_b = _mm_loadl_epi64((const __m128i *)(src + src_stride * 1));
- v_src0_b = _mm_unpacklo_epi32(v_src1_b, v_src0_b);
- v_src1_shift_b = _mm_srli_si128(v_src1_b, 1);
- v_src2_b = _mm_loadl_epi64((const __m128i *)(src + src_stride * 2));
- v_src0_shift_b = _mm_unpacklo_epi32(v_src1_shift_b, v_src0_shift_b);
- v_src2_shift_b = _mm_srli_si128(v_src2_b, 1);
- v_src3_b = _mm_loadl_epi64((const __m128i *)(src + src_stride * 3));
- v_src2_b = _mm_unpacklo_epi32(v_src3_b, v_src2_b);
- v_src3_shift_b = _mm_srli_si128(v_src3_b, 1);
- v_src2_shift_b = _mm_unpacklo_epi32(v_src3_shift_b, v_src2_shift_b);
- // Load the dst data
- v_dst0_b = _mm_cvtsi32_si128(*(const uint32_t *)(dst + dst_stride * 0));
- v_dst1_b = _mm_cvtsi32_si128(*(const uint32_t *)(dst + dst_stride * 1));
- v_dst0_b = _mm_unpacklo_epi32(v_dst1_b, v_dst0_b);
- v_dst2_b = _mm_cvtsi32_si128(*(const uint32_t *)(dst + dst_stride * 2));
- v_dst3_b = _mm_cvtsi32_si128(*(const uint32_t *)(dst + dst_stride * 3));
- v_dst2_b = _mm_unpacklo_epi32(v_dst3_b, v_dst2_b);
- v_dst0_b = _mm_unpacklo_epi64(v_dst2_b, v_dst0_b);
- // Load the mask data
- v_msk0_b = _mm_cvtsi32_si128(*(const uint32_t *)(msk + msk_stride * 0));
- v_msk1_b = _mm_cvtsi32_si128(*(const uint32_t *)(msk + msk_stride * 1));
- v_msk0_b = _mm_unpacklo_epi32(v_msk1_b, v_msk0_b);
- v_msk2_b = _mm_cvtsi32_si128(*(const uint32_t *)(msk + msk_stride * 2));
- v_msk3_b = _mm_cvtsi32_si128(*(const uint32_t *)(msk + msk_stride * 3));
- v_msk2_b = _mm_unpacklo_epi32(v_msk3_b, v_msk2_b);
- v_msk0_b = _mm_unpacklo_epi64(v_msk2_b, v_msk0_b);
- // Apply the x filter
- if (xoffset == HALF_PIXEL_OFFSET) {
- v_src0_b = _mm_unpacklo_epi64(v_src2_b, v_src0_b);
- v_src0_shift_b = _mm_unpacklo_epi64(v_src2_shift_b, v_src0_shift_b);
- v_res_b = _mm_avg_epu8(v_src0_b, v_src0_shift_b);
- } else {
- apply_filter_lo(v_src0_b, v_src0_shift_b, v_filter_b, &v_filtered0_w);
- apply_filter_lo(v_src2_b, v_src2_shift_b, v_filter_b, &v_filtered2_w);
- v_res_b = _mm_packus_epi16(v_filtered2_w, v_filtered0_w);
- }
- // Compute the sum and SSE
- sum_and_sse(v_res_b, v_dst0_b, v_msk0_b, &v_sum_d, &v_sse_q);
- // Move onto the next set of rows
- src += src_stride * 4;
- dst += dst_stride * 4;
- msk += msk_stride * 4;
- }
- return calc_masked_variance(v_sum_d, v_sse_q, sse, 4, h);
-}
+static INLINE __m128i filter_block_2rows(const __m128i a0, const __m128i b0,
+ const __m128i a1, const __m128i b1,
+ const __m128i filter) {
+ __m128i v0 = _mm_unpacklo_epi8(a0, b0);
+ v0 = _mm_maddubs_epi16(v0, filter);
+ v0 = xx_roundn_epu16(v0, FILTER_BITS);
+
-unsigned int aom_masked_subpel_var8xH_yzero(const uint8_t *src, int src_stride,
- int xoffset, const uint8_t *dst,
- int dst_stride, const uint8_t *msk,
- int msk_stride, unsigned int *sse,
- int h) {
- int i;
- __m128i v_src0_b, v_src1_b, v_filtered0_w, v_filtered1_w;
- __m128i v_src0_shift_b, v_src1_shift_b, v_res_b, v_dst_b, v_msk_b;
- __m128i v_sum_d = _mm_setzero_si128();
- __m128i v_sse_q = _mm_setzero_si128();
- __m128i v_filter_b = _mm_set1_epi16((bilinear_filters_2t[xoffset][1] << 8) +
- bilinear_filters_2t[xoffset][0]);
- assert(xoffset < BIL_SUBPEL_SHIFTS);
- for (i = 0; i < h; i += 2) {
- // Load the src data
- v_src0_b = _mm_loadu_si128((const __m128i *)(src));
- v_src0_shift_b = _mm_srli_si128(v_src0_b, 1);
- v_src1_b = _mm_loadu_si128((const __m128i *)(src + src_stride));
- v_src1_shift_b = _mm_srli_si128(v_src1_b, 1);
- // Apply the x filter
- if (xoffset == HALF_PIXEL_OFFSET) {
- v_src1_b = _mm_unpacklo_epi64(v_src0_b, v_src1_b);
- v_src1_shift_b = _mm_unpacklo_epi64(v_src0_shift_b, v_src1_shift_b);
- v_res_b = _mm_avg_epu8(v_src1_b, v_src1_shift_b);
- } else {
- apply_filter_lo(v_src0_b, v_src0_shift_b, v_filter_b, &v_filtered0_w);
- apply_filter_lo(v_src1_b, v_src1_shift_b, v_filter_b, &v_filtered1_w);
- v_res_b = _mm_packus_epi16(v_filtered0_w, v_filtered1_w);
- }
- // Load the dst data
- v_dst_b = _mm_unpacklo_epi64(
- _mm_loadl_epi64((const __m128i *)(dst + dst_stride * 0)),
- _mm_loadl_epi64((const __m128i *)(dst + dst_stride * 1)));
- // Load the mask data
- v_msk_b = _mm_unpacklo_epi64(
- _mm_loadl_epi64((const __m128i *)(msk + msk_stride * 0)),
- _mm_loadl_epi64((const __m128i *)(msk + msk_stride * 1)));
- // Compute the sum and SSE
- sum_and_sse(v_res_b, v_dst_b, v_msk_b, &v_sum_d, &v_sse_q);
- // Move onto the next set of rows
- src += src_stride * 2;
- dst += dst_stride * 2;
- msk += msk_stride * 2;
- }
- return calc_masked_variance(v_sum_d, v_sse_q, sse, 8, h);
+ __m128i v1 = _mm_unpacklo_epi8(a1, b1);
+ v1 = _mm_maddubs_epi16(v1, filter);
+ v1 = xx_roundn_epu16(v1, FILTER_BITS);
+
+ return _mm_packus_epi16(v0, v1);
}
-// Note the order in which rows are loaded: xmm[127:96] = row 1,
-// xmm[95:64] = row 2, xmm[63:32] = row 3, xmm[31:0] = row 4
-unsigned int aom_masked_subpel_var4xH_xnonzero_ynonzero(
- const uint8_t *src, int src_stride, int xoffset, int yoffset,
- const uint8_t *dst, int dst_stride, const uint8_t *msk, int msk_stride,
- unsigned int *sse, int h) {
+static void bilinear_filter8xh(const uint8_t *src, int src_stride, int xoffset,
+ int yoffset, uint8_t *dst, int h) {
int i;
- __m128i v_src0_b, v_src1_b, v_src2_b, v_src3_b, v_filtered0_w, v_filtered2_w;
- __m128i v_src0_shift_b, v_src1_shift_b, v_src2_shift_b, v_src3_shift_b;
- __m128i v_dst0_b, v_dst1_b, v_dst2_b, v_dst3_b, v_temp_b;
- __m128i v_msk0_b, v_msk1_b, v_msk2_b, v_msk3_b, v_extra_row_b, v_res_b;
- __m128i v_xres_b[2];
- __m128i v_sum_d = _mm_setzero_si128();
- __m128i v_sse_q = _mm_setzero_si128();
- __m128i v_filterx_b = _mm_set1_epi16((bilinear_filters_2t[xoffset][1] << 8) +
- bilinear_filters_2t[xoffset][0]);
- __m128i v_filtery_b = _mm_set1_epi16((bilinear_filters_2t[yoffset][1] << 8) +
- bilinear_filters_2t[yoffset][0]);
- assert(xoffset < BIL_SUBPEL_SHIFTS);
- assert(yoffset < BIL_SUBPEL_SHIFTS);
- for (i = 0; i < h; i += 4) {
- // Load the src data
- v_src0_b = _mm_loadl_epi64((const __m128i *)src);
- v_src0_shift_b = _mm_srli_si128(v_src0_b, 1);
- v_src1_b = _mm_loadl_epi64((const __m128i *)(src + src_stride * 1));
- v_src0_b = _mm_unpacklo_epi32(v_src1_b, v_src0_b);
- v_src1_shift_b = _mm_srli_si128(v_src1_b, 1);
- v_src2_b = _mm_loadl_epi64((const __m128i *)(src + src_stride * 2));
- v_src0_shift_b = _mm_unpacklo_epi32(v_src1_shift_b, v_src0_shift_b);
- v_src2_shift_b = _mm_srli_si128(v_src2_b, 1);
- v_src3_b = _mm_loadl_epi64((const __m128i *)(src + src_stride * 3));
- v_src2_b = _mm_unpacklo_epi32(v_src3_b, v_src2_b);
- v_src3_shift_b = _mm_srli_si128(v_src3_b, 1);
- v_src2_shift_b = _mm_unpacklo_epi32(v_src3_shift_b, v_src2_shift_b);
- // Apply the x filter
- if (xoffset == HALF_PIXEL_OFFSET) {
- v_src0_b = _mm_unpacklo_epi64(v_src2_b, v_src0_b);
- v_src0_shift_b = _mm_unpacklo_epi64(v_src2_shift_b, v_src0_shift_b);
- v_xres_b[i == 0 ? 0 : 1] = _mm_avg_epu8(v_src0_b, v_src0_shift_b);
- } else {
- apply_filter_lo(v_src0_b, v_src0_shift_b, v_filterx_b, &v_filtered0_w);
- apply_filter_lo(v_src2_b, v_src2_shift_b, v_filterx_b, &v_filtered2_w);
- v_xres_b[i == 0 ? 0 : 1] = _mm_packus_epi16(v_filtered2_w, v_filtered0_w);
+ // Horizontal filter
+ if (xoffset == 0) {
+ uint8_t *b = dst;
+ for (i = 0; i < h + 1; ++i) {
+ __m128i x = _mm_loadl_epi64((__m128i *)src);
+ _mm_storel_epi64((__m128i *)b, x);
+ src += src_stride;
+ b += 8;
+ }
+ } else if (xoffset == 4) {
+ uint8_t *b = dst;
+ for (i = 0; i < h + 1; ++i) {
+ __m128i x = _mm_loadu_si128((__m128i *)src);
+ __m128i z = _mm_srli_si128(x, 1);
+ _mm_storel_epi64((__m128i *)b, _mm_avg_epu8(x, z));
+ src += src_stride;
+ b += 8;
}
- // Move onto the next set of rows
- src += src_stride * 4;
- }
- // Load one more row to be used in the y filter
- v_src0_b = _mm_loadl_epi64((const __m128i *)src);
- v_src0_shift_b = _mm_srli_si128(v_src0_b, 1);
- // Apply the x filter
- if (xoffset == HALF_PIXEL_OFFSET) {
- v_extra_row_b = _mm_and_si128(_mm_avg_epu8(v_src0_b, v_src0_shift_b),
- _mm_setr_epi32(-1, 0, 0, 0));
} else {
- apply_filter_lo(v_src0_b, v_src0_shift_b, v_filterx_b, &v_filtered0_w);
- v_extra_row_b =
- _mm_and_si128(_mm_packus_epi16(v_filtered0_w, _mm_setzero_si128()),
- _mm_setr_epi32(-1, 0, 0, 0));
- }
+ uint8_t *b = dst;
+ const uint8_t *hfilter = bilinear_filters_2t[xoffset];
+ const __m128i hfilter_vec = _mm_set1_epi16(hfilter[0] | (hfilter[1] << 8));
+ for (i = 0; i < h; i += 2) {
+ const __m128i x0 = _mm_loadu_si128((__m128i *)src);
+ const __m128i z0 = _mm_srli_si128(x0, 1);
+ const __m128i x1 = _mm_loadu_si128((__m128i *)&src[src_stride]);
+ const __m128i z1 = _mm_srli_si128(x1, 1);
+ const __m128i res = filter_block_2rows(x0, z0, x1, z1, hfilter_vec);
+ _mm_storeu_si128((__m128i *)b, res);
+
- for (i = 0; i < h; i += 4) {
- if (h == 8 && i == 0) {
- v_temp_b = _mm_or_si128(_mm_slli_si128(v_xres_b[0], 4),
- _mm_srli_si128(v_xres_b[1], 12));
- } else {
- v_temp_b = _mm_or_si128(_mm_slli_si128(v_xres_b[i == 0 ? 0 : 1], 4),
- v_extra_row_b);
+ src += src_stride * 2;
+ b += 16;
}
- // Apply the y filter
- if (yoffset == HALF_PIXEL_OFFSET) {
- v_res_b = _mm_avg_epu8(v_xres_b[i == 0 ? 0 : 1], v_temp_b);
- } else {
- v_res_b = apply_filter(v_xres_b[i == 0 ? 0 : 1], v_temp_b, v_filtery_b);
+ // Handle i = h separately
+ const __m128i x0 = _mm_loadu_si128((__m128i *)src);
+ const __m128i z0 = _mm_srli_si128(x0, 1);
+
+ __m128i v0 = _mm_unpacklo_epi8(x0, z0);
+ v0 = _mm_maddubs_epi16(v0, hfilter_vec);
+ v0 = xx_roundn_epu16(v0, FILTER_BITS);
+
+ _mm_storel_epi64((__m128i *)b, _mm_packus_epi16(v0, v0));
+ }
+
+ // Vertical filter
+ if (yoffset == 0) {
+ // The data is already in 'dst', so no need to filter
+ } else if (yoffset == 4) {
+ for (i = 0; i < h; ++i) {
+ __m128i x = _mm_loadl_epi64((__m128i *)dst);
+ __m128i y = _mm_loadl_epi64((__m128i *)&dst[8]);
+ _mm_storel_epi64((__m128i *)dst, _mm_avg_epu8(x, y));
+ dst += 8;
}
+ } else {
+ const uint8_t *vfilter = bilinear_filters_2t[yoffset];
+ const __m128i vfilter_vec = _mm_set1_epi16(vfilter[0] | (vfilter[1] << 8));
+ for (i = 0; i < h; i += 2) {
+ const __m128i x = _mm_loadl_epi64((__m128i *)dst);
+ const __m128i y = _mm_loadl_epi64((__m128i *)&dst[8]);
+ const __m128i z = _mm_loadl_epi64((__m128i *)&dst[16]);
+ const __m128i res = filter_block_2rows(x, y, y, z, vfilter_vec);
+ _mm_storeu_si128((__m128i *)dst, res);
+
- // Load the dst data
- v_dst0_b = _mm_cvtsi32_si128(*(const uint32_t *)(dst + dst_stride * 0));
- v_dst1_b = _mm_cvtsi32_si128(*(const uint32_t *)(dst + dst_stride * 1));
- v_dst0_b = _mm_unpacklo_epi32(v_dst1_b, v_dst0_b);
- v_dst2_b = _mm_cvtsi32_si128(*(const uint32_t *)(dst + dst_stride * 2));
- v_dst3_b = _mm_cvtsi32_si128(*(const uint32_t *)(dst + dst_stride * 3));
- v_dst2_b = _mm_unpacklo_epi32(v_dst3_b, v_dst2_b);
- v_dst0_b = _mm_unpacklo_epi64(v_dst2_b, v_dst0_b);
- // Load the mask data
- v_msk0_b = _mm_cvtsi32_si128(*(const uint32_t *)(msk + msk_stride * 0));
- v_msk1_b = _mm_cvtsi32_si128(*(const uint32_t *)(msk + msk_stride * 1));
- v_msk0_b = _mm_unpacklo_epi32(v_msk1_b, v_msk0_b);
- v_msk2_b = _mm_cvtsi32_si128(*(const uint32_t *)(msk + msk_stride * 2));
- v_msk3_b = _mm_cvtsi32_si128(*(const uint32_t *)(msk + msk_stride * 3));
- v_msk2_b = _mm_unpacklo_epi32(v_msk3_b, v_msk2_b);
- v_msk0_b = _mm_unpacklo_epi64(v_msk2_b, v_msk0_b);
- // Compute the sum and SSE
- sum_and_sse(v_res_b, v_dst0_b, v_msk0_b, &v_sum_d, &v_sse_q);
- // Move onto the next set of rows
- dst += dst_stride * 4;
- msk += msk_stride * 4;
+ dst += 16;
+ }
}
- return calc_masked_variance(v_sum_d, v_sse_q, sse, 4, h);
}
-unsigned int aom_masked_subpel_var8xH_xnonzero_ynonzero(
- const uint8_t *src, int src_stride, int xoffset, int yoffset,
- const uint8_t *dst, int dst_stride, const uint8_t *msk, int msk_stride,
- unsigned int *sse, int h) {
+static void bilinear_filter4xh(const uint8_t *src, int src_stride, int xoffset,
+ int yoffset, uint8_t *dst, int h) {
int i;
- __m128i v_src0_b, v_src1_b, v_filtered0_w, v_filtered1_w, v_dst_b, v_msk_b;
- __m128i v_src0_shift_b, v_src1_shift_b;
- __m128i v_xres0_b, v_xres1_b, v_res_b, v_temp_b;
- __m128i v_sum_d = _mm_setzero_si128();
- __m128i v_sse_q = _mm_setzero_si128();
- __m128i v_filterx_b = _mm_set1_epi16((bilinear_filters_2t[xoffset][1] << 8) +
- bilinear_filters_2t[xoffset][0]);
- __m128i v_filtery_b = _mm_set1_epi16((bilinear_filters_2t[yoffset][1] << 8) +
- bilinear_filters_2t[yoffset][0]);
- assert(xoffset < BIL_SUBPEL_SHIFTS);
- assert(yoffset < BIL_SUBPEL_SHIFTS);
- // Load the first block of src data
- v_src0_b = _mm_loadu_si128((const __m128i *)(src));
- v_src0_shift_b = _mm_srli_si128(v_src0_b, 1);
- v_src1_b = _mm_loadu_si128((const __m128i *)(src + src_stride));
- v_src1_shift_b = _mm_srli_si128(v_src1_b, 1);
- // Apply the x filter
- if (xoffset == HALF_PIXEL_OFFSET) {
- v_src1_b = _mm_unpacklo_epi64(v_src0_b, v_src1_b);
- v_src1_shift_b = _mm_unpacklo_epi64(v_src0_shift_b, v_src1_shift_b);
- v_xres0_b = _mm_avg_epu8(v_src1_b, v_src1_shift_b);
- } else {
- apply_filter_lo(v_src0_b, v_src0_shift_b, v_filterx_b, &v_filtered0_w);
- apply_filter_lo(v_src1_b, v_src1_shift_b, v_filterx_b, &v_filtered1_w);
- v_xres0_b = _mm_packus_epi16(v_filtered0_w, v_filtered1_w);
- }
- for (i = 0; i < h; i += 4) {
- // Load the next block of src data
- v_src0_b = _mm_loadu_si128((const __m128i *)(src + src_stride * 2));
- v_src0_shift_b = _mm_srli_si128(v_src0_b, 1);
- v_src1_b = _mm_loadu_si128((const __m128i *)(src + src_stride * 3));
- v_src1_shift_b = _mm_srli_si128(v_src1_b, 1);
- // Apply the x filter
- if (xoffset == HALF_PIXEL_OFFSET) {
- v_src1_b = _mm_unpacklo_epi64(v_src0_b, v_src1_b);
- v_src1_shift_b = _mm_unpacklo_epi64(v_src0_shift_b, v_src1_shift_b);
- v_xres1_b = _mm_avg_epu8(v_src1_b, v_src1_shift_b);
- } else {
- apply_filter_lo(v_src0_b, v_src0_shift_b, v_filterx_b, &v_filtered0_w);
- apply_filter_lo(v_src1_b, v_src1_shift_b, v_filterx_b, &v_filtered1_w);
- v_xres1_b = _mm_packus_epi16(v_filtered0_w, v_filtered1_w);
+ // Horizontal filter
+ if (xoffset == 0) {
+ uint8_t *b = dst;
+ for (i = 0; i < h + 1; ++i) {
+ __m128i x = xx_loadl_32((__m128i *)src);
+ xx_storel_32((__m128i *)b, x);
+ src += src_stride;
+ b += 4;
}
- // Apply the y filter to the previous block
- v_temp_b = _mm_or_si128(_mm_srli_si128(v_xres0_b, 8),
- _mm_slli_si128(v_xres1_b, 8));
- if (yoffset == HALF_PIXEL_OFFSET) {
- v_res_b = _mm_avg_epu8(v_xres0_b, v_temp_b);
- } else {
- v_res_b = apply_filter(v_xres0_b, v_temp_b, v_filtery_b);
+ } else if (xoffset == 4) {
+ uint8_t *b = dst;
+ for (i = 0; i < h + 1; ++i) {
+ __m128i x = _mm_loadl_epi64((__m128i *)src);
+ __m128i z = _mm_srli_si128(x, 1);
+ xx_storel_32((__m128i *)b, _mm_avg_epu8(x, z));
+ src += src_stride;
+ b += 4;
}
- // Load the dst data
- v_dst_b = _mm_unpacklo_epi64(
- _mm_loadl_epi64((const __m128i *)(dst + dst_stride * 0)),
- _mm_loadl_epi64((const __m128i *)(dst + dst_stride * 1)));
- // Load the mask data
- v_msk_b = _mm_unpacklo_epi64(
- _mm_loadl_epi64((const __m128i *)(msk + msk_stride * 0)),
- _mm_loadl_epi64((const __m128i *)(msk + msk_stride * 1)));
- // Compute the sum and SSE
- sum_and_sse(v_res_b, v_dst_b, v_msk_b, &v_sum_d, &v_sse_q);
-
- // Load the next block of src data
- v_src0_b = _mm_loadu_si128((const __m128i *)(src + src_stride * 4));
- v_src0_shift_b = _mm_srli_si128(v_src0_b, 1);
- v_src1_b = _mm_loadu_si128((const __m128i *)(src + src_stride * 5));
- v_src1_shift_b = _mm_srli_si128(v_src1_b, 1);
- // Apply the x filter
- if (xoffset == HALF_PIXEL_OFFSET) {
- v_src1_b = _mm_unpacklo_epi64(v_src0_b, v_src1_b);
- v_src1_shift_b = _mm_unpacklo_epi64(v_src0_shift_b, v_src1_shift_b);
- v_xres0_b = _mm_avg_epu8(v_src1_b, v_src1_shift_b);
- } else {
- apply_filter_lo(v_src0_b, v_src0_shift_b, v_filterx_b, &v_filtered0_w);
- apply_filter_lo(v_src1_b, v_src1_shift_b, v_filterx_b, &v_filtered1_w);
- v_xres0_b = _mm_packus_epi16(v_filtered0_w, v_filtered1_w);
+ } else {
+ uint8_t *b = dst;
+ const uint8_t *hfilter = bilinear_filters_2t[xoffset];
+ const __m128i hfilter_vec = _mm_set1_epi16(hfilter[0] | (hfilter[1] << 8));
+ for (i = 0; i < h; i += 4) {
+ const __m128i x0 = _mm_loadl_epi64((__m128i *)src);
+ const __m128i z0 = _mm_srli_si128(x0, 1);
+ const __m128i x1 = _mm_loadl_epi64((__m128i *)&src[src_stride]);
+ const __m128i z1 = _mm_srli_si128(x1, 1);
+ const __m128i x2 = _mm_loadl_epi64((__m128i *)&src[src_stride * 2]);
+ const __m128i z2 = _mm_srli_si128(x2, 1);
+ const __m128i x3 = _mm_loadl_epi64((__m128i *)&src[src_stride * 3]);
+ const __m128i z3 = _mm_srli_si128(x3, 1);
+
+ const __m128i a0 = _mm_unpacklo_epi32(x0, x1);
+ const __m128i b0 = _mm_unpacklo_epi32(z0, z1);
+ const __m128i a1 = _mm_unpacklo_epi32(x2, x3);
+ const __m128i b1 = _mm_unpacklo_epi32(z2, z3);
+ const __m128i res = filter_block_2rows(a0, b0, a1, b1, hfilter_vec);
+ _mm_storeu_si128((__m128i *)b, res);
+
+ src += src_stride * 4;
+ b += 16;
}
- // Apply the y filter to the previous block
- v_temp_b = _mm_or_si128(_mm_srli_si128(v_xres1_b, 8),
- _mm_slli_si128(v_xres0_b, 8));
- if (yoffset == HALF_PIXEL_OFFSET) {
- v_res_b = _mm_avg_epu8(v_xres1_b, v_temp_b);
- } else {
- v_res_b = apply_filter(v_xres1_b, v_temp_b, v_filtery_b);
+ // Handle i = h separately
+ const __m128i x = _mm_loadl_epi64((__m128i *)src);
+ const __m128i z = _mm_srli_si128(x, 1);
+
+ __m128i v0 = _mm_unpacklo_epi8(x, z);
+ v0 = _mm_maddubs_epi16(v0, hfilter_vec);
+ v0 = xx_roundn_epu16(v0, FILTER_BITS);
+
+ xx_storel_32((__m128i *)b, _mm_packus_epi16(v0, v0));
+ }
+
+ // Vertical filter
+ if (yoffset == 0) {
+ // The data is already in 'dst', so no need to filter
+ } else if (yoffset == 4) {
+ for (i = 0; i < h; ++i) {
+ __m128i x = xx_loadl_32((__m128i *)dst);
+ __m128i y = xx_loadl_32((__m128i *)&dst[4]);
+ xx_storel_32((__m128i *)dst, _mm_avg_epu8(x, y));
+ dst += 4;
+ }
+ } else {
+ const uint8_t *vfilter = bilinear_filters_2t[yoffset];
+ const __m128i vfilter_vec = _mm_set1_epi16(vfilter[0] | (vfilter[1] << 8));
+ for (i = 0; i < h; i += 4) {
+ const __m128i a = xx_loadl_32((__m128i *)dst);
+ const __m128i b = xx_loadl_32((__m128i *)&dst[4]);
+ const __m128i c = xx_loadl_32((__m128i *)&dst[8]);
+ const __m128i d = xx_loadl_32((__m128i *)&dst[12]);
+ const __m128i e = xx_loadl_32((__m128i *)&dst[16]);
+
+ const __m128i a0 = _mm_unpacklo_epi32(a, b);
+ const __m128i b0 = _mm_unpacklo_epi32(b, c);
+ const __m128i a1 = _mm_unpacklo_epi32(c, d);
+ const __m128i b1 = _mm_unpacklo_epi32(d, e);
+ const __m128i res = filter_block_2rows(a0, b0, a1, b1, vfilter_vec);
+ _mm_storeu_si128((__m128i *)dst, res);
+
+ dst += 16;
}
- // Load the dst data
- v_dst_b = _mm_unpacklo_epi64(
- _mm_loadl_epi64((const __m128i *)(dst + dst_stride * 2)),
- _mm_loadl_epi64((const __m128i *)(dst + dst_stride * 3)));
- // Load the mask data
- v_msk_b = _mm_unpacklo_epi64(
- _mm_loadl_epi64((const __m128i *)(msk + msk_stride * 2)),
- _mm_loadl_epi64((const __m128i *)(msk + msk_stride * 3)));
- // Compute the sum and SSE
- sum_and_sse(v_res_b, v_dst_b, v_msk_b, &v_sum_d, &v_sse_q);
- // Move on to the next set of rows
- src += src_stride * 4;
- dst += dst_stride * 4;
- msk += msk_stride * 4;
- }
- return calc_masked_variance(v_sum_d, v_sse_q, sse, 8, h);
-}
-
-// For W >= 16
-#define MASK_SUBPIX_VAR_LARGE(W, H) \
- unsigned int aom_masked_sub_pixel_variance##W##x##H##_ssse3( \
- const uint8_t *src, int src_stride, int xoffset, int yoffset, \
- const uint8_t *dst, int dst_stride, const uint8_t *msk, int msk_stride, \
- unsigned int *sse) { \
- assert(W % 16 == 0); \
- if (xoffset == 0) { \
- if (yoffset == 0) \
- return aom_masked_variance##W##x##H##_ssse3( \
- src, src_stride, dst, dst_stride, msk, msk_stride, sse); \
- else if (yoffset == HALF_PIXEL_OFFSET) \
- return aom_masked_subpel_varWxH_xzero( \
- src, src_stride, HALF_PIXEL_OFFSET, dst, dst_stride, msk, \
- msk_stride, sse, W, H, apply_filter_avg); \
- else \
- return aom_masked_subpel_varWxH_xzero(src, src_stride, yoffset, dst, \
- dst_stride, msk, msk_stride, \
- sse, W, H, apply_filter); \
- } else if (yoffset == 0) { \
- if (xoffset == HALF_PIXEL_OFFSET) \
- return aom_masked_subpel_varWxH_yzero( \
- src, src_stride, HALF_PIXEL_OFFSET, dst, dst_stride, msk, \
- msk_stride, sse, W, H, apply_filter_avg); \
- else \
- return aom_masked_subpel_varWxH_yzero(src, src_stride, xoffset, dst, \
- dst_stride, msk, msk_stride, \
- sse, W, H, apply_filter); \
- } else if (xoffset == HALF_PIXEL_OFFSET) { \
- if (yoffset == HALF_PIXEL_OFFSET) \
- return aom_masked_subpel_varWxH_xnonzero_ynonzero( \
- src, src_stride, HALF_PIXEL_OFFSET, HALF_PIXEL_OFFSET, dst, \
- dst_stride, msk, msk_stride, sse, W, H, apply_filter_avg, \
- apply_filter_avg); \
- else \
- return aom_masked_subpel_varWxH_xnonzero_ynonzero( \
- src, src_stride, HALF_PIXEL_OFFSET, yoffset, dst, dst_stride, msk, \
- msk_stride, sse, W, H, apply_filter_avg, apply_filter); \
- } else { \
- if (yoffset == HALF_PIXEL_OFFSET) \
- return aom_masked_subpel_varWxH_xnonzero_ynonzero( \
- src, src_stride, xoffset, HALF_PIXEL_OFFSET, dst, dst_stride, msk, \
- msk_stride, sse, W, H, apply_filter, apply_filter_avg); \
- else \
- return aom_masked_subpel_varWxH_xnonzero_ynonzero( \
- src, src_stride, xoffset, yoffset, dst, dst_stride, msk, \
- msk_stride, sse, W, H, apply_filter, apply_filter); \
- } \
- }
-
-// For W < 16
-#define MASK_SUBPIX_VAR_SMALL(W, H) \
- unsigned int aom_masked_sub_pixel_variance##W##x##H##_ssse3( \
- const uint8_t *src, int src_stride, int xoffset, int yoffset, \
- const uint8_t *dst, int dst_stride, const uint8_t *msk, int msk_stride, \
- unsigned int *sse) { \
- assert(W == 4 || W == 8); \
- if (xoffset == 0 && yoffset == 0) \
- return aom_masked_variance##W##x##H##_ssse3( \
- src, src_stride, dst, dst_stride, msk, msk_stride, sse); \
- else if (xoffset == 0) \
- return aom_masked_subpel_var##W##xH_xzero( \
- src, src_stride, yoffset, dst, dst_stride, msk, msk_stride, sse, H); \
- else if (yoffset == 0) \
- return aom_masked_subpel_var##W##xH_yzero( \
- src, src_stride, xoffset, dst, dst_stride, msk, msk_stride, sse, H); \
- else \
- return aom_masked_subpel_var##W##xH_xnonzero_ynonzero( \
- src, src_stride, xoffset, yoffset, dst, dst_stride, msk, msk_stride, \
- sse, H); \
}
-
-MASK_SUBPIX_VAR_SMALL(4, 4)
-MASK_SUBPIX_VAR_SMALL(4, 8)
-MASK_SUBPIX_VAR_SMALL(8, 4)
-MASK_SUBPIX_VAR_SMALL(8, 8)
-MASK_SUBPIX_VAR_SMALL(8, 16)
-MASK_SUBPIX_VAR_LARGE(16, 8)
-MASK_SUBPIX_VAR_LARGE(16, 16)
-MASK_SUBPIX_VAR_LARGE(16, 32)
-MASK_SUBPIX_VAR_LARGE(32, 16)
-MASK_SUBPIX_VAR_LARGE(32, 32)
-MASK_SUBPIX_VAR_LARGE(32, 64)
-MASK_SUBPIX_VAR_LARGE(64, 32)
-MASK_SUBPIX_VAR_LARGE(64, 64)
-#if CONFIG_EXT_PARTITION
-MASK_SUBPIX_VAR_LARGE(64, 128)
-MASK_SUBPIX_VAR_LARGE(128, 64)
-MASK_SUBPIX_VAR_LARGE(128, 128)
-#endif // CONFIG_EXT_PARTITION
-
-#if CONFIG_HIGHBITDEPTH
-typedef uint32_t (*highbd_calc_masked_var_t)(__m128i v_sum_d, __m128i v_sse_q,
- uint32_t *sse, int w, int h);
-typedef unsigned int (*highbd_variance_fn_t)(const uint8_t *a8, int a_stride,
- const uint8_t *b8, int b_stride,
- const uint8_t *m, int m_stride,
- unsigned int *sse);
-typedef __m128i (*highbd_filter_fn_t)(__m128i v_a_w, __m128i v_b_w,
- __m128i v_filter_w);
-
-static INLINE __m128i highbd_apply_filter_avg(const __m128i v_a_w,
- const __m128i v_b_w,
- const __m128i v_filter_w) {
- (void)v_filter_w;
- return _mm_avg_epu16(v_a_w, v_b_w);
}
-static INLINE __m128i highbd_apply_filter(const __m128i v_a_w,
- const __m128i v_b_w,
- const __m128i v_filter_w) {
- const __m128i v_rounding_d = _mm_set1_epi32(1 << (FILTER_BITS - 1));
- __m128i v_input_lo_w = _mm_unpacklo_epi16(v_a_w, v_b_w);
- __m128i v_input_hi_w = _mm_unpackhi_epi16(v_a_w, v_b_w);
- __m128i v_temp0_d = _mm_madd_epi16(v_input_lo_w, v_filter_w);
- __m128i v_temp1_d = _mm_madd_epi16(v_input_hi_w, v_filter_w);
- __m128i v_res_lo_d =
- _mm_srai_epi32(_mm_add_epi32(v_temp0_d, v_rounding_d), FILTER_BITS);
- __m128i v_res_hi_d =
- _mm_srai_epi32(_mm_add_epi32(v_temp1_d, v_rounding_d), FILTER_BITS);
- return _mm_packs_epi32(v_res_lo_d, v_res_hi_d);
-}
-// Apply the filter to the contents of the lower half of a and b
-static INLINE void highbd_apply_filter_lo(const __m128i v_a_lo_w,
- const __m128i v_b_lo_w,
- const __m128i v_filter_w,
- __m128i *v_res_d) {
- const __m128i v_rounding_d = _mm_set1_epi32(1 << (FILTER_BITS - 1));
- __m128i v_input_w = _mm_unpacklo_epi16(v_a_lo_w, v_b_lo_w);
- __m128i v_temp0_d = _mm_madd_epi16(v_input_w, v_filter_w);
- *v_res_d =
- _mm_srai_epi32(_mm_add_epi32(v_temp0_d, v_rounding_d), FILTER_BITS);
-}
+static INLINE void accumulate_block(const __m128i src, const __m128i a,
+ const __m128i b, const __m128i m,
+ __m128i *sum, __m128i *sum_sq) {
+ const __m128i zero = _mm_setzero_si128();
+ const __m128i one = _mm_set1_epi16(1);
+ const __m128i mask_max = _mm_set1_epi8((1 << AOM_BLEND_A64_ROUND_BITS));
+ const __m128i m_inv = _mm_sub_epi8(mask_max, m);
+
+ // Calculate 16 predicted pixels.
+ // Note that the maximum value of any entry of 'pred_l' or 'pred_r'
+ // is 64 * 255, so we have plenty of space to add rounding constants.
+ const __m128i data_l = _mm_unpacklo_epi8(a, b);
+ const __m128i mask_l = _mm_unpacklo_epi8(m, m_inv);
+ __m128i pred_l = _mm_maddubs_epi16(data_l, mask_l);
+ pred_l = xx_roundn_epu16(pred_l, AOM_BLEND_A64_ROUND_BITS);
+
+ const __m128i data_r = _mm_unpackhi_epi8(a, b);
+ const __m128i mask_r = _mm_unpackhi_epi8(m, m_inv);
+ __m128i pred_r = _mm_maddubs_epi16(data_r, mask_r);
+ pred_r = xx_roundn_epu16(pred_r, AOM_BLEND_A64_ROUND_BITS);
+
+ const __m128i src_l = _mm_unpacklo_epi8(src, zero);
+ const __m128i src_r = _mm_unpackhi_epi8(src, zero);
+ const __m128i diff_l = _mm_sub_epi16(pred_l, src_l);
+ const __m128i diff_r = _mm_sub_epi16(pred_r, src_r);
+
+ // Update partial sums and partial sums of squares
+ *sum =
+ _mm_add_epi32(*sum, _mm_madd_epi16(_mm_add_epi16(diff_l, diff_r), one));
+ *sum_sq =
+ _mm_add_epi32(*sum_sq, _mm_add_epi32(_mm_madd_epi16(diff_l, diff_l),
+ _mm_madd_epi16(diff_r, diff_r)));
+}
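+
+// A scalar model of the per-pixel work in accumulate_block, assuming
+// AOM_BLEND_A64_ROUND_BITS == 6 (so the two mask weights sum to 64) and
+// ROUND_POWER_OF_TWO in scope. It makes the bound in the comment above
+// concrete: 64 * 255 = 16320 < 2^15, so the blend fits in 16 bits before
+// rounding. (Illustrative helper, not referenced by the SIMD path.)
+static INLINE void accumulate_pixel_scalar(uint8_t src, uint8_t a, uint8_t b,
+ uint8_t m, int *sum,
+ int64_t *sum_sq) {
+ const int m_inv = (1 << AOM_BLEND_A64_ROUND_BITS) - m;
+ const int pred =
+ ROUND_POWER_OF_TWO(m * a + m_inv * b, AOM_BLEND_A64_ROUND_BITS);
+ const int diff = pred - src;
+ *sum += diff;
+ *sum_sq += diff * diff;
+}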
+
+static void masked_variance(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *a_ptr, int a_stride,
+ const uint8_t *b_ptr, int b_stride,
+ const uint8_t *m_ptr, int m_stride, int width,
+ int height, unsigned int *sse, int *sum_) {
+ int x, y;
+ __m128i sum = _mm_setzero_si128(), sum_sq = _mm_setzero_si128();
+
+ for (y = 0; y < height; y++) {
+ for (x = 0; x < width; x += 16) {
+ const __m128i src = _mm_loadu_si128((const __m128i *)&src_ptr[x]);
+ const __m128i a = _mm_loadu_si128((const __m128i *)&a_ptr[x]);
+ const __m128i b = _mm_loadu_si128((const __m128i *)&b_ptr[x]);
+ const __m128i m = _mm_loadu_si128((const __m128i *)&m_ptr[x]);
+ accumulate_block(src, a, b, m, &sum, &sum_sq);
+ }
-static void highbd_sum_and_sse(const __m128i v_a_w, const __m128i v_b_w,
- const __m128i v_m_b, __m128i *v_sum_d,
- __m128i *v_sse_q) {
- const __m128i v_zero = _mm_setzero_si128();
- const __m128i v_m_w = _mm_unpacklo_epi8(v_m_b, v_zero);
-
- // Difference: [-2^12, 2^12] => 13 bits (incl. sign bit)
- const __m128i v_d_w = _mm_sub_epi16(v_a_w, v_b_w);
-
- // Error - [-4095, 4095] * [0, 64] & sum pairs => fits in 19 + 1 bits
- const __m128i v_e_d = _mm_madd_epi16(v_d_w, v_m_w);
-
- // Squared error - max (18 bits * 18 bits) = 36 bits (no sign bit)
- const __m128i v_absd_w = _mm_abs_epi16(v_d_w);
- const __m128i v_dlo_d = _mm_unpacklo_epi16(v_absd_w, v_zero);
- const __m128i v_mlo_d = _mm_unpacklo_epi16(v_m_w, v_zero);
- const __m128i v_elo_d = _mm_madd_epi16(v_dlo_d, v_mlo_d);
- const __m128i v_dhi_d = _mm_unpackhi_epi16(v_absd_w, v_zero);
- const __m128i v_mhi_d = _mm_unpackhi_epi16(v_m_w, v_zero);
- const __m128i v_ehi_d = _mm_madd_epi16(v_dhi_d, v_mhi_d);
- // Square and sum the errors -> 36 bits * 4 = 38 bits
- __m128i v_se0_q, v_se1_q, v_se2_q, v_se3_q, v_se_q, v_elo1_d, v_ehi3_d;
- v_se0_q = _mm_mul_epu32(v_elo_d, v_elo_d);
- v_elo1_d = _mm_srli_si128(v_elo_d, 4);
- v_se1_q = _mm_mul_epu32(v_elo1_d, v_elo1_d);
- v_se0_q = _mm_add_epi64(v_se0_q, v_se1_q);
- v_se2_q = _mm_mul_epu32(v_ehi_d, v_ehi_d);
- v_ehi3_d = _mm_srli_si128(v_ehi_d, 4);
- v_se3_q = _mm_mul_epu32(v_ehi3_d, v_ehi3_d);
- v_se1_q = _mm_add_epi64(v_se2_q, v_se3_q);
- v_se_q = _mm_add_epi64(v_se0_q, v_se1_q);
-
- // Accumulate
- *v_sum_d = _mm_add_epi32(*v_sum_d, v_e_d);
- *v_sse_q = _mm_add_epi64(*v_sse_q, v_se_q);
+ src_ptr += src_stride;
+ a_ptr += a_stride;
+ b_ptr += b_stride;
+ m_ptr += m_stride;
+ }
+ // Reduce down to a single sum and sum of squares
+ sum = _mm_hadd_epi32(sum, sum_sq);
+ sum = _mm_hadd_epi32(sum, sum);
+ *sum_ = _mm_cvtsi128_si32(sum);
+ *sse = _mm_cvtsi128_si32(_mm_srli_si128(sum, 4));
+}
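+
+// Reduction layout: the first _mm_hadd_epi32 interleaves the four partial
+// sums of 'sum' with those of 'sum_sq'; the second collapses each pair, so
+// lane 0 holds the total difference sum and lane 1 the total sum of squares,
+// which the two extracts above read out.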
+
+static void masked_variance8xh(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *a_ptr, const uint8_t *b_ptr,
+ const uint8_t *m_ptr, int m_stride, int height,
+ unsigned int *sse, int *sum_) {
+ int y;
+ __m128i sum = _mm_setzero_si128(), sum_sq = _mm_setzero_si128();
+
+ for (y = 0; y < height; y += 2) {
+ __m128i src = _mm_unpacklo_epi64(
+ _mm_loadl_epi64((const __m128i *)src_ptr),
+ _mm_loadl_epi64((const __m128i *)&src_ptr[src_stride]));
+ const __m128i a = _mm_loadu_si128((const __m128i *)a_ptr);
+ const __m128i b = _mm_loadu_si128((const __m128i *)b_ptr);
+ const __m128i m =
+ _mm_unpacklo_epi64(_mm_loadl_epi64((const __m128i *)m_ptr),
+ _mm_loadl_epi64((const __m128i *)&m_ptr[m_stride]));
+ accumulate_block(src, a, b, m, &sum, &sum_sq);
+
+ src_ptr += src_stride * 2;
+ a_ptr += 16;
+ b_ptr += 16;
+ m_ptr += m_stride * 2;
+ }
+ // Reduce down to a single sum and sum of squares
+ sum = _mm_hadd_epi32(sum, sum_sq);
+ sum = _mm_hadd_epi32(sum, sum);
+ *sum_ = _mm_cvtsi128_si32(sum);
+ *sse = _mm_cvtsi128_si32(_mm_srli_si128(sum, 4));
+}
+
+static void masked_variance4xh(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *a_ptr, const uint8_t *b_ptr,
+ const uint8_t *m_ptr, int m_stride, int height,
+ unsigned int *sse, int *sum_) {
+ int y;
+ __m128i sum = _mm_setzero_si128(), sum_sq = _mm_setzero_si128();
+
+ for (y = 0; y < height; y += 4) {
+ // Load four rows at a time
+ __m128i src =
+ _mm_setr_epi32(*(uint32_t *)src_ptr, *(uint32_t *)&src_ptr[src_stride],
+ *(uint32_t *)&src_ptr[src_stride * 2],
+ *(uint32_t *)&src_ptr[src_stride * 3]);
+ const __m128i a = _mm_loadu_si128((const __m128i *)a_ptr);
+ const __m128i b = _mm_loadu_si128((const __m128i *)b_ptr);
+ const __m128i m = _mm_setr_epi32(
+ *(uint32_t *)m_ptr, *(uint32_t *)&m_ptr[m_stride],
+ *(uint32_t *)&m_ptr[m_stride * 2], *(uint32_t *)&m_ptr[m_stride * 3]);
+ accumulate_block(src, a, b, m, &sum, &sum_sq);
+
+ src_ptr += src_stride * 4;
+ a_ptr += 16;
+ b_ptr += 16;
+ m_ptr += m_stride * 4;
+ }
+ // Reduce down to a single sum and sum of squares
+ sum = _mm_hadd_epi32(sum, sum_sq);
+ sum = _mm_hadd_epi32(sum, sum);
+ *sum_ = _mm_cvtsi128_si32(sum);
+ *sse = _mm_cvtsi128_si32(_mm_srli_si128(sum, 4));
}
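+
+// Each _mm_setr_epi32 in masked_variance4xh above gathers four 4-pixel rows
+// into a single 128-bit register, so one accumulate_block call covers a full
+// 4x4 tile per loop iteration.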
-static INLINE uint32_t highbd_10_calc_masked_variance(__m128i v_sum_d,
- __m128i v_sse_q,
- uint32_t *sse, int w,
- int h) {
- int64_t sum64;
- uint64_t sse64;
-
- // Horizontal sum
- sum64 = hsum_epi32_si32(v_sum_d);
- sse64 = hsum_epi64_si64(v_sse_q);
-
- sum64 = (sum64 >= 0) ? sum64 : -sum64;
-
- // Round
- sum64 = ROUND_POWER_OF_TWO(sum64, 6);
- sse64 = ROUND_POWER_OF_TWO(sse64, 12);
-
- // Normalise
- sum64 = ROUND_POWER_OF_TWO(sum64, 2);
- sse64 = ROUND_POWER_OF_TWO(sse64, 4);
-
- // Store the SSE
- *sse = (uint32_t)sse64;
- // Compute the variance
- return *sse - (uint32_t)((sum64 * sum64) / (w * h));
-}
-static INLINE uint32_t highbd_12_calc_masked_variance(__m128i v_sum_d,
- __m128i v_sse_q,
- uint32_t *sse, int w,
- int h) {
- int64_t sum64;
- uint64_t sse64;
-
- // Horizontal sum
- sum64 = hsum_epi32_si64(v_sum_d);
- sse64 = hsum_epi64_si64(v_sse_q);
-
- sum64 = (sum64 >= 0) ? sum64 : -sum64;
-
- // Round
- sum64 = ROUND_POWER_OF_TWO(sum64, 6);
- sse64 = ROUND_POWER_OF_TWO(sse64, 12);
-
- // Normalise
- sum64 = ROUND_POWER_OF_TWO(sum64, 4);
- sse64 = ROUND_POWER_OF_TWO(sse64, 8);
-
- // Store the SSE
- *sse = (uint32_t)sse64;
- // Compute the variance
- return *sse - (uint32_t)((sum64 * sum64) / (w * h));
-}
+#if CONFIG_HIGHBITDEPTH
+// For width a multiple of 8
+static void highbd_bilinear_filter(const uint16_t *src, int src_stride,
+ int xoffset, int yoffset, uint16_t *dst,
+ int w, int h);
+
+static void highbd_bilinear_filter4xh(const uint16_t *src, int src_stride,
+ int xoffset, int yoffset, uint16_t *dst,
+ int h);
+
+// For width a multiple of 8
+static void highbd_masked_variance(const uint16_t *src_ptr, int src_stride,
+ const uint16_t *a_ptr, int a_stride,
+ const uint16_t *b_ptr, int b_stride,
+ const uint8_t *m_ptr, int m_stride,
+ int width, int height, uint64_t *sse,
+ int *sum_);
+
+static void highbd_masked_variance4xh(const uint16_t *src_ptr, int src_stride,
+ const uint16_t *a_ptr,
+ const uint16_t *b_ptr,
+ const uint8_t *m_ptr, int m_stride,
+ int height, int *sse, int *sum_);
+
+#define HIGHBD_MASK_SUBPIX_VAR_SSSE3(W, H) \
+ unsigned int aom_highbd_8_masked_sub_pixel_variance##W##x##H##_ssse3( \
+ const uint8_t *src8, int src_stride, int xoffset, int yoffset, \
+ const uint8_t *ref8, int ref_stride, const uint8_t *second_pred8, \
+ const uint8_t *msk, int msk_stride, int invert_mask, uint32_t *sse) { \
+ uint64_t sse64; \
+ int sum; \
+ uint16_t temp[(H + 1) * W]; \
+ const uint16_t *src = CONVERT_TO_SHORTPTR(src8); \
+ const uint16_t *ref = CONVERT_TO_SHORTPTR(ref8); \
+ const uint16_t *second_pred = CONVERT_TO_SHORTPTR(second_pred8); \
+ \
+ highbd_bilinear_filter(src, src_stride, xoffset, yoffset, temp, W, H); \
+ \
+ if (!invert_mask) \
+ highbd_masked_variance(ref, ref_stride, temp, W, second_pred, W, msk, \
+ msk_stride, W, H, &sse64, &sum); \
+ else \
+ highbd_masked_variance(ref, ref_stride, second_pred, W, temp, W, msk, \
+ msk_stride, W, H, &sse64, &sum); \
+ *sse = (uint32_t)sse64; \
+ return *sse - (uint32_t)(((int64_t)sum * sum) / (W * H)); \
+ } \
+ unsigned int aom_highbd_10_masked_sub_pixel_variance##W##x##H##_ssse3( \
+ const uint8_t *src8, int src_stride, int xoffset, int yoffset, \
+ const uint8_t *ref8, int ref_stride, const uint8_t *second_pred8, \
+ const uint8_t *msk, int msk_stride, int invert_mask, uint32_t *sse) { \
+ uint64_t sse64; \
+ int sum; \
+ uint16_t temp[(H + 1) * W]; \
+ const uint16_t *src = CONVERT_TO_SHORTPTR(src8); \
+ const uint16_t *ref = CONVERT_TO_SHORTPTR(ref8); \
+ const uint16_t *second_pred = CONVERT_TO_SHORTPTR(second_pred8); \
+ \
+ highbd_bilinear_filter(src, src_stride, xoffset, yoffset, temp, W, H); \
+ \
+ if (!invert_mask) \
+ highbd_masked_variance(ref, ref_stride, temp, W, second_pred, W, msk, \
+ msk_stride, W, H, &sse64, &sum); \
+ else \
+ highbd_masked_variance(ref, ref_stride, second_pred, W, temp, W, msk, \
+ msk_stride, W, H, &sse64, &sum); \
+ *sse = (uint32_t)ROUND_POWER_OF_TWO(sse64, 4); \
+ sum = ROUND_POWER_OF_TWO(sum, 2); \
+ return *sse - (uint32_t)(((int64_t)sum * sum) / (W * H)); \
+ } \
+ unsigned int aom_highbd_12_masked_sub_pixel_variance##W##x##H##_ssse3( \
+ const uint8_t *src8, int src_stride, int xoffset, int yoffset, \
+ const uint8_t *ref8, int ref_stride, const uint8_t *second_pred8, \
+ const uint8_t *msk, int msk_stride, int invert_mask, uint32_t *sse) { \
+ uint64_t sse64; \
+ int sum; \
+ uint16_t temp[(H + 1) * W]; \
+ const uint16_t *src = CONVERT_TO_SHORTPTR(src8); \
+ const uint16_t *ref = CONVERT_TO_SHORTPTR(ref8); \
+ const uint16_t *second_pred = CONVERT_TO_SHORTPTR(second_pred8); \
+ \
+ highbd_bilinear_filter(src, src_stride, xoffset, yoffset, temp, W, H); \
+ \
+ if (!invert_mask) \
+ highbd_masked_variance(ref, ref_stride, temp, W, second_pred, W, msk, \
+ msk_stride, W, H, &sse64, &sum); \
+ else \
+ highbd_masked_variance(ref, ref_stride, second_pred, W, temp, W, msk, \
+ msk_stride, W, H, &sse64, &sum); \
+ *sse = (uint32_t)ROUND_POWER_OF_TWO(sse64, 8); \
+ sum = ROUND_POWER_OF_TWO(sum, 4); \
+ return *sse - (uint32_t)(((int64_t)sum * sum) / (W * H)); \
+ }
+
+#define HIGHBD_MASK_SUBPIX_VAR4XH_SSSE3(H) \
+ unsigned int aom_highbd_8_masked_sub_pixel_variance4x##H##_ssse3( \
+ const uint8_t *src8, int src_stride, int xoffset, int yoffset, \
+ const uint8_t *ref8, int ref_stride, const uint8_t *second_pred8, \
+ const uint8_t *msk, int msk_stride, int invert_mask, uint32_t *sse) { \
+ int sse_; \
+ int sum; \
+ uint16_t temp[(H + 1) * 4]; \
+ const uint16_t *src = CONVERT_TO_SHORTPTR(src8); \
+ const uint16_t *ref = CONVERT_TO_SHORTPTR(ref8); \
+ const uint16_t *second_pred = CONVERT_TO_SHORTPTR(second_pred8); \
+ \
+ highbd_bilinear_filter4xh(src, src_stride, xoffset, yoffset, temp, H); \
+ \
+ if (!invert_mask) \
+ highbd_masked_variance4xh(ref, ref_stride, temp, second_pred, msk, \
+ msk_stride, H, &sse_, &sum); \
+ else \
+ highbd_masked_variance4xh(ref, ref_stride, second_pred, temp, msk, \
+ msk_stride, H, &sse_, &sum); \
+ *sse = (uint32_t)sse_; \
+ return *sse - (uint32_t)(((int64_t)sum * sum) / (4 * H)); \
+ } \
+ unsigned int aom_highbd_10_masked_sub_pixel_variance4x##H##_ssse3( \
+ const uint8_t *src8, int src_stride, int xoffset, int yoffset, \
+ const uint8_t *ref8, int ref_stride, const uint8_t *second_pred8, \
+ const uint8_t *msk, int msk_stride, int invert_mask, uint32_t *sse) { \
+ int sse_; \
+ int sum; \
+ uint16_t temp[(H + 1) * 4]; \
+ const uint16_t *src = CONVERT_TO_SHORTPTR(src8); \
+ const uint16_t *ref = CONVERT_TO_SHORTPTR(ref8); \
+ const uint16_t *second_pred = CONVERT_TO_SHORTPTR(second_pred8); \
+ \
+ highbd_bilinear_filter4xh(src, src_stride, xoffset, yoffset, temp, H); \
+ \
+ if (!invert_mask) \
+ highbd_masked_variance4xh(ref, ref_stride, temp, second_pred, msk, \
+ msk_stride, H, &sse_, &sum); \
+ else \
+ highbd_masked_variance4xh(ref, ref_stride, second_pred, temp, msk, \
+ msk_stride, H, &sse_, &sum); \
+ *sse = (uint32_t)ROUND_POWER_OF_TWO(sse_, 4); \
+ sum = ROUND_POWER_OF_TWO(sum, 2); \
+ return *sse - (uint32_t)(((int64_t)sum * sum) / (4 * H)); \
+ } \
+ unsigned int aom_highbd_12_masked_sub_pixel_variance4x##H##_ssse3( \
+ const uint8_t *src8, int src_stride, int xoffset, int yoffset, \
+ const uint8_t *ref8, int ref_stride, const uint8_t *second_pred8, \
+ const uint8_t *msk, int msk_stride, int invert_mask, uint32_t *sse) { \
+ int sse_; \
+ int sum; \
+ uint16_t temp[(H + 1) * 4]; \
+ const uint16_t *src = CONVERT_TO_SHORTPTR(src8); \
+ const uint16_t *ref = CONVERT_TO_SHORTPTR(ref8); \
+ const uint16_t *second_pred = CONVERT_TO_SHORTPTR(second_pred8); \
+ \
+ highbd_bilinear_filter4xh(src, src_stride, xoffset, yoffset, temp, H); \
+ \
+ if (!invert_mask) \
+ highbd_masked_variance4xh(ref, ref_stride, temp, second_pred, msk, \
+ msk_stride, H, &sse_, &sum); \
+ else \
+ highbd_masked_variance4xh(ref, ref_stride, second_pred, temp, msk, \
+ msk_stride, H, &sse_, &sum); \
+ *sse = (uint32_t)ROUND_POWER_OF_TWO(sse_, 8); \
+ sum = ROUND_POWER_OF_TWO(sum, 4); \
+ return *sse - (uint32_t)(((int64_t)sum * sum) / (4 * H)); \
+ }
-// High bit depth functions for width (W) >= 8
-unsigned int aom_highbd_masked_subpel_varWxH_xzero(
- const uint16_t *src, int src_stride, int yoffset, const uint16_t *dst,
- int dst_stride, const uint8_t *msk, int msk_stride, unsigned int *sse,
- int w, int h, highbd_filter_fn_t filter_fn,
- highbd_calc_masked_var_t calc_var) {
+#if CONFIG_EXT_PARTITION
+HIGHBD_MASK_SUBPIX_VAR_SSSE3(128, 128)
+HIGHBD_MASK_SUBPIX_VAR_SSSE3(128, 64)
+HIGHBD_MASK_SUBPIX_VAR_SSSE3(64, 128)
+#endif
+HIGHBD_MASK_SUBPIX_VAR_SSSE3(64, 64)
+HIGHBD_MASK_SUBPIX_VAR_SSSE3(64, 32)
+HIGHBD_MASK_SUBPIX_VAR_SSSE3(32, 64)
+HIGHBD_MASK_SUBPIX_VAR_SSSE3(32, 32)
+HIGHBD_MASK_SUBPIX_VAR_SSSE3(32, 16)
+HIGHBD_MASK_SUBPIX_VAR_SSSE3(16, 32)
+HIGHBD_MASK_SUBPIX_VAR_SSSE3(16, 16)
+HIGHBD_MASK_SUBPIX_VAR_SSSE3(16, 8)
+HIGHBD_MASK_SUBPIX_VAR_SSSE3(8, 16)
+HIGHBD_MASK_SUBPIX_VAR_SSSE3(8, 8)
+HIGHBD_MASK_SUBPIX_VAR_SSSE3(8, 4)
+HIGHBD_MASK_SUBPIX_VAR4XH_SSSE3(8)
+HIGHBD_MASK_SUBPIX_VAR4XH_SSSE3(4)
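+
+// Bit-depth normalisation in the 10- and 12-bit wrappers above: relative to
+// 8-bit data, 10-bit pixels carry 2 extra bits, so 'sum' scales by 4 and
+// 'sse' by 16, which ROUND_POWER_OF_TWO(sum, 2) and ROUND_POWER_OF_TWO(sse, 4)
+// undo; the 12-bit case shifts by 4 and 8 bits for the same reason.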
+
+static INLINE __m128i highbd_filter_block(const __m128i a, const __m128i b,
+ const __m128i filter) {
+ __m128i v0 = _mm_unpacklo_epi16(a, b);
+ v0 = _mm_madd_epi16(v0, filter);
+ v0 = xx_roundn_epu32(v0, FILTER_BITS);
+
+ __m128i v1 = _mm_unpackhi_epi16(a, b);
+ v1 = _mm_madd_epi16(v1, filter);
+ v1 = xx_roundn_epu32(v1, FILTER_BITS);
+
+ return _mm_packs_epi32(v0, v1);
+}
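+
+// Per-pixel view of highbd_filter_block: assuming the two bilinear taps sum
+// to 1 << FILTER_BITS, each output is
+//   ROUND_POWER_OF_TWO(a * filter[0] + b * filter[1], FILTER_BITS)
+// and so stays within the input pixel range.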
+
+static void highbd_bilinear_filter(const uint16_t *src, int src_stride,
+ int xoffset, int yoffset, uint16_t *dst,
+ int w, int h) {
int i, j;
- __m128i v_src0_w, v_src1_w, v_res_w, v_dst_w, v_msk_b;
- __m128i v_sum_d = _mm_setzero_si128();
- __m128i v_sse_q = _mm_setzero_si128();
- const __m128i v_filter_w =
- _mm_set1_epi32((bilinear_filters_2t[yoffset][1] << 16) +
- bilinear_filters_2t[yoffset][0]);
- assert(yoffset < BIL_SUBPEL_SHIFTS);
- for (j = 0; j < w; j += 8) {
- // Load the first row ready
- v_src0_w = _mm_loadu_si128((const __m128i *)(src + j));
- // Process 2 rows at a time
- for (i = 0; i < h; i += 2) {
- // Load the next row and apply the filter
- v_src1_w = _mm_loadu_si128((const __m128i *)(src + j + src_stride));
- v_res_w = filter_fn(v_src0_w, v_src1_w, v_filter_w);
- // Load the dst and msk for the variance calculation
- v_dst_w = _mm_loadu_si128((const __m128i *)(dst + j));
- v_msk_b = _mm_loadl_epi64((const __m128i *)(msk + j));
- highbd_sum_and_sse(v_res_w, v_dst_w, v_msk_b, &v_sum_d, &v_sse_q);
-
- // Load the next row and apply the filter
- v_src0_w = _mm_loadu_si128((const __m128i *)(src + j + src_stride * 2));
- v_res_w = filter_fn(v_src1_w, v_src0_w, v_filter_w);
- // Load the dst and msk for the variance calculation
- v_dst_w = _mm_loadu_si128((const __m128i *)(dst + j + dst_stride));
- v_msk_b = _mm_loadl_epi64((const __m128i *)(msk + j + msk_stride));
- highbd_sum_and_sse(v_res_w, v_dst_w, v_msk_b, &v_sum_d, &v_sse_q);
- // Move on to the next block of rows
- src += src_stride * 2;
- dst += dst_stride * 2;
- msk += msk_stride * 2;
+ // Horizontal filter
+ if (xoffset == 0) {
+ uint16_t *b = dst;
+ for (i = 0; i < h + 1; ++i) {
+ for (j = 0; j < w; j += 8) {
+ __m128i x = _mm_loadu_si128((__m128i *)&src[j]);
+ _mm_storeu_si128((__m128i *)&b[j], x);
+ }
+ src += src_stride;
+ b += w;
}
- // Reset to the top of the block
- src -= src_stride * h;
- dst -= dst_stride * h;
- msk -= msk_stride * h;
- }
- return calc_var(v_sum_d, v_sse_q, sse, w, h);
-}
-unsigned int aom_highbd_masked_subpel_varWxH_yzero(
- const uint16_t *src, int src_stride, int xoffset, const uint16_t *dst,
- int dst_stride, const uint8_t *msk, int msk_stride, unsigned int *sse,
- int w, int h, highbd_filter_fn_t filter_fn,
- highbd_calc_masked_var_t calc_var) {
- int i, j;
- __m128i v_src0_w, v_src1_w, v_res_w, v_dst_w, v_msk_b;
- __m128i v_sum_d = _mm_setzero_si128();
- __m128i v_sse_q = _mm_setzero_si128();
- const __m128i v_filter_w =
- _mm_set1_epi32((bilinear_filters_2t[xoffset][1] << 16) +
- bilinear_filters_2t[xoffset][0]);
- assert(xoffset < BIL_SUBPEL_SHIFTS);
- for (i = 0; i < h; i++) {
- for (j = 0; j < w; j += 8) {
- // Load this row at offsets j and j + 1 and apply the filter
- v_src0_w = _mm_loadu_si128((const __m128i *)(src + j));
- v_src1_w = _mm_loadu_si128((const __m128i *)(src + j + 1));
- v_res_w = filter_fn(v_src0_w, v_src1_w, v_filter_w);
-
- // Load the dst and msk for the variance calculation
- v_dst_w = _mm_loadu_si128((const __m128i *)(dst + j));
- v_msk_b = _mm_loadl_epi64((const __m128i *)(msk + j));
- highbd_sum_and_sse(v_res_w, v_dst_w, v_msk_b, &v_sum_d, &v_sse_q);
+ } else if (xoffset == 4) {
+ uint16_t *b = dst;
+ for (i = 0; i < h + 1; ++i) {
+ for (j = 0; j < w; j += 8) {
+ __m128i x = _mm_loadu_si128((__m128i *)&src[j]);
+ __m128i y = _mm_loadu_si128((__m128i *)&src[j + 8]);
+ __m128i z = _mm_alignr_epi8(y, x, 2);
+ _mm_storeu_si128((__m128i *)&b[j], _mm_avg_epu16(x, z));
+ }
+ src += src_stride;
+ b += w;
}
- src += src_stride;
- dst += dst_stride;
- msk += msk_stride;
- }
- return calc_var(v_sum_d, v_sse_q, sse, w, h);
-}
-
-unsigned int aom_highbd_masked_subpel_varWxH_xnonzero_ynonzero(
- const uint16_t *src, int src_stride, int xoffset, int yoffset,
- const uint16_t *dst, int dst_stride, const uint8_t *msk, int msk_stride,
- unsigned int *sse, int w, int h, highbd_filter_fn_t xfilter_fn,
- highbd_filter_fn_t yfilter_fn, highbd_calc_masked_var_t calc_var) {
- int i, j;
- __m128i v_src0_w, v_src1_w, v_src2_w, v_src3_w;
- __m128i v_filtered0_w, v_filtered1_w, v_res_w, v_dst_w, v_msk_b;
- __m128i v_sum_d = _mm_setzero_si128();
- __m128i v_sse_q = _mm_setzero_si128();
- const __m128i v_filterx_w =
- _mm_set1_epi32((bilinear_filters_2t[xoffset][1] << 16) +
- bilinear_filters_2t[xoffset][0]);
- const __m128i v_filtery_w =
- _mm_set1_epi32((bilinear_filters_2t[yoffset][1] << 16) +
- bilinear_filters_2t[yoffset][0]);
- assert(xoffset < BIL_SUBPEL_SHIFTS);
- assert(yoffset < BIL_SUBPEL_SHIFTS);
- for (j = 0; j < w; j += 8) {
- // Load the first row ready
- v_src0_w = _mm_loadu_si128((const __m128i *)(src + j));
- v_src1_w = _mm_loadu_si128((const __m128i *)(src + j + 1));
- v_filtered0_w = xfilter_fn(v_src0_w, v_src1_w, v_filterx_w);
- // Process 2 rows at a time
- for (i = 0; i < h; i += 2) {
- // Load the next row & apply the filter
- v_src2_w = _mm_loadu_si128((const __m128i *)(src + src_stride + j));
- v_src3_w = _mm_loadu_si128((const __m128i *)(src + src_stride + j + 1));
- v_filtered1_w = xfilter_fn(v_src2_w, v_src3_w, v_filterx_w);
- // Load the dst and msk for the variance calculation
- v_dst_w = _mm_loadu_si128((const __m128i *)(dst + j));
- v_msk_b = _mm_loadl_epi64((const __m128i *)(msk + j));
- // Complete the calculation for this row and add it to the running total
- v_res_w = yfilter_fn(v_filtered0_w, v_filtered1_w, v_filtery_w);
- highbd_sum_and_sse(v_res_w, v_dst_w, v_msk_b, &v_sum_d, &v_sse_q);
-
- // Load the next row & apply the filter
- v_src0_w = _mm_loadu_si128((const __m128i *)(src + src_stride * 2 + j));
- v_src1_w =
- _mm_loadu_si128((const __m128i *)(src + src_stride * 2 + j + 1));
- v_filtered0_w = xfilter_fn(v_src0_w, v_src1_w, v_filterx_w);
- // Load the dst and msk for the variance calculation
- v_dst_w = _mm_loadu_si128((const __m128i *)(dst + dst_stride + j));
- v_msk_b = _mm_loadl_epi64((const __m128i *)(msk + msk_stride + j));
- // Complete the calculation for this row and add it to the running total
- v_res_w = yfilter_fn(v_filtered1_w, v_filtered0_w, v_filtery_w);
- highbd_sum_and_sse(v_res_w, v_dst_w, v_msk_b, &v_sum_d, &v_sse_q);
- // Move on to the next block of rows
- src += src_stride * 2;
- dst += dst_stride * 2;
- msk += msk_stride * 2;
+ } else {
+ uint16_t *b = dst;
+ const uint8_t *hfilter = bilinear_filters_2t[xoffset];
+ const __m128i hfilter_vec = _mm_set1_epi32(hfilter[0] | (hfilter[1] << 16));
+ for (i = 0; i < h + 1; ++i) {
+ for (j = 0; j < w; j += 8) {
+ const __m128i x = _mm_loadu_si128((__m128i *)&src[j]);
+ const __m128i y = _mm_loadu_si128((__m128i *)&src[j + 8]);
+ const __m128i z = _mm_alignr_epi8(y, x, 2);
+ const __m128i res = highbd_filter_block(x, z, hfilter_vec);
+ _mm_storeu_si128((__m128i *)&b[j], res);
+ }
+
+ src += src_stride;
+ b += w;
}
- // Reset to the top of the block
- src -= src_stride * h;
- dst -= dst_stride * h;
- msk -= msk_stride * h;
}
- return calc_var(v_sum_d, v_sse_q, sse, w, h);
-}
-// Note the order in which rows are loaded: xmm[127:64] = row 1, xmm[63:0] = row 2
-unsigned int aom_highbd_masked_subpel_var4xH_xzero(
- const uint16_t *src, int src_stride, int yoffset, const uint16_t *dst,
- int dst_stride, const uint8_t *msk, int msk_stride, unsigned int *sse,
- int h, highbd_calc_masked_var_t calc_var) {
- int i;
- __m128i v_src0_w, v_src1_w, v_filtered0_d, v_filtered1_d, v_res_w;
- __m128i v_dst_w, v_msk_b;
- __m128i v_sum_d = _mm_setzero_si128();
- __m128i v_sse_q = _mm_setzero_si128();
- __m128i v_filter_w = _mm_set1_epi32((bilinear_filters_2t[yoffset][1] << 16) +
- bilinear_filters_2t[yoffset][0]);
- assert(yoffset < BIL_SUBPEL_SHIFTS);
- // Load the first row of src data ready
- v_src0_w = _mm_loadl_epi64((const __m128i *)src);
- for (i = 0; i < h; i += 2) {
- if (yoffset == HALF_PIXEL_OFFSET) {
- // Load the rest of the source data for these rows
- v_src1_w = _mm_or_si128(
- _mm_slli_si128(v_src0_w, 8),
- _mm_loadl_epi64((const __m128i *)(src + src_stride * 1)));
- v_src0_w = _mm_or_si128(
- _mm_slli_si128(v_src1_w, 8),
- _mm_loadl_epi64((const __m128i *)(src + src_stride * 2)));
- // Apply the y filter
- v_res_w = _mm_avg_epu16(v_src1_w, v_src0_w);
- } else {
- // Load the data and apply the y filter
- v_src1_w = _mm_loadl_epi64((const __m128i *)(src + src_stride * 1));
- highbd_apply_filter_lo(v_src0_w, v_src1_w, v_filter_w, &v_filtered0_d);
- v_src0_w = _mm_loadl_epi64((const __m128i *)(src + src_stride * 2));
- highbd_apply_filter_lo(v_src1_w, v_src0_w, v_filter_w, &v_filtered1_d);
- v_res_w = _mm_packs_epi32(v_filtered1_d, v_filtered0_d);
+ // Vertical filter
+ if (yoffset == 0) {
+ // The data is already in 'dst', so no need to filter
+ } else if (yoffset == 4) {
+ for (i = 0; i < h; ++i) {
+ for (j = 0; j < w; j += 8) {
+ __m128i x = _mm_loadu_si128((__m128i *)&dst[j]);
+ __m128i y = _mm_loadu_si128((__m128i *)&dst[j + w]);
+ _mm_storeu_si128((__m128i *)&dst[j], _mm_avg_epu16(x, y));
+ }
+ dst += w;
+ }
+ } else {
+ const uint8_t *vfilter = bilinear_filters_2t[yoffset];
+ const __m128i vfilter_vec = _mm_set1_epi32(vfilter[0] | (vfilter[1] << 16));
+ for (i = 0; i < h; ++i) {
+ for (j = 0; j < w; j += 8) {
+ const __m128i x = _mm_loadu_si128((__m128i *)&dst[j]);
+ const __m128i y = _mm_loadu_si128((__m128i *)&dst[j + w]);
+ const __m128i res = highbd_filter_block(x, y, vfilter_vec);
+ _mm_storeu_si128((__m128i *)&dst[j], res);
+ }
+
+ dst += w;
}
- // Load the dst data
- v_dst_w = _mm_unpacklo_epi64(
- _mm_loadl_epi64((const __m128i *)(dst + dst_stride * 1)),
- _mm_loadl_epi64((const __m128i *)(dst + dst_stride * 0)));
- // Load the mask data
- v_msk_b = _mm_unpacklo_epi32(
- _mm_loadl_epi64((const __m128i *)(msk + msk_stride * 1)),
- _mm_loadl_epi64((const __m128i *)(msk + msk_stride * 0)));
- // Compute the sum and SSE
- highbd_sum_and_sse(v_res_w, v_dst_w, v_msk_b, &v_sum_d, &v_sse_q);
- // Move on to the next set of rows
- src += src_stride * 2;
- dst += dst_stride * 2;
- msk += msk_stride * 2;
}
- return calc_var(v_sum_d, v_sse_q, sse, 4, h);
}
-unsigned int aom_highbd_masked_subpel_var4xH_yzero(
- const uint16_t *src, int src_stride, int xoffset, const uint16_t *dst,
- int dst_stride, const uint8_t *msk, int msk_stride, unsigned int *sse,
- int h, highbd_calc_masked_var_t calc_var) {
- int i;
- __m128i v_src0_w, v_src1_w, v_filtered0_d, v_filtered1_d;
- __m128i v_src0_shift_w, v_src1_shift_w, v_res_w, v_dst_w, v_msk_b;
- __m128i v_sum_d = _mm_setzero_si128();
- __m128i v_sse_q = _mm_setzero_si128();
- __m128i v_filter_w = _mm_set1_epi32((bilinear_filters_2t[xoffset][1] << 16) +
- bilinear_filters_2t[xoffset][0]);
- assert(xoffset < BIL_SUBPEL_SHIFTS);
- for (i = 0; i < h; i += 2) {
- // Load the src data
- v_src0_w = _mm_loadu_si128((const __m128i *)(src));
- v_src0_shift_w = _mm_srli_si128(v_src0_w, 2);
- v_src1_w = _mm_loadu_si128((const __m128i *)(src + src_stride));
- v_src1_shift_w = _mm_srli_si128(v_src1_w, 2);
- // Apply the x filter
- if (xoffset == HALF_PIXEL_OFFSET) {
- v_src1_w = _mm_unpacklo_epi64(v_src0_w, v_src1_w);
- v_src1_shift_w = _mm_unpacklo_epi64(v_src0_shift_w, v_src1_shift_w);
- v_res_w = _mm_avg_epu16(v_src1_w, v_src1_shift_w);
- } else {
- highbd_apply_filter_lo(v_src0_w, v_src0_shift_w, v_filter_w,
- &v_filtered0_d);
- highbd_apply_filter_lo(v_src1_w, v_src1_shift_w, v_filter_w,
- &v_filtered1_d);
- v_res_w = _mm_packs_epi32(v_filtered0_d, v_filtered1_d);
- }
- // Load the dst data
- v_dst_w = _mm_unpacklo_epi64(
- _mm_loadl_epi64((const __m128i *)(dst + dst_stride * 0)),
- _mm_loadl_epi64((const __m128i *)(dst + dst_stride * 1)));
- // Load the mask data
- v_msk_b = _mm_unpacklo_epi32(
- _mm_loadl_epi64((const __m128i *)(msk + msk_stride * 0)),
- _mm_loadl_epi64((const __m128i *)(msk + msk_stride * 1)));
- // Compute the sum and SSE
- highbd_sum_and_sse(v_res_w, v_dst_w, v_msk_b, &v_sum_d, &v_sse_q);
- // Move on to the next set of rows
- src += src_stride * 2;
- dst += dst_stride * 2;
- msk += msk_stride * 2;
- }
- return calc_var(v_sum_d, v_sse_q, sse, 4, h);
+static INLINE __m128i highbd_filter_block_2rows(const __m128i a0,
+ const __m128i b0,
+ const __m128i a1,
+ const __m128i b1,
+ const __m128i filter) {
+ __m128i v0 = _mm_unpacklo_epi16(a0, b0);
+ v0 = _mm_madd_epi16(v0, filter);
+ v0 = xx_roundn_epu32(v0, FILTER_BITS);
+
+ __m128i v1 = _mm_unpacklo_epi16(a1, b1);
+ v1 = _mm_madd_epi16(v1, filter);
+ v1 = xx_roundn_epu32(v1, FILTER_BITS);
+
+ return _mm_packs_epi32(v0, v1);
}
-unsigned int aom_highbd_masked_subpel_var4xH_xnonzero_ynonzero(
- const uint16_t *src, int src_stride, int xoffset, int yoffset,
- const uint16_t *dst, int dst_stride, const uint8_t *msk, int msk_stride,
- unsigned int *sse, int h, highbd_calc_masked_var_t calc_var) {
+static void highbd_bilinear_filter4xh(const uint16_t *src, int src_stride,
+ int xoffset, int yoffset, uint16_t *dst,
+ int h) {
int i;
- __m128i v_src0_w, v_src1_w, v_filtered0_d, v_filtered1_d, v_dst_w, v_msk_b;
- __m128i v_src0_shift_w, v_src1_shift_w;
- __m128i v_xres0_w, v_xres1_w, v_res_w, v_temp_w;
- __m128i v_sum_d = _mm_setzero_si128();
- __m128i v_sse_q = _mm_setzero_si128();
- __m128i v_filterx_w = _mm_set1_epi32((bilinear_filters_2t[xoffset][1] << 16) +
- bilinear_filters_2t[xoffset][0]);
- __m128i v_filtery_w = _mm_set1_epi32((bilinear_filters_2t[yoffset][1] << 16) +
- bilinear_filters_2t[yoffset][0]);
- assert(xoffset < BIL_SUBPEL_SHIFTS);
- assert(yoffset < BIL_SUBPEL_SHIFTS);
- // Load the first block of src data
- v_src0_w = _mm_loadu_si128((const __m128i *)(src));
- v_src0_shift_w = _mm_srli_si128(v_src0_w, 2);
- v_src1_w = _mm_loadu_si128((const __m128i *)(src + src_stride));
- v_src1_shift_w = _mm_srli_si128(v_src1_w, 2);
- // Apply the x filter
- if (xoffset == HALF_PIXEL_OFFSET) {
- v_src1_w = _mm_unpacklo_epi64(v_src0_w, v_src1_w);
- v_src1_shift_w = _mm_unpacklo_epi64(v_src0_shift_w, v_src1_shift_w);
- v_xres0_w = _mm_avg_epu16(v_src1_w, v_src1_shift_w);
- } else {
- highbd_apply_filter_lo(v_src0_w, v_src0_shift_w, v_filterx_w,
- &v_filtered0_d);
- highbd_apply_filter_lo(v_src1_w, v_src1_shift_w, v_filterx_w,
- &v_filtered1_d);
- v_xres0_w = _mm_packs_epi32(v_filtered0_d, v_filtered1_d);
- }
- for (i = 0; i < h; i += 4) {
- // Load the next block of src data
- v_src0_w = _mm_loadu_si128((const __m128i *)(src + src_stride * 2));
- v_src0_shift_w = _mm_srli_si128(v_src0_w, 2);
- v_src1_w = _mm_loadu_si128((const __m128i *)(src + src_stride * 3));
- v_src1_shift_w = _mm_srli_si128(v_src1_w, 2);
- // Apply the x filter
- if (xoffset == HALF_PIXEL_OFFSET) {
- v_src1_w = _mm_unpacklo_epi64(v_src0_w, v_src1_w);
- v_src1_shift_w = _mm_unpacklo_epi64(v_src0_shift_w, v_src1_shift_w);
- v_xres1_w = _mm_avg_epu16(v_src1_w, v_src1_shift_w);
- } else {
- highbd_apply_filter_lo(v_src0_w, v_src0_shift_w, v_filterx_w,
- &v_filtered0_d);
- highbd_apply_filter_lo(v_src1_w, v_src1_shift_w, v_filterx_w,
- &v_filtered1_d);
- v_xres1_w = _mm_packs_epi32(v_filtered0_d, v_filtered1_d);
+ // Horizontal filter
+ if (xoffset == 0) {
+ uint16_t *b = dst;
+ for (i = 0; i < h + 1; ++i) {
+ __m128i x = _mm_loadl_epi64((__m128i *)src);
+ _mm_storel_epi64((__m128i *)b, x);
+ src += src_stride;
+ b += 4;
}
- // Apply the y filter to the previous block
- v_temp_w = _mm_or_si128(_mm_srli_si128(v_xres0_w, 8),
- _mm_slli_si128(v_xres1_w, 8));
- if (yoffset == HALF_PIXEL_OFFSET) {
- v_res_w = _mm_avg_epu16(v_xres0_w, v_temp_w);
- } else {
- v_res_w = highbd_apply_filter(v_xres0_w, v_temp_w, v_filtery_w);
+ } else if (xoffset == 4) {
+ uint16_t *b = dst;
+ for (i = 0; i < h + 1; ++i) {
+ __m128i x = _mm_loadu_si128((__m128i *)src);
+ __m128i z = _mm_srli_si128(x, 2);
+ _mm_storel_epi64((__m128i *)b, _mm_avg_epu16(x, z));
+ src += src_stride;
+ b += 4;
}
- // Load the dst data
- v_dst_w = _mm_unpacklo_epi64(
- _mm_loadl_epi64((const __m128i *)(dst + dst_stride * 0)),
- _mm_loadl_epi64((const __m128i *)(dst + dst_stride * 1)));
- // Load the mask data
- v_msk_b = _mm_unpacklo_epi32(
- _mm_loadl_epi64((const __m128i *)(msk + msk_stride * 0)),
- _mm_loadl_epi64((const __m128i *)(msk + msk_stride * 1)));
- // Compute the sum and SSE
- highbd_sum_and_sse(v_res_w, v_dst_w, v_msk_b, &v_sum_d, &v_sse_q);
-
- // Load the next block of src data
- v_src0_w = _mm_loadu_si128((const __m128i *)(src + src_stride * 4));
- v_src0_shift_w = _mm_srli_si128(v_src0_w, 2);
- v_src1_w = _mm_loadu_si128((const __m128i *)(src + src_stride * 5));
- v_src1_shift_w = _mm_srli_si128(v_src1_w, 2);
- // Apply the x filter
- if (xoffset == HALF_PIXEL_OFFSET) {
- v_src1_w = _mm_unpacklo_epi64(v_src0_w, v_src1_w);
- v_src1_shift_w = _mm_unpacklo_epi64(v_src0_shift_w, v_src1_shift_w);
- v_xres0_w = _mm_avg_epu16(v_src1_w, v_src1_shift_w);
- } else {
- highbd_apply_filter_lo(v_src0_w, v_src0_shift_w, v_filterx_w,
- &v_filtered0_d);
- highbd_apply_filter_lo(v_src1_w, v_src1_shift_w, v_filterx_w,
- &v_filtered1_d);
- v_xres0_w = _mm_packs_epi32(v_filtered0_d, v_filtered1_d);
+ } else {
+ uint16_t *b = dst;
+ const uint8_t *hfilter = bilinear_filters_2t[xoffset];
+ const __m128i hfilter_vec = _mm_set1_epi32(hfilter[0] | (hfilter[1] << 16));
+ for (i = 0; i < h; i += 2) {
+ const __m128i x0 = _mm_loadu_si128((__m128i *)src);
+ const __m128i z0 = _mm_srli_si128(x0, 2);
+ const __m128i x1 = _mm_loadu_si128((__m128i *)&src[src_stride]);
+ const __m128i z1 = _mm_srli_si128(x1, 2);
+ const __m128i res =
+ highbd_filter_block_2rows(x0, z0, x1, z1, hfilter_vec);
+ _mm_storeu_si128((__m128i *)b, res);
+
+ src += src_stride * 2;
+ b += 8;
}
- // Apply the y filter to the previous block
- v_temp_w = _mm_or_si128(_mm_srli_si128(v_xres1_w, 8),
- _mm_slli_si128(v_xres0_w, 8));
- if (yoffset == HALF_PIXEL_OFFSET) {
- v_res_w = _mm_avg_epu16(v_xres1_w, v_temp_w);
- } else {
- v_res_w = highbd_apply_filter(v_xres1_w, v_temp_w, v_filtery_w);
+ // Process the final row (i == h) separately
+ __m128i x = _mm_loadu_si128((__m128i *)src);
+ __m128i z = _mm_srli_si128(x, 2);
+
+ __m128i v0 = _mm_unpacklo_epi16(x, z);
+ v0 = _mm_madd_epi16(v0, hfilter_vec);
+ v0 = xx_roundn_epu32(v0, FILTER_BITS);
+
+ _mm_storel_epi64((__m128i *)b, _mm_packs_epi32(v0, v0));
+ }
+
+ // Vertical filter
+ if (yoffset == 0) {
+ // The data is already in 'dst', so no need to filter
+ } else if (yoffset == 4) {
+ for (i = 0; i < h; ++i) {
+ __m128i x = _mm_loadl_epi64((__m128i *)dst);
+ __m128i y = _mm_loadl_epi64((__m128i *)&dst[4]);
+ _mm_storel_epi64((__m128i *)dst, _mm_avg_epu16(x, y));
+ dst += 4;
}
- // Load the dst data
- v_dst_w = _mm_unpacklo_epi64(
- _mm_loadl_epi64((const __m128i *)(dst + dst_stride * 2)),
- _mm_loadl_epi64((const __m128i *)(dst + dst_stride * 3)));
- // Load the mask data
- v_msk_b = _mm_unpacklo_epi32(
- _mm_loadl_epi64((const __m128i *)(msk + msk_stride * 2)),
- _mm_loadl_epi64((const __m128i *)(msk + msk_stride * 3)));
- // Compute the sum and SSE
- highbd_sum_and_sse(v_res_w, v_dst_w, v_msk_b, &v_sum_d, &v_sse_q);
- // Move on to the next set of rows
- src += src_stride * 4;
- dst += dst_stride * 4;
- msk += msk_stride * 4;
- }
- return calc_var(v_sum_d, v_sse_q, sse, 4, h);
-}
+ } else {
+ const uint8_t *vfilter = bilinear_filters_2t[yoffset];
+ const __m128i vfilter_vec = _mm_set1_epi32(vfilter[0] | (vfilter[1] << 16));
+ for (i = 0; i < h; i += 2) {
+ const __m128i x = _mm_loadl_epi64((__m128i *)dst);
+ const __m128i y = _mm_loadl_epi64((__m128i *)&dst[4]);
+ const __m128i z = _mm_loadl_epi64((__m128i *)&dst[8]);
+ const __m128i res = highbd_filter_block_2rows(x, y, y, z, vfilter_vec);
+ _mm_storeu_si128((__m128i *)dst, res);
-// For W >= 8
-#define HIGHBD_MASK_SUBPIX_VAR_LARGE(W, H) \
- unsigned int highbd_masked_sub_pixel_variance##W##x##H##_ssse3( \
- const uint8_t *src8, int src_stride, int xoffset, int yoffset, \
- const uint8_t *dst8, int dst_stride, const uint8_t *msk, int msk_stride, \
- unsigned int *sse, highbd_calc_masked_var_t calc_var, \
- highbd_variance_fn_t full_variance_function) { \
- uint16_t *src = CONVERT_TO_SHORTPTR(src8); \
- uint16_t *dst = CONVERT_TO_SHORTPTR(dst8); \
- assert(W % 8 == 0); \
- if (xoffset == 0) { \
- if (yoffset == 0) \
- return full_variance_function(src8, src_stride, dst8, dst_stride, msk, \
- msk_stride, sse); \
- else if (yoffset == HALF_PIXEL_OFFSET) \
- return aom_highbd_masked_subpel_varWxH_xzero( \
- src, src_stride, HALF_PIXEL_OFFSET, dst, dst_stride, msk, \
- msk_stride, sse, W, H, highbd_apply_filter_avg, calc_var); \
- else \
- return aom_highbd_masked_subpel_varWxH_xzero( \
- src, src_stride, yoffset, dst, dst_stride, msk, msk_stride, sse, \
- W, H, highbd_apply_filter, calc_var); \
- } else if (yoffset == 0) { \
- if (xoffset == HALF_PIXEL_OFFSET) \
- return aom_highbd_masked_subpel_varWxH_yzero( \
- src, src_stride, HALF_PIXEL_OFFSET, dst, dst_stride, msk, \
- msk_stride, sse, W, H, highbd_apply_filter_avg, calc_var); \
- else \
- return aom_highbd_masked_subpel_varWxH_yzero( \
- src, src_stride, xoffset, dst, dst_stride, msk, msk_stride, sse, \
- W, H, highbd_apply_filter, calc_var); \
- } else if (xoffset == HALF_PIXEL_OFFSET) { \
- if (yoffset == HALF_PIXEL_OFFSET) \
- return aom_highbd_masked_subpel_varWxH_xnonzero_ynonzero( \
- src, src_stride, HALF_PIXEL_OFFSET, HALF_PIXEL_OFFSET, dst, \
- dst_stride, msk, msk_stride, sse, W, H, highbd_apply_filter_avg, \
- highbd_apply_filter_avg, calc_var); \
- else \
- return aom_highbd_masked_subpel_varWxH_xnonzero_ynonzero( \
- src, src_stride, HALF_PIXEL_OFFSET, yoffset, dst, dst_stride, msk, \
- msk_stride, sse, W, H, highbd_apply_filter_avg, \
- highbd_apply_filter, calc_var); \
- } else { \
- if (yoffset == HALF_PIXEL_OFFSET) \
- return aom_highbd_masked_subpel_varWxH_xnonzero_ynonzero( \
- src, src_stride, xoffset, HALF_PIXEL_OFFSET, dst, dst_stride, msk, \
- msk_stride, sse, W, H, highbd_apply_filter, \
- highbd_apply_filter_avg, calc_var); \
- else \
- return aom_highbd_masked_subpel_varWxH_xnonzero_ynonzero( \
- src, src_stride, xoffset, yoffset, dst, dst_stride, msk, \
- msk_stride, sse, W, H, highbd_apply_filter, highbd_apply_filter, \
- calc_var); \
- } \
+ dst += 8;
+ }
}
+}
-// For W < 8
-#define HIGHBD_MASK_SUBPIX_VAR_SMALL(W, H) \
- unsigned int highbd_masked_sub_pixel_variance##W##x##H##_ssse3( \
- const uint8_t *src8, int src_stride, int xoffset, int yoffset, \
- const uint8_t *dst8, int dst_stride, const uint8_t *msk, int msk_stride, \
- unsigned int *sse, highbd_calc_masked_var_t calc_var, \
- highbd_variance_fn_t full_variance_function) { \
- uint16_t *src = CONVERT_TO_SHORTPTR(src8); \
- uint16_t *dst = CONVERT_TO_SHORTPTR(dst8); \
- assert(W == 4); \
- if (xoffset == 0 && yoffset == 0) \
- return full_variance_function(src8, src_stride, dst8, dst_stride, msk, \
- msk_stride, sse); \
- else if (xoffset == 0) \
- return aom_highbd_masked_subpel_var4xH_xzero( \
- src, src_stride, yoffset, dst, dst_stride, msk, msk_stride, sse, H, \
- calc_var); \
- else if (yoffset == 0) \
- return aom_highbd_masked_subpel_var4xH_yzero( \
- src, src_stride, xoffset, dst, dst_stride, msk, msk_stride, sse, H, \
- calc_var); \
- else \
- return aom_highbd_masked_subpel_var4xH_xnonzero_ynonzero( \
- src, src_stride, xoffset, yoffset, dst, dst_stride, msk, msk_stride, \
- sse, H, calc_var); \
- }
+static void highbd_masked_variance(const uint16_t *src_ptr, int src_stride,
+ const uint16_t *a_ptr, int a_stride,
+ const uint16_t *b_ptr, int b_stride,
+ const uint8_t *m_ptr, int m_stride,
+ int width, int height, uint64_t *sse,
+ int *sum_) {
+ int x, y;
+ // Note on bit widths:
+ // The maximum value of 'sum' is (2^12 - 1) * 128 * 128 =~ 2^26,
+ // so this can be kept as four 32-bit values.
+ // But the maximum value of 'sum_sq' is (2^12 - 1)^2 * 128 * 128 =~ 2^38,
+ // so this must be stored as two 64-bit values.
+ __m128i sum = _mm_setzero_si128(), sum_sq = _mm_setzero_si128();
+ const __m128i mask_max = _mm_set1_epi16((1 << AOM_BLEND_A64_ROUND_BITS));
+ const __m128i round_const =
+ _mm_set1_epi32((1 << AOM_BLEND_A64_ROUND_BITS) >> 1);
+ const __m128i zero = _mm_setzero_si128();
+
+ for (y = 0; y < height; y++) {
+ for (x = 0; x < width; x += 8) {
+ const __m128i src = _mm_loadu_si128((const __m128i *)&src_ptr[x]);
+ const __m128i a = _mm_loadu_si128((const __m128i *)&a_ptr[x]);
+ const __m128i b = _mm_loadu_si128((const __m128i *)&b_ptr[x]);
+ const __m128i m =
+ _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)&m_ptr[x]), zero);
+ const __m128i m_inv = _mm_sub_epi16(mask_max, m);
+
+ // Calculate 8 predicted pixels.
+ const __m128i data_l = _mm_unpacklo_epi16(a, b);
+ const __m128i mask_l = _mm_unpacklo_epi16(m, m_inv);
+ __m128i pred_l = _mm_madd_epi16(data_l, mask_l);
+ pred_l = _mm_srai_epi32(_mm_add_epi32(pred_l, round_const),
+ AOM_BLEND_A64_ROUND_BITS);
+
+ const __m128i data_r = _mm_unpackhi_epi16(a, b);
+ const __m128i mask_r = _mm_unpackhi_epi16(m, m_inv);
+ __m128i pred_r = _mm_madd_epi16(data_r, mask_r);
+ pred_r = _mm_srai_epi32(_mm_add_epi32(pred_r, round_const),
+ AOM_BLEND_A64_ROUND_BITS);
+
+ const __m128i src_l = _mm_unpacklo_epi16(src, zero);
+ const __m128i src_r = _mm_unpackhi_epi16(src, zero);
+ __m128i diff_l = _mm_sub_epi32(pred_l, src_l);
+ __m128i diff_r = _mm_sub_epi32(pred_r, src_r);
+
+ // Update partial sums and partial sums of squares
+ sum = _mm_add_epi32(sum, _mm_add_epi32(diff_l, diff_r));
+ // A trick: Now each entry of diff_l and diff_r is stored in a 32-bit
+ // field, but the range of values is only [-(2^12 - 1), 2^12 - 1].
+ // So we can re-pack into 16-bit fields and use _mm_madd_epi16
+ // to calculate the squares and partially sum them.
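+ // Concretely: |diff| <= 2^12 - 1, each square is below 2^24, and the
+ // pairwise madd of two squares stays below 2^25, comfortably inside int32.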
+ const __m128i tmp = _mm_packs_epi32(diff_l, diff_r);
+ const __m128i prod = _mm_madd_epi16(tmp, tmp);
+ // Then we want to sign-extend to 64 bits and accumulate
+ const __m128i sign = _mm_srai_epi32(prod, 31);
+ const __m128i tmp_0 = _mm_unpacklo_epi32(prod, sign);
+ const __m128i tmp_1 = _mm_unpackhi_epi32(prod, sign);
+ sum_sq = _mm_add_epi64(sum_sq, _mm_add_epi64(tmp_0, tmp_1));
+ }
-#define HIGHBD_MASK_SUBPIX_VAR_WRAPPERS(W, H) \
- unsigned int aom_highbd_masked_sub_pixel_variance##W##x##H##_ssse3( \
- const uint8_t *src8, int src_stride, int xoffset, int yoffset, \
- const uint8_t *dst8, int dst_stride, const uint8_t *msk, int msk_stride, \
- unsigned int *sse) { \
- return highbd_masked_sub_pixel_variance##W##x##H##_ssse3( \
- src8, src_stride, xoffset, yoffset, dst8, dst_stride, msk, msk_stride, \
- sse, calc_masked_variance, \
- aom_highbd_masked_variance##W##x##H##_ssse3); \
- } \
- unsigned int aom_highbd_10_masked_sub_pixel_variance##W##x##H##_ssse3( \
- const uint8_t *src8, int src_stride, int xoffset, int yoffset, \
- const uint8_t *dst8, int dst_stride, const uint8_t *msk, int msk_stride, \
- unsigned int *sse) { \
- return highbd_masked_sub_pixel_variance##W##x##H##_ssse3( \
- src8, src_stride, xoffset, yoffset, dst8, dst_stride, msk, msk_stride, \
- sse, highbd_10_calc_masked_variance, \
- aom_highbd_10_masked_variance##W##x##H##_ssse3); \
- } \
- unsigned int aom_highbd_12_masked_sub_pixel_variance##W##x##H##_ssse3( \
- const uint8_t *src8, int src_stride, int xoffset, int yoffset, \
- const uint8_t *dst8, int dst_stride, const uint8_t *msk, int msk_stride, \
- unsigned int *sse) { \
- return highbd_masked_sub_pixel_variance##W##x##H##_ssse3( \
- src8, src_stride, xoffset, yoffset, dst8, dst_stride, msk, msk_stride, \
- sse, highbd_12_calc_masked_variance, \
- aom_highbd_12_masked_variance##W##x##H##_ssse3); \
- }
+ src_ptr += src_stride;
+ a_ptr += a_stride;
+ b_ptr += b_stride;
+ m_ptr += m_stride;
+ }
+ // Reduce down to a single sum and sum of squares
+ sum = _mm_hadd_epi32(sum, zero);
+ sum = _mm_hadd_epi32(sum, zero);
+ *sum_ = _mm_cvtsi128_si32(sum);
+ sum_sq = _mm_add_epi64(sum_sq, _mm_srli_si128(sum_sq, 8));
+ _mm_storel_epi64((__m128i *)sse, sum_sq);
+}
+
+static void highbd_masked_variance4xh(const uint16_t *src_ptr, int src_stride,
+ const uint16_t *a_ptr,
+ const uint16_t *b_ptr,
+ const uint8_t *m_ptr, int m_stride,
+ int height, int *sse, int *sum_) {
+ int y;
+ // Note: For this function, h <= 8 (or maybe 16 if we add 4:1 partitions).
+ // So the maximum value of sum is (2^12 - 1) * 4 * 16 =~ 2^18
+ // and the maximum value of sum_sq is (2^12 - 1)^2 * 4 * 16 =~ 2^30.
+ // So we can safely pack sum_sq into 32-bit fields, which is slightly more
+ // convenient.
+ __m128i sum = _mm_setzero_si128(), sum_sq = _mm_setzero_si128();
+ const __m128i mask_max = _mm_set1_epi16((1 << AOM_BLEND_A64_ROUND_BITS));
+ const __m128i round_const =
+ _mm_set1_epi32((1 << AOM_BLEND_A64_ROUND_BITS) >> 1);
+ const __m128i zero = _mm_setzero_si128();
+
+ for (y = 0; y < height; y += 2) {
+ __m128i src = _mm_unpacklo_epi64(
+ _mm_loadl_epi64((const __m128i *)src_ptr),
+ _mm_loadl_epi64((const __m128i *)&src_ptr[src_stride]));
+ const __m128i a = _mm_loadu_si128((const __m128i *)a_ptr);
+ const __m128i b = _mm_loadu_si128((const __m128i *)b_ptr);
+ const __m128i m = _mm_unpacklo_epi8(
+ _mm_unpacklo_epi32(
+ _mm_cvtsi32_si128(*(const uint32_t *)m_ptr),
+ _mm_cvtsi32_si128(*(const uint32_t *)&m_ptr[m_stride])),
+ zero);
+ const __m128i m_inv = _mm_sub_epi16(mask_max, m);
+
+ const __m128i data_l = _mm_unpacklo_epi16(a, b);
+ const __m128i mask_l = _mm_unpacklo_epi16(m, m_inv);
+ __m128i pred_l = _mm_madd_epi16(data_l, mask_l);
+ pred_l = _mm_srai_epi32(_mm_add_epi32(pred_l, round_const),
+ AOM_BLEND_A64_ROUND_BITS);
+
+ const __m128i data_r = _mm_unpackhi_epi16(a, b);
+ const __m128i mask_r = _mm_unpackhi_epi16(m, m_inv);
+ __m128i pred_r = _mm_madd_epi16(data_r, mask_r);
+ pred_r = _mm_srai_epi32(_mm_add_epi32(pred_r, round_const),
+ AOM_BLEND_A64_ROUND_BITS);
+
+ const __m128i src_l = _mm_unpacklo_epi16(src, zero);
+ const __m128i src_r = _mm_unpackhi_epi16(src, zero);
+ __m128i diff_l = _mm_sub_epi32(pred_l, src_l);
+ __m128i diff_r = _mm_sub_epi32(pred_r, src_r);
+
+ // Update partial sums and partial sums of squares
+ sum = _mm_add_epi32(sum, _mm_add_epi32(diff_l, diff_r));
+ const __m128i tmp = _mm_packs_epi32(diff_l, diff_r);
+ const __m128i prod = _mm_madd_epi16(tmp, tmp);
+ sum_sq = _mm_add_epi32(sum_sq, prod);
+
+ src_ptr += src_stride * 2;
+ a_ptr += 8;
+ b_ptr += 8;
+ m_ptr += m_stride * 2;
+ }
+ // Reduce down to a single sum and sum of squares
+ sum = _mm_hadd_epi32(sum, sum_sq);
+ sum = _mm_hadd_epi32(sum, zero);
+ *sum_ = _mm_cvtsi128_si32(sum);
+ *sse = _mm_cvtsi128_si32(_mm_srli_si128(sum, 4));
+}
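+
+// A minimal scalar reference for the high-bitdepth masked variance, under
+// the same AOM_BLEND_A64_ROUND_BITS == 6 assumption as above; useful for
+// spot-checking the SIMD kernels. (Illustrative reference, not called by
+// the SIMD code.)
+static void highbd_masked_variance_ref(const uint16_t *src, int src_stride,
+ const uint16_t *a, int a_stride,
+ const uint16_t *b, int b_stride,
+ const uint8_t *m, int m_stride, int w,
+ int h, uint64_t *sse, int *sum) {
+ int x, y;
+ *sse = 0;
+ *sum = 0;
+ for (y = 0; y < h; y++) {
+ for (x = 0; x < w; x++) {
+ const int m_inv = (1 << AOM_BLEND_A64_ROUND_BITS) - m[x];
+ const int pred = ROUND_POWER_OF_TWO(m[x] * a[x] + m_inv * b[x],
+ AOM_BLEND_A64_ROUND_BITS);
+ const int diff = pred - src[x];
+ *sum += diff;
+ *sse += (int64_t)diff * diff;
+ }
+ src += src_stride;
+ a += a_stride;
+ b += b_stride;
+ m += m_stride;
+ }
+}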
-HIGHBD_MASK_SUBPIX_VAR_SMALL(4, 4)
-HIGHBD_MASK_SUBPIX_VAR_WRAPPERS(4, 4)
-HIGHBD_MASK_SUBPIX_VAR_SMALL(4, 8)
-HIGHBD_MASK_SUBPIX_VAR_WRAPPERS(4, 8)
-HIGHBD_MASK_SUBPIX_VAR_LARGE(8, 4)
-HIGHBD_MASK_SUBPIX_VAR_WRAPPERS(8, 4)
-HIGHBD_MASK_SUBPIX_VAR_LARGE(8, 8)
-HIGHBD_MASK_SUBPIX_VAR_WRAPPERS(8, 8)
-HIGHBD_MASK_SUBPIX_VAR_LARGE(8, 16)
-HIGHBD_MASK_SUBPIX_VAR_WRAPPERS(8, 16)
-HIGHBD_MASK_SUBPIX_VAR_LARGE(16, 8)
-HIGHBD_MASK_SUBPIX_VAR_WRAPPERS(16, 8)
-HIGHBD_MASK_SUBPIX_VAR_LARGE(16, 16)
-HIGHBD_MASK_SUBPIX_VAR_WRAPPERS(16, 16)
-HIGHBD_MASK_SUBPIX_VAR_LARGE(16, 32)
-HIGHBD_MASK_SUBPIX_VAR_WRAPPERS(16, 32)
-HIGHBD_MASK_SUBPIX_VAR_LARGE(32, 16)
-HIGHBD_MASK_SUBPIX_VAR_WRAPPERS(32, 16)
-HIGHBD_MASK_SUBPIX_VAR_LARGE(32, 32)
-HIGHBD_MASK_SUBPIX_VAR_WRAPPERS(32, 32)
-HIGHBD_MASK_SUBPIX_VAR_LARGE(32, 64)
-HIGHBD_MASK_SUBPIX_VAR_WRAPPERS(32, 64)
-HIGHBD_MASK_SUBPIX_VAR_LARGE(64, 32)
-HIGHBD_MASK_SUBPIX_VAR_WRAPPERS(64, 32)
-HIGHBD_MASK_SUBPIX_VAR_LARGE(64, 64)
-HIGHBD_MASK_SUBPIX_VAR_WRAPPERS(64, 64)
-#if CONFIG_EXT_PARTITION
-HIGHBD_MASK_SUBPIX_VAR_LARGE(64, 128)
-HIGHBD_MASK_SUBPIX_VAR_WRAPPERS(64, 128)
-HIGHBD_MASK_SUBPIX_VAR_LARGE(128, 64)
-HIGHBD_MASK_SUBPIX_VAR_WRAPPERS(128, 64)
-HIGHBD_MASK_SUBPIX_VAR_LARGE(128, 128)
-HIGHBD_MASK_SUBPIX_VAR_WRAPPERS(128, 128)
-#endif // CONFIG_EXT_PARTITION
#endif
diff --git a/third_party/aom/aom_dsp/x86/obmc_intrinsic_ssse3.h b/third_party/aom/aom_dsp/x86/obmc_intrinsic_ssse3.h
new file mode 100644
index 0000000000..73589a32aa
--- /dev/null
+++ b/third_party/aom/aom_dsp/x86/obmc_intrinsic_ssse3.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2017, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#ifndef AOM_DSP_X86_OBMC_INTRINSIC_SSSE3_H_
+#define AOM_DSP_X86_OBMC_INTRINSIC_SSSE3_H_
+
+#include <immintrin.h>
+
+#include "./aom_config.h"
+
+static INLINE int32_t xx_hsum_epi32_si32(__m128i v_d) {
+ v_d = _mm_hadd_epi32(v_d, v_d);
+ v_d = _mm_hadd_epi32(v_d, v_d);
+ return _mm_cvtsi128_si32(v_d);
+}
+
+static INLINE int64_t xx_hsum_epi64_si64(__m128i v_q) {
+ v_q = _mm_add_epi64(v_q, _mm_srli_si128(v_q, 8));
+#if ARCH_X86_64
+ return _mm_cvtsi128_si64(v_q);
+#else
+ {
+ int64_t tmp;
+ _mm_storel_epi64((__m128i *)&tmp, v_q);
+ return tmp;
+ }
+#endif
+}
+
+static INLINE int64_t xx_hsum_epi32_si64(__m128i v_d) {
+ const __m128i v_sign_d = _mm_cmplt_epi32(v_d, _mm_setzero_si128());
+ const __m128i v_0_q = _mm_unpacklo_epi32(v_d, v_sign_d);
+ const __m128i v_1_q = _mm_unpackhi_epi32(v_d, v_sign_d);
+ return xx_hsum_epi64_si64(_mm_add_epi64(v_0_q, v_1_q));
+}
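+
+// Typical use is one horizontal reduction per block, e.g.
+//   const int32_t sum = xx_hsum_epi32_si32(v_sum_d);
+//   const int64_t sse = xx_hsum_epi64_si64(v_sse_q);
+// (the accumulator names here are illustrative).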
+
+#endif // AOM_DSP_X86_OBMC_INTRINSIC_SSSE3_H_
diff --git a/third_party/aom/aom_dsp/x86/obmc_sad_sse4.c b/third_party/aom/aom_dsp/x86/obmc_sad_sse4.c
index ad77f974c7..21632644fb 100644
--- a/third_party/aom/aom_dsp/x86/obmc_sad_sse4.c
+++ b/third_party/aom/aom_dsp/x86/obmc_sad_sse4.c
@@ -17,6 +17,7 @@
#include "aom/aom_integer.h"
#include "aom_dsp/aom_dsp_common.h"
+#include "aom_dsp/x86/obmc_intrinsic_ssse3.h"
#include "aom_dsp/x86/synonyms.h"
////////////////////////////////////////////////////////////////////////////////
diff --git a/third_party/aom/aom_dsp/x86/obmc_variance_sse4.c b/third_party/aom/aom_dsp/x86/obmc_variance_sse4.c
index efb3659cfc..1797ded80c 100644
--- a/third_party/aom/aom_dsp/x86/obmc_variance_sse4.c
+++ b/third_party/aom/aom_dsp/x86/obmc_variance_sse4.c
@@ -17,8 +17,9 @@
#include "aom/aom_integer.h"
#include "aom_dsp/aom_dsp_common.h"
-#include "aom_dsp/x86/synonyms.h"
#include "aom_dsp/aom_filter.h"
+#include "aom_dsp/x86/obmc_intrinsic_ssse3.h"
+#include "aom_dsp/x86/synonyms.h"
////////////////////////////////////////////////////////////////////////////////
// 8 bit
diff --git a/third_party/aom/aom_dsp/x86/synonyms.h b/third_party/aom/aom_dsp/x86/synonyms.h
index bef606dae7..cd049a4544 100644
--- a/third_party/aom/aom_dsp/x86/synonyms.h
+++ b/third_party/aom/aom_dsp/x86/synonyms.h
@@ -89,32 +89,4 @@ static INLINE __m128i xx_roundn_epi32(__m128i v_val_d, int bits) {
return _mm_srai_epi32(v_tmp_d, bits);
}
-#ifdef __SSSE3__
-static INLINE int32_t xx_hsum_epi32_si32(__m128i v_d) {
- v_d = _mm_hadd_epi32(v_d, v_d);
- v_d = _mm_hadd_epi32(v_d, v_d);
- return _mm_cvtsi128_si32(v_d);
-}
-
-static INLINE int64_t xx_hsum_epi64_si64(__m128i v_q) {
- v_q = _mm_add_epi64(v_q, _mm_srli_si128(v_q, 8));
-#if ARCH_X86_64
- return _mm_cvtsi128_si64(v_q);
-#else
- {
- int64_t tmp;
- _mm_storel_epi64((__m128i *)&tmp, v_q);
- return tmp;
- }
-#endif
-}
-
-static INLINE int64_t xx_hsum_epi32_si64(__m128i v_d) {
- const __m128i v_sign_d = _mm_cmplt_epi32(v_d, _mm_setzero_si128());
- const __m128i v_0_q = _mm_unpacklo_epi32(v_d, v_sign_d);
- const __m128i v_1_q = _mm_unpackhi_epi32(v_d, v_sign_d);
- return xx_hsum_epi64_si64(_mm_add_epi64(v_0_q, v_1_q));
-}
-#endif // __SSSE3__
-
#endif // AOM_DSP_X86_SYNONYMS_H_
diff --git a/third_party/aom/aom_dsp/x86/txfm_common_avx2.h b/third_party/aom/aom_dsp/x86/txfm_common_avx2.h
index 39e9b8e2ad..4f7a60c22e 100644
--- a/third_party/aom/aom_dsp/x86/txfm_common_avx2.h
+++ b/third_party/aom/aom_dsp/x86/txfm_common_avx2.h
@@ -34,7 +34,8 @@ static INLINE void mm256_reverse_epi16(__m256i *u) {
*u = _mm256_permute2x128_si256(v, v, 1);
}
-static INLINE void mm256_transpose_16x16(__m256i *in) {
+// Note: 'in' and 'out' may point to the same buffer, i.e. the transpose may
+// be done in place
+static INLINE void mm256_transpose_16x16(const __m256i *in, __m256i *out) {
__m256i tr0_0 = _mm256_unpacklo_epi16(in[0], in[1]);
__m256i tr0_1 = _mm256_unpackhi_epi16(in[0], in[1]);
__m256i tr0_2 = _mm256_unpacklo_epi16(in[2], in[3]);
@@ -143,29 +144,30 @@ static INLINE void mm256_transpose_16x16(__m256i *in) {
// 86 96 a6 b6 c6 d6 e6 f6 8e ae 9e be ce de ee fe
// 87 97 a7 b7 c7 d7 e7 f7 8f 9f af bf cf df ef ff
- in[0] = _mm256_permute2x128_si256(tr0_0, tr0_8, 0x20); // 0010 0000
- in[8] = _mm256_permute2x128_si256(tr0_0, tr0_8, 0x31); // 0011 0001
- in[1] = _mm256_permute2x128_si256(tr0_1, tr0_9, 0x20);
- in[9] = _mm256_permute2x128_si256(tr0_1, tr0_9, 0x31);
- in[2] = _mm256_permute2x128_si256(tr0_2, tr0_a, 0x20);
- in[10] = _mm256_permute2x128_si256(tr0_2, tr0_a, 0x31);
- in[3] = _mm256_permute2x128_si256(tr0_3, tr0_b, 0x20);
- in[11] = _mm256_permute2x128_si256(tr0_3, tr0_b, 0x31);
-
- in[4] = _mm256_permute2x128_si256(tr0_4, tr0_c, 0x20);
- in[12] = _mm256_permute2x128_si256(tr0_4, tr0_c, 0x31);
- in[5] = _mm256_permute2x128_si256(tr0_5, tr0_d, 0x20);
- in[13] = _mm256_permute2x128_si256(tr0_5, tr0_d, 0x31);
- in[6] = _mm256_permute2x128_si256(tr0_6, tr0_e, 0x20);
- in[14] = _mm256_permute2x128_si256(tr0_6, tr0_e, 0x31);
- in[7] = _mm256_permute2x128_si256(tr0_7, tr0_f, 0x20);
- in[15] = _mm256_permute2x128_si256(tr0_7, tr0_f, 0x31);
+ out[0] = _mm256_permute2x128_si256(tr0_0, tr0_8, 0x20); // 0010 0000
+ out[8] = _mm256_permute2x128_si256(tr0_0, tr0_8, 0x31); // 0011 0001
+ out[1] = _mm256_permute2x128_si256(tr0_1, tr0_9, 0x20);
+ out[9] = _mm256_permute2x128_si256(tr0_1, tr0_9, 0x31);
+ out[2] = _mm256_permute2x128_si256(tr0_2, tr0_a, 0x20);
+ out[10] = _mm256_permute2x128_si256(tr0_2, tr0_a, 0x31);
+ out[3] = _mm256_permute2x128_si256(tr0_3, tr0_b, 0x20);
+ out[11] = _mm256_permute2x128_si256(tr0_3, tr0_b, 0x31);
+
+ out[4] = _mm256_permute2x128_si256(tr0_4, tr0_c, 0x20);
+ out[12] = _mm256_permute2x128_si256(tr0_4, tr0_c, 0x31);
+ out[5] = _mm256_permute2x128_si256(tr0_5, tr0_d, 0x20);
+ out[13] = _mm256_permute2x128_si256(tr0_5, tr0_d, 0x31);
+ out[6] = _mm256_permute2x128_si256(tr0_6, tr0_e, 0x20);
+ out[14] = _mm256_permute2x128_si256(tr0_6, tr0_e, 0x31);
+ out[7] = _mm256_permute2x128_si256(tr0_7, tr0_f, 0x20);
+ out[15] = _mm256_permute2x128_si256(tr0_7, tr0_f, 0x31);
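+
+ // Note: imm8 0x20 selects the low 128-bit lane of each source and 0x31 the
+ // high lanes, recombining the per-lane unpack results into full rows.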
}
-static INLINE __m256i butter_fly(__m256i a0, __m256i a1, const __m256i cospi) {
+static INLINE __m256i butter_fly(const __m256i *a0, const __m256i *a1,
+ const __m256i *cospi) {
const __m256i dct_rounding = _mm256_set1_epi32(DCT_CONST_ROUNDING);
- __m256i y0 = _mm256_madd_epi16(a0, cospi);
- __m256i y1 = _mm256_madd_epi16(a1, cospi);
+ __m256i y0 = _mm256_madd_epi16(*a0, *cospi);
+ __m256i y1 = _mm256_madd_epi16(*a1, *cospi);
y0 = _mm256_add_epi32(y0, dct_rounding);
y1 = _mm256_add_epi32(y1, dct_rounding);