diff --git a/include/libyuv/rotate_row.h b/include/libyuv/rotate_row.h
index fda6b59f3..a3e69e016 100644
--- a/include/libyuv/rotate_row.h
+++ b/include/libyuv/rotate_row.h
@@ -61,8 +61,8 @@ extern "C" {
 #endif  // defined(__mips__)
 
 #if !defined(LIBYUV_DISABLE_MSA) && defined(__mips_msa)
-#define HAS_TRANSPOSEWX8_MSA
-#define HAS_TRANSPOSEUVWX8_MSA
+#define HAS_TRANSPOSEWX16_MSA
+#define HAS_TRANSPOSEUVWX16_MSA
 #endif
 
 void TransposeWxH_C(const uint8* src,
@@ -77,6 +77,11 @@ void TransposeWx8_C(const uint8* src,
                     uint8* dst,
                     int dst_stride,
                     int width);
+void TransposeWx16_C(const uint8* src,
+                     int src_stride,
+                     uint8* dst,
+                     int dst_stride,
+                     int width);
 void TransposeWx8_NEON(const uint8* src,
                        int src_stride,
                        uint8* dst,
@@ -102,11 +107,11 @@ void TransposeWx8_Fast_DSPR2(const uint8* src,
                              uint8* dst,
                              int dst_stride,
                              int width);
-void TransposeWx8_MSA(const uint8* src,
-                      int src_stride,
-                      uint8* dst,
-                      int dst_stride,
-                      int width);
+void TransposeWx16_MSA(const uint8* src,
+                       int src_stride,
+                       uint8* dst,
+                       int dst_stride,
+                       int width);
 
 void TransposeWx8_Any_NEON(const uint8* src,
                            int src_stride,
@@ -128,11 +133,11 @@ void TransposeWx8_Any_DSPR2(const uint8* src,
                             uint8* dst,
                             int dst_stride,
                             int width);
-void TransposeWx8_Any_MSA(const uint8* src,
-                          int src_stride,
-                          uint8* dst,
-                          int dst_stride,
-                          int width);
+void TransposeWx16_Any_MSA(const uint8* src,
+                           int src_stride,
+                           uint8* dst,
+                           int dst_stride,
+                           int width);
 
 void TransposeUVWxH_C(const uint8* src,
                       int src_stride,
@@ -150,6 +155,13 @@ void TransposeUVWx8_C(const uint8* src,
                       uint8* dst_b,
                       int dst_stride_b,
                       int width);
+void TransposeUVWx16_C(const uint8* src,
+                       int src_stride,
+                       uint8* dst_a,
+                       int dst_stride_a,
+                       uint8* dst_b,
+                       int dst_stride_b,
+                       int width);
 void TransposeUVWx8_SSE2(const uint8* src,
                          int src_stride,
                          uint8* dst_a,
@@ -171,13 +183,13 @@ void TransposeUVWx8_DSPR2(const uint8* src,
                           uint8* dst_b,
                           int dst_stride_b,
                           int width);
-void TransposeUVWx8_MSA(const uint8* src,
-                        int src_stride,
-                        uint8* dst_a,
-                        int dst_stride_a,
-                        uint8* dst_b,
-                        int dst_stride_b,
-                        int width);
+void TransposeUVWx16_MSA(const uint8* src,
+                         int src_stride,
+                         uint8* dst_a,
+                         int dst_stride_a,
+                         uint8* dst_b,
+                         int dst_stride_b,
+                         int width);
 
 void TransposeUVWx8_Any_SSE2(const uint8* src,
                              int src_stride,
@@ -200,13 +212,13 @@ void TransposeUVWx8_Any_DSPR2(const uint8* src,
                              uint8* dst_b,
                              int dst_stride_b,
                              int width);
-void TransposeUVWx8_Any_MSA(const uint8* src,
-                            int src_stride,
-                            uint8* dst_a,
-                            int dst_stride_a,
-                            uint8* dst_b,
-                            int dst_stride_b,
-                            int width);
+void TransposeUVWx16_Any_MSA(const uint8* src,
+                             int src_stride,
+                             uint8* dst_a,
+                             int dst_stride_a,
+                             uint8* dst_b,
+                             int dst_stride_b,
+                             int width);
 
 #ifdef __cplusplus
 }  // extern "C"
diff --git a/include/libyuv/row.h b/include/libyuv/row.h
old mode 100755
new mode 100644
diff --git a/source/convert_argb.cc b/source/convert_argb.cc
old mode 100755
new mode 100644
diff --git a/source/rotate.cc b/source/rotate.cc
index bfd51421b..277c53b24 100644
--- a/source/rotate.cc
+++ b/source/rotate.cc
@@ -29,8 +29,13 @@ void TransposePlane(const uint8* src,
                     int width,
                     int height) {
   int i = height;
+#if defined(HAS_TRANSPOSEWX16_MSA)
+  void (*TransposeWx16)(const uint8* src, int src_stride, uint8* dst,
+                        int dst_stride, int width) = TransposeWx16_C;
+#else
   void (*TransposeWx8)(const uint8* src, int src_stride, uint8* dst,
                        int dst_stride, int width) = TransposeWx8_C;
+#endif
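+  // NOTE: only one of the two function pointers exists per build; this
+  // assumes the Wx8 SIMD flags (NEON/SSSE3/DSPR2) are never defined
+  // together with HAS_TRANSPOSEWX16_MSA, since the Wx8 selections below
+  // would not compile against the Wx16 pointer.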
 #if defined(HAS_TRANSPOSEWX8_NEON)
   if (TestCpuFlag(kCpuHasNEON)) {
     TransposeWx8 = TransposeWx8_NEON;
@@ -62,15 +67,24 @@ void TransposePlane(const uint8* src,
     }
   }
 #endif
-#if defined(HAS_TRANSPOSEWX8_MSA)
+#if defined(HAS_TRANSPOSEWX16_MSA)
   if (TestCpuFlag(kCpuHasMSA)) {
-    TransposeWx8 = TransposeWx8_Any_MSA;
+    TransposeWx16 = TransposeWx16_Any_MSA;
     if (IS_ALIGNED(width, 16)) {
-      TransposeWx8 = TransposeWx8_MSA;
+      TransposeWx16 = TransposeWx16_MSA;
     }
   }
 #endif
 
+#if defined(HAS_TRANSPOSEWX16_MSA)
+  // Work across the source in 16x16 tiles
+  while (i >= 16) {
+    TransposeWx16(src, src_stride, dst, dst_stride, width);
+    src += 16 * src_stride;  // Go down 16 rows.
+    dst += 16;               // Move over 16 columns.
+    i -= 16;
+  }
+#else
   // Work across the source in 8x8 tiles
   while (i >= 8) {
     TransposeWx8(src, src_stride, dst, dst_stride, width);
@@ -78,6 +92,7 @@ void TransposePlane(const uint8* src,
     dst += 8;  // Move over 8 columns.
     i -= 8;
   }
+#endif
 
   if (i > 0) {
     TransposeWxH_C(src, src_stride, dst, dst_stride, width, i);
@@ -218,9 +233,15 @@ void TransposeUV(const uint8* src,
                  int width,
                  int height) {
   int i = height;
+#if defined(HAS_TRANSPOSEUVWX16_MSA)
+  void (*TransposeUVWx16)(const uint8* src, int src_stride, uint8* dst_a,
+                          int dst_stride_a, uint8* dst_b, int dst_stride_b,
+                          int width) = TransposeUVWx16_C;
+#else
   void (*TransposeUVWx8)(const uint8* src, int src_stride, uint8* dst_a,
                          int dst_stride_a, uint8* dst_b, int dst_stride_b,
                          int width) = TransposeUVWx8_C;
+#endif
 #if defined(HAS_TRANSPOSEUVWX8_NEON)
   if (TestCpuFlag(kCpuHasNEON)) {
     TransposeUVWx8 = TransposeUVWx8_NEON;
@@ -240,15 +261,26 @@ void TransposeUV(const uint8* src,
       TransposeUVWx8 = TransposeUVWx8_DSPR2;
   }
 #endif
-#if defined(HAS_TRANSPOSEUVWX8_MSA)
+#if defined(HAS_TRANSPOSEUVWX16_MSA)
   if (TestCpuFlag(kCpuHasMSA)) {
-    TransposeUVWx8 = TransposeUVWx8_Any_MSA;
+    TransposeUVWx16 = TransposeUVWx16_Any_MSA;
     if (IS_ALIGNED(width, 8)) {
-      TransposeUVWx8 = TransposeUVWx8_MSA;
+      TransposeUVWx16 = TransposeUVWx16_MSA;
     }
   }
 #endif
 
+#if defined(HAS_TRANSPOSEUVWX16_MSA)
+  // Work through the source in 16x16 tiles.
+  while (i >= 16) {
+    TransposeUVWx16(src, src_stride, dst_a, dst_stride_a, dst_b, dst_stride_b,
+                    width);
+    src += 16 * src_stride;  // Go down 16 rows.
+    dst_a += 16;             // Move over 16 columns.
+    dst_b += 16;             // Move over 16 columns.
+    i -= 16;
+  }
+#else
   // Work through the source in 8x8 tiles.
   while (i >= 8) {
     TransposeUVWx8(src, src_stride, dst_a, dst_stride_a, dst_b, dst_stride_b,
@@ -258,6 +290,7 @@ void TransposeUV(const uint8* src,
     dst_b += 8;  // Move over 8 columns.
     i -= 8;
   }
+#endif
 
   if (i > 0) {
     TransposeUVWxH_C(src, src_stride, dst_a, dst_stride_a, dst_b, dst_stride_b,
diff --git a/source/rotate_any.cc b/source/rotate_any.cc
index 67e565e4d..562096b92 100644
--- a/source/rotate_any.cc
+++ b/source/rotate_any.cc
@@ -41,8 +41,8 @@ TANY(TransposeWx8_Fast_Any_SSSE3, TransposeWx8_Fast_SSSE3, 15)
 #ifdef HAS_TRANSPOSEWX8_DSPR2
 TANY(TransposeWx8_Any_DSPR2, TransposeWx8_DSPR2, 7)
 #endif
-#ifdef HAS_TRANSPOSEWX8_MSA
-TANY(TransposeWx8_Any_MSA, TransposeWx8_MSA, 15)
+#ifdef HAS_TRANSPOSEWX16_MSA
+TANY(TransposeWx16_Any_MSA, TransposeWx16_MSA, 15)
 #endif
 
 #undef TANY
@@ -67,8 +67,8 @@ TUVANY(TransposeUVWx8_Any_SSE2, TransposeUVWx8_SSE2, 7)
 #ifdef HAS_TRANSPOSEUVWX8_DSPR2
 TUVANY(TransposeUVWx8_Any_DSPR2, TransposeUVWx8_DSPR2, 7)
 #endif
-#ifdef HAS_TRANSPOSEUVWX8_MSA
-TUVANY(TransposeUVWx8_Any_MSA, TransposeUVWx8_MSA, 7)
+#ifdef HAS_TRANSPOSEUVWX16_MSA
+TUVANY(TransposeUVWx16_Any_MSA, TransposeUVWx16_MSA, 7)
 #endif
 
 #undef TUVANY
diff --git a/source/rotate_msa.cc b/source/rotate_msa.cc
index 08fbde019..8907765ab 100644
--- a/source/rotate_msa.cc
+++ b/source/rotate_msa.cc
@@ -19,179 +19,226 @@ namespace libyuv {
 extern "C" {
 #endif
 
-void TransposeWx8_MSA(const uint8_t* src,
-                      int src_stride,
-                      uint8_t* dst,
-                      int dst_stride,
-                      int width) {
+#define ILVRL_B(in0, in1, in2, in3, out0, out1, out2, out3) \
+  {                                                         \
+    out0 = (v16u8)__msa_ilvr_b((v16i8)in1, (v16i8)in0);     \
+    out1 = (v16u8)__msa_ilvl_b((v16i8)in1, (v16i8)in0);     \
+    out2 = (v16u8)__msa_ilvr_b((v16i8)in3, (v16i8)in2);     \
+    out3 = (v16u8)__msa_ilvl_b((v16i8)in3, (v16i8)in2);     \
+  }
+
+#define ILVRL_H(in0, in1, in2, in3, out0, out1, out2, out3) \
+  {                                                         \
+    out0 = (v16u8)__msa_ilvr_h((v8i16)in1, (v8i16)in0);     \
+    out1 = (v16u8)__msa_ilvl_h((v8i16)in1, (v8i16)in0);     \
+    out2 = (v16u8)__msa_ilvr_h((v8i16)in3, (v8i16)in2);     \
+    out3 = (v16u8)__msa_ilvl_h((v8i16)in3, (v8i16)in2);     \
+  }
+
+#define ILVRL_W(in0, in1, in2, in3, out0, out1, out2, out3) \
+  {                                                         \
+    out0 = (v16u8)__msa_ilvr_w((v4i32)in1, (v4i32)in0);     \
+    out1 = (v16u8)__msa_ilvl_w((v4i32)in1, (v4i32)in0);     \
+    out2 = (v16u8)__msa_ilvr_w((v4i32)in3, (v4i32)in2);     \
+    out3 = (v16u8)__msa_ilvl_w((v4i32)in3, (v4i32)in2);     \
+  }
+
+#define ILVRL_D(in0, in1, in2, in3, out0, out1, out2, out3) \
+  {                                                         \
+    out0 = (v16u8)__msa_ilvr_d((v2i64)in1, (v2i64)in0);     \
+    out1 = (v16u8)__msa_ilvl_d((v2i64)in1, (v2i64)in0);     \
+    out2 = (v16u8)__msa_ilvr_d((v2i64)in3, (v2i64)in2);     \
+    out3 = (v16u8)__msa_ilvl_d((v2i64)in3, (v2i64)in2);     \
+  }
+
+void TransposeWx16_C(const uint8* src,
+                     int src_stride,
+                     uint8* dst,
+                     int dst_stride,
+                     int width) {
+  TransposeWx8_C(src, src_stride, dst, dst_stride, width);
+  TransposeWx8_C((src + 8 * src_stride), src_stride, (dst + 8), dst_stride,
+                 width);
+}
+
+void TransposeUVWx16_C(const uint8* src,
+                       int src_stride,
+                       uint8* dst_a,
+                       int dst_stride_a,
+                       uint8* dst_b,
+                       int dst_stride_b,
+                       int width) {
+  TransposeUVWx8_C(src, src_stride, dst_a, dst_stride_a, dst_b, dst_stride_b,
+                   width);
+  TransposeUVWx8_C((src + 8 * src_stride), src_stride, (dst_a + 8),
+                   dst_stride_a, (dst_b + 8), dst_stride_b, width);
+}
+
+void TransposeWx16_MSA(const uint8* src,
+                       int src_stride,
+                       uint8* dst,
+                       int dst_stride,
+                       int width) {
   int x;
-  uint64_t val0, val1, val2, val3;
-  v16u8 src0, src1, src2, src3, src4, src5, src6, src7;
-  v16u8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
+  const uint8* s;
+  v16u8 src0, src1, src2, src3, dst0, dst1, dst2, dst3, vec0, vec1, vec2, vec3;
   v16u8 reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7;
-  v16u8 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;
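+  // The transpose is built from interleave stages: ILVRL_B merges bytes of
+  // adjacent rows, then ILVRL_H halfwords, ILVRL_W words and ILVRL_D
+  // doublewords, so after four stages each vector holds one full 16-byte
+  // transposed row (a column of the source tile).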
+  v16u8 res0, res1, res2, res3, res4, res5, res6, res7, res8, res9;
 
   for (x = 0; x < width; x += 16) {
-    src0 = (v16u8)__msa_ld_b((v16i8*)src, 0);
-    src1 = (v16u8)__msa_ld_b((v16i8*)(src + src_stride), 0);
-    src2 = (v16u8)__msa_ld_b((v16i8*)(src + src_stride * 2), 0);
-    src3 = (v16u8)__msa_ld_b((v16i8*)(src + src_stride * 3), 0);
-    src4 = (v16u8)__msa_ld_b((v16i8*)(src + src_stride * 4), 0);
-    src5 = (v16u8)__msa_ld_b((v16i8*)(src + src_stride * 5), 0);
-    src6 = (v16u8)__msa_ld_b((v16i8*)(src + src_stride * 6), 0);
-    src7 = (v16u8)__msa_ld_b((v16i8*)(src + src_stride * 7), 0);
-    vec0 = (v16u8)__msa_ilvr_b((v16i8)src2, (v16i8)src0);
-    vec1 = (v16u8)__msa_ilvr_b((v16i8)src3, (v16i8)src1);
-    vec2 = (v16u8)__msa_ilvr_b((v16i8)src6, (v16i8)src4);
-    vec3 = (v16u8)__msa_ilvr_b((v16i8)src7, (v16i8)src5);
-    vec4 = (v16u8)__msa_ilvl_b((v16i8)src2, (v16i8)src0);
-    vec5 = (v16u8)__msa_ilvl_b((v16i8)src3, (v16i8)src1);
-    vec6 = (v16u8)__msa_ilvl_b((v16i8)src6, (v16i8)src4);
-    vec7 = (v16u8)__msa_ilvl_b((v16i8)src7, (v16i8)src5);
-    reg0 = (v16u8)__msa_ilvr_b((v16i8)vec1, (v16i8)vec0);
-    reg1 = (v16u8)__msa_ilvl_b((v16i8)vec1, (v16i8)vec0);
-    reg2 = (v16u8)__msa_ilvr_b((v16i8)vec3, (v16i8)vec2);
-    reg3 = (v16u8)__msa_ilvl_b((v16i8)vec3, (v16i8)vec2);
-    reg4 = (v16u8)__msa_ilvr_b((v16i8)vec5, (v16i8)vec4);
-    reg5 = (v16u8)__msa_ilvl_b((v16i8)vec5, (v16i8)vec4);
-    reg6 = (v16u8)__msa_ilvr_b((v16i8)vec7, (v16i8)vec6);
-    reg7 = (v16u8)__msa_ilvl_b((v16i8)vec7, (v16i8)vec6);
-    dst0 = (v16u8)__msa_ilvr_w((v4i32)reg2, (v4i32)reg0);
-    dst1 = (v16u8)__msa_ilvl_w((v4i32)reg2, (v4i32)reg0);
-    dst2 = (v16u8)__msa_ilvr_w((v4i32)reg3, (v4i32)reg1);
-    dst3 = (v16u8)__msa_ilvl_w((v4i32)reg3, (v4i32)reg1);
-    dst4 = (v16u8)__msa_ilvr_w((v4i32)reg6, (v4i32)reg4);
-    dst5 = (v16u8)__msa_ilvl_w((v4i32)reg6, (v4i32)reg4);
-    dst6 = (v16u8)__msa_ilvr_w((v4i32)reg7, (v4i32)reg5);
-    dst7 = (v16u8)__msa_ilvl_w((v4i32)reg7, (v4i32)reg5);
-    val0 = __msa_copy_s_d((v2i64)dst0, 0);
-    val1 = __msa_copy_s_d((v2i64)dst0, 1);
-    val2 = __msa_copy_s_d((v2i64)dst1, 0);
-    val3 = __msa_copy_s_d((v2i64)dst1, 1);
-    SD(val0, dst);
-    SD(val1, dst + dst_stride);
-    SD(val2, dst + dst_stride * 2);
-    SD(val3, dst + dst_stride * 3);
+    s = src;
+    src0 = (v16u8)__msa_ld_b((v16i8*)s, 0);
+    s += src_stride;
+    src1 = (v16u8)__msa_ld_b((v16i8*)s, 0);
+    s += src_stride;
+    src2 = (v16u8)__msa_ld_b((v16i8*)s, 0);
+    s += src_stride;
+    src3 = (v16u8)__msa_ld_b((v16i8*)s, 0);
+    s += src_stride;
+    ILVRL_B(src0, src1, src2, src3, vec0, vec1, vec2, vec3);
+    ILVRL_H(vec0, vec2, vec1, vec3, reg0, reg1, reg2, reg3);
+    src0 = (v16u8)__msa_ld_b((v16i8*)s, 0);
+    s += src_stride;
+    src1 = (v16u8)__msa_ld_b((v16i8*)s, 0);
+    s += src_stride;
+    src2 = (v16u8)__msa_ld_b((v16i8*)s, 0);
+    s += src_stride;
+    src3 = (v16u8)__msa_ld_b((v16i8*)s, 0);
+    s += src_stride;
+    ILVRL_B(src0, src1, src2, src3, vec0, vec1, vec2, vec3);
+    ILVRL_H(vec0, vec2, vec1, vec3, reg4, reg5, reg6, reg7);
+    ILVRL_W(reg0, reg4, reg1, reg5, res0, res1, res2, res3);
+    ILVRL_W(reg2, reg6, reg3, reg7, res4, res5, res6, res7);
+    src0 = (v16u8)__msa_ld_b((v16i8*)s, 0);
+    s += src_stride;
+    src1 = (v16u8)__msa_ld_b((v16i8*)s, 0);
+    s += src_stride;
+    src2 = (v16u8)__msa_ld_b((v16i8*)s, 0);
+    s += src_stride;
+    src3 = (v16u8)__msa_ld_b((v16i8*)s, 0);
+    s += src_stride;
+    ILVRL_B(src0, src1, src2, src3, vec0, vec1, vec2, vec3);
+    ILVRL_H(vec0, vec2, vec1, vec3, reg0, reg1, reg2, reg3);
+    src0 = (v16u8)__msa_ld_b((v16i8*)s, 0);
+    s += src_stride;
+    src1 = (v16u8)__msa_ld_b((v16i8*)s, 0);
+    s += src_stride;
+    src2 = (v16u8)__msa_ld_b((v16i8*)s, 0);
+    s += src_stride;
+    src3 = (v16u8)__msa_ld_b((v16i8*)s, 0);
+    s += src_stride;
+    ILVRL_B(src0, src1, src2, src3, vec0, vec1, vec2, vec3);
+    ILVRL_H(vec0, vec2, vec1, vec3, reg4, reg5, reg6, reg7);
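+    // Rows 0-7 are already merged in res0-res7; fold in rows 8-15 word-
+    // and doubleword-wise, then store four transposed rows per ST_UB4.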
+    res8 = (v16u8)__msa_ilvr_w((v4i32)reg4, (v4i32)reg0);
+    res9 = (v16u8)__msa_ilvl_w((v4i32)reg4, (v4i32)reg0);
+    ILVRL_D(res0, res8, res1, res9, dst0, dst1, dst2, dst3);
+    ST_UB4(dst0, dst1, dst2, dst3, dst, dst_stride);
     dst += dst_stride * 4;
-    val0 = __msa_copy_s_d((v2i64)dst2, 0);
-    val1 = __msa_copy_s_d((v2i64)dst2, 1);
-    val2 = __msa_copy_s_d((v2i64)dst3, 0);
-    val3 = __msa_copy_s_d((v2i64)dst3, 1);
-    SD(val0, dst);
-    SD(val1, dst + dst_stride);
-    SD(val2, dst + dst_stride * 2);
-    SD(val3, dst + dst_stride * 3);
+    res8 = (v16u8)__msa_ilvr_w((v4i32)reg5, (v4i32)reg1);
+    res9 = (v16u8)__msa_ilvl_w((v4i32)reg5, (v4i32)reg1);
+    ILVRL_D(res2, res8, res3, res9, dst0, dst1, dst2, dst3);
+    ST_UB4(dst0, dst1, dst2, dst3, dst, dst_stride);
     dst += dst_stride * 4;
-    val0 = __msa_copy_s_d((v2i64)dst4, 0);
-    val1 = __msa_copy_s_d((v2i64)dst4, 1);
-    val2 = __msa_copy_s_d((v2i64)dst5, 0);
-    val3 = __msa_copy_s_d((v2i64)dst5, 1);
-    SD(val0, dst);
-    SD(val1, dst + dst_stride);
-    SD(val2, dst + dst_stride * 2);
-    SD(val3, dst + dst_stride * 3);
-    dst += dst_stride * 4;
-    val0 = __msa_copy_s_d((v2i64)dst6, 0);
-    val1 = __msa_copy_s_d((v2i64)dst6, 1);
-    val2 = __msa_copy_s_d((v2i64)dst7, 0);
-    val3 = __msa_copy_s_d((v2i64)dst7, 1);
-    SD(val0, dst);
-    SD(val1, dst + dst_stride);
-    SD(val2, dst + dst_stride * 2);
-    SD(val3, dst + dst_stride * 3);
+    res8 = (v16u8)__msa_ilvr_w((v4i32)reg6, (v4i32)reg2);
+    res9 = (v16u8)__msa_ilvl_w((v4i32)reg6, (v4i32)reg2);
+    ILVRL_D(res4, res8, res5, res9, dst0, dst1, dst2, dst3);
+    ST_UB4(dst0, dst1, dst2, dst3, dst, dst_stride);
     dst += dst_stride * 4;
+    res8 = (v16u8)__msa_ilvr_w((v4i32)reg7, (v4i32)reg3);
+    res9 = (v16u8)__msa_ilvl_w((v4i32)reg7, (v4i32)reg3);
+    ILVRL_D(res6, res8, res7, res9, dst0, dst1, dst2, dst3);
+    ST_UB4(dst0, dst1, dst2, dst3, dst, dst_stride);
     src += 16;
+    dst += dst_stride * 4;
   }
 }
 
-void TransposeUVWx8_MSA(const uint8_t* src,
-                        int src_stride,
-                        uint8_t* dst_a,
-                        int dst_stride_a,
-                        uint8_t* dst_b,
-                        int dst_stride_b,
-                        int width) {
+void TransposeUVWx16_MSA(const uint8* src,
+                         int src_stride,
+                         uint8* dst_a,
+                         int dst_stride_a,
+                         uint8* dst_b,
+                         int dst_stride_b,
+                         int width) {
   int x;
-  uint64_t val0, val1, val2, val3;
-  v16u8 src0, src1, src2, src3, src4, src5, src6, src7;
-  v16u8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
+  const uint8* s;
+  v16u8 src0, src1, src2, src3, dst0, dst1, dst2, dst3, vec0, vec1, vec2, vec3;
   v16u8 reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7;
-  v16u8 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;
+  v16u8 res0, res1, res2, res3, res4, res5, res6, res7, res8, res9;
 
   for (x = 0; x < width; x += 8) {
-    src0 = (v16u8)__msa_ld_b((v16i8*)src, 0);
-    src1 = (v16u8)__msa_ld_b((v16i8*)(src + src_stride), 0);
-    src2 = (v16u8)__msa_ld_b((v16i8*)(src + src_stride * 2), 0);
-    src3 = (v16u8)__msa_ld_b((v16i8*)(src + src_stride * 3), 0);
-    src4 = (v16u8)__msa_ld_b((v16i8*)(src + src_stride * 4), 0);
-    src5 = (v16u8)__msa_ld_b((v16i8*)(src + src_stride * 5), 0);
-    src6 = (v16u8)__msa_ld_b((v16i8*)(src + src_stride * 6), 0);
-    src7 = (v16u8)__msa_ld_b((v16i8*)(src + src_stride * 7), 0);
-    vec0 = (v16u8)__msa_ilvr_b((v16i8)src1, (v16i8)src0);
-    vec1 = (v16u8)__msa_ilvr_b((v16i8)src3, (v16i8)src2);
-    vec2 = (v16u8)__msa_ilvr_b((v16i8)src5, (v16i8)src4);
-    vec3 = (v16u8)__msa_ilvr_b((v16i8)src7, (v16i8)src6);
-    vec4 = (v16u8)__msa_ilvl_b((v16i8)src1, (v16i8)src0);
-    vec5 = (v16u8)__msa_ilvl_b((v16i8)src3, (v16i8)src2);
-    vec6 = (v16u8)__msa_ilvl_b((v16i8)src5, (v16i8)src4);
-    vec7 = (v16u8)__msa_ilvl_b((v16i8)src7, (v16i8)src6);
-    reg0 = (v16u8)__msa_ilvr_h((v8i16)vec1, (v8i16)vec0);
-    reg1 = (v16u8)__msa_ilvr_h((v8i16)vec3, (v8i16)vec2);
-    reg2 = (v16u8)__msa_ilvl_h((v8i16)vec1, (v8i16)vec0);
-    reg3 = (v16u8)__msa_ilvl_h((v8i16)vec3, (v8i16)vec2);
-    reg4 = (v16u8)__msa_ilvr_h((v8i16)vec5, (v8i16)vec4);
-    reg5 = (v16u8)__msa_ilvr_h((v8i16)vec7, (v8i16)vec6);
-    reg6 = (v16u8)__msa_ilvl_h((v8i16)vec5, (v8i16)vec4);
-    reg7 = (v16u8)__msa_ilvl_h((v8i16)vec7, (v8i16)vec6);
-    dst0 = (v16u8)__msa_ilvr_w((v4i32)reg1, (v4i32)reg0);
-    dst1 = (v16u8)__msa_ilvl_w((v4i32)reg1, (v4i32)reg0);
-    dst2 = (v16u8)__msa_ilvr_w((v4i32)reg3, (v4i32)reg2);
-    dst3 = (v16u8)__msa_ilvl_w((v4i32)reg3, (v4i32)reg2);
-    dst4 = (v16u8)__msa_ilvr_w((v4i32)reg5, (v4i32)reg4);
-    dst5 = (v16u8)__msa_ilvl_w((v4i32)reg5, (v4i32)reg4);
-    dst6 = (v16u8)__msa_ilvr_w((v4i32)reg7, (v4i32)reg6);
-    dst7 = (v16u8)__msa_ilvl_w((v4i32)reg7, (v4i32)reg6);
-    val0 = __msa_copy_s_d((v2i64)dst0, 0);
-    val1 = __msa_copy_s_d((v2i64)dst0, 1);
-    val2 = __msa_copy_s_d((v2i64)dst1, 0);
-    val3 = __msa_copy_s_d((v2i64)dst1, 1);
-    SD(val0, dst_a);
-    SD(val2, dst_a + dst_stride_a);
-    SD(val1, dst_b);
-    SD(val3, dst_b + dst_stride_b);
+    s = src;
+    src0 = (v16u8)__msa_ld_b((v16i8*)s, 0);
+    s += src_stride;
+    src1 = (v16u8)__msa_ld_b((v16i8*)s, 0);
+    s += src_stride;
+    src2 = (v16u8)__msa_ld_b((v16i8*)s, 0);
+    s += src_stride;
+    src3 = (v16u8)__msa_ld_b((v16i8*)s, 0);
+    s += src_stride;
+    ILVRL_B(src0, src1, src2, src3, vec0, vec1, vec2, vec3);
+    ILVRL_H(vec0, vec2, vec1, vec3, reg0, reg1, reg2, reg3);
+    src0 = (v16u8)__msa_ld_b((v16i8*)s, 0);
+    s += src_stride;
+    src1 = (v16u8)__msa_ld_b((v16i8*)s, 0);
+    s += src_stride;
+    src2 = (v16u8)__msa_ld_b((v16i8*)s, 0);
+    s += src_stride;
+    src3 = (v16u8)__msa_ld_b((v16i8*)s, 0);
+    s += src_stride;
+    ILVRL_B(src0, src1, src2, src3, vec0, vec1, vec2, vec3);
+    ILVRL_H(vec0, vec2, vec1, vec3, reg4, reg5, reg6, reg7);
+    ILVRL_W(reg0, reg4, reg1, reg5, res0, res1, res2, res3);
+    ILVRL_W(reg2, reg6, reg3, reg7, res4, res5, res6, res7);
+    src0 = (v16u8)__msa_ld_b((v16i8*)s, 0);
+    s += src_stride;
+    src1 = (v16u8)__msa_ld_b((v16i8*)s, 0);
+    s += src_stride;
+    src2 = (v16u8)__msa_ld_b((v16i8*)s, 0);
+    s += src_stride;
+    src3 = (v16u8)__msa_ld_b((v16i8*)s, 0);
+    s += src_stride;
+    ILVRL_B(src0, src1, src2, src3, vec0, vec1, vec2, vec3);
+    ILVRL_H(vec0, vec2, vec1, vec3, reg0, reg1, reg2, reg3);
+    src0 = (v16u8)__msa_ld_b((v16i8*)s, 0);
+    s += src_stride;
+    src1 = (v16u8)__msa_ld_b((v16i8*)s, 0);
+    s += src_stride;
+    src2 = (v16u8)__msa_ld_b((v16i8*)s, 0);
+    s += src_stride;
+    src3 = (v16u8)__msa_ld_b((v16i8*)s, 0);
+    s += src_stride;
+    ILVRL_B(src0, src1, src2, src3, vec0, vec1, vec2, vec3);
+    ILVRL_H(vec0, vec2, vec1, vec3, reg4, reg5, reg6, reg7);
+    res8 = (v16u8)__msa_ilvr_w((v4i32)reg4, (v4i32)reg0);
+    res9 = (v16u8)__msa_ilvl_w((v4i32)reg4, (v4i32)reg0);
+    ILVRL_D(res0, res8, res1, res9, dst0, dst1, dst2, dst3);
+    ST_UB2(dst0, dst2, dst_a, dst_stride_a);
+    ST_UB2(dst1, dst3, dst_b, dst_stride_b);
     dst_a += dst_stride_a * 2;
     dst_b += dst_stride_b * 2;
-    val0 = __msa_copy_s_d((v2i64)dst2, 0);
-    val1 = __msa_copy_s_d((v2i64)dst2, 1);
-    val2 = __msa_copy_s_d((v2i64)dst3, 0);
-    val3 = __msa_copy_s_d((v2i64)dst3, 1);
-    SD(val0, dst_a);
-    SD(val2, dst_a + dst_stride_a);
-    SD(val1, dst_b);
-    SD(val3, dst_b + dst_stride_b);
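+    // The interleaved two-plane input lands in alternating transposed
+    // columns: dst0/dst2 belong to the dst_a plane, dst1/dst3 to dst_b.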
+    res8 = (v16u8)__msa_ilvr_w((v4i32)reg5, (v4i32)reg1);
+    res9 = (v16u8)__msa_ilvl_w((v4i32)reg5, (v4i32)reg1);
+    ILVRL_D(res2, res8, res3, res9, dst0, dst1, dst2, dst3);
+    ST_UB2(dst0, dst2, dst_a, dst_stride_a);
+    ST_UB2(dst1, dst3, dst_b, dst_stride_b);
     dst_a += dst_stride_a * 2;
     dst_b += dst_stride_b * 2;
-    val0 = __msa_copy_s_d((v2i64)dst4, 0);
-    val1 = __msa_copy_s_d((v2i64)dst4, 1);
-    val2 = __msa_copy_s_d((v2i64)dst5, 0);
-    val3 = __msa_copy_s_d((v2i64)dst5, 1);
-    SD(val0, dst_a);
-    SD(val2, dst_a + dst_stride_a);
-    SD(val1, dst_b);
-    SD(val3, dst_b + dst_stride_b);
-    dst_a += dst_stride_a * 2;
-    dst_b += dst_stride_b * 2;
-    val0 = __msa_copy_s_d((v2i64)dst6, 0);
-    val1 = __msa_copy_s_d((v2i64)dst6, 1);
-    val2 = __msa_copy_s_d((v2i64)dst7, 0);
-    val3 = __msa_copy_s_d((v2i64)dst7, 1);
-    SD(val0, dst_a);
-    SD(val2, dst_a + dst_stride_a);
-    SD(val1, dst_b);
-    SD(val3, dst_b + dst_stride_b);
+    res8 = (v16u8)__msa_ilvr_w((v4i32)reg6, (v4i32)reg2);
+    res9 = (v16u8)__msa_ilvl_w((v4i32)reg6, (v4i32)reg2);
+    ILVRL_D(res4, res8, res5, res9, dst0, dst1, dst2, dst3);
+    ST_UB2(dst0, dst2, dst_a, dst_stride_a);
+    ST_UB2(dst1, dst3, dst_b, dst_stride_b);
     dst_a += dst_stride_a * 2;
     dst_b += dst_stride_b * 2;
+    res8 = (v16u8)__msa_ilvr_w((v4i32)reg7, (v4i32)reg3);
+    res9 = (v16u8)__msa_ilvl_w((v4i32)reg7, (v4i32)reg3);
+    ILVRL_D(res6, res8, res7, res9, dst0, dst1, dst2, dst3);
+    ST_UB2(dst0, dst2, dst_a, dst_stride_a);
+    ST_UB2(dst1, dst3, dst_b, dst_stride_b);
     src += 16;
+    dst_a += dst_stride_a * 2;
+    dst_b += dst_stride_b * 2;
   }
 }