Add LSX optimized functions in rotate_lsx.cc

Optimize two functions, TransposeWx16 and TransposeUVWx16, in
source/rotate_lsx.cc. All test cases pass on the LoongArch platform.

Bug: libyuv:913
Change-Id: Idf670a1bc078f6284a499a292e0cb795f5b603b4
Reviewed-on: https://chromium-review.googlesource.com/c/libyuv/libyuv/+/3351468
Reviewed-by: Frank Barchard <fbarchard@chromium.org>
Hao Chen authored 2021-12-20 20:20:26 +08:00; committed by Frank Barchard
parent dfe046d272
commit f8e2da48ae
4 changed files with 326 additions and 0 deletions

include/libyuv/rotate_row.h
@@ -66,6 +66,11 @@ extern "C" {
#define HAS_TRANSPOSEUVWX8_MMI
#endif

#if !defined(LIBYUV_DISABLE_LSX) && defined(__loongarch_sx)
#define HAS_TRANSPOSEWX16_LSX
#define HAS_TRANSPOSEUVWX16_LSX
#endif

void TransposeWxH_C(const uint8_t* src,
                    int src_stride,
                    uint8_t* dst,
@@ -108,6 +113,11 @@ void TransposeWx16_MSA(const uint8_t* src,
                       uint8_t* dst,
                       int dst_stride,
                       int width);

void TransposeWx16_LSX(const uint8_t* src,
                       int src_stride,
                       uint8_t* dst,
                       int dst_stride,
                       int width);

void TransposeWx8_Any_NEON(const uint8_t* src,
                           int src_stride,
@@ -134,6 +144,11 @@ void TransposeWx16_Any_MSA(const uint8_t* src,
                           uint8_t* dst,
                           int dst_stride,
                           int width);

void TransposeWx16_Any_LSX(const uint8_t* src,
                           int src_stride,
                           uint8_t* dst,
                           int dst_stride,
                           int width);

void TransposeUVWxH_C(const uint8_t* src,
                      int src_stride,
@@ -186,6 +201,13 @@ void TransposeUVWx16_MSA(const uint8_t* src,
                         uint8_t* dst_b,
                         int dst_stride_b,
                         int width);

void TransposeUVWx16_LSX(const uint8_t* src,
                         int src_stride,
                         uint8_t* dst_a,
                         int dst_stride_a,
                         uint8_t* dst_b,
                         int dst_stride_b,
                         int width);

void TransposeUVWx8_Any_SSE2(const uint8_t* src,
                             int src_stride,
@@ -215,6 +237,13 @@ void TransposeUVWx16_Any_MSA(const uint8_t* src,
                             uint8_t* dst_b,
                             int dst_stride_b,
                             int width);

void TransposeUVWx16_Any_LSX(const uint8_t* src,
                             int src_stride,
                             uint8_t* dst_a,
                             int dst_stride_a,
                             uint8_t* dst_b,
                             int dst_stride_b,
                             int width);

#ifdef __cplusplus
}  // extern "C"

source/rotate.cc

@@ -32,6 +32,9 @@ void TransposePlane(const uint8_t* src,
#if defined(HAS_TRANSPOSEWX16_MSA)
  void (*TransposeWx16)(const uint8_t* src, int src_stride, uint8_t* dst,
                        int dst_stride, int width) = TransposeWx16_C;
#elif defined(HAS_TRANSPOSEWX16_LSX)
  void (*TransposeWx16)(const uint8_t* src, int src_stride, uint8_t* dst,
                        int dst_stride, int width) = TransposeWx16_C;
#else
  void (*TransposeWx8)(const uint8_t* src, int src_stride, uint8_t* dst,
                       int dst_stride, int width) = TransposeWx8_C;
@@ -44,6 +47,13 @@ void TransposePlane(const uint8_t* src,
      TransposeWx16 = TransposeWx16_MSA;
    }
  }
#elif defined(HAS_TRANSPOSEWX16_LSX)
  if (TestCpuFlag(kCpuHasLSX)) {
    TransposeWx16 = TransposeWx16_Any_LSX;
    if (IS_ALIGNED(width, 16)) {
      TransposeWx16 = TransposeWx16_LSX;
    }
  }
#else
#if defined(HAS_TRANSPOSEWX8_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
@@ -81,6 +91,14 @@ void TransposePlane(const uint8_t* src,
    dst += 16;  // Move over 16 columns.
    i -= 16;
  }
#elif defined(HAS_TRANSPOSEWX16_LSX)
  // Work across the source in 16x16 tiles.
  while (i >= 16) {
    TransposeWx16(src, src_stride, dst, dst_stride, width);
    src += 16 * src_stride;  // Go down 16 rows.
    dst += 16;               // Move over 16 columns.
    i -= 16;
  }
#else
  // Work across the source in 8x8 tiles.
  while (i >= 8) {
@@ -181,6 +199,14 @@ void RotatePlane180(const uint8_t* src,
    }
  }
#endif
#if defined(HAS_MIRRORROW_LASX)
  if (TestCpuFlag(kCpuHasLASX)) {
    MirrorRow = MirrorRow_Any_LASX;
    if (IS_ALIGNED(width, 64)) {
      MirrorRow = MirrorRow_LASX;
    }
  }
#endif
#if defined(HAS_COPYROW_SSE2)
  if (TestCpuFlag(kCpuHasSSE2)) {
    CopyRow = IS_ALIGNED(width, 32) ? CopyRow_SSE2 : CopyRow_Any_SSE2;
@@ -234,6 +260,10 @@ void SplitTransposeUV(const uint8_t* src,
  void (*TransposeUVWx16)(const uint8_t* src, int src_stride, uint8_t* dst_a,
                          int dst_stride_a, uint8_t* dst_b, int dst_stride_b,
                          int width) = TransposeUVWx16_C;
#elif defined(HAS_TRANSPOSEUVWX16_LSX)
  void (*TransposeUVWx16)(const uint8_t* src, int src_stride, uint8_t* dst_a,
                          int dst_stride_a, uint8_t* dst_b, int dst_stride_b,
                          int width) = TransposeUVWx16_C;
#else
  void (*TransposeUVWx8)(const uint8_t* src, int src_stride, uint8_t* dst_a,
                         int dst_stride_a, uint8_t* dst_b, int dst_stride_b,
@@ -247,6 +277,13 @@ void SplitTransposeUV(const uint8_t* src,
      TransposeUVWx16 = TransposeUVWx16_MSA;
    }
  }
#elif defined(HAS_TRANSPOSEUVWX16_LSX)
  if (TestCpuFlag(kCpuHasLSX)) {
    TransposeUVWx16 = TransposeUVWx16_Any_LSX;
    if (IS_ALIGNED(width, 8)) {
      TransposeUVWx16 = TransposeUVWx16_LSX;
    }
  }
#else
#if defined(HAS_TRANSPOSEUVWX8_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
@@ -281,6 +318,16 @@ void SplitTransposeUV(const uint8_t* src,
    dst_b += 16;  // Move over 16 columns.
    i -= 16;
  }
#elif defined(HAS_TRANSPOSEUVWX16_LSX)
  // Work through the source in 16x16 tiles.
  while (i >= 16) {
    TransposeUVWx16(src, src_stride, dst_a, dst_stride_a, dst_b, dst_stride_b,
                    width);
    src += 16 * src_stride;  // Go down 16 rows.
    dst_a += 16;             // Move over 16 columns.
    dst_b += 16;             // Move over 16 columns.
    i -= 16;
  }
#else
  // Work through the source in 8x8 tiles.
  while (i >= 8) {

source/rotate_any.cc

@@ -44,6 +44,9 @@ TANY(TransposeWx8_Fast_Any_SSSE3, TransposeWx8_Fast_SSSE3, 15)
#ifdef HAS_TRANSPOSEWX16_MSA
TANY(TransposeWx16_Any_MSA, TransposeWx16_MSA, 15)
#endif
#ifdef HAS_TRANSPOSEWX16_LSX
TANY(TransposeWx16_Any_LSX, TransposeWx16_LSX, 15)
#endif
#undef TANY
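
// TUVANY mirrors TANY for interleaved UV data: the SIMD transpose handles
// the multiple-of-(MASK + 1) part of the width and TransposeUVWx8_C
// finishes the remaining columns.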
#define TUVANY(NAMEANY, TPOS_SIMD, MASK) \
@@ -71,6 +74,9 @@ TUVANY(TransposeUVWx8_Any_MMI, TransposeUVWx8_MMI, 7)
#ifdef HAS_TRANSPOSEUVWX16_MSA
TUVANY(TransposeUVWx16_Any_MSA, TransposeUVWx16_MSA, 7)
#endif
#ifdef HAS_TRANSPOSEUVWX16_LSX
TUVANY(TransposeUVWx16_Any_LSX, TransposeUVWx16_LSX, 7)
#endif
#undef TUVANY
#ifdef __cplusplus

source/rotate_lsx.cc (new file, 244 lines)

@@ -0,0 +1,244 @@
/*
 *  Copyright 2022 The LibYuv Project Authors. All rights reserved.
 *
 *  Copyright (c) 2022 Loongson Technology Corporation Limited
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */
#include "libyuv/rotate_row.h"
#if !defined(LIBYUV_DISABLE_LSX) && defined(__loongarch_sx)
#include "libyuv/loongson_intrinsics.h"
#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif
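
// The transpose is built from interleave operations. For intuition, on a
// 4x4 tile:
//   rows:               a0 a1 a2 a3 / b0 b1 b2 b3 / c0 c1 c2 c3 / d0 d1 d2 d3
//   interleave bytes:   a0 b0 a1 b1 ... and c0 d0 c1 d1 ...
//   interleave pairs:   a0 b0 c0 d0 / a1 b1 c1 d1 / ...
// Four such stages (byte, halfword, word, doubleword) transpose a 16x16
// byte tile entirely in registers. Each ILVLH_x macro below performs one
// low/high interleave stage on two input pairs.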
#define ILVLH_B(in0, in1, in2, in3, out0, out1, out2, out3)   \
  {                                                           \
    DUP2_ARG2(__lsx_vilvl_b, in1, in0, in3, in2, out0, out2); \
    DUP2_ARG2(__lsx_vilvh_b, in1, in0, in3, in2, out1, out3); \
  }

#define ILVLH_H(in0, in1, in2, in3, out0, out1, out2, out3)   \
  {                                                           \
    DUP2_ARG2(__lsx_vilvl_h, in1, in0, in3, in2, out0, out2); \
    DUP2_ARG2(__lsx_vilvh_h, in1, in0, in3, in2, out1, out3); \
  }

#define ILVLH_W(in0, in1, in2, in3, out0, out1, out2, out3)   \
  {                                                           \
    DUP2_ARG2(__lsx_vilvl_w, in1, in0, in3, in2, out0, out2); \
    DUP2_ARG2(__lsx_vilvh_w, in1, in0, in3, in2, out1, out3); \
  }

#define ILVLH_D(in0, in1, in2, in3, out0, out1, out2, out3)   \
  {                                                           \
    DUP2_ARG2(__lsx_vilvl_d, in1, in0, in3, in2, out0, out2); \
    DUP2_ARG2(__lsx_vilvh_d, in1, in0, in3, in2, out1, out3); \
  }
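
// Store helpers: write four (or two) result vectors to consecutive rows of
// dst, then advance the dst pointer past the rows just written.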
#define LSX_ST_4(_dst0, _dst1, _dst2, _dst3, _dst, _stride, _stride2, \
                 _stride3, _stride4)                                  \
  {                                                                   \
    __lsx_vst(_dst0, _dst, 0);                                        \
    __lsx_vstx(_dst1, _dst, _stride);                                 \
    __lsx_vstx(_dst2, _dst, _stride2);                                \
    __lsx_vstx(_dst3, _dst, _stride3);                                \
    _dst += _stride4;                                                 \
  }

#define LSX_ST_2(_dst0, _dst1, _dst, _stride, _stride2) \
  {                                                     \
    __lsx_vst(_dst0, _dst, 0);                          \
    __lsx_vstx(_dst1, _dst, _stride);                   \
    _dst += _stride2;                                   \
  }
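
// C fallbacks: a 16-row transpose is two stacked 8-row C transposes. These
// also serve as the initial values of the TransposeWx16/TransposeUVWx16
// function pointers in rotate.cc.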
void TransposeWx16_C(const uint8_t* src,
                     int src_stride,
                     uint8_t* dst,
                     int dst_stride,
                     int width) {
  TransposeWx8_C(src, src_stride, dst, dst_stride, width);
  TransposeWx8_C((src + 8 * src_stride), src_stride, (dst + 8), dst_stride,
                 width);
}

void TransposeUVWx16_C(const uint8_t* src,
                       int src_stride,
                       uint8_t* dst_a,
                       int dst_stride_a,
                       uint8_t* dst_b,
                       int dst_stride_b,
                       int width) {
  TransposeUVWx8_C(src, src_stride, dst_a, dst_stride_a, dst_b, dst_stride_b,
                   width);
  TransposeUVWx8_C((src + 8 * src_stride), src_stride, (dst_a + 8),
                   dst_stride_a, (dst_b + 8), dst_stride_b, width);
}
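
// Transpose a (width)x16 block of bytes: for each 16-column tile, load 16
// rows in four groups of four, interleave them at byte, halfword, word and
// doubleword granularity, and store the 16 transposed rows.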
void TransposeWx16_LSX(const uint8_t* src,
                       int src_stride,
                       uint8_t* dst,
                       int dst_stride,
                       int width) {
  int x;
  int len = width / 16;
  uint8_t* s;
  int src_stride2 = src_stride << 1;
  int src_stride3 = src_stride + src_stride2;
  int src_stride4 = src_stride2 << 1;
  int dst_stride2 = dst_stride << 1;
  int dst_stride3 = dst_stride + dst_stride2;
  int dst_stride4 = dst_stride2 << 1;
  __m128i src0, src1, src2, src3, dst0, dst1, dst2, dst3;
  __m128i tmp0, tmp1, tmp2, tmp3;
  __m128i reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7;
  __m128i res0, res1, res2, res3, res4, res5, res6, res7, res8, res9;

  for (x = 0; x < len; x++) {
    s = (uint8_t*)src;
    // Rows 0-3: interleave bytes, then halfwords.
    src0 = __lsx_vld(s, 0);
    src1 = __lsx_vldx(s, src_stride);
    src2 = __lsx_vldx(s, src_stride2);
    src3 = __lsx_vldx(s, src_stride3);
    s += src_stride4;
    ILVLH_B(src0, src1, src2, src3, tmp0, tmp1, tmp2, tmp3);
    ILVLH_H(tmp0, tmp2, tmp1, tmp3, reg0, reg1, reg2, reg3);
    // Rows 4-7, then combine with rows 0-3 at word granularity.
    src0 = __lsx_vld(s, 0);
    src1 = __lsx_vldx(s, src_stride);
    src2 = __lsx_vldx(s, src_stride2);
    src3 = __lsx_vldx(s, src_stride3);
    s += src_stride4;
    ILVLH_B(src0, src1, src2, src3, tmp0, tmp1, tmp2, tmp3);
    ILVLH_H(tmp0, tmp2, tmp1, tmp3, reg4, reg5, reg6, reg7);
    ILVLH_W(reg0, reg4, reg1, reg5, res0, res1, res2, res3);
    ILVLH_W(reg2, reg6, reg3, reg7, res4, res5, res6, res7);
    // Rows 8-11.
    src0 = __lsx_vld(s, 0);
    src1 = __lsx_vldx(s, src_stride);
    src2 = __lsx_vldx(s, src_stride2);
    src3 = __lsx_vldx(s, src_stride3);
    s += src_stride4;
    ILVLH_B(src0, src1, src2, src3, tmp0, tmp1, tmp2, tmp3);
    ILVLH_H(tmp0, tmp2, tmp1, tmp3, reg0, reg1, reg2, reg3);
    // Rows 12-15.
    src0 = __lsx_vld(s, 0);
    src1 = __lsx_vldx(s, src_stride);
    src2 = __lsx_vldx(s, src_stride2);
    src3 = __lsx_vldx(s, src_stride3);
    s += src_stride4;
    ILVLH_B(src0, src1, src2, src3, tmp0, tmp1, tmp2, tmp3);
    ILVLH_H(tmp0, tmp2, tmp1, tmp3, reg4, reg5, reg6, reg7);
    // Combine the halves at word/doubleword granularity and store the 16
    // transposed rows, four at a time.
    res8 = __lsx_vilvl_w(reg4, reg0);
    res9 = __lsx_vilvh_w(reg4, reg0);
    ILVLH_D(res0, res8, res1, res9, dst0, dst1, dst2, dst3);
    LSX_ST_4(dst0, dst1, dst2, dst3, dst, dst_stride, dst_stride2, dst_stride3,
             dst_stride4);
    res8 = __lsx_vilvl_w(reg5, reg1);
    res9 = __lsx_vilvh_w(reg5, reg1);
    ILVLH_D(res2, res8, res3, res9, dst0, dst1, dst2, dst3);
    LSX_ST_4(dst0, dst1, dst2, dst3, dst, dst_stride, dst_stride2, dst_stride3,
             dst_stride4);
    res8 = __lsx_vilvl_w(reg6, reg2);
    res9 = __lsx_vilvh_w(reg6, reg2);
    ILVLH_D(res4, res8, res5, res9, dst0, dst1, dst2, dst3);
    LSX_ST_4(dst0, dst1, dst2, dst3, dst, dst_stride, dst_stride2, dst_stride3,
             dst_stride4);
    res8 = __lsx_vilvl_w(reg7, reg3);
    res9 = __lsx_vilvh_w(reg7, reg3);
    ILVLH_D(res6, res8, res7, res9, dst0, dst1, dst2, dst3);
    LSX_ST_4(dst0, dst1, dst2, dst3, dst, dst_stride, dst_stride2, dst_stride3,
             dst_stride4);
    src += 16;
  }
}
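
// UV variant: the source rows hold interleaved U/V bytes, so the transposed
// 16x16 tile yields alternating U and V rows; U rows are stored to dst_a and
// V rows to dst_b.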
void TransposeUVWx16_LSX(const uint8_t* src,
                         int src_stride,
                         uint8_t* dst_a,
                         int dst_stride_a,
                         uint8_t* dst_b,
                         int dst_stride_b,
                         int width) {
  int x;
  int len = width / 8;
  uint8_t* s;
  int src_stride2 = src_stride << 1;
  int src_stride3 = src_stride + src_stride2;
  int src_stride4 = src_stride2 << 1;
  int dst_stride_a2 = dst_stride_a << 1;
  int dst_stride_b2 = dst_stride_b << 1;
  __m128i src0, src1, src2, src3, dst0, dst1, dst2, dst3;
  __m128i tmp0, tmp1, tmp2, tmp3;
  __m128i reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7;
  __m128i res0, res1, res2, res3, res4, res5, res6, res7, res8, res9;

  for (x = 0; x < len; x++) {
    s = (uint8_t*)src;
    // Rows 0-3: interleave bytes, then halfwords.
    src0 = __lsx_vld(s, 0);
    src1 = __lsx_vldx(s, src_stride);
    src2 = __lsx_vldx(s, src_stride2);
    src3 = __lsx_vldx(s, src_stride3);
    s += src_stride4;
    ILVLH_B(src0, src1, src2, src3, tmp0, tmp1, tmp2, tmp3);
    ILVLH_H(tmp0, tmp2, tmp1, tmp3, reg0, reg1, reg2, reg3);
    // Rows 4-7, then combine with rows 0-3 at word granularity.
    src0 = __lsx_vld(s, 0);
    src1 = __lsx_vldx(s, src_stride);
    src2 = __lsx_vldx(s, src_stride2);
    src3 = __lsx_vldx(s, src_stride3);
    s += src_stride4;
    ILVLH_B(src0, src1, src2, src3, tmp0, tmp1, tmp2, tmp3);
    ILVLH_H(tmp0, tmp2, tmp1, tmp3, reg4, reg5, reg6, reg7);
    ILVLH_W(reg0, reg4, reg1, reg5, res0, res1, res2, res3);
    ILVLH_W(reg2, reg6, reg3, reg7, res4, res5, res6, res7);
    // Rows 8-11.
    src0 = __lsx_vld(s, 0);
    src1 = __lsx_vldx(s, src_stride);
    src2 = __lsx_vldx(s, src_stride2);
    src3 = __lsx_vldx(s, src_stride3);
    s += src_stride4;
    ILVLH_B(src0, src1, src2, src3, tmp0, tmp1, tmp2, tmp3);
    ILVLH_H(tmp0, tmp2, tmp1, tmp3, reg0, reg1, reg2, reg3);
    // Rows 12-15.
    src0 = __lsx_vld(s, 0);
    src1 = __lsx_vldx(s, src_stride);
    src2 = __lsx_vldx(s, src_stride2);
    src3 = __lsx_vldx(s, src_stride3);
    s += src_stride4;
    ILVLH_B(src0, src1, src2, src3, tmp0, tmp1, tmp2, tmp3);
    ILVLH_H(tmp0, tmp2, tmp1, tmp3, reg4, reg5, reg6, reg7);
    // Combine the halves and split the transposed rows between the U and V
    // planes: dst0/dst2 go to dst_a, dst1/dst3 go to dst_b.
    res8 = __lsx_vilvl_w(reg4, reg0);
    res9 = __lsx_vilvh_w(reg4, reg0);
    ILVLH_D(res0, res8, res1, res9, dst0, dst1, dst2, dst3);
    LSX_ST_2(dst0, dst2, dst_a, dst_stride_a, dst_stride_a2);
    LSX_ST_2(dst1, dst3, dst_b, dst_stride_b, dst_stride_b2);
    res8 = __lsx_vilvl_w(reg5, reg1);
    res9 = __lsx_vilvh_w(reg5, reg1);
    ILVLH_D(res2, res8, res3, res9, dst0, dst1, dst2, dst3);
    LSX_ST_2(dst0, dst2, dst_a, dst_stride_a, dst_stride_a2);
    LSX_ST_2(dst1, dst3, dst_b, dst_stride_b, dst_stride_b2);
    res8 = __lsx_vilvl_w(reg6, reg2);
    res9 = __lsx_vilvh_w(reg6, reg2);
    ILVLH_D(res4, res8, res5, res9, dst0, dst1, dst2, dst3);
    LSX_ST_2(dst0, dst2, dst_a, dst_stride_a, dst_stride_a2);
    LSX_ST_2(dst1, dst3, dst_b, dst_stride_b, dst_stride_b2);
    res8 = __lsx_vilvl_w(reg7, reg3);
    res9 = __lsx_vilvh_w(reg7, reg3);
    ILVLH_D(res6, res8, res7, res9, dst0, dst1, dst2, dst3);
    LSX_ST_2(dst0, dst2, dst_a, dst_stride_a, dst_stride_a2);
    LSX_ST_2(dst1, dst3, dst_b, dst_stride_b, dst_stride_b2);
    src += 16;
  }
}
#ifdef __cplusplus
}  // extern "C"
}  // namespace libyuv
#endif

#endif  // !defined(LIBYUV_DISABLE_LSX) && defined(__loongarch_sx)