DetilePlane and unittest for NEON

Bug: libyuv:915, b/215425056
Change-Id: Iccab1ed3f6d385f02895d44faa94d198ad79d693
Reviewed-on: https://chromium-review.googlesource.com/c/libyuv/libyuv/+/3424820
Reviewed-by: Justin Green <greenjustin@google.com>
Reviewed-by: Frank Barchard <fbarchard@chromium.org>
Commit-Queue: Frank Barchard <fbarchard@chromium.org>
This commit is contained in:
Frank Barchard 2022-01-31 11:49:55 -08:00 committed by libyuv LUCI CQ
parent 2c6bfc02d5
commit 804980bbab
20 changed files with 1690 additions and 1573 deletions

1
.gitignore vendored
View File

@ -12,6 +12,7 @@ pin-log.txt
/native_client
/net
/out
/unit_test/out
/source/out
/sde-avx-sse-transition-out.txt
/testing

View File

@ -1,6 +1,6 @@
Name: libyuv
URL: http://code.google.com/p/libyuv/
Version: 1809
Version: 1810
License: BSD
License File: LICENSE

File diff suppressed because it is too large Load Diff

View File

@ -83,6 +83,16 @@ void SetPlane(uint8_t* dst_y,
int height,
uint32_t value);
// Convert a plane of tiles of 16 x H to linear.
LIBYUV_API
void DetilePlane(const uint8_t* src_y,
int src_stride_y,
uint8_t* dst_y,
int dst_stride_y,
int width,
int height,
int tile_height);
// Split interleaved UV plane into separate U and V planes.
LIBYUV_API
void SplitUVPlane(const uint8_t* src_uv,

View File

@ -400,8 +400,8 @@ extern "C" {
// The following are available for AVX512 clang x64 platforms:
// TODO(fbarchard): Port to x86
#if !defined(LIBYUV_DISABLE_X86) && \
defined(__x86_64__) && (defined(CLANG_HAS_AVX512))
#if !defined(LIBYUV_DISABLE_X86) && defined(__x86_64__) && \
(defined(CLANG_HAS_AVX512))
#define HAS_I422TOARGBROW_AVX512BW
#endif
@ -536,7 +536,7 @@ extern "C" {
#define HAS_SCALESUMSAMPLES_NEON
#define HAS_GAUSSROW_F32_NEON
#define HAS_GAUSSCOL_F32_NEON
#define HAS_DETILEROW_NEON
#endif
#if !defined(LIBYUV_DISABLE_MSA) && defined(__mips_msa)
#define HAS_ABGRTOUVROW_MSA
@ -1768,7 +1768,9 @@ void ARGBMirrorRow_Any_NEON(const uint8_t* src_ptr,
uint8_t* dst_ptr,
int width);
void ARGBMirrorRow_Any_MSA(const uint8_t* src_ptr, uint8_t* dst_ptr, int width);
void ARGBMirrorRow_Any_LASX(const uint8_t* src_ptr, uint8_t* dst_ptr, int width);
void ARGBMirrorRow_Any_LASX(const uint8_t* src_ptr,
uint8_t* dst_ptr,
int width);
void RGB24MirrorRow_SSSE3(const uint8_t* src_rgb24,
uint8_t* dst_rgb24,
@ -1828,7 +1830,15 @@ void SplitUVRow_Any_LSX(const uint8_t* src_ptr,
uint8_t* dst_u,
uint8_t* dst_v,
int width);
void DetileRow_C(const uint8_t* src,
ptrdiff_t src_tile_stride,
uint8_t* dst,
int width);
void DetileRow_NEON(const uint8_t* src,
ptrdiff_t src_tile_stride,
uint8_t* dst,
int width);
void MergeUVRow_C(const uint8_t* src_u,
const uint8_t* src_v,
uint8_t* dst_uv,
@ -2802,7 +2812,6 @@ void ARGBToARGB4444Row_LASX(const uint8_t* src_argb,
uint8_t* dst_rgb,
int width);
void ARGBToRGBARow_C(const uint8_t* src_argb, uint8_t* dst_rgb, int width);
void ARGBToRGB24Row_C(const uint8_t* src_argb, uint8_t* dst_rgb, int width);
void ARGBToRAWRow_C(const uint8_t* src_argb, uint8_t* dst_rgb, int width);
@ -4097,7 +4106,6 @@ void ARGBToARGB4444Row_Any_LASX(const uint8_t* src_ptr,
uint8_t* dst_ptr,
int width);
void I444ToARGBRow_Any_NEON(const uint8_t* y_buf,
const uint8_t* u_buf,
const uint8_t* v_buf,
@ -4878,7 +4886,6 @@ void ARGBQuantizeRow_LSX(uint8_t* dst_argb,
int interval_offset,
int width);
void ARGBShadeRow_C(const uint8_t* src_argb,
uint8_t* dst_argb,
int width,
@ -4912,7 +4919,6 @@ void ComputeCumulativeSumRow_SSE2(const uint8_t* row,
const int32_t* previous_cumsum,
int width);
void CumulativeSumToAverageRow_C(const int32_t* tl,
const int32_t* bl,
int w,
@ -5259,7 +5265,6 @@ float ScaleSumSamples_NEON(const float* src,
void ScaleSamples_C(const float* src, float* dst, float scale, int width);
void ScaleSamples_NEON(const float* src, float* dst, float scale, int width);
void GaussRow_F32_NEON(const float* src, float* dst, int width);
void GaussRow_F32_C(const float* src, float* dst, int width);

View File

@ -1564,7 +1564,6 @@ void ScaleRowDown34_1_Box_Any_MSA(const uint8_t* src_ptr,
uint8_t* dst_ptr,
int dst_width);
void ScaleRowDown2_LSX(const uint8_t* src_ptr,
ptrdiff_t src_stride,
uint8_t* dst,

View File

@ -11,6 +11,6 @@
#ifndef INCLUDE_LIBYUV_VERSION_H_
#define INCLUDE_LIBYUV_VERSION_H_
#define LIBYUV_VERSION 1809
#define LIBYUV_VERSION 1810
#endif // INCLUDE_LIBYUV_VERSION_H_

View File

@ -2448,8 +2448,7 @@ int RGB565ToI420(const uint8_t* src_rgb565,
}
}
// MSA version does direct RGB565 to YUV.
#elif (defined(HAS_RGB565TOYROW_MSA) \
|| defined(HAS_RGB565TOYROW_LSX))
#elif (defined(HAS_RGB565TOYROW_MSA) || defined(HAS_RGB565TOYROW_LSX))
#if defined(HAS_RGB565TOYROW_MSA) && defined(HAS_RGB565TOUVROW_MSA)
if (TestCpuFlag(kCpuHasMSA)) {
RGB565ToUVRow = RGB565ToUVRow_Any_MSA;

View File

@ -90,7 +90,8 @@ int I420ToARGBMatrix(const uint8_t* src_y,
}
#endif
#if defined(HAS_I422TOARGBROW_AVX512BW)
if (TestCpuFlag(kCpuHasAVX512BW | kCpuHasAVX512VL) == (kCpuHasAVX512BW | kCpuHasAVX512VL)) {
if (TestCpuFlag(kCpuHasAVX512BW | kCpuHasAVX512VL) ==
(kCpuHasAVX512BW | kCpuHasAVX512VL)) {
I422ToARGBRow = I422ToARGBRow_Any_AVX512BW;
if (IS_ALIGNED(width, 32)) {
I422ToARGBRow = I422ToARGBRow_AVX512BW;
@ -329,7 +330,8 @@ int I422ToARGBMatrix(const uint8_t* src_y,
}
#endif
#if defined(HAS_I422TOARGBROW_AVX512BW)
if (TestCpuFlag(kCpuHasAVX512BW | kCpuHasAVX512VL) == (kCpuHasAVX512BW | kCpuHasAVX512VL)) {
if (TestCpuFlag(kCpuHasAVX512BW | kCpuHasAVX512VL) ==
(kCpuHasAVX512BW | kCpuHasAVX512VL)) {
I422ToARGBRow = I422ToARGBRow_Any_AVX512BW;
if (IS_ALIGNED(width, 32)) {
I422ToARGBRow = I422ToARGBRow_AVX512BW;
@ -5094,7 +5096,8 @@ int I420ToRGB565Dither(const uint8_t* src_y,
}
#endif
#if defined(HAS_I422TOARGBROW_AVX512BW)
if (TestCpuFlag(kCpuHasAVX512BW | kCpuHasAVX512VL) == (kCpuHasAVX512BW | kCpuHasAVX512VL)) {
if (TestCpuFlag(kCpuHasAVX512BW | kCpuHasAVX512VL) ==
(kCpuHasAVX512BW | kCpuHasAVX512VL)) {
I422ToARGBRow = I422ToARGBRow_Any_AVX512BW;
if (IS_ALIGNED(width, 32)) {
I422ToARGBRow = I422ToARGBRow_AVX512BW;

View File

@ -193,25 +193,21 @@ LIBYUV_API SAFEBUFFERS int MipsCpuCaps(const char* cpuinfo_name) {
// TODO(fbarchard): Consider read_loongarch_ir().
#define LOONGARCH_CFG2 0x2
#define LOONGARCH_CFG2_LSX (1 << 6)
#define LOONGARCH_CFG2_LASX (1 << 7)

#if defined(__loongarch__) && defined(__linux__)
// Probe LoongArch SIMD support via the cpucfg instruction.
// Returns a bitmask of kCpuHasLSX / kCpuHasLASX read from CPUCFG word 2,
// whose bits 6 and 7 advertise the LSX and LASX vector units respectively.
LIBYUV_API SAFEBUFFERS int LoongarchCpuCaps(void) {
  int flag = 0x0;
  uint32_t cfg2 = 0;
  // Read configuration word 2 into cfg2 ("+&r": early-clobber read/write reg).
  __asm__ volatile("cpucfg %0, %1 \n\t" : "+&r"(cfg2) : "r"(LOONGARCH_CFG2));
  if (cfg2 & LOONGARCH_CFG2_LSX)
    flag |= kCpuHasLSX;
  if (cfg2 & LOONGARCH_CFG2_LASX)
    flag |= kCpuHasLASX;
  return flag;
}
#endif

View File

@ -853,6 +853,53 @@ int NV21ToNV12(const uint8_t* src_y,
return 0;
}
// Detile a plane of tiled data into a linear plane.
// Tile width is assumed to be 16 bytes.
// tile_height is 16 or 32 for MM21 (assumed power of two by the
// row-of-tiles advance below — TODO confirm for other tiled formats).
// src_stride_y is bytes per row of source ignoring tiling, e.g. 640.
// A negative height inverts the destination image.
// TODO: More detile row functions.
LIBYUV_API
void DetilePlane(const uint8_t* src_y,
                 int src_stride_y,
                 uint8_t* dst_y,
                 int dst_stride_y,
                 int width,
                 int height,
                 int tile_height) {
  // Bytes from the start of one 16-byte tile column to the next tile column.
  const ptrdiff_t src_tile_stride = 16 * tile_height;
  int y;
  void (*DetileRow)(const uint8_t* src, ptrdiff_t src_tile_stride, uint8_t* dst,
                    int width) = DetileRow_C;
  // A single strict check suffices; ">= 0" was redundant with "> 0".
  assert(src_stride_y > 0);
  assert(tile_height > 0);
  // Negative height means invert the image.
  if (height < 0) {
    height = -height;
    dst_y = dst_y + (height - 1) * dst_stride_y;
    dst_stride_y = -dst_stride_y;
  }
#if defined(HAS_DETILEROW_NEON)
  if (TestCpuFlag(kCpuHasNEON) && IS_ALIGNED(width, 16)) {
    DetileRow = DetileRow_NEON;
  }
#endif
  // Detile plane: each output row gathers 16-byte runs spaced
  // src_tile_stride apart from the source.
  for (y = 0; y < height; ++y) {
    DetileRow(src_y, src_tile_stride, dst_y, width);
    dst_y += dst_stride_y;
    src_y += 16;
    // Advance to next row of tiles once tile_height rows are consumed.
    if ((y & (tile_height - 1)) == (tile_height - 1)) {
      src_y = src_y - src_tile_stride + src_stride_y * tile_height;
    }
  }
}
// Support function for NV12 etc RGB channels.
// Width and height are plane sizes (typically half pixel width).
LIBYUV_API

View File

@ -20,28 +20,28 @@ namespace libyuv {
extern "C" {
#endif
#define ILVLH_B(in0, in1, in2, in3, out0, out1, out2, out3) \
{ \
DUP2_ARG2(__lsx_vilvl_b, in1, in0, in3, in2, out0, out2); \
DUP2_ARG2(__lsx_vilvh_b, in1, in0, in3, in2, out1, out3); \
#define ILVLH_B(in0, in1, in2, in3, out0, out1, out2, out3) \
{ \
DUP2_ARG2(__lsx_vilvl_b, in1, in0, in3, in2, out0, out2); \
DUP2_ARG2(__lsx_vilvh_b, in1, in0, in3, in2, out1, out3); \
}
#define ILVLH_H(in0, in1, in2, in3, out0, out1, out2, out3) \
{ \
DUP2_ARG2(__lsx_vilvl_h, in1, in0, in3, in2, out0, out2); \
DUP2_ARG2(__lsx_vilvh_h, in1, in0, in3, in2, out1, out3); \
#define ILVLH_H(in0, in1, in2, in3, out0, out1, out2, out3) \
{ \
DUP2_ARG2(__lsx_vilvl_h, in1, in0, in3, in2, out0, out2); \
DUP2_ARG2(__lsx_vilvh_h, in1, in0, in3, in2, out1, out3); \
}
#define ILVLH_W(in0, in1, in2, in3, out0, out1, out2, out3) \
{ \
DUP2_ARG2(__lsx_vilvl_w, in1, in0, in3, in2, out0, out2); \
DUP2_ARG2(__lsx_vilvh_w, in1, in0, in3, in2, out1, out3); \
#define ILVLH_W(in0, in1, in2, in3, out0, out1, out2, out3) \
{ \
DUP2_ARG2(__lsx_vilvl_w, in1, in0, in3, in2, out0, out2); \
DUP2_ARG2(__lsx_vilvh_w, in1, in0, in3, in2, out1, out3); \
}
#define ILVLH_D(in0, in1, in2, in3, out0, out1, out2, out3) \
{ \
DUP2_ARG2(__lsx_vilvl_d, in1, in0, in3, in2, out0, out2); \
DUP2_ARG2(__lsx_vilvh_d, in1, in0, in3, in2, out1, out3); \
#define ILVLH_D(in0, in1, in2, in3, out0, out1, out2, out3) \
{ \
DUP2_ARG2(__lsx_vilvl_d, in1, in0, in3, in2, out0, out2); \
DUP2_ARG2(__lsx_vilvh_d, in1, in0, in3, in2, out1, out3); \
}
#define LSX_ST_4(_dst0, _dst1, _dst2, _dst3, _dst, _stride, _stride2, \
@ -54,11 +54,11 @@ extern "C" {
_dst += _stride4; \
}
#define LSX_ST_2(_dst0, _dst1, _dst, _stride, _stride2) \
{ \
__lsx_vst(_dst0, _dst, 0); \
__lsx_vstx(_dst1, _dst, _stride); \
_dst += _stride2; \
#define LSX_ST_2(_dst0, _dst1, _dst, _stride, _stride2) \
{ \
__lsx_vst(_dst0, _dst, 0); \
__lsx_vstx(_dst1, _dst, _stride); \
_dst += _stride2; \
}
void TransposeWx16_C(const uint8_t* src,
@ -84,7 +84,6 @@ void TransposeUVWx16_C(const uint8_t* src,
dst_stride_a, (dst_b + 8), dst_stride_b, width);
}
void TransposeWx16_LSX(const uint8_t* src,
int src_stride,
uint8_t* dst,
@ -92,7 +91,7 @@ void TransposeWx16_LSX(const uint8_t* src,
int width) {
int x;
int len = width / 16;
uint8_t *s;
uint8_t* s;
int src_stride2 = src_stride << 1;
int src_stride3 = src_stride + src_stride2;
int src_stride4 = src_stride2 << 1;
@ -139,23 +138,23 @@ void TransposeWx16_LSX(const uint8_t* src,
res8 = __lsx_vilvl_w(reg4, reg0);
res9 = __lsx_vilvh_w(reg4, reg0);
ILVLH_D(res0, res8, res1, res9, dst0, dst1, dst2, dst3);
LSX_ST_4(dst0, dst1, dst2, dst3, dst, dst_stride, dst_stride2,
dst_stride3, dst_stride4);
LSX_ST_4(dst0, dst1, dst2, dst3, dst, dst_stride, dst_stride2, dst_stride3,
dst_stride4);
res8 = __lsx_vilvl_w(reg5, reg1);
res9 = __lsx_vilvh_w(reg5, reg1);
ILVLH_D(res2, res8, res3, res9, dst0, dst1, dst2, dst3);
LSX_ST_4(dst0, dst1, dst2, dst3, dst, dst_stride, dst_stride2,
dst_stride3, dst_stride4);
LSX_ST_4(dst0, dst1, dst2, dst3, dst, dst_stride, dst_stride2, dst_stride3,
dst_stride4);
res8 = __lsx_vilvl_w(reg6, reg2);
res9 = __lsx_vilvh_w(reg6, reg2);
ILVLH_D(res4, res8, res5, res9, dst0, dst1, dst2, dst3);
LSX_ST_4(dst0, dst1, dst2, dst3, dst, dst_stride, dst_stride2,
dst_stride3, dst_stride4);
LSX_ST_4(dst0, dst1, dst2, dst3, dst, dst_stride, dst_stride2, dst_stride3,
dst_stride4);
res8 = __lsx_vilvl_w(reg7, reg3);
res9 = __lsx_vilvh_w(reg7, reg3);
ILVLH_D(res6, res8, res7, res9, dst0, dst1, dst2, dst3);
LSX_ST_4(dst0, dst1, dst2, dst3, dst, dst_stride, dst_stride2,
dst_stride3, dst_stride4);
LSX_ST_4(dst0, dst1, dst2, dst3, dst, dst_stride, dst_stride2, dst_stride3,
dst_stride4);
src += 16;
}
}

View File

@ -2659,6 +2659,21 @@ void RGB24MirrorRow_C(const uint8_t* src_rgb24, uint8_t* dst_rgb24, int width) {
}
}
// Copy one linear row out of a tiled source: 16-byte runs are gathered
// from positions src_tile_stride bytes apart. A tail of fewer than 16
// bytes is copied from the final run.
void DetileRow_C(const uint8_t* src,
                 ptrdiff_t src_tile_stride,
                 uint8_t* dst,
                 int width) {
  int remaining = width;
  while (remaining >= 16) {
    memcpy(dst, src, 16);
    src += src_tile_stride;
    dst += 16;
    remaining -= 16;
  }
  if (remaining > 0) {
    memcpy(dst, src, remaining);
  }
}
void SplitUVRow_C(const uint8_t* src_uv,
uint8_t* dst_u,
uint8_t* dst_v,

View File

@ -23,178 +23,176 @@ extern "C" {
#define ALPHA_VAL (-1)
// Fill YUV -> RGB conversion constants into vectors
#define YUVTORGB_SETUP(yuvconst, ubvr, ugvg, yg, yb) \
{ \
__m256i ub, vr, ug, vg; \
\
ub = __lasx_xvreplgr2vr_h(yuvconst->kUVToB[0]); \
vr = __lasx_xvreplgr2vr_h(yuvconst->kUVToR[1]); \
ug = __lasx_xvreplgr2vr_h(yuvconst->kUVToG[0]); \
vg = __lasx_xvreplgr2vr_h(yuvconst->kUVToG[1]); \
yg = __lasx_xvreplgr2vr_h(yuvconst->kYToRgb[0]); \
yb = __lasx_xvreplgr2vr_w(yuvconst->kYBiasToRgb[0]); \
ubvr = __lasx_xvilvl_h(ub, vr); \
ugvg = __lasx_xvilvl_h(ug, vg); \
#define YUVTORGB_SETUP(yuvconst, ubvr, ugvg, yg, yb) \
{ \
__m256i ub, vr, ug, vg; \
\
ub = __lasx_xvreplgr2vr_h(yuvconst->kUVToB[0]); \
vr = __lasx_xvreplgr2vr_h(yuvconst->kUVToR[1]); \
ug = __lasx_xvreplgr2vr_h(yuvconst->kUVToG[0]); \
vg = __lasx_xvreplgr2vr_h(yuvconst->kUVToG[1]); \
yg = __lasx_xvreplgr2vr_h(yuvconst->kYToRgb[0]); \
yb = __lasx_xvreplgr2vr_w(yuvconst->kYBiasToRgb[0]); \
ubvr = __lasx_xvilvl_h(ub, vr); \
ugvg = __lasx_xvilvl_h(ug, vg); \
}
// Load 32 YUV422 pixel data
#define READYUV422_D(psrc_y, psrc_u, psrc_v, out_y, uv_l, uv_h) \
{ \
__m256i temp0, temp1; \
\
DUP2_ARG2(__lasx_xvld, psrc_y, 0, psrc_u, 0, out_y, temp0); \
temp1 = __lasx_xvld(psrc_v, 0); \
temp0 = __lasx_xvsub_b(temp0, const_0x80); \
temp1 = __lasx_xvsub_b(temp1, const_0x80); \
temp0 = __lasx_vext2xv_h_b(temp0); \
temp1 = __lasx_vext2xv_h_b(temp1); \
uv_l = __lasx_xvilvl_h(temp0, temp1); \
uv_h = __lasx_xvilvh_h(temp0, temp1); \
#define READYUV422_D(psrc_y, psrc_u, psrc_v, out_y, uv_l, uv_h) \
{ \
__m256i temp0, temp1; \
\
DUP2_ARG2(__lasx_xvld, psrc_y, 0, psrc_u, 0, out_y, temp0); \
temp1 = __lasx_xvld(psrc_v, 0); \
temp0 = __lasx_xvsub_b(temp0, const_0x80); \
temp1 = __lasx_xvsub_b(temp1, const_0x80); \
temp0 = __lasx_vext2xv_h_b(temp0); \
temp1 = __lasx_vext2xv_h_b(temp1); \
uv_l = __lasx_xvilvl_h(temp0, temp1); \
uv_h = __lasx_xvilvh_h(temp0, temp1); \
}
// Load 16 YUV422 pixel data
#define READYUV422(psrc_y, psrc_u, psrc_v, out_y, uv) \
{ \
__m256i temp0, temp1; \
\
out_y = __lasx_xvld(psrc_y, 0); \
temp0 = __lasx_xvldrepl_d(psrc_u, 0); \
temp1 = __lasx_xvldrepl_d(psrc_v, 0); \
uv = __lasx_xvilvl_b(temp0, temp1); \
uv = __lasx_xvsub_b(uv, const_0x80); \
uv = __lasx_vext2xv_h_b(uv); \
#define READYUV422(psrc_y, psrc_u, psrc_v, out_y, uv) \
{ \
__m256i temp0, temp1; \
\
out_y = __lasx_xvld(psrc_y, 0); \
temp0 = __lasx_xvldrepl_d(psrc_u, 0); \
temp1 = __lasx_xvldrepl_d(psrc_v, 0); \
uv = __lasx_xvilvl_b(temp0, temp1); \
uv = __lasx_xvsub_b(uv, const_0x80); \
uv = __lasx_vext2xv_h_b(uv); \
}
// Convert 16 pixels of YUV420 to RGB.
#define YUVTORGB_D(in_y, in_uvl, in_uvh, ubvr, ugvg, \
yg, yb, b_l, b_h, g_l, g_h, r_l, r_h) \
{ \
__m256i u_l, u_h, v_l, v_h; \
__m256i yl_ev, yl_od, yh_ev, yh_od; \
__m256i temp0, temp1, temp2, temp3; \
\
temp0 = __lasx_xvilvl_b(in_y, in_y); \
temp1 = __lasx_xvilvh_b(in_y, in_y); \
yl_ev = __lasx_xvmulwev_w_hu_h(temp0, yg); \
yl_od = __lasx_xvmulwod_w_hu_h(temp0, yg); \
yh_ev = __lasx_xvmulwev_w_hu_h(temp1, yg); \
yh_od = __lasx_xvmulwod_w_hu_h(temp1, yg); \
DUP4_ARG2(__lasx_xvsrai_w, yl_ev, 16, yl_od, 16, yh_ev, 16, yh_od, 16, \
yl_ev, yl_od, yh_ev, yh_od); \
yl_ev = __lasx_xvadd_w(yl_ev, yb); \
yl_od = __lasx_xvadd_w(yl_od, yb); \
yh_ev = __lasx_xvadd_w(yh_ev, yb); \
yh_od = __lasx_xvadd_w(yh_od, yb); \
v_l = __lasx_xvmulwev_w_h(in_uvl, ubvr); \
u_l = __lasx_xvmulwod_w_h(in_uvl, ubvr); \
v_h = __lasx_xvmulwev_w_h(in_uvh, ubvr); \
u_h = __lasx_xvmulwod_w_h(in_uvh, ubvr); \
temp0 = __lasx_xvadd_w(yl_ev, u_l); \
temp1 = __lasx_xvadd_w(yl_od, u_l); \
temp2 = __lasx_xvadd_w(yh_ev, u_h); \
temp3 = __lasx_xvadd_w(yh_od, u_h); \
DUP4_ARG2(__lasx_xvsrai_w, temp0, 6, temp1, 6, temp2, 6, temp3, 6, \
temp0, temp1, temp2, temp3); \
DUP4_ARG1(__lasx_xvclip255_w, temp0, temp1, temp2, temp3, \
temp0, temp1, temp2, temp3); \
b_l = __lasx_xvpackev_h(temp1, temp0); \
b_h = __lasx_xvpackev_h(temp3, temp2); \
temp0 = __lasx_xvadd_w(yl_ev, v_l); \
temp1 = __lasx_xvadd_w(yl_od, v_l); \
temp2 = __lasx_xvadd_w(yh_ev, v_h); \
temp3 = __lasx_xvadd_w(yh_od, v_h); \
DUP4_ARG2(__lasx_xvsrai_w, temp0, 6, temp1, 6, temp2, 6, temp3, 6, \
temp0, temp1, temp2, temp3); \
DUP4_ARG1(__lasx_xvclip255_w, temp0, temp1, temp2, temp3, \
temp0, temp1, temp2, temp3); \
r_l = __lasx_xvpackev_h(temp1, temp0); \
r_h = __lasx_xvpackev_h(temp3, temp2); \
DUP2_ARG2(__lasx_xvdp2_w_h, in_uvl, ugvg, in_uvh, ugvg, u_l, u_h); \
temp0 = __lasx_xvsub_w(yl_ev, u_l); \
temp1 = __lasx_xvsub_w(yl_od, u_l); \
temp2 = __lasx_xvsub_w(yh_ev, u_h); \
temp3 = __lasx_xvsub_w(yh_od, u_h); \
DUP4_ARG2(__lasx_xvsrai_w, temp0, 6, temp1, 6, temp2, 6, temp3, 6, \
temp0, temp1, temp2, temp3); \
DUP4_ARG1(__lasx_xvclip255_w, temp0, temp1, temp2, temp3, \
temp0, temp1, temp2, temp3); \
g_l = __lasx_xvpackev_h(temp1, temp0); \
g_h = __lasx_xvpackev_h(temp3, temp2); \
#define YUVTORGB_D(in_y, in_uvl, in_uvh, ubvr, ugvg, yg, yb, b_l, b_h, g_l, \
g_h, r_l, r_h) \
{ \
__m256i u_l, u_h, v_l, v_h; \
__m256i yl_ev, yl_od, yh_ev, yh_od; \
__m256i temp0, temp1, temp2, temp3; \
\
temp0 = __lasx_xvilvl_b(in_y, in_y); \
temp1 = __lasx_xvilvh_b(in_y, in_y); \
yl_ev = __lasx_xvmulwev_w_hu_h(temp0, yg); \
yl_od = __lasx_xvmulwod_w_hu_h(temp0, yg); \
yh_ev = __lasx_xvmulwev_w_hu_h(temp1, yg); \
yh_od = __lasx_xvmulwod_w_hu_h(temp1, yg); \
DUP4_ARG2(__lasx_xvsrai_w, yl_ev, 16, yl_od, 16, yh_ev, 16, yh_od, 16, \
yl_ev, yl_od, yh_ev, yh_od); \
yl_ev = __lasx_xvadd_w(yl_ev, yb); \
yl_od = __lasx_xvadd_w(yl_od, yb); \
yh_ev = __lasx_xvadd_w(yh_ev, yb); \
yh_od = __lasx_xvadd_w(yh_od, yb); \
v_l = __lasx_xvmulwev_w_h(in_uvl, ubvr); \
u_l = __lasx_xvmulwod_w_h(in_uvl, ubvr); \
v_h = __lasx_xvmulwev_w_h(in_uvh, ubvr); \
u_h = __lasx_xvmulwod_w_h(in_uvh, ubvr); \
temp0 = __lasx_xvadd_w(yl_ev, u_l); \
temp1 = __lasx_xvadd_w(yl_od, u_l); \
temp2 = __lasx_xvadd_w(yh_ev, u_h); \
temp3 = __lasx_xvadd_w(yh_od, u_h); \
DUP4_ARG2(__lasx_xvsrai_w, temp0, 6, temp1, 6, temp2, 6, temp3, 6, temp0, \
temp1, temp2, temp3); \
DUP4_ARG1(__lasx_xvclip255_w, temp0, temp1, temp2, temp3, temp0, temp1, \
temp2, temp3); \
b_l = __lasx_xvpackev_h(temp1, temp0); \
b_h = __lasx_xvpackev_h(temp3, temp2); \
temp0 = __lasx_xvadd_w(yl_ev, v_l); \
temp1 = __lasx_xvadd_w(yl_od, v_l); \
temp2 = __lasx_xvadd_w(yh_ev, v_h); \
temp3 = __lasx_xvadd_w(yh_od, v_h); \
DUP4_ARG2(__lasx_xvsrai_w, temp0, 6, temp1, 6, temp2, 6, temp3, 6, temp0, \
temp1, temp2, temp3); \
DUP4_ARG1(__lasx_xvclip255_w, temp0, temp1, temp2, temp3, temp0, temp1, \
temp2, temp3); \
r_l = __lasx_xvpackev_h(temp1, temp0); \
r_h = __lasx_xvpackev_h(temp3, temp2); \
DUP2_ARG2(__lasx_xvdp2_w_h, in_uvl, ugvg, in_uvh, ugvg, u_l, u_h); \
temp0 = __lasx_xvsub_w(yl_ev, u_l); \
temp1 = __lasx_xvsub_w(yl_od, u_l); \
temp2 = __lasx_xvsub_w(yh_ev, u_h); \
temp3 = __lasx_xvsub_w(yh_od, u_h); \
DUP4_ARG2(__lasx_xvsrai_w, temp0, 6, temp1, 6, temp2, 6, temp3, 6, temp0, \
temp1, temp2, temp3); \
DUP4_ARG1(__lasx_xvclip255_w, temp0, temp1, temp2, temp3, temp0, temp1, \
temp2, temp3); \
g_l = __lasx_xvpackev_h(temp1, temp0); \
g_h = __lasx_xvpackev_h(temp3, temp2); \
}
// Convert 8 pixels of YUV420 to RGB.
#define YUVTORGB(in_y, in_uv, ubvr, ugvg, \
yg, yb, out_b, out_g, out_r) \
{ \
__m256i u_l, v_l, yl_ev, yl_od; \
__m256i temp0, temp1; \
\
in_y = __lasx_xvpermi_d(in_y, 0xD8); \
temp0 = __lasx_xvilvl_b(in_y, in_y); \
yl_ev = __lasx_xvmulwev_w_hu_h(temp0, yg); \
yl_od = __lasx_xvmulwod_w_hu_h(temp0, yg); \
DUP2_ARG2(__lasx_xvsrai_w, yl_ev, 16, yl_od, 16, yl_ev, yl_od); \
yl_ev = __lasx_xvadd_w(yl_ev, yb); \
yl_od = __lasx_xvadd_w(yl_od, yb); \
v_l = __lasx_xvmulwev_w_h(in_uv, ubvr); \
u_l = __lasx_xvmulwod_w_h(in_uv, ubvr); \
temp0 = __lasx_xvadd_w(yl_ev, u_l); \
temp1 = __lasx_xvadd_w(yl_od, u_l); \
DUP2_ARG2(__lasx_xvsrai_w, temp0, 6, temp1, 6, temp0, temp1); \
DUP2_ARG1(__lasx_xvclip255_w, temp0, temp1, temp0, temp1); \
out_b = __lasx_xvpackev_h(temp1, temp0); \
temp0 = __lasx_xvadd_w(yl_ev, v_l); \
temp1 = __lasx_xvadd_w(yl_od, v_l); \
DUP2_ARG2(__lasx_xvsrai_w, temp0, 6, temp1, 6, temp0, temp1); \
DUP2_ARG1(__lasx_xvclip255_w, temp0, temp1, temp0, temp1); \
out_r = __lasx_xvpackev_h(temp1, temp0); \
u_l = __lasx_xvdp2_w_h(in_uv, ugvg); \
temp0 = __lasx_xvsub_w(yl_ev, u_l); \
temp1 = __lasx_xvsub_w(yl_od, u_l); \
DUP2_ARG2(__lasx_xvsrai_w, temp0, 6, temp1, 6, temp0, temp1); \
DUP2_ARG1(__lasx_xvclip255_w, temp0, temp1, temp0, temp1); \
out_g = __lasx_xvpackev_h(temp1, temp0); \
#define YUVTORGB(in_y, in_uv, ubvr, ugvg, yg, yb, out_b, out_g, out_r) \
{ \
__m256i u_l, v_l, yl_ev, yl_od; \
__m256i temp0, temp1; \
\
in_y = __lasx_xvpermi_d(in_y, 0xD8); \
temp0 = __lasx_xvilvl_b(in_y, in_y); \
yl_ev = __lasx_xvmulwev_w_hu_h(temp0, yg); \
yl_od = __lasx_xvmulwod_w_hu_h(temp0, yg); \
DUP2_ARG2(__lasx_xvsrai_w, yl_ev, 16, yl_od, 16, yl_ev, yl_od); \
yl_ev = __lasx_xvadd_w(yl_ev, yb); \
yl_od = __lasx_xvadd_w(yl_od, yb); \
v_l = __lasx_xvmulwev_w_h(in_uv, ubvr); \
u_l = __lasx_xvmulwod_w_h(in_uv, ubvr); \
temp0 = __lasx_xvadd_w(yl_ev, u_l); \
temp1 = __lasx_xvadd_w(yl_od, u_l); \
DUP2_ARG2(__lasx_xvsrai_w, temp0, 6, temp1, 6, temp0, temp1); \
DUP2_ARG1(__lasx_xvclip255_w, temp0, temp1, temp0, temp1); \
out_b = __lasx_xvpackev_h(temp1, temp0); \
temp0 = __lasx_xvadd_w(yl_ev, v_l); \
temp1 = __lasx_xvadd_w(yl_od, v_l); \
DUP2_ARG2(__lasx_xvsrai_w, temp0, 6, temp1, 6, temp0, temp1); \
DUP2_ARG1(__lasx_xvclip255_w, temp0, temp1, temp0, temp1); \
out_r = __lasx_xvpackev_h(temp1, temp0); \
u_l = __lasx_xvdp2_w_h(in_uv, ugvg); \
temp0 = __lasx_xvsub_w(yl_ev, u_l); \
temp1 = __lasx_xvsub_w(yl_od, u_l); \
DUP2_ARG2(__lasx_xvsrai_w, temp0, 6, temp1, 6, temp0, temp1); \
DUP2_ARG1(__lasx_xvclip255_w, temp0, temp1, temp0, temp1); \
out_g = __lasx_xvpackev_h(temp1, temp0); \
}
// Pack and Store 16 ARGB values.
#define STOREARGB_D(a_l, a_h, r_l, r_h, g_l, g_h, \
b_l, b_h, pdst_argb) \
{ \
__m256i temp0, temp1, temp2, temp3; \
\
temp0 = __lasx_xvpackev_b(g_l, b_l); \
temp1 = __lasx_xvpackev_b(a_l, r_l); \
temp2 = __lasx_xvpackev_b(g_h, b_h); \
temp3 = __lasx_xvpackev_b(a_h, r_h); \
r_l = __lasx_xvilvl_h(temp1, temp0); \
r_h = __lasx_xvilvh_h(temp1, temp0); \
g_l = __lasx_xvilvl_h(temp3, temp2); \
g_h = __lasx_xvilvh_h(temp3, temp2); \
temp0 = __lasx_xvpermi_q(r_h, r_l, 0x20); \
temp1 = __lasx_xvpermi_q(g_h, g_l, 0x20); \
temp2 = __lasx_xvpermi_q(r_h, r_l, 0x31); \
temp3 = __lasx_xvpermi_q(g_h, g_l, 0x31); \
__lasx_xvst(temp0, pdst_argb, 0); \
__lasx_xvst(temp1, pdst_argb, 32); \
__lasx_xvst(temp2, pdst_argb, 64); \
__lasx_xvst(temp3, pdst_argb, 96); \
pdst_argb += 128; \
#define STOREARGB_D(a_l, a_h, r_l, r_h, g_l, g_h, b_l, b_h, pdst_argb) \
{ \
__m256i temp0, temp1, temp2, temp3; \
\
temp0 = __lasx_xvpackev_b(g_l, b_l); \
temp1 = __lasx_xvpackev_b(a_l, r_l); \
temp2 = __lasx_xvpackev_b(g_h, b_h); \
temp3 = __lasx_xvpackev_b(a_h, r_h); \
r_l = __lasx_xvilvl_h(temp1, temp0); \
r_h = __lasx_xvilvh_h(temp1, temp0); \
g_l = __lasx_xvilvl_h(temp3, temp2); \
g_h = __lasx_xvilvh_h(temp3, temp2); \
temp0 = __lasx_xvpermi_q(r_h, r_l, 0x20); \
temp1 = __lasx_xvpermi_q(g_h, g_l, 0x20); \
temp2 = __lasx_xvpermi_q(r_h, r_l, 0x31); \
temp3 = __lasx_xvpermi_q(g_h, g_l, 0x31); \
__lasx_xvst(temp0, pdst_argb, 0); \
__lasx_xvst(temp1, pdst_argb, 32); \
__lasx_xvst(temp2, pdst_argb, 64); \
__lasx_xvst(temp3, pdst_argb, 96); \
pdst_argb += 128; \
}
// Pack and Store 8 ARGB values.
#define STOREARGB(in_a, in_r, in_g, in_b, pdst_argb) \
{ \
__m256i temp0, temp1; \
\
temp0 = __lasx_xvpackev_b(in_g, in_b); \
temp1 = __lasx_xvpackev_b(in_a, in_r); \
in_a = __lasx_xvilvl_h(temp1, temp0); \
in_r = __lasx_xvilvh_h(temp1, temp0); \
temp0 = __lasx_xvpermi_q(in_r, in_a, 0x20); \
temp1 = __lasx_xvpermi_q(in_r, in_a, 0x31); \
__lasx_xvst(temp0, pdst_argb, 0); \
__lasx_xvst(temp1, pdst_argb, 32); \
pdst_argb += 64; \
#define STOREARGB(in_a, in_r, in_g, in_b, pdst_argb) \
{ \
__m256i temp0, temp1; \
\
temp0 = __lasx_xvpackev_b(in_g, in_b); \
temp1 = __lasx_xvpackev_b(in_a, in_r); \
in_a = __lasx_xvilvl_h(temp1, temp0); \
in_r = __lasx_xvilvh_h(temp1, temp0); \
temp0 = __lasx_xvpermi_q(in_r, in_a, 0x20); \
temp1 = __lasx_xvpermi_q(in_r, in_a, 0x31); \
__lasx_xvst(temp0, pdst_argb, 0); \
__lasx_xvst(temp1, pdst_argb, 32); \
pdst_argb += 64; \
}
void MirrorRow_LASX(const uint8_t* src, uint8_t* dst, int width) {
@ -205,15 +203,15 @@ void MirrorRow_LASX(const uint8_t* src, uint8_t* dst, int width) {
0x08090A0B0C0D0E0F, 0x0001020304050607};
src += width - 64;
for (x = 0; x < len; x++) {
DUP2_ARG2(__lasx_xvld, src, 0, src, 32, src0, src1);
DUP2_ARG3(__lasx_xvshuf_b, src0, src0, shuffler,
src1, src1, shuffler, src0, src1);
src0 = __lasx_xvpermi_q(src0, src0, 0x01);
src1 = __lasx_xvpermi_q(src1, src1, 0x01);
__lasx_xvst(src1, dst, 0);
__lasx_xvst(src0, dst, 32);
dst += 64;
src -= 64;
DUP2_ARG2(__lasx_xvld, src, 0, src, 32, src0, src1);
DUP2_ARG3(__lasx_xvshuf_b, src0, src0, shuffler, src1, src1, shuffler, src0,
src1);
src0 = __lasx_xvpermi_q(src0, src0, 0x01);
src1 = __lasx_xvpermi_q(src1, src1, 0x01);
__lasx_xvst(src1, dst, 0);
__lasx_xvst(src0, dst, 32);
dst += 64;
src -= 64;
}
}
@ -226,12 +224,12 @@ void MirrorUVRow_LASX(const uint8_t* src_uv, uint8_t* dst_uv, int width) {
src_uv += (width - 16) << 1;
for (x = 0; x < len; x++) {
src = __lasx_xvld(src_uv, 0);
dst = __lasx_xvshuf_h(shuffler, src, src);
dst = __lasx_xvpermi_q(dst, dst, 0x01);
__lasx_xvst(dst, dst_uv, 0);
src_uv -= 32;
dst_uv += 32;
src = __lasx_xvld(src_uv, 0);
dst = __lasx_xvshuf_h(shuffler, src, src);
dst = __lasx_xvpermi_q(dst, dst, 0x01);
__lasx_xvst(dst, dst_uv, 0);
src_uv -= 32;
dst_uv += 32;
}
}
@ -244,15 +242,15 @@ void ARGBMirrorRow_LASX(const uint8_t* src, uint8_t* dst, int width) {
0x0B0A09080F0E0D0C, 0x0302010007060504};
src += (width * 4) - 64;
for (x = 0; x < len; x++) {
DUP2_ARG2(__lasx_xvld, src, 0, src, 32, src0, src1);
DUP2_ARG3(__lasx_xvshuf_b, src0, src0, shuffler,
src1, src1, shuffler, src0, src1);
dst1 = __lasx_xvpermi_q(src0, src0, 0x01);
dst0 = __lasx_xvpermi_q(src1, src1, 0x01);
__lasx_xvst(dst0, dst, 0);
__lasx_xvst(dst1, dst, 32);
dst += 64;
src -= 64;
DUP2_ARG2(__lasx_xvld, src, 0, src, 32, src0, src1);
DUP2_ARG3(__lasx_xvshuf_b, src0, src0, shuffler, src1, src1, shuffler, src0,
src1);
dst1 = __lasx_xvpermi_q(src0, src0, 0x01);
dst0 = __lasx_xvpermi_q(src1, src1, 0x01);
__lasx_xvst(dst0, dst, 0);
__lasx_xvst(dst1, dst, 32);
dst += 64;
src -= 64;
}
}
@ -268,21 +266,21 @@ void I422ToYUY2Row_LASX(const uint8_t* src_y,
__m256i dst_yuy2_0, dst_yuy2_1;
for (x = 0; x < len; x++) {
DUP2_ARG2(__lasx_xvld, src_u, 0, src_v, 0, src_u0, src_v0);
src_y0 = __lasx_xvld(src_y, 0);
src_u0 = __lasx_xvpermi_d(src_u0, 0xD8);
src_v0 = __lasx_xvpermi_d(src_v0, 0xD8);
vec_uv0 = __lasx_xvilvl_b(src_v0, src_u0);
vec_yuy2_0 = __lasx_xvilvl_b(vec_uv0, src_y0);
vec_yuy2_1 = __lasx_xvilvh_b(vec_uv0, src_y0);
dst_yuy2_0 = __lasx_xvpermi_q(vec_yuy2_1, vec_yuy2_0, 0x20);
dst_yuy2_1 = __lasx_xvpermi_q(vec_yuy2_1, vec_yuy2_0, 0x31);
__lasx_xvst(dst_yuy2_0, dst_yuy2, 0);
__lasx_xvst(dst_yuy2_1, dst_yuy2, 32);
src_u += 16;
src_v += 16;
src_y += 32;
dst_yuy2 += 64;
DUP2_ARG2(__lasx_xvld, src_u, 0, src_v, 0, src_u0, src_v0);
src_y0 = __lasx_xvld(src_y, 0);
src_u0 = __lasx_xvpermi_d(src_u0, 0xD8);
src_v0 = __lasx_xvpermi_d(src_v0, 0xD8);
vec_uv0 = __lasx_xvilvl_b(src_v0, src_u0);
vec_yuy2_0 = __lasx_xvilvl_b(vec_uv0, src_y0);
vec_yuy2_1 = __lasx_xvilvh_b(vec_uv0, src_y0);
dst_yuy2_0 = __lasx_xvpermi_q(vec_yuy2_1, vec_yuy2_0, 0x20);
dst_yuy2_1 = __lasx_xvpermi_q(vec_yuy2_1, vec_yuy2_0, 0x31);
__lasx_xvst(dst_yuy2_0, dst_yuy2, 0);
__lasx_xvst(dst_yuy2_1, dst_yuy2, 32);
src_u += 16;
src_v += 16;
src_y += 32;
dst_yuy2 += 64;
}
}
@ -298,21 +296,21 @@ void I422ToUYVYRow_LASX(const uint8_t* src_y,
__m256i dst_uyvy0, dst_uyvy1;
for (x = 0; x < len; x++) {
DUP2_ARG2(__lasx_xvld, src_u, 0, src_v, 0, src_u0, src_v0);
src_y0 = __lasx_xvld(src_y, 0);
src_u0 = __lasx_xvpermi_d(src_u0, 0xD8);
src_v0 = __lasx_xvpermi_d(src_v0, 0xD8);
vec_uv0 = __lasx_xvilvl_b(src_v0, src_u0);
vec_uyvy0 = __lasx_xvilvl_b(src_y0, vec_uv0);
vec_uyvy1 = __lasx_xvilvh_b(src_y0, vec_uv0);
dst_uyvy0 = __lasx_xvpermi_q(vec_uyvy1, vec_uyvy0, 0x20);
dst_uyvy1 = __lasx_xvpermi_q(vec_uyvy1, vec_uyvy0, 0x31);
__lasx_xvst(dst_uyvy0, dst_uyvy, 0);
__lasx_xvst(dst_uyvy1, dst_uyvy, 32);
src_u += 16;
src_v += 16;
src_y += 32;
dst_uyvy +=64;
DUP2_ARG2(__lasx_xvld, src_u, 0, src_v, 0, src_u0, src_v0);
src_y0 = __lasx_xvld(src_y, 0);
src_u0 = __lasx_xvpermi_d(src_u0, 0xD8);
src_v0 = __lasx_xvpermi_d(src_v0, 0xD8);
vec_uv0 = __lasx_xvilvl_b(src_v0, src_u0);
vec_uyvy0 = __lasx_xvilvl_b(src_y0, vec_uv0);
vec_uyvy1 = __lasx_xvilvh_b(src_y0, vec_uv0);
dst_uyvy0 = __lasx_xvpermi_q(vec_uyvy1, vec_uyvy0, 0x20);
dst_uyvy1 = __lasx_xvpermi_q(vec_uyvy1, vec_uyvy0, 0x31);
__lasx_xvst(dst_uyvy0, dst_uyvy, 0);
__lasx_xvst(dst_uyvy1, dst_uyvy, 32);
src_u += 16;
src_v += 16;
src_y += 32;
dst_uyvy += 64;
}
}
@ -335,8 +333,8 @@ void I422ToARGBRow_LASX(const uint8_t* src_y,
__m256i y, uv_l, uv_h, b_l, b_h, g_l, g_h, r_l, r_h;
READYUV422_D(src_y, src_u, src_v, y, uv_l, uv_h);
YUVTORGB_D(y, uv_l, uv_h, vec_ubvr, vec_ugvg, vec_yg,
vec_yb, b_l, b_h, g_l, g_h, r_l, r_h);
YUVTORGB_D(y, uv_l, uv_h, vec_ubvr, vec_ugvg, vec_yg, vec_yb, b_l, b_h, g_l,
g_h, r_l, r_h);
STOREARGB_D(alpha, alpha, r_l, r_h, g_l, g_h, b_l, b_h, dst_argb);
src_y += 32;
src_u += 16;
@ -363,8 +361,8 @@ void I422ToRGBARow_LASX(const uint8_t* src_y,
__m256i y, uv_l, uv_h, b_l, b_h, g_l, g_h, r_l, r_h;
READYUV422_D(src_y, src_u, src_v, y, uv_l, uv_h);
YUVTORGB_D(y, uv_l, uv_h, vec_ubvr, vec_ugvg, vec_yg,
vec_yb, b_l, b_h, g_l, g_h, r_l, r_h);
YUVTORGB_D(y, uv_l, uv_h, vec_ubvr, vec_ugvg, vec_yg, vec_yb, b_l, b_h, g_l,
g_h, r_l, r_h);
STOREARGB_D(r_l, r_h, g_l, g_h, b_l, b_h, alpha, alpha, dst_argb);
src_y += 32;
src_u += 16;
@ -392,12 +390,12 @@ void I422AlphaToARGBRow_LASX(const uint8_t* src_y,
for (x = 0; x < len; x++) {
__m256i y, uv_l, uv_h, b_l, b_h, g_l, g_h, r_l, r_h, a_l, a_h;
y = __lasx_xvld(src_a, 0);
y = __lasx_xvld(src_a, 0);
a_l = __lasx_xvilvl_b(zero, y);
a_h = __lasx_xvilvh_b(zero, y);
READYUV422_D(src_y, src_u, src_v, y, uv_l, uv_h);
YUVTORGB_D(y, uv_l, uv_h, vec_ubvr, vec_ugvg, vec_yg,
vec_yb, b_l, b_h, g_l, g_h, r_l, r_h);
YUVTORGB_D(y, uv_l, uv_h, vec_ubvr, vec_ugvg, vec_yg, vec_yb, b_l, b_h, g_l,
g_h, r_l, r_h);
STOREARGB_D(a_l, a_h, r_l, r_h, g_l, g_h, b_l, b_h, dst_argb);
src_y += 32;
src_u += 16;
@ -437,12 +435,13 @@ void I422ToRGB24Row_LASX(const uint8_t* src_y,
__m256i temp0, temp1, temp2, temp3;
READYUV422_D(src_y, src_u, src_v, y, uv_l, uv_h);
YUVTORGB_D(y, uv_l, uv_h, vec_ubvr, vec_ugvg, vec_yg,
vec_yb, b_l, b_h, g_l, g_h, r_l, r_h);
YUVTORGB_D(y, uv_l, uv_h, vec_ubvr, vec_ugvg, vec_yg, vec_yb, b_l, b_h, g_l,
g_h, r_l, r_h);
temp0 = __lasx_xvpackev_b(g_l, b_l);
temp1 = __lasx_xvpackev_b(g_h, b_h);
DUP4_ARG3(__lasx_xvshuf_b, r_l, temp0, shuffler1, r_h, temp1, shuffler1,
r_l, temp0, shuffler0, r_h, temp1, shuffler0, temp2, temp3, temp0, temp1);
r_l, temp0, shuffler0, r_h, temp1, shuffler0, temp2, temp3, temp0,
temp1);
b_l = __lasx_xvilvl_d(temp1, temp2);
b_h = __lasx_xvilvh_d(temp3, temp1);
@ -479,22 +478,22 @@ void I422ToRGB565Row_LASX(const uint8_t* src_y,
__m256i dst_l, dst_h;
READYUV422_D(src_y, src_u, src_v, y, uv_l, uv_h);
YUVTORGB_D(y, uv_l, uv_h, vec_ubvr, vec_ugvg, vec_yg,
vec_yb, b_l, b_h, g_l, g_h, r_l, r_h);
b_l = __lasx_xvsrli_h(b_l, 3);
b_h = __lasx_xvsrli_h(b_h, 3);
g_l = __lasx_xvsrli_h(g_l, 2);
g_h = __lasx_xvsrli_h(g_h, 2);
r_l = __lasx_xvsrli_h(r_l, 3);
r_h = __lasx_xvsrli_h(r_h, 3);
r_l = __lasx_xvslli_h(r_l, 11);
r_h = __lasx_xvslli_h(r_h, 11);
g_l = __lasx_xvslli_h(g_l, 5);
g_h = __lasx_xvslli_h(g_h, 5);
r_l = __lasx_xvor_v(r_l, g_l);
r_l = __lasx_xvor_v(r_l, b_l);
r_h = __lasx_xvor_v(r_h, g_h);
r_h = __lasx_xvor_v(r_h, b_h);
YUVTORGB_D(y, uv_l, uv_h, vec_ubvr, vec_ugvg, vec_yg, vec_yb, b_l, b_h, g_l,
g_h, r_l, r_h);
b_l = __lasx_xvsrli_h(b_l, 3);
b_h = __lasx_xvsrli_h(b_h, 3);
g_l = __lasx_xvsrli_h(g_l, 2);
g_h = __lasx_xvsrli_h(g_h, 2);
r_l = __lasx_xvsrli_h(r_l, 3);
r_h = __lasx_xvsrli_h(r_h, 3);
r_l = __lasx_xvslli_h(r_l, 11);
r_h = __lasx_xvslli_h(r_h, 11);
g_l = __lasx_xvslli_h(g_l, 5);
g_h = __lasx_xvslli_h(g_h, 5);
r_l = __lasx_xvor_v(r_l, g_l);
r_l = __lasx_xvor_v(r_l, b_l);
r_h = __lasx_xvor_v(r_h, g_h);
r_h = __lasx_xvor_v(r_h, b_h);
dst_l = __lasx_xvpermi_q(r_h, r_l, 0x20);
dst_h = __lasx_xvpermi_q(r_h, r_l, 0x31);
__lasx_xvst(dst_l, dst_rgb565, 0);
@ -518,10 +517,10 @@ void I422ToARGB4444Row_LASX(const uint8_t* src_y,
__m256i vec_yb, vec_yg;
__m256i vec_ubvr, vec_ugvg;
__m256i const_0x80 = __lasx_xvldi(0x80);
__m256i alpha = {0xF000F000F000F000, 0xF000F000F000F000,
0xF000F000F000F000, 0xF000F000F000F000};
__m256i mask = {0x00F000F000F000F0, 0x00F000F000F000F0,
0x00F000F000F000F0, 0x00F000F000F000F0};
__m256i alpha = {0xF000F000F000F000, 0xF000F000F000F000, 0xF000F000F000F000,
0xF000F000F000F000};
__m256i mask = {0x00F000F000F000F0, 0x00F000F000F000F0, 0x00F000F000F000F0,
0x00F000F000F000F0};
YUVTORGB_SETUP(yuvconstants, vec_ubvr, vec_ugvg, vec_yg, vec_yb);
@ -530,8 +529,8 @@ void I422ToARGB4444Row_LASX(const uint8_t* src_y,
__m256i dst_l, dst_h;
READYUV422_D(src_y, src_u, src_v, y, uv_l, uv_h);
YUVTORGB_D(y, uv_l, uv_h, vec_ubvr, vec_ugvg, vec_yg,
vec_yb, b_l, b_h, g_l, g_h, r_l, r_h);
YUVTORGB_D(y, uv_l, uv_h, vec_ubvr, vec_ugvg, vec_yg, vec_yb, b_l, b_h, g_l,
g_h, r_l, r_h);
b_l = __lasx_xvsrli_h(b_l, 4);
b_h = __lasx_xvsrli_h(b_h, 4);
r_l = __lasx_xvsrli_h(r_l, 4);
@ -568,8 +567,8 @@ void I422ToARGB1555Row_LASX(const uint8_t* src_y,
__m256i vec_yb, vec_yg;
__m256i vec_ubvr, vec_ugvg;
__m256i const_0x80 = __lasx_xvldi(0x80);
__m256i alpha = {0x8000800080008000, 0x8000800080008000,
0x8000800080008000, 0x8000800080008000};
__m256i alpha = {0x8000800080008000, 0x8000800080008000, 0x8000800080008000,
0x8000800080008000};
YUVTORGB_SETUP(yuvconstants, vec_ubvr, vec_ugvg, vec_yg, vec_yb);
@ -578,8 +577,8 @@ void I422ToARGB1555Row_LASX(const uint8_t* src_y,
__m256i dst_l, dst_h;
READYUV422_D(src_y, src_u, src_v, y, uv_l, uv_h);
YUVTORGB_D(y, uv_l, uv_h, vec_ubvr, vec_ugvg, vec_yg,
vec_yb, b_l, b_h, g_l, g_h, r_l, r_h);
YUVTORGB_D(y, uv_l, uv_h, vec_ubvr, vec_ugvg, vec_yg, vec_yb, b_l, b_h, g_l,
g_h, r_l, r_h);
b_l = __lasx_xvsrli_h(b_l, 3);
b_h = __lasx_xvsrli_h(b_h, 3);
g_l = __lasx_xvsrli_h(g_l, 3);
@ -751,13 +750,13 @@ void ARGBToYRow_LASX(const uint8_t* src_argb0, uint8_t* dst_y, int width) {
int len = width / 32;
__m256i src0, src1, src2, src3, vec0, vec1, vec2, vec3;
__m256i tmp0, tmp1, dst0;
__m256i const_19 = __lasx_xvldi(0x19);
__m256i const_42 = __lasx_xvldi(0x42);
__m256i const_81 = __lasx_xvldi(0x81);
__m256i const_19 = __lasx_xvldi(0x19);
__m256i const_42 = __lasx_xvldi(0x42);
__m256i const_81 = __lasx_xvldi(0x81);
__m256i const_1080 = {0x1080108010801080, 0x1080108010801080,
0x1080108010801080, 0x1080108010801080};
__m256i control = {0x0000000400000000, 0x0000000500000001,
0x0000000600000002, 0x0000000700000003};
__m256i control = {0x0000000400000000, 0x0000000500000001, 0x0000000600000002,
0x0000000700000003};
for (x = 0; x < len; x++) {
DUP4_ARG2(__lasx_xvld, src_argb0, 0, src_argb0, 32, src_argb0, 64,
@ -802,8 +801,8 @@ void ARGBToUVRow_LASX(const uint8_t* src_argb0,
0x002f002f002f002f, 0x002f002f002f002f};
__m256i const_0x12 = {0x0009000900090009, 0x0009000900090009,
0x0009000900090009, 0x0009000900090009};
__m256i control = {0x0000000400000000, 0x0000000500000001,
0x0000000600000002, 0x0000000700000003};
__m256i control = {0x0000000400000000, 0x0000000500000001, 0x0000000600000002,
0x0000000700000003};
__m256i const_0x8080 = {0x8080808080808080, 0x8080808080808080,
0x8080808080808080, 0x8080808080808080};
@ -861,13 +860,13 @@ void ARGBToRGB24Row_LASX(const uint8_t* src_argb, uint8_t* dst_rgb, int width) {
int len = (width / 32) - 1;
__m256i src0, src1, src2, src3;
__m256i tmp0, tmp1, tmp2, tmp3;
__m256i shuf = {0x0908060504020100, 0x000000000E0D0C0A,
0x0908060504020100, 0x000000000E0D0C0A};
__m256i control = {0x0000000100000000, 0x0000000400000002,
0x0000000600000005, 0x0000000700000003};
__m256i shuf = {0x0908060504020100, 0x000000000E0D0C0A, 0x0908060504020100,
0x000000000E0D0C0A};
__m256i control = {0x0000000100000000, 0x0000000400000002, 0x0000000600000005,
0x0000000700000003};
for (x = 0; x < len; x++) {
DUP4_ARG2(__lasx_xvld, src_argb, 0, src_argb, 32, src_argb, 64,
src_argb, 96, src0, src1, src2, src3);
DUP4_ARG2(__lasx_xvld, src_argb, 0, src_argb, 32, src_argb, 64, src_argb,
96, src0, src1, src2, src3);
tmp0 = __lasx_xvshuf_b(src0, src0, shuf);
tmp1 = __lasx_xvshuf_b(src1, src1, shuf);
tmp2 = __lasx_xvshuf_b(src2, src2, shuf);
@ -883,8 +882,8 @@ void ARGBToRGB24Row_LASX(const uint8_t* src_argb, uint8_t* dst_rgb, int width) {
dst_rgb += 96;
src_argb += 128;
}
DUP4_ARG2(__lasx_xvld, src_argb, 0, src_argb, 32, src_argb, 64,
src_argb, 96, src0, src1, src2, src3);
DUP4_ARG2(__lasx_xvld, src_argb, 0, src_argb, 32, src_argb, 64, src_argb, 96,
src0, src1, src2, src3);
tmp0 = __lasx_xvshuf_b(src0, src0, shuf);
tmp1 = __lasx_xvshuf_b(src1, src1, shuf);
tmp2 = __lasx_xvshuf_b(src2, src2, shuf);
@ -907,13 +906,13 @@ void ARGBToRAWRow_LASX(const uint8_t* src_argb, uint8_t* dst_rgb, int width) {
int len = (width / 32) - 1;
__m256i src0, src1, src2, src3;
__m256i tmp0, tmp1, tmp2, tmp3;
__m256i shuf = {0x090A040506000102, 0x000000000C0D0E08,
0x090A040506000102, 0x000000000C0D0E08};
__m256i control = {0x0000000100000000, 0x0000000400000002,
0x0000000600000005, 0x0000000700000003};
__m256i shuf = {0x090A040506000102, 0x000000000C0D0E08, 0x090A040506000102,
0x000000000C0D0E08};
__m256i control = {0x0000000100000000, 0x0000000400000002, 0x0000000600000005,
0x0000000700000003};
for (x = 0; x < len; x++) {
DUP4_ARG2(__lasx_xvld, src_argb, 0, src_argb, 32, src_argb, 64,
src_argb, 96, src0, src1, src2, src3);
DUP4_ARG2(__lasx_xvld, src_argb, 0, src_argb, 32, src_argb, 64, src_argb,
96, src0, src1, src2, src3);
tmp0 = __lasx_xvshuf_b(src0, src0, shuf);
tmp1 = __lasx_xvshuf_b(src1, src1, shuf);
tmp2 = __lasx_xvshuf_b(src2, src2, shuf);
@ -929,8 +928,8 @@ void ARGBToRAWRow_LASX(const uint8_t* src_argb, uint8_t* dst_rgb, int width) {
dst_rgb += 96;
src_argb += 128;
}
DUP4_ARG2(__lasx_xvld, src_argb, 0, src_argb, 32, src_argb, 64,
src_argb, 96, src0, src1, src2, src3);
DUP4_ARG2(__lasx_xvld, src_argb, 0, src_argb, 32, src_argb, 64, src_argb, 96,
src0, src1, src2, src3);
tmp0 = __lasx_xvshuf_b(src0, src0, shuf);
tmp1 = __lasx_xvshuf_b(src1, src1, shuf);
tmp2 = __lasx_xvshuf_b(src2, src2, shuf);
@ -948,13 +947,15 @@ void ARGBToRAWRow_LASX(const uint8_t* src_argb, uint8_t* dst_rgb, int width) {
__lasx_xvstelm_d(tmp3, dst_rgb, 16, 2);
}
void ARGBToRGB565Row_LASX(const uint8_t* src_argb, uint8_t* dst_rgb, int width) {
void ARGBToRGB565Row_LASX(const uint8_t* src_argb,
uint8_t* dst_rgb,
int width) {
int x;
int len = width / 16;
__m256i zero = __lasx_xvldi(0);
__m256i src0, src1, tmp0, tmp1, dst0;
__m256i shift = {0x0300030003000300, 0x0300030003000300,
0x0300030003000300, 0x0300030003000300};
__m256i shift = {0x0300030003000300, 0x0300030003000300, 0x0300030003000300,
0x0300030003000300};
for (x = 0; x < len; x++) {
DUP2_ARG2(__lasx_xvld, src_argb, 0, src_argb, 32, src0, src1);
@ -980,10 +981,10 @@ void ARGBToARGB1555Row_LASX(const uint8_t* src_argb,
int len = width / 16;
__m256i zero = __lasx_xvldi(0);
__m256i src0, src1, tmp0, tmp1, tmp2, tmp3, dst0;
__m256i shift1 = {0x0703070307030703, 0x0703070307030703,
0x0703070307030703, 0x0703070307030703};
__m256i shift2 = {0x0200020002000200, 0x0200020002000200,
0x0200020002000200, 0x0200020002000200};
__m256i shift1 = {0x0703070307030703, 0x0703070307030703, 0x0703070307030703,
0x0703070307030703};
__m256i shift2 = {0x0200020002000200, 0x0200020002000200, 0x0200020002000200,
0x0200020002000200};
for (x = 0; x < len; x++) {
DUP2_ARG2(__lasx_xvld, src_argb, 0, src_argb, 32, src0, src1);
@ -1036,17 +1037,17 @@ void ARGBToUV444Row_LASX(const uint8_t* src_argb,
__m256i tmp0, tmp1, tmp2, tmp3;
__m256i reg0, reg1, reg2, reg3, dst0, dst1;
__m256i const_112 = __lasx_xvldi(112);
__m256i const_74 = __lasx_xvldi(74);
__m256i const_38 = __lasx_xvldi(38);
__m256i const_94 = __lasx_xvldi(94);
__m256i const_18 = __lasx_xvldi(18);
__m256i const_74 = __lasx_xvldi(74);
__m256i const_38 = __lasx_xvldi(38);
__m256i const_94 = __lasx_xvldi(94);
__m256i const_18 = __lasx_xvldi(18);
__m256i const_0x8080 = {0x8080808080808080, 0x8080808080808080,
0x8080808080808080, 0x8080808080808080};
__m256i control = {0x0000000400000000, 0x0000000500000001,
0x0000000600000002, 0x0000000700000003};
__m256i control = {0x0000000400000000, 0x0000000500000001, 0x0000000600000002,
0x0000000700000003};
for (x = 0; x < len; x++) {
DUP4_ARG2(__lasx_xvld, src_argb, 0, src_argb, 32, src_argb, 64,
src_argb, 96, src0, src1, src2, src3);
DUP4_ARG2(__lasx_xvld, src_argb, 0, src_argb, 32, src_argb, 64, src_argb,
96, src0, src1, src2, src3);
tmp0 = __lasx_xvpickev_h(src1, src0);
tmp1 = __lasx_xvpickod_h(src1, src0);
tmp2 = __lasx_xvpickev_h(src3, src2);
@ -1101,7 +1102,7 @@ void ARGBMultiplyRow_LASX(const uint8_t* src_argb0,
__lasx_xvst(dst0, dst_argb, 0);
src_argb0 += 32;
src_argb1 += 32;
dst_argb += 32;
dst_argb += 32;
}
}
@ -1119,7 +1120,7 @@ void ARGBAddRow_LASX(const uint8_t* src_argb0,
__lasx_xvst(dst0, dst_argb, 0);
src_argb0 += 32;
src_argb1 += 32;
dst_argb += 32;
dst_argb += 32;
}
}
@ -1137,7 +1138,7 @@ void ARGBSubtractRow_LASX(const uint8_t* src_argb0,
__lasx_xvst(dst0, dst_argb, 0);
src_argb0 += 32;
src_argb1 += 32;
dst_argb += 32;
dst_argb += 32;
}
}
@ -1149,8 +1150,8 @@ void ARGBAttenuateRow_LASX(const uint8_t* src_argb,
__m256i src0, src1, tmp0, tmp1;
__m256i reg0, reg1, reg2, reg3, reg4, reg5;
__m256i b, g, r, a, dst0, dst1;
__m256i control = {0x0005000100040000, 0x0007000300060002,
0x0005000100040000, 0x0007000300060002};
__m256i control = {0x0005000100040000, 0x0007000300060002, 0x0005000100040000,
0x0007000300060002};
for (x = 0; x < len; x++) {
DUP2_ARG2(__lasx_xvld, src_argb, 0, src_argb, 32, src0, src1);
@ -1199,12 +1200,12 @@ void ARGBToRGB565DitherRow_LASX(const uint8_t* src_argb,
DUP2_ARG2(__lasx_xvld, src_argb, 0, src_argb, 32, src0, src1);
tmp0 = __lasx_xvpickev_b(src1, src0);
tmp1 = __lasx_xvpickod_b(src1, src0);
b = __lasx_xvpackev_b(zero, tmp0);
r = __lasx_xvpackod_b(zero, tmp0);
g = __lasx_xvpackev_b(zero, tmp1);
b = __lasx_xvadd_h(b, vec_dither);
g = __lasx_xvadd_h(g, vec_dither);
r = __lasx_xvadd_h(r, vec_dither);
b = __lasx_xvpackev_b(zero, tmp0);
r = __lasx_xvpackod_b(zero, tmp0);
g = __lasx_xvpackev_b(zero, tmp1);
b = __lasx_xvadd_h(b, vec_dither);
g = __lasx_xvadd_h(g, vec_dither);
r = __lasx_xvadd_h(r, vec_dither);
DUP2_ARG1(__lasx_xvclip255_h, b, g, b, g);
r = __lasx_xvclip255_h(r);
b = __lasx_xvsrai_h(b, 3);
@ -1228,8 +1229,8 @@ void ARGBShuffleRow_LASX(const uint8_t* src_argb,
int x;
int len = width / 16;
__m256i src0, src1, dst0, dst1;
__m256i shuf = {0x0404040400000000, 0x0C0C0C0C08080808,
0x0404040400000000, 0x0C0C0C0C08080808};
__m256i shuf = {0x0404040400000000, 0x0C0C0C0C08080808, 0x0404040400000000,
0x0C0C0C0C08080808};
__m256i temp = __lasx_xvldrepl_w(shuffler, 0);
shuf = __lasx_xvadd_b(shuf, temp);
@ -1274,8 +1275,8 @@ void ARGBGrayRow_LASX(const uint8_t* src_argb, uint8_t* dst_argb, int width) {
__m256i reg0, reg1, reg2, dst0, dst1;
__m256i const_128 = __lasx_xvldi(0x480);
__m256i const_150 = __lasx_xvldi(0x96);
__m256i const_br = {0x4D1D4D1D4D1D4D1D, 0x4D1D4D1D4D1D4D1D,
0x4D1D4D1D4D1D4D1D, 0x4D1D4D1D4D1D4D1D};
__m256i const_br = {0x4D1D4D1D4D1D4D1D, 0x4D1D4D1D4D1D4D1D,
0x4D1D4D1D4D1D4D1D, 0x4D1D4D1D4D1D4D1D};
for (x = 0; x < len; x++) {
DUP2_ARG2(__lasx_xvld, src_argb, 0, src_argb, 32, src0, src1);
@ -1301,17 +1302,17 @@ void ARGBSepiaRow_LASX(uint8_t* dst_argb, int width) {
__m256i src0, src1, tmp0, tmp1;
__m256i reg0, reg1, spb, spg, spr;
__m256i dst0, dst1;
__m256i spb_g = __lasx_xvldi(68);
__m256i spg_g = __lasx_xvldi(88);
__m256i spr_g = __lasx_xvldi(98);
__m256i spb_br = {0x2311231123112311, 0x2311231123112311,
0x2311231123112311, 0x2311231123112311};
__m256i spg_br = {0x2D162D162D162D16, 0x2D162D162D162D16,
0x2D162D162D162D16, 0x2D162D162D162D16};
__m256i spr_br = {0x3218321832183218, 0x3218321832183218,
0x3218321832183218, 0x3218321832183218};
__m256i shuff = {0x1706150413021100, 0x1F0E1D0C1B0A1908,
0x1706150413021100, 0x1F0E1D0C1B0A1908};
__m256i spb_g = __lasx_xvldi(68);
__m256i spg_g = __lasx_xvldi(88);
__m256i spr_g = __lasx_xvldi(98);
__m256i spb_br = {0x2311231123112311, 0x2311231123112311, 0x2311231123112311,
0x2311231123112311};
__m256i spg_br = {0x2D162D162D162D16, 0x2D162D162D162D16, 0x2D162D162D162D16,
0x2D162D162D162D16};
__m256i spr_br = {0x3218321832183218, 0x3218321832183218, 0x3218321832183218,
0x3218321832183218};
__m256i shuff = {0x1706150413021100, 0x1F0E1D0C1B0A1908, 0x1706150413021100,
0x1F0E1D0C1B0A1908};
for (x = 0; x < len; x++) {
DUP2_ARG2(__lasx_xvld, dst_argb, 0, dst_argb, 32, src0, src1);
@ -1319,14 +1320,14 @@ void ARGBSepiaRow_LASX(uint8_t* dst_argb, int width) {
tmp1 = __lasx_xvpickod_b(src1, src0);
DUP2_ARG2(__lasx_xvdp2_h_bu, tmp0, spb_br, tmp0, spg_br, spb, spg);
spr = __lasx_xvdp2_h_bu(tmp0, spr_br);
spb = __lasx_xvmaddwev_h_bu(spb, tmp1, spb_g);
spg = __lasx_xvmaddwev_h_bu(spg, tmp1, spg_g);
spr = __lasx_xvmaddwev_h_bu(spr, tmp1, spr_g);
spb = __lasx_xvsrli_h(spb, 7);
spg = __lasx_xvsrli_h(spg, 7);
spr = __lasx_xvsrli_h(spr, 7);
spg = __lasx_xvsat_hu(spg, 7);
spr = __lasx_xvsat_hu(spr, 7);
spb = __lasx_xvmaddwev_h_bu(spb, tmp1, spb_g);
spg = __lasx_xvmaddwev_h_bu(spg, tmp1, spg_g);
spr = __lasx_xvmaddwev_h_bu(spr, tmp1, spr_g);
spb = __lasx_xvsrli_h(spb, 7);
spg = __lasx_xvsrli_h(spg, 7);
spr = __lasx_xvsrli_h(spr, 7);
spg = __lasx_xvsat_hu(spg, 7);
spr = __lasx_xvsat_hu(spr, 7);
reg0 = __lasx_xvpackev_b(spg, spb);
reg1 = __lasx_xvshuf_b(tmp1, spr, shuff);
dst0 = __lasx_xvilvl_h(reg1, reg0);

View File

@ -21,139 +21,138 @@ extern "C" {
#endif
// Fill YUV -> RGB conversion constants into vectors
#define YUVTORGB_SETUP(yuvconst, vr, ub, vg, ug, yg, yb) \
{ \
ub = __lsx_vreplgr2vr_h(yuvconst->kUVToB[0]); \
vr = __lsx_vreplgr2vr_h(yuvconst->kUVToR[1]); \
ug = __lsx_vreplgr2vr_h(yuvconst->kUVToG[0]); \
vg = __lsx_vreplgr2vr_h(yuvconst->kUVToG[1]); \
yg = __lsx_vreplgr2vr_h(yuvconst->kYToRgb[0]); \
yb = __lsx_vreplgr2vr_w(yuvconst->kYBiasToRgb[0]); \
#define YUVTORGB_SETUP(yuvconst, vr, ub, vg, ug, yg, yb) \
{ \
ub = __lsx_vreplgr2vr_h(yuvconst->kUVToB[0]); \
vr = __lsx_vreplgr2vr_h(yuvconst->kUVToR[1]); \
ug = __lsx_vreplgr2vr_h(yuvconst->kUVToG[0]); \
vg = __lsx_vreplgr2vr_h(yuvconst->kUVToG[1]); \
yg = __lsx_vreplgr2vr_h(yuvconst->kYToRgb[0]); \
yb = __lsx_vreplgr2vr_w(yuvconst->kYBiasToRgb[0]); \
}
// Convert 8 pixels of YUV420 to RGB.
#define YUVTORGB(in_y, in_vu, vrub, vgug, \
yg, yb, out_b, out_g, out_r) \
{ \
__m128i y_ev, y_od, u_l, v_l; \
__m128i tmp0, tmp1, tmp2, tmp3; \
\
tmp0 = __lsx_vilvl_b(in_y, in_y); \
y_ev = __lsx_vmulwev_w_hu_h(tmp0, yg); \
y_od = __lsx_vmulwod_w_hu_h(tmp0, yg); \
y_ev = __lsx_vsrai_w(y_ev, 16); \
y_od = __lsx_vsrai_w(y_od, 16); \
y_ev = __lsx_vadd_w(y_ev, yb); \
y_od = __lsx_vadd_w(y_od, yb); \
in_vu = __lsx_vilvl_b(zero, in_vu); \
in_vu = __lsx_vsub_h(in_vu, const_80); \
u_l = __lsx_vmulwev_w_h(in_vu, vrub); \
v_l = __lsx_vmulwod_w_h(in_vu, vrub); \
tmp0 = __lsx_vadd_w(y_ev, u_l); \
tmp1 = __lsx_vadd_w(y_od, u_l); \
tmp2 = __lsx_vadd_w(y_ev, v_l); \
tmp3 = __lsx_vadd_w(y_od, v_l); \
tmp0 = __lsx_vsrai_w(tmp0, 6); \
tmp1 = __lsx_vsrai_w(tmp1, 6); \
tmp2 = __lsx_vsrai_w(tmp2, 6); \
tmp3 = __lsx_vsrai_w(tmp3, 6); \
tmp0 = __lsx_vclip255_w(tmp0); \
tmp1 = __lsx_vclip255_w(tmp1); \
tmp2 = __lsx_vclip255_w(tmp2); \
tmp3 = __lsx_vclip255_w(tmp3); \
out_b = __lsx_vpackev_h(tmp1, tmp0); \
out_r = __lsx_vpackev_h(tmp3, tmp2); \
tmp0 = __lsx_vdp2_w_h(in_vu, vgug); \
tmp1 = __lsx_vsub_w(y_ev, tmp0); \
tmp2 = __lsx_vsub_w(y_od, tmp0); \
tmp1 = __lsx_vsrai_w(tmp1, 6); \
tmp2 = __lsx_vsrai_w(tmp2, 6); \
tmp1 = __lsx_vclip255_w(tmp1); \
tmp2 = __lsx_vclip255_w(tmp2); \
out_g = __lsx_vpackev_h(tmp2, tmp1); \
#define YUVTORGB(in_y, in_vu, vrub, vgug, yg, yb, out_b, out_g, out_r) \
{ \
__m128i y_ev, y_od, u_l, v_l; \
__m128i tmp0, tmp1, tmp2, tmp3; \
\
tmp0 = __lsx_vilvl_b(in_y, in_y); \
y_ev = __lsx_vmulwev_w_hu_h(tmp0, yg); \
y_od = __lsx_vmulwod_w_hu_h(tmp0, yg); \
y_ev = __lsx_vsrai_w(y_ev, 16); \
y_od = __lsx_vsrai_w(y_od, 16); \
y_ev = __lsx_vadd_w(y_ev, yb); \
y_od = __lsx_vadd_w(y_od, yb); \
in_vu = __lsx_vilvl_b(zero, in_vu); \
in_vu = __lsx_vsub_h(in_vu, const_80); \
u_l = __lsx_vmulwev_w_h(in_vu, vrub); \
v_l = __lsx_vmulwod_w_h(in_vu, vrub); \
tmp0 = __lsx_vadd_w(y_ev, u_l); \
tmp1 = __lsx_vadd_w(y_od, u_l); \
tmp2 = __lsx_vadd_w(y_ev, v_l); \
tmp3 = __lsx_vadd_w(y_od, v_l); \
tmp0 = __lsx_vsrai_w(tmp0, 6); \
tmp1 = __lsx_vsrai_w(tmp1, 6); \
tmp2 = __lsx_vsrai_w(tmp2, 6); \
tmp3 = __lsx_vsrai_w(tmp3, 6); \
tmp0 = __lsx_vclip255_w(tmp0); \
tmp1 = __lsx_vclip255_w(tmp1); \
tmp2 = __lsx_vclip255_w(tmp2); \
tmp3 = __lsx_vclip255_w(tmp3); \
out_b = __lsx_vpackev_h(tmp1, tmp0); \
out_r = __lsx_vpackev_h(tmp3, tmp2); \
tmp0 = __lsx_vdp2_w_h(in_vu, vgug); \
tmp1 = __lsx_vsub_w(y_ev, tmp0); \
tmp2 = __lsx_vsub_w(y_od, tmp0); \
tmp1 = __lsx_vsrai_w(tmp1, 6); \
tmp2 = __lsx_vsrai_w(tmp2, 6); \
tmp1 = __lsx_vclip255_w(tmp1); \
tmp2 = __lsx_vclip255_w(tmp2); \
out_g = __lsx_vpackev_h(tmp2, tmp1); \
}
// Convert I444 pixels of YUV420 to RGB.
#define I444TORGB(in_yy, in_u, in_v, ub, vr, ugvg, \
yg, yb, out_b, out_g, out_r) \
{ \
__m128i y_ev, y_od, u_ev, v_ev, u_od, v_od; \
__m128i tmp0, tmp1, tmp2, tmp3; \
\
y_ev = __lsx_vmulwev_w_hu_h(in_yy, yg); \
y_od = __lsx_vmulwod_w_hu_h(in_yy, yg); \
y_ev = __lsx_vsrai_w(y_ev, 16); \
y_od = __lsx_vsrai_w(y_od, 16); \
y_ev = __lsx_vadd_w(y_ev, yb); \
y_od = __lsx_vadd_w(y_od, yb); \
in_u = __lsx_vsub_h(in_u, const_80); \
in_v = __lsx_vsub_h(in_v, const_80); \
u_ev = __lsx_vmulwev_w_h(in_u, ub); \
u_od = __lsx_vmulwod_w_h(in_u, ub); \
v_ev = __lsx_vmulwev_w_h(in_v, vr); \
v_od = __lsx_vmulwod_w_h(in_v, vr); \
tmp0 = __lsx_vadd_w(y_ev, u_ev); \
tmp1 = __lsx_vadd_w(y_od, u_od); \
tmp2 = __lsx_vadd_w(y_ev, v_ev); \
tmp3 = __lsx_vadd_w(y_od, v_od); \
tmp0 = __lsx_vsrai_w(tmp0, 6); \
tmp1 = __lsx_vsrai_w(tmp1, 6); \
tmp2 = __lsx_vsrai_w(tmp2, 6); \
tmp3 = __lsx_vsrai_w(tmp3, 6); \
tmp0 = __lsx_vclip255_w(tmp0); \
tmp1 = __lsx_vclip255_w(tmp1); \
tmp2 = __lsx_vclip255_w(tmp2); \
tmp3 = __lsx_vclip255_w(tmp3); \
out_b = __lsx_vpackev_h(tmp1, tmp0); \
out_r = __lsx_vpackev_h(tmp3, tmp2); \
u_ev = __lsx_vpackev_h(in_u, in_v); \
u_od = __lsx_vpackod_h(in_u, in_v); \
v_ev = __lsx_vdp2_w_h(u_ev, ugvg); \
v_od = __lsx_vdp2_w_h(u_od, ugvg); \
tmp0 = __lsx_vsub_w(y_ev, v_ev); \
tmp1 = __lsx_vsub_w(y_od, v_od); \
tmp0 = __lsx_vsrai_w(tmp0, 6); \
tmp1 = __lsx_vsrai_w(tmp1, 6); \
tmp0 = __lsx_vclip255_w(tmp0); \
tmp1 = __lsx_vclip255_w(tmp1); \
out_g = __lsx_vpackev_h(tmp1, tmp0); \
#define I444TORGB(in_yy, in_u, in_v, ub, vr, ugvg, yg, yb, out_b, out_g, \
out_r) \
{ \
__m128i y_ev, y_od, u_ev, v_ev, u_od, v_od; \
__m128i tmp0, tmp1, tmp2, tmp3; \
\
y_ev = __lsx_vmulwev_w_hu_h(in_yy, yg); \
y_od = __lsx_vmulwod_w_hu_h(in_yy, yg); \
y_ev = __lsx_vsrai_w(y_ev, 16); \
y_od = __lsx_vsrai_w(y_od, 16); \
y_ev = __lsx_vadd_w(y_ev, yb); \
y_od = __lsx_vadd_w(y_od, yb); \
in_u = __lsx_vsub_h(in_u, const_80); \
in_v = __lsx_vsub_h(in_v, const_80); \
u_ev = __lsx_vmulwev_w_h(in_u, ub); \
u_od = __lsx_vmulwod_w_h(in_u, ub); \
v_ev = __lsx_vmulwev_w_h(in_v, vr); \
v_od = __lsx_vmulwod_w_h(in_v, vr); \
tmp0 = __lsx_vadd_w(y_ev, u_ev); \
tmp1 = __lsx_vadd_w(y_od, u_od); \
tmp2 = __lsx_vadd_w(y_ev, v_ev); \
tmp3 = __lsx_vadd_w(y_od, v_od); \
tmp0 = __lsx_vsrai_w(tmp0, 6); \
tmp1 = __lsx_vsrai_w(tmp1, 6); \
tmp2 = __lsx_vsrai_w(tmp2, 6); \
tmp3 = __lsx_vsrai_w(tmp3, 6); \
tmp0 = __lsx_vclip255_w(tmp0); \
tmp1 = __lsx_vclip255_w(tmp1); \
tmp2 = __lsx_vclip255_w(tmp2); \
tmp3 = __lsx_vclip255_w(tmp3); \
out_b = __lsx_vpackev_h(tmp1, tmp0); \
out_r = __lsx_vpackev_h(tmp3, tmp2); \
u_ev = __lsx_vpackev_h(in_u, in_v); \
u_od = __lsx_vpackod_h(in_u, in_v); \
v_ev = __lsx_vdp2_w_h(u_ev, ugvg); \
v_od = __lsx_vdp2_w_h(u_od, ugvg); \
tmp0 = __lsx_vsub_w(y_ev, v_ev); \
tmp1 = __lsx_vsub_w(y_od, v_od); \
tmp0 = __lsx_vsrai_w(tmp0, 6); \
tmp1 = __lsx_vsrai_w(tmp1, 6); \
tmp0 = __lsx_vclip255_w(tmp0); \
tmp1 = __lsx_vclip255_w(tmp1); \
out_g = __lsx_vpackev_h(tmp1, tmp0); \
}
// Pack and Store 8 ARGB values.
#define STOREARGB(in_a, in_r, in_g, in_b, pdst_argb) \
{ \
__m128i temp0, temp1; \
__m128i dst0, dst1; \
\
temp0 = __lsx_vpackev_b(in_g, in_b); \
temp1 = __lsx_vpackev_b(in_a, in_r); \
dst0 = __lsx_vilvl_h(temp1, temp0); \
dst1 = __lsx_vilvh_h(temp1, temp0); \
__lsx_vst(dst0, pdst_argb, 0); \
__lsx_vst(dst1, pdst_argb, 16); \
pdst_argb += 32; \
#define STOREARGB(in_a, in_r, in_g, in_b, pdst_argb) \
{ \
__m128i temp0, temp1; \
__m128i dst0, dst1; \
\
temp0 = __lsx_vpackev_b(in_g, in_b); \
temp1 = __lsx_vpackev_b(in_a, in_r); \
dst0 = __lsx_vilvl_h(temp1, temp0); \
dst1 = __lsx_vilvh_h(temp1, temp0); \
__lsx_vst(dst0, pdst_argb, 0); \
__lsx_vst(dst1, pdst_argb, 16); \
pdst_argb += 32; \
}
#define RGBTOUV(_tmpb, _tmpg, _tmpr, _nexb, _nexg, _nexr, _dst0) \
{ \
__m128i _tmp0, _tmp1, _tmp2, _tmp3; \
__m128i _reg0, _reg1; \
_tmp0 = __lsx_vaddwev_h_bu(_tmpb, _nexb); \
_tmp1 = __lsx_vaddwod_h_bu(_tmpb, _nexb); \
_tmp2 = __lsx_vaddwev_h_bu(_tmpg, _nexg); \
_tmp3 = __lsx_vaddwod_h_bu(_tmpg, _nexg); \
_reg0 = __lsx_vaddwev_h_bu(_tmpr, _nexr); \
_reg1 = __lsx_vaddwod_h_bu(_tmpr, _nexr); \
_tmpb = __lsx_vavgr_hu(_tmp0, _tmp1); \
_tmpg = __lsx_vavgr_hu(_tmp2, _tmp3); \
_tmpr = __lsx_vavgr_hu(_reg0, _reg1); \
_reg0 = __lsx_vmadd_h(const_8080, const_112, _tmpb); \
_reg1 = __lsx_vmadd_h(const_8080, const_112, _tmpr); \
_reg0 = __lsx_vmsub_h(_reg0, const_74, _tmpg); \
_reg1 = __lsx_vmsub_h(_reg1, const_94, _tmpg); \
_reg0 = __lsx_vmsub_h(_reg0, const_38, _tmpr); \
_reg1 = __lsx_vmsub_h(_reg1, const_18, _tmpb); \
_dst0 = __lsx_vsrlni_b_h(_reg1, _reg0, 8); \
#define RGBTOUV(_tmpb, _tmpg, _tmpr, _nexb, _nexg, _nexr, _dst0) \
{ \
__m128i _tmp0, _tmp1, _tmp2, _tmp3; \
__m128i _reg0, _reg1; \
_tmp0 = __lsx_vaddwev_h_bu(_tmpb, _nexb); \
_tmp1 = __lsx_vaddwod_h_bu(_tmpb, _nexb); \
_tmp2 = __lsx_vaddwev_h_bu(_tmpg, _nexg); \
_tmp3 = __lsx_vaddwod_h_bu(_tmpg, _nexg); \
_reg0 = __lsx_vaddwev_h_bu(_tmpr, _nexr); \
_reg1 = __lsx_vaddwod_h_bu(_tmpr, _nexr); \
_tmpb = __lsx_vavgr_hu(_tmp0, _tmp1); \
_tmpg = __lsx_vavgr_hu(_tmp2, _tmp3); \
_tmpr = __lsx_vavgr_hu(_reg0, _reg1); \
_reg0 = __lsx_vmadd_h(const_8080, const_112, _tmpb); \
_reg1 = __lsx_vmadd_h(const_8080, const_112, _tmpr); \
_reg0 = __lsx_vmsub_h(_reg0, const_74, _tmpg); \
_reg1 = __lsx_vmsub_h(_reg1, const_94, _tmpg); \
_reg0 = __lsx_vmsub_h(_reg0, const_38, _tmpr); \
_reg1 = __lsx_vmsub_h(_reg1, const_18, _tmpb); \
_dst0 = __lsx_vsrlni_b_h(_reg1, _reg0, 8); \
}
void ARGB4444ToARGBRow_LSX(const uint8_t* src_argb4444,
@ -177,8 +176,8 @@ void ARGB4444ToARGBRow_LSX(const uint8_t* src_argb4444,
reg2 = __lsx_vslli_b(tmp2, 4);
reg1 = __lsx_vsrli_b(tmp1, 4);
reg3 = __lsx_vsrli_b(tmp3, 4);
DUP4_ARG2(__lsx_vor_v, tmp0, reg0, tmp1, reg1, tmp2, reg2,
tmp3, reg3, tmp0, tmp1, tmp2, tmp3);
DUP4_ARG2(__lsx_vor_v, tmp0, reg0, tmp1, reg1, tmp2, reg2, tmp3, reg3, tmp0,
tmp1, tmp2, tmp3);
dst0 = __lsx_vilvl_b(tmp1, tmp0);
dst2 = __lsx_vilvl_b(tmp3, tmp2);
dst1 = __lsx_vilvh_b(tmp1, tmp0);
@ -352,9 +351,9 @@ void ARGB1555ToYRow_LSX(const uint8_t* src_argb1555,
__m128i src0, src1;
__m128i tmp0, tmp1, tmpb, tmpg, tmpr;
__m128i reg0, reg1, reg2, dst0;
__m128i const_66 = __lsx_vldi(66);
__m128i const_66 = __lsx_vldi(66);
__m128i const_129 = __lsx_vldi(129);
__m128i const_25 = __lsx_vldi(25);
__m128i const_25 = __lsx_vldi(25);
__m128i const_1080 = {0x1080108010801080, 0x1080108010801080};
__m128i shuff = {0x0B030A0209010800, 0x0F070E060D050C04};
@ -406,15 +405,15 @@ void ARGB1555ToUVRow_LSX(const uint8_t* src_argb1555,
__m128i tmpb, tmpg, tmpr, nexb, nexg, nexr;
__m128i reg0, reg1, reg2, reg3, dst0;
__m128i const_112 = __lsx_vldi(0x438);
__m128i const_74 = __lsx_vldi(0x425);
__m128i const_38 = __lsx_vldi(0x413);
__m128i const_94 = __lsx_vldi(0x42F);
__m128i const_18 = __lsx_vldi(0x409);
__m128i const_74 = __lsx_vldi(0x425);
__m128i const_38 = __lsx_vldi(0x413);
__m128i const_94 = __lsx_vldi(0x42F);
__m128i const_18 = __lsx_vldi(0x409);
__m128i const_8080 = {0x8080808080808080, 0x8080808080808080};
for (x = 0; x < len; x++) {
DUP4_ARG2(__lsx_vld, src_argb1555, 0, src_argb1555, 16,
next_argb1555, 0, next_argb1555, 16, src0, src1, src2, src3);
DUP4_ARG2(__lsx_vld, src_argb1555, 0, src_argb1555, 16, next_argb1555, 0,
next_argb1555, 16, src0, src1, src2, src3);
DUP2_ARG2(__lsx_vpickev_b, src1, src0, src3, src2, tmp0, tmp2);
DUP2_ARG2(__lsx_vpickod_b, src1, src0, src3, src2, tmp1, tmp3);
tmpb = __lsx_vandi_b(tmp0, 0x1F);
@ -465,9 +464,9 @@ void RGB565ToYRow_LSX(const uint8_t* src_rgb565, uint8_t* dst_y, int width) {
__m128i src0, src1;
__m128i tmp0, tmp1, tmpb, tmpg, tmpr;
__m128i reg0, reg1, dst0;
__m128i const_66 = __lsx_vldi(66);
__m128i const_66 = __lsx_vldi(66);
__m128i const_129 = __lsx_vldi(129);
__m128i const_25 = __lsx_vldi(25);
__m128i const_25 = __lsx_vldi(25);
__m128i const_1080 = {0x1080108010801080, 0x1080108010801080};
__m128i shuff = {0x0B030A0209010800, 0x0F070E060D050C04};
@ -517,15 +516,15 @@ void RGB565ToUVRow_LSX(const uint8_t* src_rgb565,
__m128i tmpb, tmpg, tmpr, nexb, nexg, nexr;
__m128i reg0, reg1, reg2, reg3, dst0;
__m128i const_112 = __lsx_vldi(0x438);
__m128i const_74 = __lsx_vldi(0x425);
__m128i const_38 = __lsx_vldi(0x413);
__m128i const_94 = __lsx_vldi(0x42F);
__m128i const_18 = __lsx_vldi(0x409);
__m128i const_74 = __lsx_vldi(0x425);
__m128i const_38 = __lsx_vldi(0x413);
__m128i const_94 = __lsx_vldi(0x42F);
__m128i const_18 = __lsx_vldi(0x409);
__m128i const_8080 = {0x8080808080808080, 0x8080808080808080};
for (x = 0; x < len; x++) {
DUP4_ARG2(__lsx_vld, src_rgb565, 0, src_rgb565, 16,
next_rgb565, 0, next_rgb565, 16, src0, src1, src2, src3);
DUP4_ARG2(__lsx_vld, src_rgb565, 0, src_rgb565, 16, next_rgb565, 0,
next_rgb565, 16, src0, src1, src2, src3);
DUP2_ARG2(__lsx_vpickev_b, src1, src0, src3, src2, tmp0, tmp2);
DUP2_ARG2(__lsx_vpickod_b, src1, src0, src3, src2, tmp1, tmp3);
tmpb = __lsx_vandi_b(tmp0, 0x1F);
@ -611,10 +610,10 @@ void RGB24ToUVRow_LSX(const uint8_t* src_rgb24,
__m128i nex0, nex1, nex2, dst0;
__m128i tmpb, tmpg, tmpr, nexb, nexg, nexr;
__m128i const_112 = __lsx_vldi(0x438);
__m128i const_74 = __lsx_vldi(0x425);
__m128i const_38 = __lsx_vldi(0x413);
__m128i const_94 = __lsx_vldi(0x42F);
__m128i const_18 = __lsx_vldi(0x409);
__m128i const_74 = __lsx_vldi(0x425);
__m128i const_38 = __lsx_vldi(0x413);
__m128i const_94 = __lsx_vldi(0x42F);
__m128i const_18 = __lsx_vldi(0x409);
__m128i const_8080 = {0x8080808080808080, 0x8080808080808080};
__m128i shuff0_b = {0x15120F0C09060300, 0x00000000001E1B18};
__m128i shuff1_b = {0x0706050403020100, 0x1D1A1714110A0908};
@ -630,12 +629,18 @@ void RGB24ToUVRow_LSX(const uint8_t* src_rgb24,
nex0 = __lsx_vld(next_rgb24, 0);
nex1 = __lsx_vld(next_rgb24, 16);
nex2 = __lsx_vld(next_rgb24, 32);
DUP2_ARG3(__lsx_vshuf_b, src1, src0, shuff0_b, nex1, nex0, shuff0_b, tmpb, nexb);
DUP2_ARG3(__lsx_vshuf_b, src1, src0, shuff0_g, nex1, nex0, shuff0_g, tmpg, nexg);
DUP2_ARG3(__lsx_vshuf_b, src1, src0, shuff0_r, nex1, nex0, shuff0_r, tmpr, nexr);
DUP2_ARG3(__lsx_vshuf_b, src2, tmpb, shuff1_b, nex2, nexb, shuff1_b, tmpb, nexb);
DUP2_ARG3(__lsx_vshuf_b, src2, tmpg, shuff1_g, nex2, nexg, shuff1_g, tmpg, nexg);
DUP2_ARG3(__lsx_vshuf_b, src2, tmpr, shuff1_r, nex2, nexr, shuff1_r, tmpr, nexr);
DUP2_ARG3(__lsx_vshuf_b, src1, src0, shuff0_b, nex1, nex0, shuff0_b, tmpb,
nexb);
DUP2_ARG3(__lsx_vshuf_b, src1, src0, shuff0_g, nex1, nex0, shuff0_g, tmpg,
nexg);
DUP2_ARG3(__lsx_vshuf_b, src1, src0, shuff0_r, nex1, nex0, shuff0_r, tmpr,
nexr);
DUP2_ARG3(__lsx_vshuf_b, src2, tmpb, shuff1_b, nex2, nexb, shuff1_b, tmpb,
nexb);
DUP2_ARG3(__lsx_vshuf_b, src2, tmpg, shuff1_g, nex2, nexg, shuff1_g, tmpg,
nexg);
DUP2_ARG3(__lsx_vshuf_b, src2, tmpr, shuff1_r, nex2, nexr, shuff1_r, tmpr,
nexr);
RGBTOUV(tmpb, tmpg, tmpr, nexb, nexg, nexr, dst0);
__lsx_vstelm_d(dst0, dst_u, 0, 0);
__lsx_vstelm_d(dst0, dst_v, 0, 1);
@ -691,10 +696,10 @@ void RAWToUVRow_LSX(const uint8_t* src_raw,
__m128i nex0, nex1, nex2, dst0;
__m128i tmpb, tmpg, tmpr, nexb, nexg, nexr;
__m128i const_112 = __lsx_vldi(0x438);
__m128i const_74 = __lsx_vldi(0x425);
__m128i const_38 = __lsx_vldi(0x413);
__m128i const_94 = __lsx_vldi(0x42F);
__m128i const_18 = __lsx_vldi(0x409);
__m128i const_74 = __lsx_vldi(0x425);
__m128i const_38 = __lsx_vldi(0x413);
__m128i const_94 = __lsx_vldi(0x42F);
__m128i const_18 = __lsx_vldi(0x409);
__m128i const_8080 = {0x8080808080808080, 0x8080808080808080};
__m128i shuff0_r = {0x15120F0C09060300, 0x00000000001E1B18};
__m128i shuff1_r = {0x0706050403020100, 0x1D1A1714110A0908};
@ -710,12 +715,18 @@ void RAWToUVRow_LSX(const uint8_t* src_raw,
nex0 = __lsx_vld(next_raw, 0);
nex1 = __lsx_vld(next_raw, 16);
nex2 = __lsx_vld(next_raw, 32);
DUP2_ARG3(__lsx_vshuf_b, src1, src0, shuff0_b, nex1, nex0, shuff0_b, tmpb, nexb);
DUP2_ARG3(__lsx_vshuf_b, src1, src0, shuff0_g, nex1, nex0, shuff0_g, tmpg, nexg);
DUP2_ARG3(__lsx_vshuf_b, src1, src0, shuff0_r, nex1, nex0, shuff0_r, tmpr, nexr);
DUP2_ARG3(__lsx_vshuf_b, src2, tmpb, shuff1_b, nex2, nexb, shuff1_b, tmpb, nexb);
DUP2_ARG3(__lsx_vshuf_b, src2, tmpg, shuff1_g, nex2, nexg, shuff1_g, tmpg, nexg);
DUP2_ARG3(__lsx_vshuf_b, src2, tmpr, shuff1_r, nex2, nexr, shuff1_r, tmpr, nexr);
DUP2_ARG3(__lsx_vshuf_b, src1, src0, shuff0_b, nex1, nex0, shuff0_b, tmpb,
nexb);
DUP2_ARG3(__lsx_vshuf_b, src1, src0, shuff0_g, nex1, nex0, shuff0_g, tmpg,
nexg);
DUP2_ARG3(__lsx_vshuf_b, src1, src0, shuff0_r, nex1, nex0, shuff0_r, tmpr,
nexr);
DUP2_ARG3(__lsx_vshuf_b, src2, tmpb, shuff1_b, nex2, nexb, shuff1_b, tmpb,
nexb);
DUP2_ARG3(__lsx_vshuf_b, src2, tmpg, shuff1_g, nex2, nexg, shuff1_g, tmpg,
nexg);
DUP2_ARG3(__lsx_vshuf_b, src2, tmpr, shuff1_r, nex2, nexr, shuff1_r, tmpr,
nexr);
RGBTOUV(tmpb, tmpg, tmpr, nexb, nexg, nexr, dst0);
__lsx_vstelm_d(dst0, dst_u, 0, 0);
__lsx_vstelm_d(dst0, dst_v, 0, 1);
@ -739,19 +750,19 @@ void NV12ToARGBRow_LSX(const uint8_t* src_y,
__m128i out_b, out_g, out_r;
__m128i const_80 = __lsx_vldi(0x480);
__m128i alpha = __lsx_vldi(0xFF);
__m128i zero = __lsx_vldi(0);
__m128i zero = __lsx_vldi(0);
YUVTORGB_SETUP(yuvconstants, vec_vr, vec_ub, vec_vg, vec_ug, vec_yg, vec_yb);
vec_vrub = __lsx_vilvl_h(vec_vr, vec_ub);
vec_vgug = __lsx_vilvl_h(vec_vg, vec_ug);
for (x = 0; x < len; x++) {
vec_y = __lsx_vld(src_y, 0);
vec_y = __lsx_vld(src_y, 0);
vec_vu = __lsx_vld(src_uv, 0);
YUVTORGB(vec_y, vec_vu, vec_vrub, vec_vgug, vec_yg, vec_yb,
out_b, out_g, out_r);
YUVTORGB(vec_y, vec_vu, vec_vrub, vec_vgug, vec_yg, vec_yb, out_b, out_g,
out_r);
STOREARGB(alpha, out_r, out_g, out_b, dst_argb);
src_y += 8;
src_y += 8;
src_uv += 8;
}
}
@ -768,17 +779,17 @@ void NV12ToRGB565Row_LSX(const uint8_t* src_y,
__m128i vec_vrub, vec_vgug;
__m128i out_b, out_g, out_r;
__m128i const_80 = __lsx_vldi(0x480);
__m128i zero = __lsx_vldi(0);
__m128i zero = __lsx_vldi(0);
YUVTORGB_SETUP(yuvconstants, vec_vr, vec_ub, vec_vg, vec_ug, vec_yg, vec_yb);
vec_vrub = __lsx_vilvl_h(vec_vr, vec_ub);
vec_vgug = __lsx_vilvl_h(vec_vg, vec_ug);
for (x = 0; x < len; x++) {
vec_y = __lsx_vld(src_y, 0);
vec_y = __lsx_vld(src_y, 0);
vec_vu = __lsx_vld(src_uv, 0);
YUVTORGB(vec_y, vec_vu, vec_vrub, vec_vgug, vec_yg, vec_yb,
out_b, out_g, out_r);
YUVTORGB(vec_y, vec_vu, vec_vrub, vec_vgug, vec_yg, vec_yb, out_b, out_g,
out_r);
out_b = __lsx_vsrli_h(out_b, 3);
out_g = __lsx_vsrli_h(out_g, 2);
out_r = __lsx_vsrli_h(out_r, 3);
@ -787,7 +798,7 @@ void NV12ToRGB565Row_LSX(const uint8_t* src_y,
out_r = __lsx_vor_v(out_r, out_g);
out_r = __lsx_vor_v(out_r, out_b);
__lsx_vst(out_r, dst_rgb565, 0);
src_y += 8;
src_y += 8;
src_uv += 8;
dst_rgb565 += 16;
}
@ -806,19 +817,19 @@ void NV21ToARGBRow_LSX(const uint8_t* src_y,
__m128i out_b, out_g, out_r;
__m128i const_80 = __lsx_vldi(0x480);
__m128i alpha = __lsx_vldi(0xFF);
__m128i zero = __lsx_vldi(0);
__m128i zero = __lsx_vldi(0);
YUVTORGB_SETUP(yuvconstants, vec_vr, vec_ub, vec_vg, vec_ug, vec_yg, vec_yb);
vec_ubvr = __lsx_vilvl_h(vec_ub, vec_vr);
vec_ugvg = __lsx_vilvl_h(vec_ug, vec_vg);
for (x = 0; x < len; x++) {
vec_y = __lsx_vld(src_y, 0);
vec_y = __lsx_vld(src_y, 0);
vec_uv = __lsx_vld(src_vu, 0);
YUVTORGB(vec_y, vec_uv, vec_ubvr, vec_ugvg, vec_yg, vec_yb,
out_r, out_g, out_b);
YUVTORGB(vec_y, vec_uv, vec_ubvr, vec_ugvg, vec_yg, vec_yb, out_r, out_g,
out_b);
STOREARGB(alpha, out_r, out_g, out_b, dst_argb);
src_y += 8;
src_y += 8;
src_vu += 8;
}
}
@ -831,7 +842,7 @@ void SobelRow_LSX(const uint8_t* src_sobelx,
int len = width / 16;
__m128i src0, src1, tmp0;
__m128i out0, out1, out2, out3;
__m128i alpha = __lsx_vldi(0xFF);
__m128i alpha = __lsx_vldi(0xFF);
__m128i shuff0 = {0x1001010110000000, 0x1003030310020202};
__m128i shuff1 = __lsx_vaddi_bu(shuff0, 0x04);
__m128i shuff2 = __lsx_vaddi_bu(shuff1, 0x04);
@ -915,11 +926,11 @@ void ARGBToYJRow_LSX(const uint8_t* src_argb, uint8_t* dst_y, int width) {
__m128i reg0, reg1;
__m128i const_128 = __lsx_vldi(0x480);
__m128i const_150 = __lsx_vldi(0x96);
__m128i const_br = {0x4D1D4D1D4D1D4D1D, 0x4D1D4D1D4D1D4D1D};
__m128i const_br = {0x4D1D4D1D4D1D4D1D, 0x4D1D4D1D4D1D4D1D};
for (x = 0; x < len; x++) {
DUP4_ARG2(__lsx_vld, src_argb, 0, src_argb, 16, src_argb, 32,
src_argb, 48, src0, src1, src2, src3);
DUP4_ARG2(__lsx_vld, src_argb, 0, src_argb, 16, src_argb, 32, src_argb, 48,
src0, src1, src2, src3);
tmp0 = __lsx_vpickev_b(src1, src0);
tmp1 = __lsx_vpickod_b(src1, src0);
tmp2 = __lsx_vpickev_b(src3, src2);
@ -942,12 +953,12 @@ void BGRAToYRow_LSX(const uint8_t* src_bgra, uint8_t* dst_y, int width) {
__m128i tmp0, tmp1, tmp2, tmp3;
__m128i reg0, reg1;
__m128i const_129 = __lsx_vldi(0x81);
__m128i const_br = {0x1942194219421942, 0x1942194219421942};
__m128i const_br = {0x1942194219421942, 0x1942194219421942};
__m128i const_1080 = {0x1080108010801080, 0x1080108010801080};
for (x = 0; x < len; x++) {
DUP4_ARG2(__lsx_vld, src_bgra, 0, src_bgra, 16, src_bgra, 32,
src_bgra, 48, src0, src1, src2, src3);
DUP4_ARG2(__lsx_vld, src_bgra, 0, src_bgra, 16, src_bgra, 32, src_bgra, 48,
src0, src1, src2, src3);
tmp0 = __lsx_vpickod_b(src1, src0);
tmp1 = __lsx_vpickev_b(src1, src0);
tmp2 = __lsx_vpickod_b(src3, src2);
@ -976,17 +987,17 @@ void BGRAToUVRow_LSX(const uint8_t* src_bgra,
__m128i tmp0, tmp1, tmp2, tmp3, dst0;
__m128i tmpb, tmpg, tmpr, nexb, nexg, nexr;
__m128i const_112 = __lsx_vldi(0x438);
__m128i const_74 = __lsx_vldi(0x425);
__m128i const_38 = __lsx_vldi(0x413);
__m128i const_94 = __lsx_vldi(0x42F);
__m128i const_18 = __lsx_vldi(0x409);
__m128i const_74 = __lsx_vldi(0x425);
__m128i const_38 = __lsx_vldi(0x413);
__m128i const_94 = __lsx_vldi(0x42F);
__m128i const_18 = __lsx_vldi(0x409);
__m128i const_8080 = {0x8080808080808080, 0x8080808080808080};
for (x = 0; x < len; x++) {
DUP4_ARG2(__lsx_vld, src_bgra, 0, src_bgra, 16, src_bgra, 32,
src_bgra, 48, src0, src1, src2, src3);
DUP4_ARG2(__lsx_vld, next_bgra, 0, next_bgra, 16, next_bgra, 32,
next_bgra, 48, nex0, nex1, nex2, nex3);
DUP4_ARG2(__lsx_vld, src_bgra, 0, src_bgra, 16, src_bgra, 32, src_bgra, 48,
src0, src1, src2, src3);
DUP4_ARG2(__lsx_vld, next_bgra, 0, next_bgra, 16, next_bgra, 32, next_bgra,
48, nex0, nex1, nex2, nex3);
tmp0 = __lsx_vpickod_b(src1, src0);
tmp1 = __lsx_vpickev_b(src1, src0);
tmp2 = __lsx_vpickod_b(src3, src2);
@ -1018,12 +1029,12 @@ void ABGRToYRow_LSX(const uint8_t* src_abgr, uint8_t* dst_y, int width) {
__m128i tmp0, tmp1, tmp2, tmp3;
__m128i reg0, reg1;
__m128i const_129 = __lsx_vldi(0x81);
__m128i const_br = {0x1942194219421942, 0x1942194219421942};
__m128i const_br = {0x1942194219421942, 0x1942194219421942};
__m128i const_1080 = {0x1080108010801080, 0x1080108010801080};
for (x = 0; x < len; x++) {
DUP4_ARG2(__lsx_vld, src_abgr, 0, src_abgr, 16, src_abgr, 32,
src_abgr, 48, src0, src1, src2, src3);
DUP4_ARG2(__lsx_vld, src_abgr, 0, src_abgr, 16, src_abgr, 32, src_abgr, 48,
src0, src1, src2, src3);
tmp0 = __lsx_vpickev_b(src1, src0);
tmp1 = __lsx_vpickod_b(src1, src0);
tmp2 = __lsx_vpickev_b(src3, src2);
@ -1052,17 +1063,17 @@ void ABGRToUVRow_LSX(const uint8_t* src_abgr,
__m128i tmp0, tmp1, tmp2, tmp3, dst0;
__m128i tmpb, tmpg, tmpr, nexb, nexg, nexr;
__m128i const_112 = __lsx_vldi(0x438);
__m128i const_74 = __lsx_vldi(0x425);
__m128i const_38 = __lsx_vldi(0x413);
__m128i const_94 = __lsx_vldi(0x42F);
__m128i const_18 = __lsx_vldi(0x409);
__m128i const_74 = __lsx_vldi(0x425);
__m128i const_38 = __lsx_vldi(0x413);
__m128i const_94 = __lsx_vldi(0x42F);
__m128i const_18 = __lsx_vldi(0x409);
__m128i const_8080 = {0x8080808080808080, 0x8080808080808080};
for (x = 0; x < len; x++) {
DUP4_ARG2(__lsx_vld, src_abgr, 0, src_abgr, 16, src_abgr, 32,
src_abgr, 48, src0, src1, src2, src3);
DUP4_ARG2(__lsx_vld, next_abgr, 0, next_abgr, 16, next_abgr, 32,
next_abgr, 48, nex0, nex1, nex2, nex3);
DUP4_ARG2(__lsx_vld, src_abgr, 0, src_abgr, 16, src_abgr, 32, src_abgr, 48,
src0, src1, src2, src3);
DUP4_ARG2(__lsx_vld, next_abgr, 0, next_abgr, 16, next_abgr, 32, next_abgr,
48, nex0, nex1, nex2, nex3);
tmp0 = __lsx_vpickev_b(src1, src0);
tmp1 = __lsx_vpickod_b(src1, src0);
tmp2 = __lsx_vpickev_b(src3, src2);
@ -1094,12 +1105,12 @@ void RGBAToYRow_LSX(const uint8_t* src_rgba, uint8_t* dst_y, int width) {
__m128i tmp0, tmp1, tmp2, tmp3;
__m128i reg0, reg1;
__m128i const_129 = __lsx_vldi(0x81);
__m128i const_br = {0x4219421942194219, 0x4219421942194219};
__m128i const_br = {0x4219421942194219, 0x4219421942194219};
__m128i const_1080 = {0x1080108010801080, 0x1080108010801080};
for (x = 0; x < len; x++) {
DUP4_ARG2(__lsx_vld, src_rgba, 0, src_rgba, 16, src_rgba, 32,
src_rgba, 48, src0, src1, src2, src3);
DUP4_ARG2(__lsx_vld, src_rgba, 0, src_rgba, 16, src_rgba, 32, src_rgba, 48,
src0, src1, src2, src3);
tmp0 = __lsx_vpickod_b(src1, src0);
tmp1 = __lsx_vpickev_b(src1, src0);
tmp2 = __lsx_vpickod_b(src3, src2);
@ -1128,17 +1139,17 @@ void RGBAToUVRow_LSX(const uint8_t* src_rgba,
__m128i tmp0, tmp1, tmp2, tmp3, dst0;
__m128i tmpb, tmpg, tmpr, nexb, nexg, nexr;
__m128i const_112 = __lsx_vldi(0x438);
__m128i const_74 = __lsx_vldi(0x425);
__m128i const_38 = __lsx_vldi(0x413);
__m128i const_94 = __lsx_vldi(0x42F);
__m128i const_18 = __lsx_vldi(0x409);
__m128i const_74 = __lsx_vldi(0x425);
__m128i const_38 = __lsx_vldi(0x413);
__m128i const_94 = __lsx_vldi(0x42F);
__m128i const_18 = __lsx_vldi(0x409);
__m128i const_8080 = {0x8080808080808080, 0x8080808080808080};
for (x = 0; x < len; x++) {
DUP4_ARG2(__lsx_vld, src_rgba, 0, src_rgba, 16, src_rgba, 32,
src_rgba, 48, src0, src1, src2, src3);
DUP4_ARG2(__lsx_vld, next_rgba, 0, next_rgba, 16, next_rgba, 32,
next_rgba, 48, nex0, nex1, nex2, nex3);
DUP4_ARG2(__lsx_vld, src_rgba, 0, src_rgba, 16, src_rgba, 32, src_rgba, 48,
src0, src1, src2, src3);
DUP4_ARG2(__lsx_vld, next_rgba, 0, next_rgba, 16, next_rgba, 32, next_rgba,
48, nex0, nex1, nex2, nex3);
tmp0 = __lsx_vpickod_b(src1, src0);
tmp1 = __lsx_vpickev_b(src1, src0);
tmp2 = __lsx_vpickod_b(src3, src2);
@ -1174,20 +1185,20 @@ void ARGBToUVJRow_LSX(const uint8_t* src_argb,
__m128i src0, src1, src2, src3;
__m128i nex0, nex1, nex2, nex3;
__m128i tmp0, tmp1, tmp2, tmp3;
__m128i reg0, reg1, dst0;
__m128i reg0, reg1, dst0;
__m128i tmpb, tmpg, tmpr, nexb, nexg, nexr;
__m128i const_63 = __lsx_vldi(0x43F);
__m128i const_42 = __lsx_vldi(0x42A);
__m128i const_21 = __lsx_vldi(0x415);
__m128i const_53 = __lsx_vldi(0x435);
__m128i const_10 = __lsx_vldi(0x40A);
__m128i const_63 = __lsx_vldi(0x43F);
__m128i const_42 = __lsx_vldi(0x42A);
__m128i const_21 = __lsx_vldi(0x415);
__m128i const_53 = __lsx_vldi(0x435);
__m128i const_10 = __lsx_vldi(0x40A);
__m128i const_8080 = {0x8080808080808080, 0x8080808080808080};
for (x = 0; x < len; x++) {
DUP4_ARG2(__lsx_vld, src_argb, 0, src_argb, 16, src_argb, 32,
src_argb, 48, src0, src1, src2, src3);
DUP4_ARG2(__lsx_vld, next_argb, 0, next_argb, 16, next_argb, 32,
next_argb, 48, nex0, nex1, nex2, nex3);
DUP4_ARG2(__lsx_vld, src_argb, 0, src_argb, 16, src_argb, 32, src_argb, 48,
src0, src1, src2, src3);
DUP4_ARG2(__lsx_vld, next_argb, 0, next_argb, 16, next_argb, 32, next_argb,
48, nex0, nex1, nex2, nex3);
tmp0 = __lsx_vpickev_b(src1, src0);
tmp1 = __lsx_vpickod_b(src1, src0);
tmp2 = __lsx_vpickev_b(src3, src2);
@ -1240,26 +1251,26 @@ void I444ToARGBRow_LSX(const uint8_t* src_y,
__m128i vec_vr, vec_ub, vec_vg, vec_ug, vec_yg, vec_yb, vec_ugvg;
__m128i const_80 = __lsx_vldi(0x480);
__m128i alpha = __lsx_vldi(0xFF);
__m128i zero = __lsx_vldi(0);
__m128i zero = __lsx_vldi(0);
YUVTORGB_SETUP(yuvconstants, vec_vr, vec_ub, vec_vg, vec_ug, vec_yg, vec_yb);
vec_ugvg = __lsx_vilvl_h(vec_ug, vec_vg);
for (x = 0; x < len; x++) {
vec_y = __lsx_vld(src_y, 0);
vec_u = __lsx_vld(src_u, 0);
vec_v = __lsx_vld(src_v, 0);
vec_y = __lsx_vld(src_y, 0);
vec_u = __lsx_vld(src_u, 0);
vec_v = __lsx_vld(src_v, 0);
vec_yl = __lsx_vilvl_b(vec_y, vec_y);
vec_ul = __lsx_vilvl_b(zero, vec_u);
vec_vl = __lsx_vilvl_b(zero, vec_v);
I444TORGB(vec_yl, vec_ul, vec_vl, vec_ub, vec_vr, vec_ugvg,
vec_yg, vec_yb, out_b, out_g, out_r);
I444TORGB(vec_yl, vec_ul, vec_vl, vec_ub, vec_vr, vec_ugvg, vec_yg, vec_yb,
out_b, out_g, out_r);
STOREARGB(alpha, out_r, out_g, out_b, dst_argb);
vec_yh = __lsx_vilvh_b(vec_y, vec_y);
vec_uh = __lsx_vilvh_b(zero, vec_u);
vec_vh = __lsx_vilvh_b(zero, vec_v);
I444TORGB(vec_yh, vec_uh, vec_vh, vec_ub, vec_vr, vec_ugvg,
vec_yg, vec_yb, out_b, out_g, out_r);
I444TORGB(vec_yh, vec_uh, vec_vh, vec_ub, vec_vr, vec_ugvg, vec_yg, vec_yb,
out_b, out_g, out_r);
STOREARGB(alpha, out_r, out_g, out_b, dst_argb);
src_y += 16;
src_u += 16;
@ -1283,37 +1294,37 @@ void I400ToARGBRow_LSX(const uint8_t* src_y,
for (x = 0; x < len; x++) {
vec_y = __lsx_vld(src_y, 0);
vec_yl = __lsx_vilvl_b(vec_y, vec_y);
y_ev = __lsx_vmulwev_w_hu_h(vec_yl, vec_yg);
y_od = __lsx_vmulwod_w_hu_h(vec_yl, vec_yg);
y_ev = __lsx_vsrai_w(y_ev, 16);
y_od = __lsx_vsrai_w(y_od, 16);
y_ev = __lsx_vadd_w(y_ev, vec_yb);
y_od = __lsx_vadd_w(y_od, vec_yb);
y_ev = __lsx_vsrai_w(y_ev, 6);
y_od = __lsx_vsrai_w(y_od, 6);
y_ev = __lsx_vclip255_w(y_ev);
y_od = __lsx_vclip255_w(y_od);
out0 = __lsx_vpackev_h(y_od, y_ev);
y_ev = __lsx_vmulwev_w_hu_h(vec_yl, vec_yg);
y_od = __lsx_vmulwod_w_hu_h(vec_yl, vec_yg);
y_ev = __lsx_vsrai_w(y_ev, 16);
y_od = __lsx_vsrai_w(y_od, 16);
y_ev = __lsx_vadd_w(y_ev, vec_yb);
y_od = __lsx_vadd_w(y_od, vec_yb);
y_ev = __lsx_vsrai_w(y_ev, 6);
y_od = __lsx_vsrai_w(y_od, 6);
y_ev = __lsx_vclip255_w(y_ev);
y_od = __lsx_vclip255_w(y_od);
out0 = __lsx_vpackev_h(y_od, y_ev);
temp0 = __lsx_vpackev_b(out0, out0);
temp1 = __lsx_vpackev_b(alpha, out0);
dst0 = __lsx_vilvl_h(temp1, temp0);
dst1 = __lsx_vilvh_h(temp1, temp0);
dst0 = __lsx_vilvl_h(temp1, temp0);
dst1 = __lsx_vilvh_h(temp1, temp0);
vec_yh = __lsx_vilvh_b(vec_y, vec_y);
y_ev = __lsx_vmulwev_w_hu_h(vec_yh, vec_yg);
y_od = __lsx_vmulwod_w_hu_h(vec_yh, vec_yg);
y_ev = __lsx_vsrai_w(y_ev, 16);
y_od = __lsx_vsrai_w(y_od, 16);
y_ev = __lsx_vadd_w(y_ev, vec_yb);
y_od = __lsx_vadd_w(y_od, vec_yb);
y_ev = __lsx_vsrai_w(y_ev, 6);
y_od = __lsx_vsrai_w(y_od, 6);
y_ev = __lsx_vclip255_w(y_ev);
y_od = __lsx_vclip255_w(y_od);
out0 = __lsx_vpackev_h(y_od, y_ev);
y_ev = __lsx_vmulwev_w_hu_h(vec_yh, vec_yg);
y_od = __lsx_vmulwod_w_hu_h(vec_yh, vec_yg);
y_ev = __lsx_vsrai_w(y_ev, 16);
y_od = __lsx_vsrai_w(y_od, 16);
y_ev = __lsx_vadd_w(y_ev, vec_yb);
y_od = __lsx_vadd_w(y_od, vec_yb);
y_ev = __lsx_vsrai_w(y_ev, 6);
y_od = __lsx_vsrai_w(y_od, 6);
y_ev = __lsx_vclip255_w(y_ev);
y_od = __lsx_vclip255_w(y_od);
out0 = __lsx_vpackev_h(y_od, y_ev);
temp0 = __lsx_vpackev_b(out0, out0);
temp1 = __lsx_vpackev_b(alpha, out0);
dst2 = __lsx_vilvl_h(temp1, temp0);
dst3 = __lsx_vilvh_h(temp1, temp0);
dst2 = __lsx_vilvl_h(temp1, temp0);
dst3 = __lsx_vilvh_h(temp1, temp0);
__lsx_vst(dst0, dst_argb, 0);
__lsx_vst(dst1, dst_argb, 16);
__lsx_vst(dst2, dst_argb, 32);
@ -1360,7 +1371,7 @@ void YUY2ToARGBRow_LSX(const uint8_t* src_yuy2,
__m128i vec_vrub, vec_vgug;
__m128i out_b, out_g, out_r;
__m128i const_80 = __lsx_vldi(0x480);
__m128i zero = __lsx_vldi(0);
__m128i zero = __lsx_vldi(0);
__m128i alpha = __lsx_vldi(0xFF);
YUVTORGB_SETUP(yuvconstants, vec_vr, vec_ub, vec_vg, vec_ug, vec_yg, vec_yb);
@ -1369,10 +1380,10 @@ void YUY2ToARGBRow_LSX(const uint8_t* src_yuy2,
for (x = 0; x < len; x++) {
src0 = __lsx_vld(src_yuy2, 0);
vec_y = __lsx_vpickev_b(src0, src0);
vec_y = __lsx_vpickev_b(src0, src0);
vec_vu = __lsx_vpickod_b(src0, src0);
YUVTORGB(vec_y, vec_vu, vec_vrub, vec_vgug, vec_yg, vec_yb,
out_b, out_g, out_r);
YUVTORGB(vec_y, vec_vu, vec_vrub, vec_vgug, vec_yg, vec_yb, out_b, out_g,
out_r);
STOREARGB(alpha, out_r, out_g, out_b, dst_argb);
src_yuy2 += 16;
}
@ -1389,7 +1400,7 @@ void UYVYToARGBRow_LSX(const uint8_t* src_uyvy,
__m128i vec_vrub, vec_vgug;
__m128i out_b, out_g, out_r;
__m128i const_80 = __lsx_vldi(0x480);
__m128i zero = __lsx_vldi(0);
__m128i zero = __lsx_vldi(0);
__m128i alpha = __lsx_vldi(0xFF);
YUVTORGB_SETUP(yuvconstants, vec_vr, vec_ub, vec_vg, vec_ug, vec_yg, vec_yb);
@ -1398,10 +1409,10 @@ void UYVYToARGBRow_LSX(const uint8_t* src_uyvy,
for (x = 0; x < len; x++) {
src0 = __lsx_vld(src_uyvy, 0);
vec_y = __lsx_vpickod_b(src0, src0);
vec_y = __lsx_vpickod_b(src0, src0);
vec_vu = __lsx_vpickev_b(src0, src0);
YUVTORGB(vec_y, vec_vu, vec_vrub, vec_vgug, vec_yg, vec_yb,
out_b, out_g, out_r);
YUVTORGB(vec_y, vec_vu, vec_vrub, vec_vgug, vec_yg, vec_yb, out_b, out_g,
out_r);
STOREARGB(alpha, out_r, out_g, out_b, dst_argb);
src_uyvy += 16;
}
@ -1535,8 +1546,8 @@ void ARGBExtractAlphaRow_LSX(const uint8_t* src_argb,
__m128i src0, src1, src2, src3, tmp0, tmp1, dst0;
for (x = 0; x < len; x++) {
DUP4_ARG2(__lsx_vld, src_argb, 0, src_argb, 16, src_argb, 32,
src_argb, 48, src0, src1, src2, src3);
DUP4_ARG2(__lsx_vld, src_argb, 0, src_argb, 16, src_argb, 32, src_argb, 48,
src0, src1, src2, src3);
tmp0 = __lsx_vpickod_b(src1, src0);
tmp1 = __lsx_vpickod_b(src3, src2);
dst0 = __lsx_vpickod_b(tmp1, tmp0);
@ -1562,22 +1573,22 @@ void ARGBBlendRow_LSX(const uint8_t* src_argb,
__m128i control = {0xFF000000FF000000, 0xFF000000FF000000};
for (x = 0; x < len; x++) {
DUP4_ARG2(__lsx_vld, src_argb, 0, src_argb, 16,
src_argb1, 0, src_argb1, 16, src0, src1, src2, src3);
DUP4_ARG2(__lsx_vld, src_argb, 0, src_argb, 16, src_argb1, 0, src_argb1, 16,
src0, src1, src2, src3);
tmp0 = __lsx_vshuf4i_b(src0, 0xFF);
tmp1 = __lsx_vshuf4i_b(src1, 0xFF);
a0 = __lsx_vilvl_b(zero, tmp0);
a1 = __lsx_vilvh_b(zero, tmp0);
a2 = __lsx_vilvl_b(zero, tmp1);
a3 = __lsx_vilvh_b(zero, tmp1);
a0 = __lsx_vilvl_b(zero, tmp0);
a1 = __lsx_vilvh_b(zero, tmp0);
a2 = __lsx_vilvl_b(zero, tmp1);
a3 = __lsx_vilvh_b(zero, tmp1);
reg0 = __lsx_vilvl_b(zero, src2);
reg1 = __lsx_vilvh_b(zero, src2);
reg2 = __lsx_vilvl_b(zero, src3);
reg3 = __lsx_vilvh_b(zero, src3);
DUP4_ARG2(__lsx_vsub_h, const_256, a0, const_256, a1, const_256, a2,
const_256, a3, a0, a1, a2, a3);
DUP4_ARG2(__lsx_vmul_h, a0, reg0, a1, reg1, a2, reg2, a3, reg3,
reg0, reg1, reg2, reg3);
DUP4_ARG2(__lsx_vmul_h, a0, reg0, a1, reg1, a2, reg2, a3, reg3, reg0, reg1,
reg2, reg3);
DUP2_ARG3(__lsx_vsrani_b_h, reg1, reg0, 8, reg3, reg2, 8, dst0, dst1);
dst0 = __lsx_vsadd_bu(dst0, src0);
dst1 = __lsx_vsadd_bu(dst1, src1);
@ -1608,8 +1619,8 @@ void ARGBQuantizeRow_LSX(uint8_t* dst_argb,
__m128i control = {0xFF000000FF000000, 0xFF000000FF000000};
for (x = 0; x < len; x++) {
DUP4_ARG2(__lsx_vld, dst_argb, 0, dst_argb, 16, dst_argb, 32,
dst_argb, 48, src0, src1, src2, src3);
DUP4_ARG2(__lsx_vld, dst_argb, 0, dst_argb, 16, dst_argb, 32, dst_argb, 48,
src0, src1, src2, src3);
reg0 = __lsx_vilvl_b(zero, src0);
reg1 = __lsx_vilvh_b(zero, src0);
reg2 = __lsx_vilvl_b(zero, src1);
@ -1652,10 +1663,10 @@ void ARGBQuantizeRow_LSX(uint8_t* dst_argb,
dst3 = __lsx_vpickev_b(reg3, reg2);
DUP4_ARG2(__lsx_vmul_b, dst0, vec_size, dst1, vec_size, dst2, vec_size,
dst3, vec_size, dst0, dst1, dst2, dst3);
DUP4_ARG2(__lsx_vadd_b, dst0, vec_offset, dst1, vec_offset, dst2, vec_offset,
dst3, vec_offset, dst0, dst1, dst2, dst3);
DUP4_ARG3(__lsx_vbitsel_v, dst0, src0, control, dst1, src1, control,
dst2, src2, control, dst3, src3, control, dst0, dst1, dst2, dst3);
DUP4_ARG2(__lsx_vadd_b, dst0, vec_offset, dst1, vec_offset, dst2,
vec_offset, dst3, vec_offset, dst0, dst1, dst2, dst3);
DUP4_ARG3(__lsx_vbitsel_v, dst0, src0, control, dst1, src1, control, dst2,
src2, control, dst3, src3, control, dst0, dst1, dst2, dst3);
__lsx_vst(dst0, dst_argb, 0);
__lsx_vst(dst1, dst_argb, 16);
__lsx_vst(dst2, dst_argb, 32);
@ -1684,22 +1695,24 @@ void ARGBColorMatrixRow_LSX(const uint8_t* src_argb,
src0, matrix_a, tmp_b, tmp_g, tmp_r, tmp_a);
DUP4_ARG2(__lsx_vdp2_h_bu_b, src1, matrix_b, src1, matrix_g, src1, matrix_r,
src1, matrix_a, reg_b, reg_g, reg_r, reg_a);
DUP4_ARG2(__lsx_vhaddw_w_h, tmp_b, tmp_b, tmp_g, tmp_g, tmp_r, tmp_r,
tmp_a, tmp_a, tmp_b, tmp_g, tmp_r, tmp_a);
DUP4_ARG2(__lsx_vhaddw_w_h, reg_b, reg_b, reg_g, reg_g, reg_r, reg_r,
reg_a, reg_a, reg_b, reg_g, reg_r, reg_a);
DUP4_ARG2(__lsx_vsrai_w, tmp_b, 6, tmp_g, 6, tmp_r, 6,
tmp_a, 6, tmp_b, tmp_g, tmp_r, tmp_a);
DUP4_ARG2(__lsx_vsrai_w, reg_b, 6, reg_g, 6, reg_r, 6,
reg_a, 6, reg_b, reg_g, reg_r, reg_a);
DUP4_ARG1(__lsx_vclip255_w, tmp_b, tmp_g, tmp_r, tmp_a, tmp_b, tmp_g, tmp_r, tmp_a)
DUP4_ARG1(__lsx_vclip255_w, reg_b, reg_g, reg_r, reg_a, reg_b, reg_g, reg_r, reg_a)
DUP4_ARG2(__lsx_vpickev_h, reg_b, tmp_b, reg_g, tmp_g, reg_r, tmp_r,
reg_a, tmp_a, tmp_b, tmp_g, tmp_r, tmp_a);
tmp0 = __lsx_vpackev_b(tmp_g, tmp_b);
tmp1 = __lsx_vpackev_b(tmp_a, tmp_r);
dst0 = __lsx_vilvl_h(tmp1, tmp0);
dst1 = __lsx_vilvh_h(tmp1, tmp0);
DUP4_ARG2(__lsx_vhaddw_w_h, tmp_b, tmp_b, tmp_g, tmp_g, tmp_r, tmp_r, tmp_a,
tmp_a, tmp_b, tmp_g, tmp_r, tmp_a);
DUP4_ARG2(__lsx_vhaddw_w_h, reg_b, reg_b, reg_g, reg_g, reg_r, reg_r, reg_a,
reg_a, reg_b, reg_g, reg_r, reg_a);
DUP4_ARG2(__lsx_vsrai_w, tmp_b, 6, tmp_g, 6, tmp_r, 6, tmp_a, 6, tmp_b,
tmp_g, tmp_r, tmp_a);
DUP4_ARG2(__lsx_vsrai_w, reg_b, 6, reg_g, 6, reg_r, 6, reg_a, 6, reg_b,
reg_g, reg_r, reg_a);
DUP4_ARG1(__lsx_vclip255_w, tmp_b, tmp_g, tmp_r, tmp_a, tmp_b, tmp_g, tmp_r,
tmp_a)
DUP4_ARG1(__lsx_vclip255_w, reg_b, reg_g, reg_r, reg_a, reg_b, reg_g, reg_r,
reg_a)
DUP4_ARG2(__lsx_vpickev_h, reg_b, tmp_b, reg_g, tmp_g, reg_r, tmp_r, reg_a,
tmp_a, tmp_b, tmp_g, tmp_r, tmp_a);
tmp0 = __lsx_vpackev_b(tmp_g, tmp_b);
tmp1 = __lsx_vpackev_b(tmp_a, tmp_r);
dst0 = __lsx_vilvl_h(tmp1, tmp0);
dst1 = __lsx_vilvh_h(tmp1, tmp0);
__lsx_vst(dst0, dst_argb, 0);
__lsx_vst(dst1, dst_argb, 16);
src_argb += 32;
@ -1717,8 +1730,8 @@ void SplitUVRow_LSX(const uint8_t* src_uv,
__m128i dst0, dst1, dst2, dst3;
for (x = 0; x < len; x++) {
DUP4_ARG2(__lsx_vld, src_uv, 0, src_uv, 16, src_uv, 32,
src_uv, 48, src0, src1, src2, src3);
DUP4_ARG2(__lsx_vld, src_uv, 0, src_uv, 16, src_uv, 32, src_uv, 48, src0,
src1, src2, src3);
DUP2_ARG2(__lsx_vpickev_b, src1, src0, src3, src2, dst0, dst1);
DUP2_ARG2(__lsx_vpickod_b, src1, src0, src3, src2, dst2, dst3);
__lsx_vst(dst0, dst_u, 0);
@ -1756,10 +1769,10 @@ void MirrorSplitUVRow_LSX(const uint8_t* src_uv,
src_uv += (width << 1);
for (x = 0; x < len; x++) {
src_uv -= 64;
DUP4_ARG2(__lsx_vld, src_uv, 0, src_uv, 16, src_uv, 32,
src_uv, 48, src2, src3, src0, src1);
DUP4_ARG3(__lsx_vshuf_b, src1, src0, shuff1, src3, src2, shuff1,
src1, src0, shuff0, src3, src2, shuff0, dst0, dst1, dst2, dst3);
DUP4_ARG2(__lsx_vld, src_uv, 0, src_uv, 16, src_uv, 32, src_uv, 48, src2,
src3, src0, src1);
DUP4_ARG3(__lsx_vshuf_b, src1, src0, shuff1, src3, src2, shuff1, src1, src0,
shuff0, src3, src2, shuff0, dst0, dst1, dst2, dst3);
__lsx_vst(dst0, dst_v, 0);
__lsx_vst(dst1, dst_v, 16);
__lsx_vst(dst2, dst_u, 0);
@ -1778,18 +1791,21 @@ void HalfFloatRow_LSX(const uint16_t* src,
float mult = 1.9259299444e-34f * scale;
__m128i src0, src1, src2, src3, dst0, dst1, dst2, dst3;
__m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
__m128 reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7;
__m128 reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7;
__m128 vec_mult = (__m128)__lsx_vldrepl_w(&mult, 0);
__m128i zero = __lsx_vldi(0);
for (x = 0; x < len; x++) {
DUP4_ARG2(__lsx_vld, src, 0, src, 16, src, 32, src, 48, src0, src1, src2, src3);
DUP4_ARG2(__lsx_vilvl_h, zero, src0, zero, src1, zero, src2,
zero, src3, tmp0, tmp2, tmp4, tmp6);
DUP4_ARG2(__lsx_vilvh_h, zero, src0, zero, src1, zero, src2,
zero, src3, tmp1, tmp3, tmp5, tmp7);
DUP4_ARG1(__lsx_vffint_s_wu, tmp0, tmp2, tmp4, tmp6, reg0, reg2, reg4, reg6);
DUP4_ARG1(__lsx_vffint_s_wu, tmp1, tmp3, tmp5, tmp7, reg1, reg3, reg5, reg7);
DUP4_ARG2(__lsx_vld, src, 0, src, 16, src, 32, src, 48, src0, src1, src2,
src3);
DUP4_ARG2(__lsx_vilvl_h, zero, src0, zero, src1, zero, src2, zero, src3,
tmp0, tmp2, tmp4, tmp6);
DUP4_ARG2(__lsx_vilvh_h, zero, src0, zero, src1, zero, src2, zero, src3,
tmp1, tmp3, tmp5, tmp7);
DUP4_ARG1(__lsx_vffint_s_wu, tmp0, tmp2, tmp4, tmp6, reg0, reg2, reg4,
reg6);
DUP4_ARG1(__lsx_vffint_s_wu, tmp1, tmp3, tmp5, tmp7, reg1, reg3, reg5,
reg7);
DUP4_ARG2(__lsx_vfmul_s, reg0, vec_mult, reg1, vec_mult, reg2, vec_mult,
reg3, vec_mult, reg0, reg1, reg2, reg3);
DUP4_ARG2(__lsx_vfmul_s, reg4, vec_mult, reg5, vec_mult, reg6, vec_mult,
@ -1798,8 +1814,8 @@ void HalfFloatRow_LSX(const uint16_t* src,
(v4u32)reg3, 13, tmp0, tmp1, tmp2, tmp3);
DUP4_ARG2(__lsx_vsrli_w, (v4u32)reg4, 13, (v4u32)reg5, 13, (v4u32)reg6, 13,
(v4u32)reg7, 13, tmp4, tmp5, tmp6, tmp7);
DUP4_ARG2(__lsx_vpickev_h, tmp1, tmp0, tmp3, tmp2, tmp5, tmp4,
tmp7, tmp6, dst0, dst1, dst2, dst3);
DUP4_ARG2(__lsx_vpickev_h, tmp1, tmp0, tmp3, tmp2, tmp5, tmp4, tmp7, tmp6,
dst0, dst1, dst2, dst3);
__lsx_vst(dst0, dst, 0);
__lsx_vst(dst1, dst, 16);
__lsx_vst(dst2, dst, 32);

View File

@ -616,13 +616,13 @@ void DetileRow_NEON(const uint8_t* src,
"1: \n"
"ld1 {v0.16b}, [%0], %3 \n" // load 16 bytes
"subs %w2, %w2, #16 \n" // 16 processed per loop
"prfm pldl1keep, [%0, 448] \n"
"prfm pldl1keep, [%0, 1792] \n" // 7 tiles of 256b ahead
"st1 {v0.16b}, [%1], #16 \n" // store 16 bytes
"b.gt 1b \n"
: "+r"(src), // %0
"+r"(dst), // %1
"+r"(width) // %2
: "r"(src_tile_stride) // %3
: "+r"(src), // %0
"+r"(dst), // %1
"+r"(width) // %2
: "r"(src_tile_stride) // %3
: "cc", "memory", "v0" // Clobber List
);
}

View File

@ -631,7 +631,8 @@ static void ScaleYUVToARGBBilinearUp(int src_width,
}
#endif
#if defined(HAS_I422TOARGBROW_AVX512BW)
if (TestCpuFlag(kCpuHasAVX512BW | kCpuHasAVX512VL) == (kCpuHasAVX512BW | kCpuHasAVX512VL)) {
if (TestCpuFlag(kCpuHasAVX512BW | kCpuHasAVX512VL) ==
(kCpuHasAVX512BW | kCpuHasAVX512VL)) {
I422ToARGBRow = I422ToARGBRow_Any_AVX512BW;
if (IS_ALIGNED(src_width, 32)) {
I422ToARGBRow = I422ToARGBRow_AVX512BW;

View File

@ -22,15 +22,15 @@ namespace libyuv {
extern "C" {
#endif
#define LOAD_DATA(_src, _in, _out) \
{ \
int _tmp1, _tmp2, _tmp3, _tmp4; \
DUP4_ARG2(__lsx_vpickve2gr_w, _in, 0, _in, 1, _in, 2, \
_in, 3, _tmp1, _tmp2, _tmp3, _tmp4); \
_out = __lsx_vinsgr2vr_w(_out, _src[_tmp1], 0); \
_out = __lsx_vinsgr2vr_w(_out, _src[_tmp2], 1); \
_out = __lsx_vinsgr2vr_w(_out, _src[_tmp3], 2); \
_out = __lsx_vinsgr2vr_w(_out, _src[_tmp4], 3); \
#define LOAD_DATA(_src, _in, _out) \
{ \
int _tmp1, _tmp2, _tmp3, _tmp4; \
DUP4_ARG2(__lsx_vpickve2gr_w, _in, 0, _in, 1, _in, 2, _in, 3, _tmp1, \
_tmp2, _tmp3, _tmp4); \
_out = __lsx_vinsgr2vr_w(_out, _src[_tmp1], 0); \
_out = __lsx_vinsgr2vr_w(_out, _src[_tmp2], 1); \
_out = __lsx_vinsgr2vr_w(_out, _src[_tmp3], 2); \
_out = __lsx_vinsgr2vr_w(_out, _src[_tmp4], 3); \
}
void ScaleARGBRowDown2_LSX(const uint8_t* src_argb,
@ -157,8 +157,8 @@ void ScaleARGBRowDownEvenBox_LSX(const uint8_t* src_argb,
next_argb += stepx;
tmp7 = __lsx_vldrepl_d(next_argb, 0);
next_argb += stepx;
DUP4_ARG2(__lsx_vilvl_d, tmp1, tmp0, tmp3, tmp2, tmp5, tmp4,
tmp7, tmp6, src0, src1, src2, src3);
DUP4_ARG2(__lsx_vilvl_d, tmp1, tmp0, tmp3, tmp2, tmp5, tmp4, tmp7, tmp6,
src0, src1, src2, src3);
DUP2_ARG2(__lsx_vaddwev_h_bu, src0, src2, src1, src3, tmp0, tmp2);
DUP2_ARG2(__lsx_vaddwod_h_bu, src0, src2, src1, src3, tmp1, tmp3);
DUP2_ARG2(__lsx_vpackev_w, tmp1, tmp0, tmp3, tmp2, reg0, reg1);
@ -181,8 +181,8 @@ void ScaleRowDown2_LSX(const uint8_t* src_ptr,
(void)src_stride;
for (x = 0; x < len; x++) {
DUP4_ARG2(__lsx_vld, src_ptr, 0, src_ptr, 16, src_ptr, 32, src_ptr,
48, src0, src1, src2, src3);
DUP4_ARG2(__lsx_vld, src_ptr, 0, src_ptr, 16, src_ptr, 32, src_ptr, 48,
src0, src1, src2, src3);
DUP2_ARG2(__lsx_vpickod_b, src1, src0, src3, src2, dst0, dst1);
__lsx_vst(dst0, dst, 0);
__lsx_vst(dst1, dst, 16);
@ -201,9 +201,9 @@ void ScaleRowDown2Linear_LSX(const uint8_t* src_ptr,
__m128i tmp0, tmp1, tmp2, tmp3, dst0, dst1;
(void)src_stride;
for(x = 0; x < len; x++) {
DUP4_ARG2(__lsx_vld, src_ptr, 0, src_ptr, 16, src_ptr, 32, src_ptr,
48, src0, src1, src2, src3);
for (x = 0; x < len; x++) {
DUP4_ARG2(__lsx_vld, src_ptr, 0, src_ptr, 16, src_ptr, 32, src_ptr, 48,
src0, src1, src2, src3);
DUP2_ARG2(__lsx_vpickev_b, src1, src0, src3, src2, tmp0, tmp2);
DUP2_ARG2(__lsx_vpickod_b, src1, src0, src3, src2, tmp1, tmp3);
DUP2_ARG2(__lsx_vavgr_bu, tmp0, tmp1, tmp2, tmp3, dst0, dst1);
@ -220,20 +220,20 @@ void ScaleRowDown2Box_LSX(const uint8_t* src_ptr,
int dst_width) {
int x;
int len = dst_width / 32;
const uint8_t *src_nex = src_ptr + src_stride;
const uint8_t* src_nex = src_ptr + src_stride;
__m128i src0, src1, src2, src3, src4, src5, src6, src7;
__m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
__m128i dst0, dst1;
for (x = 0; x < len; x++) {
DUP4_ARG2(__lsx_vld, src_ptr, 0, src_ptr, 16, src_ptr, 32, src_ptr,
48, src0, src1, src2, src3);
DUP4_ARG2(__lsx_vld, src_nex, 0, src_nex, 16, src_nex, 32, src_nex,
48, src4, src5, src6, src7);
DUP4_ARG2(__lsx_vaddwev_h_bu, src0, src4, src1, src5, src2, src6, src3, src7,
tmp0, tmp2, tmp4, tmp6);
DUP4_ARG2(__lsx_vaddwod_h_bu, src0, src4, src1, src5, src2, src6, src3, src7,
tmp1, tmp3, tmp5, tmp7);
DUP4_ARG2(__lsx_vld, src_ptr, 0, src_ptr, 16, src_ptr, 32, src_ptr, 48,
src0, src1, src2, src3);
DUP4_ARG2(__lsx_vld, src_nex, 0, src_nex, 16, src_nex, 32, src_nex, 48,
src4, src5, src6, src7);
DUP4_ARG2(__lsx_vaddwev_h_bu, src0, src4, src1, src5, src2, src6, src3,
src7, tmp0, tmp2, tmp4, tmp6);
DUP4_ARG2(__lsx_vaddwod_h_bu, src0, src4, src1, src5, src2, src6, src3,
src7, tmp1, tmp3, tmp5, tmp7);
DUP4_ARG2(__lsx_vadd_h, tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7,
tmp0, tmp1, tmp2, tmp3);
DUP2_ARG3(__lsx_vsrarni_b_h, tmp1, tmp0, 2, tmp3, tmp2, 2, dst0, dst1);
@ -255,8 +255,8 @@ void ScaleRowDown4_LSX(const uint8_t* src_ptr,
(void)src_stride;
for (x = 0; x < len; x++) {
DUP4_ARG2(__lsx_vld, src_ptr, 0, src_ptr, 16, src_ptr, 32, src_ptr,
48, src0, src1, src2, src3);
DUP4_ARG2(__lsx_vld, src_ptr, 0, src_ptr, 16, src_ptr, 32, src_ptr, 48,
src0, src1, src2, src3);
DUP2_ARG2(__lsx_vpickev_b, src1, src0, src3, src2, tmp0, tmp1);
dst0 = __lsx_vpickod_b(tmp1, tmp0);
__lsx_vst(dst0, dst, 0);
@ -279,30 +279,30 @@ void ScaleRowDown4Box_LSX(const uint8_t* src_ptr,
__m128i reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7, dst0;
for (x = 0; x < len; x++) {
DUP4_ARG2(__lsx_vld, src_ptr, 0, src_ptr, 16, src_ptr, 32, src_ptr,
48, src0, src1, src2, src3);
DUP4_ARG2(__lsx_vld, ptr1, 0, ptr1, 16, ptr1, 32, ptr1, 48,
src4, src5, src6, src7);
DUP4_ARG2(__lsx_vaddwev_h_bu, src0, src4, src1, src5, src2, src6, src3, src7,
tmp0, tmp2, tmp4, tmp6);
DUP4_ARG2(__lsx_vaddwod_h_bu, src0, src4, src1, src5, src2, src6, src3, src7,
tmp1, tmp3, tmp5, tmp7);
DUP4_ARG2(__lsx_vld, src_ptr, 0, src_ptr, 16, src_ptr, 32, src_ptr, 48,
src0, src1, src2, src3);
DUP4_ARG2(__lsx_vld, ptr1, 0, ptr1, 16, ptr1, 32, ptr1, 48, src4, src5,
src6, src7);
DUP4_ARG2(__lsx_vaddwev_h_bu, src0, src4, src1, src5, src2, src6, src3,
src7, tmp0, tmp2, tmp4, tmp6);
DUP4_ARG2(__lsx_vaddwod_h_bu, src0, src4, src1, src5, src2, src6, src3,
src7, tmp1, tmp3, tmp5, tmp7);
DUP4_ARG2(__lsx_vadd_h, tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7,
reg0, reg1, reg2, reg3);
DUP4_ARG2(__lsx_vld, ptr2, 0, ptr2, 16, ptr2, 32, ptr2, 48,
src0, src1, src2, src3);
DUP4_ARG2(__lsx_vld, ptr3, 0, ptr3, 16, ptr3, 32, ptr3, 48,
src4, src5, src6, src7);
DUP4_ARG2(__lsx_vaddwev_h_bu, src0, src4, src1, src5, src2, src6, src3, src7,
tmp0, tmp2, tmp4, tmp6);
DUP4_ARG2(__lsx_vaddwod_h_bu, src0, src4, src1, src5, src2, src6, src3, src7,
tmp1, tmp3, tmp5, tmp7);
DUP4_ARG2(__lsx_vld, ptr2, 0, ptr2, 16, ptr2, 32, ptr2, 48, src0, src1,
src2, src3);
DUP4_ARG2(__lsx_vld, ptr3, 0, ptr3, 16, ptr3, 32, ptr3, 48, src4, src5,
src6, src7);
DUP4_ARG2(__lsx_vaddwev_h_bu, src0, src4, src1, src5, src2, src6, src3,
src7, tmp0, tmp2, tmp4, tmp6);
DUP4_ARG2(__lsx_vaddwod_h_bu, src0, src4, src1, src5, src2, src6, src3,
src7, tmp1, tmp3, tmp5, tmp7);
DUP4_ARG2(__lsx_vadd_h, tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7,
reg4, reg5, reg6, reg7);
DUP4_ARG2(__lsx_vadd_h, reg0, reg4, reg1, reg5, reg2, reg6, reg3, reg7,
reg0, reg1, reg2, reg3);
DUP4_ARG2(__lsx_vhaddw_wu_hu, reg0, reg0, reg1, reg1, reg2, reg2, reg3, reg3,
reg0, reg1, reg2, reg3);
DUP4_ARG2(__lsx_vhaddw_wu_hu, reg0, reg0, reg1, reg1, reg2, reg2, reg3,
reg3, reg0, reg1, reg2, reg3);
DUP2_ARG3(__lsx_vsrarni_h_w, reg1, reg0, 4, reg3, reg2, 4, tmp0, tmp1);
dst0 = __lsx_vpickev_b(tmp1, tmp0);
__lsx_vst(dst0, dst, 0);
@ -353,8 +353,8 @@ void ScaleRowDown38_2_Box_LSX(const uint8_t* src_ptr,
len = dst_width / 12;
for (x = 0; x < len; x++) {
DUP4_ARG2(__lsx_vld, src_ptr, 0, src_ptr, 16, src_nex, 0, src_nex,
16, src0, src1, src2, src3);
DUP4_ARG2(__lsx_vld, src_ptr, 0, src_ptr, 16, src_nex, 0, src_nex, 16, src0,
src1, src2, src3);
DUP2_ARG2(__lsx_vaddwev_h_bu, src0, src2, src1, src3, tmp0, tmp2);
DUP2_ARG2(__lsx_vaddwod_h_bu, src0, src2, src1, src3, tmp1, tmp3);
DUP2_ARG2(__lsx_vpickev_h, tmp2, tmp0, tmp3, tmp1, reg0, reg1);
@ -394,15 +394,15 @@ void ScaleRowDown38_3_Box_LSX(const uint8_t* src_ptr,
len = dst_width / 12;
for (x = 0; x < len; x++) {
DUP4_ARG2(__lsx_vld, src_ptr, 0, src_ptr, 16, ptr1, 0, ptr1, 16,
src0, src1, src2, src3);
DUP4_ARG2(__lsx_vld, src_ptr, 0, src_ptr, 16, ptr1, 0, ptr1, 16, src0, src1,
src2, src3);
DUP2_ARG2(__lsx_vld, ptr2, 0, ptr2, 16, src4, src5);
DUP2_ARG2(__lsx_vaddwev_h_bu, src0, src2, src1, src3, tmp0, tmp2);
DUP2_ARG2(__lsx_vaddwod_h_bu, src0, src2, src1, src3, tmp1, tmp3);
DUP2_ARG2(__lsx_vpackev_b, zero, src4, zero, src5, tmp4, tmp6);
DUP2_ARG2(__lsx_vpackod_b, zero, src4, zero, src5, tmp5, tmp7);
DUP4_ARG2(__lsx_vadd_h, tmp0, tmp4, tmp1, tmp5, tmp2, tmp6, tmp3,
tmp7, tmp0, tmp1, tmp2, tmp3);
DUP4_ARG2(__lsx_vadd_h, tmp0, tmp4, tmp1, tmp5, tmp2, tmp6, tmp3, tmp7,
tmp0, tmp1, tmp2, tmp3);
DUP2_ARG2(__lsx_vpickev_h, tmp2, tmp0, tmp3, tmp1, reg0, reg1);
DUP2_ARG2(__lsx_vpackod_h, tmp1, tmp0, tmp3, tmp2, reg2, reg3);
tmp4 = __lsx_vpickev_w(reg3, reg2);
@ -476,28 +476,28 @@ void ScaleFilterCols_LSX(uint8_t* dst_ptr,
tmp3 = __lsx_vsrai_w(vec_x, 16);
tmp7 = __lsx_vand_v(vec_x, const1);
vec_x = __lsx_vadd_w(vec_x, vec1);
DUP4_ARG2(__lsx_vsrai_w, tmp4, 9, tmp5, 9, tmp6, 9, tmp7, 9,
tmp4, tmp5, tmp6, tmp7);
DUP4_ARG2(__lsx_vsrai_w, tmp4, 9, tmp5, 9, tmp6, 9, tmp7, 9, tmp4, tmp5,
tmp6, tmp7);
LOAD_DATA(src_ptr, tmp0, reg0);
LOAD_DATA(src_ptr, tmp1, reg1);
LOAD_DATA(src_ptr, tmp2, reg2);
LOAD_DATA(src_ptr, tmp3, reg3);
DUP4_ARG2(__lsx_vaddi_wu, tmp0, 1, tmp1, 1, tmp2, 1, tmp3, 1,
tmp0, tmp1, tmp2, tmp3);
DUP4_ARG2(__lsx_vaddi_wu, tmp0, 1, tmp1, 1, tmp2, 1, tmp3, 1, tmp0, tmp1,
tmp2, tmp3);
LOAD_DATA(src_ptr, tmp0, reg4);
LOAD_DATA(src_ptr, tmp1, reg5);
LOAD_DATA(src_ptr, tmp2, reg6);
LOAD_DATA(src_ptr, tmp3, reg7);
DUP4_ARG2(__lsx_vsub_w, reg4, reg0, reg5, reg1, reg6, reg2, reg7,
reg3, reg4, reg5, reg6, reg7);
DUP4_ARG2(__lsx_vmul_w, reg4, tmp4, reg5, tmp5, reg6, tmp6, reg7,
tmp7, reg4, reg5, reg6, reg7);
DUP4_ARG2(__lsx_vadd_w, reg4, const2, reg5, const2, reg6, const2,
reg7, const2, reg4, reg5, reg6, reg7);
DUP4_ARG2(__lsx_vsrai_w, reg4, 7, reg5, 7, reg6, 7, reg7, 7,
DUP4_ARG2(__lsx_vsub_w, reg4, reg0, reg5, reg1, reg6, reg2, reg7, reg3,
reg4, reg5, reg6, reg7);
DUP4_ARG2(__lsx_vadd_w, reg0, reg4, reg1, reg5, reg2, reg6, reg3,
reg7, reg0, reg1, reg2, reg3);
DUP4_ARG2(__lsx_vmul_w, reg4, tmp4, reg5, tmp5, reg6, tmp6, reg7, tmp7,
reg4, reg5, reg6, reg7);
DUP4_ARG2(__lsx_vadd_w, reg4, const2, reg5, const2, reg6, const2, reg7,
const2, reg4, reg5, reg6, reg7);
DUP4_ARG2(__lsx_vsrai_w, reg4, 7, reg5, 7, reg6, 7, reg7, 7, reg4, reg5,
reg6, reg7);
DUP4_ARG2(__lsx_vadd_w, reg0, reg4, reg1, reg5, reg2, reg6, reg3, reg7,
reg0, reg1, reg2, reg3);
DUP2_ARG2(__lsx_vpickev_h, reg1, reg0, reg3, reg2, tmp0, tmp1);
dst0 = __lsx_vpickev_b(tmp1, tmp0);
__lsx_vst(dst0, dst_ptr, 0);
@ -598,7 +598,8 @@ void ScaleRowDown34_LSX(const uint8_t* src_ptr,
for (x = 0; x < dst_width; x += 48) {
DUP4_ARG2(__lsx_vld, src_ptr, 0, src_ptr, 16, src_ptr, 32, src_ptr, 48,
src0, src1, src2, src3);
DUP2_ARG3(__lsx_vshuf_b, src1, src0, shuff0, src2, src1, shuff1, dst0, dst1);
DUP2_ARG3(__lsx_vshuf_b, src1, src0, shuff0, src2, src1, shuff1, dst0,
dst1);
dst2 = __lsx_vshuf_b(src3, src2, shuff2);
__lsx_vst(dst0, dst, 0);
__lsx_vst(dst1, dst, 16);
@ -644,16 +645,16 @@ void ScaleRowDown34_0_Box_LSX(const uint8_t* src_ptr,
const0, src0, src1, src2, src3);
DUP4_ARG2(__lsx_vdp2_h_bu, tmp4, const1, tmp5, const2, tmp6, const0, tmp7,
const1, src4, src5, src6, src7);
DUP4_ARG2(__lsx_vdp2_h_bu, tmp8, const2, tmp9, const0, tmp10, const1,
tmp11, const2, tmp0, tmp1, tmp2, tmp3);
DUP4_ARG2(__lsx_vdp2_h_bu, tmp8, const2, tmp9, const0, tmp10, const1, tmp11,
const2, tmp0, tmp1, tmp2, tmp3);
DUP4_ARG2(__lsx_vsrar_h, src0, shift0, src1, shift1, src2, shift2, src3,
shift0, src0, src1, src2, src3);
DUP4_ARG2(__lsx_vsrar_h, src4, shift1, src5, shift2, src6, shift0, src7,
shift1, src4, src5, src6, src7);
DUP4_ARG2(__lsx_vsrar_h, tmp0, shift2, tmp1, shift0, tmp2, shift1, tmp3,
shift2, tmp0, tmp1, tmp2, tmp3);
DUP4_ARG2(__lsx_vslli_h, src0, 1, src1, 1, src2, 1, src3, 1,
tmp5, tmp6, tmp7, tmp8);
DUP4_ARG2(__lsx_vslli_h, src0, 1, src1, 1, src2, 1, src3, 1, tmp5, tmp6,
tmp7, tmp8);
DUP2_ARG2(__lsx_vslli_h, src4, 1, src5, 1, tmp9, tmp10);
DUP4_ARG2(__lsx_vadd_h, src0, tmp5, src1, tmp6, src2, tmp7, src3, tmp8,
src0, src1, src2, src3);
@ -708,8 +709,8 @@ void ScaleRowDown34_1_Box_LSX(const uint8_t* src_ptr,
const0, src0, src1, src2, src3);
DUP4_ARG2(__lsx_vdp2_h_bu, tmp4, const1, tmp5, const2, tmp6, const0, tmp7,
const1, src4, src5, src6, src7);
DUP4_ARG2(__lsx_vdp2_h_bu, tmp8, const2, tmp9, const0, tmp10, const1,
tmp11, const2, tmp0, tmp1, tmp2, tmp3);
DUP4_ARG2(__lsx_vdp2_h_bu, tmp8, const2, tmp9, const0, tmp10, const1, tmp11,
const2, tmp0, tmp1, tmp2, tmp3);
DUP4_ARG2(__lsx_vsrar_h, src0, shift0, src1, shift1, src2, shift2, src3,
shift0, src0, src1, src2, src3);
DUP4_ARG2(__lsx_vsrar_h, src4, shift1, src5, shift2, src6, shift0, src7,

View File

@ -257,7 +257,8 @@ TEST_F(LibYUVBaseTest, TestLinuxMipsMsa) {
EXPECT_EQ(0, MipsCpuCaps("../../unit_test/testdata/mips.txt"));
EXPECT_EQ(kCpuHasMSA, MipsCpuCaps("../../unit_test/testdata/mips_msa.txt"));
EXPECT_EQ(kCpuHasMSA, MipsCpuCaps("../../unit_test/testdata/mips_loongson2k.txt"));
EXPECT_EQ(kCpuHasMSA,
MipsCpuCaps("../../unit_test/testdata/mips_loongson2k.txt"));
} else {
printf("WARNING: unable to load \"../../unit_test/testdata/mips.txt\"\n");
}

39
unit_test/planar_test.cc Normal file → Executable file
View File

@ -1484,6 +1484,45 @@ TEST_F(LibYUVPlanarTest, TestCopyPlane) {
EXPECT_EQ(0, err);
}
// Verify that the optimized DetilePlane path produces output identical to the
// unoptimized C reference when converting a randomized 16x16-tiled plane to
// linear layout.
TEST_F(LibYUVPlanarTest, TestDetilePlane) {
  // The tiled source must cover whole 16x16 tiles, so round both dimensions
  // up to the next multiple of 16.
  const int tiled_width = (benchmark_width_ + 15) & ~15;
  const int tiled_height = (benchmark_height_ + 15) & ~15;
  const int tiled_plane_size = tiled_width * tiled_height;
  const int linear_plane_size = benchmark_width_ * benchmark_height_;

  align_buffer_page_end(orig_y, tiled_plane_size);
  align_buffer_page_end(dst_c, linear_plane_size);
  align_buffer_page_end(dst_opt, linear_plane_size);

  MemRandomize(orig_y, tiled_plane_size);
  memset(dst_c, 0, linear_plane_size);
  memset(dst_opt, 0, linear_plane_size);

  // Reference pass: run with every CPU optimization masked off.
  MaskCpuFlags(disable_cpu_flags_);
  for (int iter = 0; iter < benchmark_iterations_; ++iter) {
    DetilePlane(orig_y, tiled_width, dst_c, benchmark_width_, benchmark_width_,
                benchmark_height_, 16);
  }

  // Optimized pass: run with the benchmark CPU feature set enabled.
  MaskCpuFlags(benchmark_cpu_info_);
  for (int iter = 0; iter < benchmark_iterations_; ++iter) {
    DetilePlane(orig_y, tiled_width, dst_opt, benchmark_width_,
                benchmark_width_, benchmark_height_, 16);
  }

  // Both paths must agree byte-for-byte over the linear destination plane.
  for (int idx = 0; idx < linear_plane_size; ++idx) {
    EXPECT_EQ(dst_c[idx], dst_opt[idx]);
  }

  free_aligned_buffer_page_end(orig_y);
  free_aligned_buffer_page_end(dst_c);
  free_aligned_buffer_page_end(dst_opt);
}
static int TestMultiply(int width,
int height,
int benchmark_iterations,