Add MSA optimized TransposeWx8_MSA and TransposeUVWx8_MSA functions
R=fbarchard@google.com
BUG=libyuv:634

Performance Gain (vs C vectorized)
TransposeWx8_MSA       - ~2.7x
TransposeWx8_Any_MSA   - ~2.1x
TransposeUVWx8_MSA     - ~2.5x
TransposeUVWx8_Any_MSA - ~2.7x

Performance Gain (vs C non-vectorized)
TransposeWx8_MSA       - ~4.6x
TransposeWx8_Any_MSA   - ~2.9x
TransposeUVWx8_MSA     - ~4.4x
TransposeUVWx8_Any_MSA - ~3.7x

Review URL: https://codereview.chromium.org/2553403002 .
parent b18fd21d3c
commit 6fa5e4eb78
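For context: TransposeWx8 writes each 8-byte source column out as a destination row, and the MSA kernels added below compute exactly this with vector interleaves. A minimal scalar model of the operation being accelerated (illustrative sketch, not part of the commit):

#include <stdint.h>

/* Scalar model of TransposeWx8: dst row i receives src column i. */
static void TransposeWx8_ref(const uint8_t* src, int src_stride,
                             uint8_t* dst, int dst_stride, int width) {
  for (int i = 0; i < width; ++i) {   /* each source column...      */
    for (int j = 0; j < 8; ++j) {     /* ...of 8 rows...            */
      dst[i * dst_stride + j] = src[j * src_stride + i];
    }                                 /* ...becomes one dst row     */
  }
}

The speedups quoted above compare the vector kernels against this kind of per-byte C loop, with and without compiler auto-vectorization.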
@@ -57,7 +57,8 @@ ifeq ($(TARGET_ARCH_ABI),mips)
     LOCAL_CFLAGS += -DLIBYUV_MSA
     LOCAL_SRC_FILES += \
         source/row_msa.cc \
-        source/scale_msa.cc
+        source/scale_msa.cc \
+        source/rotate_msa.cc
 endif
 
 LOCAL_EXPORT_C_INCLUDES := $(LOCAL_PATH)/include
BUILD.gn (1 line changed)
@@ -165,6 +165,7 @@ if (libyuv_use_msa) {
     # MSA Source Files
     "source/row_msa.cc",
     "source/scale_msa.cc",
+    "source/rotate_msa.cc",
   ]
 
   public_configs = [ ":libyuv_config" ]
@@ -33,6 +33,7 @@ set(ly_source_files
   ${ly_src_dir}/rotate_argb.cc
   ${ly_src_dir}/rotate_common.cc
   ${ly_src_dir}/rotate_mips.cc
+  ${ly_src_dir}/rotate_msa.cc
   ${ly_src_dir}/rotate_neon.cc
   ${ly_src_dir}/rotate_neon64.cc
   ${ly_src_dir}/rotate_gcc.cc
@@ -20,7 +20,7 @@
   ({ \
     uint8* psrc_lw_m = (uint8*)(psrc); /* NOLINT */ \
     uint32 val_m; \
-    asm volatile("lw %[val_m], %[psrc_lw_m] \n\t" \
+    asm volatile("lw %[val_m], %[psrc_lw_m] \n" \
                  : [val_m] "=r"(val_m) \
                  : [psrc_lw_m] "m"(*psrc_lw_m)); \
     val_m; \
@@ -31,7 +31,7 @@
   ({ \
     uint8* psrc_ld_m = (uint8*)(psrc); /* NOLINT */ \
     uint64 val_m = 0; \
-    asm volatile("ld %[val_m], %[psrc_ld_m] \n\t" \
+    asm volatile("ld %[val_m], %[psrc_ld_m] \n" \
                  : [val_m] "=r"(val_m) \
                  : [psrc_ld_m] "m"(*psrc_ld_m)); \
     val_m; \
@@ -50,12 +50,44 @@
     val_m; \
   })
 #endif  // (__mips == 64)
+
+#define SW(val, pdst) \
+  ({ \
+    uint8_t* pdst_sw_m = (uint8_t*)(pdst); \
+    uint32_t val_m = (val); \
+    asm volatile("sw %[val_m], %[pdst_sw_m] \n" \
+                 \
+                 : [pdst_sw_m] "=m"(*pdst_sw_m) \
+                 : [val_m] "r"(val_m)); \
+  })
+
+#if (__mips == 64)
+#define SD(val, pdst) \
+  ({ \
+    uint8_t* pdst_sd_m = (uint8_t*)(pdst); \
+    uint64_t val_m = (val); \
+    asm volatile("sd %[val_m], %[pdst_sd_m] \n" \
+                 \
+                 : [pdst_sd_m] "=m"(*pdst_sd_m) \
+                 : [val_m] "r"(val_m)); \
+  })
+#else  // !(__mips == 64)
+#define SD(val, pdst) \
+  ({ \
+    uint8_t* pdst_sd_m = (uint8_t*)(pdst); \
+    uint32_t val0_m, val1_m; \
+    val0_m = (uint32_t)((val)&0x00000000FFFFFFFF); \
+    val1_m = (uint32_t)(((val) >> 32) & 0x00000000FFFFFFFF); \
+    SW(val0_m, pdst_sd_m); \
+    SW(val1_m, pdst_sd_m + 4); \
+  })
+#endif  // !(__mips == 64)
 #else  // !(__mips_isa_rev >= 6)
 #define LW(psrc) \
   ({ \
     uint8* psrc_lw_m = (uint8*)(psrc); /* NOLINT */ \
     uint32 val_m; \
-    asm volatile("ulw %[val_m], %[psrc_lw_m] \n\t" \
+    asm volatile("ulw %[val_m], %[psrc_lw_m] \n" \
                  : [val_m] "=r"(val_m) \
                  : [psrc_lw_m] "m"(*psrc_lw_m)); \
     val_m; \
@@ -66,7 +98,7 @@
   ({ \
     uint8* psrc_ld_m = (uint8*)(psrc); /* NOLINT */ \
     uint64 val_m = 0; \
-    asm volatile("uld %[val_m], %[psrc_ld_m] \n\t" \
+    asm volatile("uld %[val_m], %[psrc_ld_m] \n" \
                  : [val_m] "=r"(val_m) \
                  : [psrc_ld_m] "m"(*psrc_ld_m)); \
     val_m; \
@@ -85,6 +117,25 @@
     val_m; \
   })
 #endif  // (__mips == 64)
+
+#define SW(val, pdst) \
+  ({ \
+    uint8_t* pdst_sw_m = (uint8_t*)(pdst); \
+    uint32_t val_m = (val); \
+    asm volatile("usw %[val_m], %[pdst_sw_m] \n" \
+                 : [pdst_sw_m] "=m"(*pdst_sw_m) \
+                 : [val_m] "r"(val_m)); \
+  })
+
+#define SD(val, pdst) \
+  ({ \
+    uint8_t* pdst_sd_m = (uint8_t*)(pdst); \
+    uint32_t val0_m, val1_m; \
+    val0_m = (uint32_t)((val)&0x00000000FFFFFFFF); \
+    val1_m = (uint32_t)(((val) >> 32) & 0x00000000FFFFFFFF); \
+    SW(val0_m, pdst_sd_m); \
+    SW(val1_m, pdst_sd_m + 4); \
+  })
 #endif  // (__mips_isa_rev >= 6)
 
 // TODO(fbarchard): Consider removing __VAR_ARGS versions.
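A note on the new store macros: on 32-bit MIPS there is no single 64-bit store, so the SD fallback splits the value into two SW stores, the low word at pdst and the high word at pdst + 4. That layout assumes little-endian memory order; a plain-C model of the same splitting (illustrative only):

#include <stdint.h>
#include <string.h>

/* Model of the 32-bit SD fallback: split a 64-bit value into two
   32-bit stores, low word first (little-endian layout assumed). */
static void SD_model(uint64_t val, uint8_t* pdst) {
  uint32_t val0 = (uint32_t)(val & 0xFFFFFFFFu);          /* SW(val0_m, pdst)     */
  uint32_t val1 = (uint32_t)((val >> 32) & 0xFFFFFFFFu);  /* SW(val1_m, pdst + 4) */
  memcpy(pdst, &val0, sizeof(val0));
  memcpy(pdst + 4, &val1, sizeof(val1));
}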
@@ -60,6 +60,11 @@ extern "C" {
 #define HAS_TRANSPOSEUVWX8_DSPR2
 #endif  // defined(__mips__)
 
+#if !defined(LIBYUV_DISABLE_MSA) && defined(__mips_msa)
+#define HAS_TRANSPOSEWX8_MSA
+#define HAS_TRANSPOSEUVWX8_MSA
+#endif
+
 void TransposeWxH_C(const uint8* src,
                     int src_stride,
                     uint8* dst,
@@ -97,6 +102,11 @@ void TransposeWx8_Fast_DSPR2(const uint8* src,
                              uint8* dst,
                              int dst_stride,
                              int width);
+void TransposeWx8_MSA(const uint8* src,
+                      int src_stride,
+                      uint8* dst,
+                      int dst_stride,
+                      int width);
 
 void TransposeWx8_Any_NEON(const uint8* src,
                            int src_stride,
@@ -118,6 +128,11 @@ void TransposeWx8_Any_DSPR2(const uint8* src,
                             uint8* dst,
                             int dst_stride,
                             int width);
+void TransposeWx8_Any_MSA(const uint8* src,
+                          int src_stride,
+                          uint8* dst,
+                          int dst_stride,
+                          int width);
 
 void TransposeUVWxH_C(const uint8* src,
                       int src_stride,
@@ -156,6 +171,13 @@ void TransposeUVWx8_DSPR2(const uint8* src,
                           uint8* dst_b,
                           int dst_stride_b,
                           int width);
+void TransposeUVWx8_MSA(const uint8* src,
+                        int src_stride,
+                        uint8* dst_a,
+                        int dst_stride_a,
+                        uint8* dst_b,
+                        int dst_stride_b,
+                        int width);
 
 void TransposeUVWx8_Any_SSE2(const uint8* src,
                              int src_stride,
@@ -178,6 +200,13 @@ void TransposeUVWx8_Any_DSPR2(const uint8* src,
                               uint8* dst_b,
                               int dst_stride_b,
                               int width);
+void TransposeUVWx8_Any_MSA(const uint8* src,
+                            int src_stride,
+                            uint8* dst_a,
+                            int dst_stride_a,
+                            uint8* dst_b,
+                            int dst_stride_b,
+                            int width);
 
 #ifdef __cplusplus
 }  // extern "C"
@@ -55,6 +55,7 @@
       'source/rotate_common.cc',
       'source/rotate_gcc.cc',
       'source/rotate_mips.cc',
+      'source/rotate_msa.cc',
      'source/rotate_neon.cc',
       'source/rotate_neon64.cc',
       'source/rotate_win.cc',
@@ -62,6 +62,14 @@ void TransposePlane(const uint8* src,
     }
   }
 #endif
+#if defined(HAS_TRANSPOSEWX8_MSA)
+  if (TestCpuFlag(kCpuHasMSA)) {
+    TransposeWx8 = TransposeWx8_Any_MSA;
+    if (IS_ALIGNED(width, 16)) {
+      TransposeWx8 = TransposeWx8_MSA;
+    }
+  }
+#endif
 
   // Work across the source in 8x8 tiles
   while (i >= 8) {
@@ -232,6 +240,14 @@ void TransposeUV(const uint8* src,
       TransposeUVWx8 = TransposeUVWx8_DSPR2;
     }
 #endif
+#if defined(HAS_TRANSPOSEUVWX8_MSA)
+  if (TestCpuFlag(kCpuHasMSA)) {
+    TransposeUVWx8 = TransposeUVWx8_Any_MSA;
+    if (IS_ALIGNED(width, 8)) {
+      TransposeUVWx8 = TransposeUVWx8_MSA;
+    }
+  }
+#endif
 
   // Work through the source in 8x8 tiles.
   while (i >= 8) {
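Both dispatch blocks follow libyuv's usual two-step pattern: install the _Any_ variant whenever the MSA CPU flag is present, then upgrade to the full-vector kernel when the width fills whole vector tiles. The constants differ because TransposeWx8_MSA consumes 16 one-byte columns per 16-byte vector, while TransposeUVWx8_MSA consumes 8 two-byte UV pairs (also 16 bytes). IS_ALIGNED here is libyuv's power-of-two multiple test, roughly:

/* libyuv's alignment test (basic_types.h): true when the value is a
   multiple of the power-of-two a, i.e. its low log2(a) bits are zero. */
#define IS_ALIGNED(p, a) (!((uintptr_t)(p) & ((a)-1)))

So IS_ALIGNED(width, 16) holds exactly when the low 4 bits of width are zero.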
@@ -41,6 +41,9 @@ TANY(TransposeWx8_Fast_Any_SSSE3, TransposeWx8_Fast_SSSE3, 15)
 #ifdef HAS_TRANSPOSEWX8_DSPR2
 TANY(TransposeWx8_Any_DSPR2, TransposeWx8_DSPR2, 7)
 #endif
+#ifdef HAS_TRANSPOSEWX8_MSA
+TANY(TransposeWx8_Any_MSA, TransposeWx8_MSA, 15)
+#endif
 #undef TANY
 
 #define TUVANY(NAMEANY, TPOS_SIMD, MASK) \
@@ -64,6 +67,9 @@ TUVANY(TransposeUVWx8_Any_SSE2, TransposeUVWx8_SSE2, 7)
 #ifdef HAS_TRANSPOSEUVWX8_DSPR2
 TUVANY(TransposeUVWx8_Any_DSPR2, TransposeUVWx8_DSPR2, 7)
 #endif
+#ifdef HAS_TRANSPOSEUVWX8_MSA
+TUVANY(TransposeUVWx8_Any_MSA, TransposeUVWx8_MSA, 7)
+#endif
 #undef TUVANY
 
 #ifdef __cplusplus
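The masks passed to TANY/TUVANY are the tile widths minus one: the wrapper runs the SIMD kernel on the largest tile-aligned prefix and lets the C transpose finish the remaining columns. Based on the TANY definition earlier in this file, the MSA instantiation expands roughly as below (a sketch of the wrapper pattern relying on the surrounding file's types, not verbatim from rotate_any.cc):

/* Approximate expansion of TANY(TransposeWx8_Any_MSA, TransposeWx8_MSA, 15). */
void TransposeWx8_Any_MSA(const uint8* src, int src_stride,
                          uint8* dst, int dst_stride, int width) {
  int r = width & 15;  /* leftover columns that do not fill a 16-wide tile */
  int n = width - r;   /* largest multiple of 16 */
  if (n > 0) {
    TransposeWx8_MSA(src, src_stride, dst, dst_stride, n);
  }
  TransposeWx8_C(src + n, src_stride, dst + n * dst_stride, dst_stride, r);
}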
source/rotate_msa.cc (new file, 203 lines)
@@ -0,0 +1,203 @@
+/*
+ *  Copyright 2016 The LibYuv Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS. All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "libyuv/rotate_row.h"
+
+// This module is for GCC MSA
+#if !defined(LIBYUV_DISABLE_MSA) && defined(__mips_msa)
+#include "libyuv/macros_msa.h"
+
+#ifdef __cplusplus
+namespace libyuv {
+extern "C" {
+#endif
+
+void TransposeWx8_MSA(const uint8_t* src,
+                      int src_stride,
+                      uint8_t* dst,
+                      int dst_stride,
+                      int width) {
+  int x;
+  uint64_t val0, val1, val2, val3;
+  v16u8 src0, src1, src2, src3, src4, src5, src6, src7;
+  v16u8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
+  v16u8 reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7;
+  v16u8 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;
+
+  for (x = 0; x < width; x += 16) {
+    src0 = (v16u8)__msa_ld_b((v16i8*)src, 0);
+    src1 = (v16u8)__msa_ld_b((v16i8*)(src + src_stride), 0);
+    src2 = (v16u8)__msa_ld_b((v16i8*)(src + src_stride * 2), 0);
+    src3 = (v16u8)__msa_ld_b((v16i8*)(src + src_stride * 3), 0);
+    src4 = (v16u8)__msa_ld_b((v16i8*)(src + src_stride * 4), 0);
+    src5 = (v16u8)__msa_ld_b((v16i8*)(src + src_stride * 5), 0);
+    src6 = (v16u8)__msa_ld_b((v16i8*)(src + src_stride * 6), 0);
+    src7 = (v16u8)__msa_ld_b((v16i8*)(src + src_stride * 7), 0);
+    vec0 = (v16u8)__msa_ilvr_b((v16i8)src2, (v16i8)src0);
+    vec1 = (v16u8)__msa_ilvr_b((v16i8)src3, (v16i8)src1);
+    vec2 = (v16u8)__msa_ilvr_b((v16i8)src6, (v16i8)src4);
+    vec3 = (v16u8)__msa_ilvr_b((v16i8)src7, (v16i8)src5);
+    vec4 = (v16u8)__msa_ilvl_b((v16i8)src2, (v16i8)src0);
+    vec5 = (v16u8)__msa_ilvl_b((v16i8)src3, (v16i8)src1);
+    vec6 = (v16u8)__msa_ilvl_b((v16i8)src6, (v16i8)src4);
+    vec7 = (v16u8)__msa_ilvl_b((v16i8)src7, (v16i8)src5);
+    reg0 = (v16u8)__msa_ilvr_b((v16i8)vec1, (v16i8)vec0);
+    reg1 = (v16u8)__msa_ilvl_b((v16i8)vec1, (v16i8)vec0);
+    reg2 = (v16u8)__msa_ilvr_b((v16i8)vec3, (v16i8)vec2);
+    reg3 = (v16u8)__msa_ilvl_b((v16i8)vec3, (v16i8)vec2);
+    reg4 = (v16u8)__msa_ilvr_b((v16i8)vec5, (v16i8)vec4);
+    reg5 = (v16u8)__msa_ilvl_b((v16i8)vec5, (v16i8)vec4);
+    reg6 = (v16u8)__msa_ilvr_b((v16i8)vec7, (v16i8)vec6);
+    reg7 = (v16u8)__msa_ilvl_b((v16i8)vec7, (v16i8)vec6);
+    dst0 = (v16u8)__msa_ilvr_w((v4i32)reg2, (v4i32)reg0);
+    dst1 = (v16u8)__msa_ilvl_w((v4i32)reg2, (v4i32)reg0);
+    dst2 = (v16u8)__msa_ilvr_w((v4i32)reg3, (v4i32)reg1);
+    dst3 = (v16u8)__msa_ilvl_w((v4i32)reg3, (v4i32)reg1);
+    dst4 = (v16u8)__msa_ilvr_w((v4i32)reg6, (v4i32)reg4);
+    dst5 = (v16u8)__msa_ilvl_w((v4i32)reg6, (v4i32)reg4);
+    dst6 = (v16u8)__msa_ilvr_w((v4i32)reg7, (v4i32)reg5);
+    dst7 = (v16u8)__msa_ilvl_w((v4i32)reg7, (v4i32)reg5);
+    val0 = __msa_copy_s_d((v2i64)dst0, 0);
+    val1 = __msa_copy_s_d((v2i64)dst0, 1);
+    val2 = __msa_copy_s_d((v2i64)dst1, 0);
+    val3 = __msa_copy_s_d((v2i64)dst1, 1);
+    SD(val0, dst);
+    SD(val1, dst + dst_stride);
+    SD(val2, dst + dst_stride * 2);
+    SD(val3, dst + dst_stride * 3);
+    dst += dst_stride * 4;
+    val0 = __msa_copy_s_d((v2i64)dst2, 0);
+    val1 = __msa_copy_s_d((v2i64)dst2, 1);
+    val2 = __msa_copy_s_d((v2i64)dst3, 0);
+    val3 = __msa_copy_s_d((v2i64)dst3, 1);
+    SD(val0, dst);
+    SD(val1, dst + dst_stride);
+    SD(val2, dst + dst_stride * 2);
+    SD(val3, dst + dst_stride * 3);
+    dst += dst_stride * 4;
+    val0 = __msa_copy_s_d((v2i64)dst4, 0);
+    val1 = __msa_copy_s_d((v2i64)dst4, 1);
+    val2 = __msa_copy_s_d((v2i64)dst5, 0);
+    val3 = __msa_copy_s_d((v2i64)dst5, 1);
+    SD(val0, dst);
+    SD(val1, dst + dst_stride);
+    SD(val2, dst + dst_stride * 2);
+    SD(val3, dst + dst_stride * 3);
+    dst += dst_stride * 4;
+    val0 = __msa_copy_s_d((v2i64)dst6, 0);
+    val1 = __msa_copy_s_d((v2i64)dst6, 1);
+    val2 = __msa_copy_s_d((v2i64)dst7, 0);
+    val3 = __msa_copy_s_d((v2i64)dst7, 1);
+    SD(val0, dst);
+    SD(val1, dst + dst_stride);
+    SD(val2, dst + dst_stride * 2);
+    SD(val3, dst + dst_stride * 3);
+    dst += dst_stride * 4;
+    src += 16;
+  }
+}
+
+void TransposeUVWx8_MSA(const uint8_t* src,
+                        int src_stride,
+                        uint8_t* dst_a,
+                        int dst_stride_a,
+                        uint8_t* dst_b,
+                        int dst_stride_b,
+                        int width) {
+  int x;
+  uint64_t val0, val1, val2, val3;
+  v16u8 src0, src1, src2, src3, src4, src5, src6, src7;
+  v16u8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
+  v16u8 reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7;
+  v16u8 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;
+
+  for (x = 0; x < width; x += 8) {
+    src0 = (v16u8)__msa_ld_b((v16i8*)src, 0);
+    src1 = (v16u8)__msa_ld_b((v16i8*)(src + src_stride), 0);
+    src2 = (v16u8)__msa_ld_b((v16i8*)(src + src_stride * 2), 0);
+    src3 = (v16u8)__msa_ld_b((v16i8*)(src + src_stride * 3), 0);
+    src4 = (v16u8)__msa_ld_b((v16i8*)(src + src_stride * 4), 0);
+    src5 = (v16u8)__msa_ld_b((v16i8*)(src + src_stride * 5), 0);
+    src6 = (v16u8)__msa_ld_b((v16i8*)(src + src_stride * 6), 0);
+    src7 = (v16u8)__msa_ld_b((v16i8*)(src + src_stride * 7), 0);
+    vec0 = (v16u8)__msa_ilvr_b((v16i8)src1, (v16i8)src0);
+    vec1 = (v16u8)__msa_ilvr_b((v16i8)src3, (v16i8)src2);
+    vec2 = (v16u8)__msa_ilvr_b((v16i8)src5, (v16i8)src4);
+    vec3 = (v16u8)__msa_ilvr_b((v16i8)src7, (v16i8)src6);
+    vec4 = (v16u8)__msa_ilvl_b((v16i8)src1, (v16i8)src0);
+    vec5 = (v16u8)__msa_ilvl_b((v16i8)src3, (v16i8)src2);
+    vec6 = (v16u8)__msa_ilvl_b((v16i8)src5, (v16i8)src4);
+    vec7 = (v16u8)__msa_ilvl_b((v16i8)src7, (v16i8)src6);
+    reg0 = (v16u8)__msa_ilvr_h((v8i16)vec1, (v8i16)vec0);
+    reg1 = (v16u8)__msa_ilvr_h((v8i16)vec3, (v8i16)vec2);
+    reg2 = (v16u8)__msa_ilvl_h((v8i16)vec1, (v8i16)vec0);
+    reg3 = (v16u8)__msa_ilvl_h((v8i16)vec3, (v8i16)vec2);
+    reg4 = (v16u8)__msa_ilvr_h((v8i16)vec5, (v8i16)vec4);
+    reg5 = (v16u8)__msa_ilvr_h((v8i16)vec7, (v8i16)vec6);
+    reg6 = (v16u8)__msa_ilvl_h((v8i16)vec5, (v8i16)vec4);
+    reg7 = (v16u8)__msa_ilvl_h((v8i16)vec7, (v8i16)vec6);
+    dst0 = (v16u8)__msa_ilvr_w((v4i32)reg1, (v4i32)reg0);
+    dst1 = (v16u8)__msa_ilvl_w((v4i32)reg1, (v4i32)reg0);
+    dst2 = (v16u8)__msa_ilvr_w((v4i32)reg3, (v4i32)reg2);
+    dst3 = (v16u8)__msa_ilvl_w((v4i32)reg3, (v4i32)reg2);
+    dst4 = (v16u8)__msa_ilvr_w((v4i32)reg5, (v4i32)reg4);
+    dst5 = (v16u8)__msa_ilvl_w((v4i32)reg5, (v4i32)reg4);
+    dst6 = (v16u8)__msa_ilvr_w((v4i32)reg7, (v4i32)reg6);
+    dst7 = (v16u8)__msa_ilvl_w((v4i32)reg7, (v4i32)reg6);
+    val0 = __msa_copy_s_d((v2i64)dst0, 0);
+    val1 = __msa_copy_s_d((v2i64)dst0, 1);
+    val2 = __msa_copy_s_d((v2i64)dst1, 0);
+    val3 = __msa_copy_s_d((v2i64)dst1, 1);
+    SD(val0, dst_a);
+    SD(val2, dst_a + dst_stride_a);
+    SD(val1, dst_b);
+    SD(val3, dst_b + dst_stride_b);
+    dst_a += dst_stride_a * 2;
+    dst_b += dst_stride_b * 2;
+    val0 = __msa_copy_s_d((v2i64)dst2, 0);
+    val1 = __msa_copy_s_d((v2i64)dst2, 1);
+    val2 = __msa_copy_s_d((v2i64)dst3, 0);
+    val3 = __msa_copy_s_d((v2i64)dst3, 1);
+    SD(val0, dst_a);
+    SD(val2, dst_a + dst_stride_a);
+    SD(val1, dst_b);
+    SD(val3, dst_b + dst_stride_b);
+    dst_a += dst_stride_a * 2;
+    dst_b += dst_stride_b * 2;
+    val0 = __msa_copy_s_d((v2i64)dst4, 0);
+    val1 = __msa_copy_s_d((v2i64)dst4, 1);
+    val2 = __msa_copy_s_d((v2i64)dst5, 0);
+    val3 = __msa_copy_s_d((v2i64)dst5, 1);
+    SD(val0, dst_a);
+    SD(val2, dst_a + dst_stride_a);
+    SD(val1, dst_b);
+    SD(val3, dst_b + dst_stride_b);
+    dst_a += dst_stride_a * 2;
+    dst_b += dst_stride_b * 2;
+    val0 = __msa_copy_s_d((v2i64)dst6, 0);
+    val1 = __msa_copy_s_d((v2i64)dst6, 1);
+    val2 = __msa_copy_s_d((v2i64)dst7, 0);
+    val3 = __msa_copy_s_d((v2i64)dst7, 1);
+    SD(val0, dst_a);
+    SD(val2, dst_a + dst_stride_a);
+    SD(val1, dst_b);
+    SD(val3, dst_b + dst_stride_b);
+    dst_a += dst_stride_a * 2;
+    dst_b += dst_stride_b * 2;
+    src += 16;
+  }
+}
+
+#ifdef __cplusplus
+}  // extern "C"
+}  // namespace libyuv
+#endif
+
+#endif  // !defined(LIBYUV_DISABLE_MSA) && defined(__mips_msa)
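Both kernels are pure interleave networks: every __msa_ilvr_*/__msa_ilvl_* pair merges two vectors chunk-wise, and chaining rounds at the right granularities (byte, then byte or halfword, then word above) deposits each source column into its own output vector. A self-contained 4x4 model of the idea in portable C (generic helpers, not MSA intrinsics):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Interleave `gran`-byte chunks from one half of a and b, loosely
   mirroring MSA's ilvr_* (hi = 0, low half) and ilvl_* (hi = 1, high half). */
static void ilv(const uint8_t a[4], const uint8_t b[4], int gran, int hi,
                uint8_t out[4]) {
  int base = hi ? 2 : 0;               /* which half of each input */
  for (int p = 0; p < 2 / gran; ++p) { /* alternate chunks of a and b */
    memcpy(out + 2 * p * gran, a + base + p * gran, gran);
    memcpy(out + (2 * p + 1) * gran, b + base + p * gran, gran);
  }
}

int main(void) {
  uint8_t r[4][4] = {{0, 1, 2, 3}, {10, 11, 12, 13},
                     {20, 21, 22, 23}, {30, 31, 32, 33}};
  uint8_t t[4][4], o[4][4];
  ilv(r[0], r[1], 1, 0, t[0]); /* a0 b0 a1 b1 */
  ilv(r[2], r[3], 1, 0, t[1]); /* c0 d0 c1 d1 */
  ilv(r[0], r[1], 1, 1, t[2]); /* a2 b2 a3 b3 */
  ilv(r[2], r[3], 1, 1, t[3]); /* c2 d2 c3 d3 */
  ilv(t[0], t[1], 2, 0, o[0]); /* a0 b0 c0 d0: column 0 */
  ilv(t[0], t[1], 2, 1, o[1]); /* a1 b1 c1 d1: column 1 */
  ilv(t[2], t[3], 2, 0, o[2]); /* a2 b2 c2 d2: column 2 */
  ilv(t[2], t[3], 2, 1, o[3]); /* a3 b3 c3 d3: column 3 */
  for (int i = 0; i < 4; ++i)  /* prints the transpose of r */
    printf("%2d %2d %2d %2d\n", o[i][0], o[i][1], o[i][2], o[i][3]);
  return 0;
}

Compiled and run, this prints the transpose of r; the kernels above do the same with 16-byte vectors, one more interleave round, and __msa_copy_s_d plus SD to scatter the finished rows to strided destinations.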