Mirror source to contiguous destination with NEON

BUG=none
TEST=none
Review URL: https://webrtc-codereview.appspot.com/937020

git-svn-id: http://libyuv.googlecode.com/svn/trunk@488 16f28f9a-4ce2-e073-06de-1de4eb20be90
Author: fbarchard@google.com
Date:   2012-11-14 02:03:49 +00:00
Parent: fdec4be353
Commit: 3e46444727
15 changed files with 535 additions and 589 deletions
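
The change reworks the NEON mirror kernels so the source row is read backwards while the destination is written forwards with plain sequential stores, replacing the old code that stepped forwards through the source, scattered stores backwards through the destination, and carried per-iteration leftover handling. The NEON paths are now only selected when the width is a multiple of the block size (16 bytes for MirrorRow, 8 UV pairs for MirrorUVRow, 4 pixels for ARGBMirrorRow). Conceptually each kernel is a plain horizontal mirror; a minimal C sketch of the byte version (illustrative helper, not the library's MirrorRow_C):

typedef unsigned char uint8;  // stand-in for libyuv's uint8

// Sketch only: byte-wise horizontal mirror of one row.
// Reads the source from the end, writes the destination forwards.
void MirrorRowSketch(const uint8* src, uint8* dst, int width) {
  src += width - 1;                  // last source byte
  for (int x = 0; x < width; ++x) {
    dst[x] = src[-x];                // sequential stores, reversed loads
  }
}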


@ -1,6 +1,6 @@
Name: libyuv
URL: http://code.google.com/p/libyuv/
Version: 486
Version: 488
License: BSD
License File: LICENSE


@ -188,7 +188,7 @@ extern "C" {
#define HAS_I444TOARGBROW_NEON
#define HAS_MERGEUV_NEON
#define HAS_MIRRORROW_NEON
#define HAS_MirrorUVRow_NEON
#define HAS_MIRRORUVROW_NEON
#define HAS_NV12TOARGBROW_NEON
#define HAS_NV12TORGB565ROW_NEON
#define HAS_NV21TOARGBROW_NEON
@ -216,6 +216,7 @@ extern "C" {
#define HAS_YUY2TOUV422ROW_NEON
#define HAS_YUY2TOUVROW_NEON
#define HAS_YUY2TOYROW_NEON
#define HAS_ARGBMIRRORROW_NEON
#endif
// The following are available on Mips platforms
@ -434,7 +435,7 @@ void BGRAToUVRow_Any_SSSE3(const uint8* src_bgra, int src_stride_bgra,
void ABGRToUVRow_Any_SSSE3(const uint8* src_abgr, int src_stride_abgr,
uint8* dst_u, uint8* dst_v, int width);
void RGBAToUVRow_Any_SSSE3(const uint8* src_rgba, int src_stride_rgba,
uint8* dst_u, uint8* dst_v, int width);
uint8* dst_u, uint8* dst_v, int width);
void ARGBToUV444Row_Any_NEON(const uint8* src_argb, uint8* dst_u, uint8* dst_v,
int pix);
void ARGBToUV422Row_Any_NEON(const uint8* src_argb, uint8* dst_u, uint8* dst_v,
@ -498,18 +499,19 @@ void MirrorRow_SSSE3(const uint8* src, uint8* dst, int width);
void MirrorRow_SSE2(const uint8* src, uint8* dst, int width);
void MirrorRow_NEON(const uint8* src, uint8* dst, int width);
void MirrorRow_MIPS_DSPR2(const uint8* src, uint8* dst, int width);
void MirrorUVRow_MIPS_DSPR2(const uint8* src_uv, uint8* dst_u, uint8* dst_v,
int width);
void MirrorRow_C(const uint8* src, uint8* dst, int width);
void MirrorUVRow_SSSE3(const uint8* src_uv, uint8* dst_u, uint8* dst_v,
int width);
void MirrorUVRow_NEON(const uint8* src_uv, uint8* dst_u, uint8* dst_v,
int width);
void MirrorUVRow_MIPS_DSPR2(const uint8* src_uv, uint8* dst_u, uint8* dst_v,
int width);
void MirrorUVRow_C(const uint8* src_uv, uint8* dst_u, uint8* dst_v,
int width);
void ARGBMirrorRow_SSSE3(const uint8* src, uint8* dst, int width);
void ARGBMirrorRow_NEON(const uint8* src, uint8* dst, int width);
void ARGBMirrorRow_C(const uint8* src, uint8* dst, int width);
void SplitUV_C(const uint8* src_uv, uint8* dst_u, uint8* dst_v, int pix);


@ -11,6 +11,6 @@
#ifndef INCLUDE_LIBYUV_VERSION_H_ // NOLINT
#define INCLUDE_LIBYUV_VERSION_H_
#define LIBYUV_VERSION 486
#define LIBYUV_VERSION 488
#endif // INCLUDE_LIBYUV_VERSION_H_ NOLINT


@ -156,7 +156,6 @@ enum FourCCBpp {
FOURCC_BPP_ANY = 0, // 0 means unknown.
};
// Converts fourcc aliases into canonical ones.
LIBYUV_API uint32 CanonicalFourCC(uint32 fourcc);


@ -111,14 +111,15 @@ void MirrorPlane(const uint8* src_y, int src_stride_y,
#if defined(HAS_MIRRORROW_SSE2)
if (TestCpuFlag(kCpuHasSSE2) && IS_ALIGNED(width, 16)) {
MirrorRow = MirrorRow_SSE2;
}
#endif
#if defined(HAS_MIRRORROW_SSSE3)
if (TestCpuFlag(kCpuHasSSSE3) &&
IS_ALIGNED(src_y, 16) && IS_ALIGNED(src_stride_y, 16)) {
if (TestCpuFlag(kCpuHasSSSE3) && IS_ALIGNED(width, 16) &&
IS_ALIGNED(src_y, 16) && IS_ALIGNED(src_stride_y, 16) &&
IS_ALIGNED(dst_y, 16) && IS_ALIGNED(dst_stride_y, 16)) {
MirrorRow = MirrorRow_SSSE3;
}
#endif
}
#endif
// Mirror plane
for (int y = 0; y < height; ++y) {
@ -330,6 +331,10 @@ int ARGBMirror(const uint8* src_argb, int src_stride_argb,
IS_ALIGNED(dst_argb, 16) && IS_ALIGNED(dst_stride_argb, 16)) {
ARGBMirrorRow = ARGBMirrorRow_SSSE3;
}
#elif defined(HAS_ARGBMIRRORROW_NEON)
if (TestCpuFlag(kCpuHasNEON) && IS_ALIGNED(width, 4)) {
ARGBMirrorRow = ARGBMirrorRow_NEON;
}
#endif
// Mirror plane


@ -864,21 +864,19 @@ void RotatePlane180(const uint8* src, int src_stride,
int width, int height) {
void (*MirrorRow)(const uint8* src, uint8* dst, int width) = MirrorRow_C;
#if defined(HAS_MIRRORROW_NEON)
if (TestCpuFlag(kCpuHasNEON)) {
if (TestCpuFlag(kCpuHasNEON) && IS_ALIGNED(width, 16)) {
MirrorRow = MirrorRow_NEON;
}
#endif
#if defined(HAS_MIRRORROW_SSE2)
if (TestCpuFlag(kCpuHasSSE2) &&
IS_ALIGNED(width, 16) &&
if (TestCpuFlag(kCpuHasSSE2) && IS_ALIGNED(width, 16) &&
IS_ALIGNED(src, 16) && IS_ALIGNED(src_stride, 16) &&
IS_ALIGNED(dst, 16) && IS_ALIGNED(dst_stride, 16)) {
MirrorRow = MirrorRow_SSE2;
}
#endif
#if defined(HAS_MIRRORROW_SSSE3)
if (TestCpuFlag(kCpuHasSSSE3) &&
IS_ALIGNED(width, 16) &&
if (TestCpuFlag(kCpuHasSSSE3) && IS_ALIGNED(width, 16) &&
IS_ALIGNED(src, 16) && IS_ALIGNED(src_stride, 16) &&
IS_ALIGNED(dst, 16) && IS_ALIGNED(dst_stride, 16)) {
MirrorRow = MirrorRow_SSSE3;
@ -1050,13 +1048,12 @@ void RotateUV180(const uint8* src, int src_stride,
int width, int height) {
void (*MirrorRowUV)(const uint8* src, uint8* dst_u, uint8* dst_v, int width) =
MirrorUVRow_C;
#if defined(HAS_MIRRORROW_UV_NEON)
if (TestCpuFlag(kCpuHasNEON)) {
#if defined(HAS_MIRRORUVROW_NEON)
if (TestCpuFlag(kCpuHasNEON) && IS_ALIGNED(width, 8)) {
MirrorRowUV = MirrorUVRow_NEON;
}
#elif defined(HAS_MIRRORROW_UV_SSSE3)
if (TestCpuFlag(kCpuHasSSSE3) &&
IS_ALIGNED(width, 16) &&
if (TestCpuFlag(kCpuHasSSSE3) && IS_ALIGNED(width, 16) &&
IS_ALIGNED(src, 16) && IS_ALIGNED(src_stride, 16)) {
MirrorRowUV = MirrorUVRow_SSSE3;
}


@ -39,14 +39,13 @@ static void ARGBTranspose(const uint8* src, int src_stride,
void (*ScaleARGBRowDownEven)(const uint8* src_ptr, int src_stride,
int src_step, uint8* dst_ptr, int dst_width) = ScaleARGBRowDownEven_C;
#if defined(HAS_SCALEARGBROWDOWNEVEN_SSE2)
if (TestCpuFlag(kCpuHasSSE2) &&
IS_ALIGNED(height, 4) && // width of dest.
if (TestCpuFlag(kCpuHasSSE2) && IS_ALIGNED(height, 4) && // Width of dest.
IS_ALIGNED(dst, 16) && IS_ALIGNED(dst_stride, 16)) {
ScaleARGBRowDownEven = ScaleARGBRowDownEven_SSE2;
}
#endif
int src_pixel_step = src_stride / 4;
int src_pixel_step = src_stride >> 2;
for (int i = 0; i < width; ++i) { // column of source to row of dest.
ScaleARGBRowDownEven(src, 0, src_pixel_step, dst, height);
dst += dst_stride;
@ -87,6 +86,10 @@ void ARGBRotate180(const uint8* src, int src_stride,
IS_ALIGNED(dst, 16) && IS_ALIGNED(dst_stride, 16)) {
ARGBMirrorRow = ARGBMirrorRow_SSSE3;
}
#elif defined(HAS_ARGBMIRRORROW_NEON)
if (TestCpuFlag(kCpuHasNEON) && IS_ALIGNED(width, 4)) {
ARGBMirrorRow = ARGBMirrorRow_NEON;
}
#endif
void (*CopyRow)(const uint8* src, uint8* dst, int width) = CopyRow_C;
#if defined(HAS_COPYROW_NEON)


@ -840,15 +840,15 @@ void CopyRow_NEON(const uint8* src, uint8* dst, int count) {
asm volatile (
".p2align 2 \n"
"1: \n"
"vld4.u8 {d0, d1, d2, d3}, [%0]! \n" // load 32
"vld1.u8 {d0, d1, d2, d3}, [%0]! \n" // load 32
"subs %2, %2, #32 \n" // 32 processed per loop
"vst4.u8 {d0, d1, d2, d3}, [%1]! \n" // store 32
"vst1.u8 {d0, d1, d2, d3}, [%1]! \n" // store 32
"bgt 1b \n"
: "+r"(src), // %0
"+r"(dst), // %1
"+r"(count) // %2 // Output registers
: // Input registers
: "memory", "cc", "q0", "q1" // Clobber List
: "+r"(src), // %0
"+r"(dst), // %1
"+r"(count) // %2 // Output registers
: // Input registers
: "memory", "cc", "q0", "q1" // Clobber List
);
}
#endif // HAS_COPYROW_NEON
@ -856,16 +856,17 @@ void CopyRow_NEON(const uint8* src, uint8* dst, int count) {
#ifdef HAS_SETROW_NEON
// SetRow8 writes 'count' bytes using a 32 bit value repeated.
void SetRow8_NEON(uint8* dst, uint32 v32, int count) {
asm volatile ( // NOLINT
asm volatile (
"vdup.u32 q0, %2 \n" // duplicate 4 ints
"1: \n"
"subs %1, %1, #16 \n" // 16 bytes per loop
"vst1.u8 {q0}, [%0]! \n" // store
"bgt 1b \n"
: "+r"(dst), // %0
"+r"(count) // %1
: "r"(v32) // %2
: "q0", "memory", "cc");
: "+r"(dst), // %0
"+r"(count) // %1
: "r"(v32) // %2
: "q0", "memory", "cc"
);
}
// TODO(fbarchard): Make fully assembler
@ -882,138 +883,78 @@ void SetRows32_NEON(uint8* dst, uint32 v32, int width,
#ifdef HAS_MIRRORROW_NEON
void MirrorRow_NEON(const uint8* src, uint8* dst, int width) {
asm volatile (
// compute where to start writing destination
"add %1, %2 \n"
// work on segments that are multiples of 16
"lsrs r3, %2, #4 \n"
// the output is written in two block. 8 bytes followed
// by another 8. reading is done sequentially, from left to
// right. writing is done from right to left in block sizes
// %1, the destination pointer is incremented after writing
// the first of the two blocks. need to subtract that 8 off
// along with 16 to get the next location.
"mov r3, #-24 \n"
"beq 2f \n"
// Start at end of source row.
"mov r3, #-16 \n"
"add %0, %0, %2 \n"
"sub %0, #16 \n"
// back of destination by the size of the register that is
// going to be mirrored
"sub %1, #16 \n"
// the loop needs to run on blocks of 16. what will be left
// over is either a negative number, the residuals that need
// to be done, or 0. If this isn't subtracted off here the
// loop will run one extra time.
"sub %2, #16 \n"
// mirror the bytes in the 64 bit segments. unable to mirror
// the bytes in the entire 128 bits in one go.
// because of the inability to mirror the entire 128 bits
// mirror the writing out of the two 64 bit segments.
".p2align 2 \n"
"1: \n"
"vld1.8 {q0}, [%0]! \n" // src += 16
"subs %2, #16 \n"
"vrev64.8 q0, q0 \n"
"vst1.8 {d1}, [%1]! \n"
"vst1.8 {d0}, [%1], r3 \n" // dst -= 16
"bge 1b \n"
// add 16 back to the counter. if the result is 0 there is no
// residuals so jump past
"adds %2, #16 \n"
"beq 5f \n"
"add %1, #16 \n"
"2: \n"
"mov r3, #-3 \n"
"sub %1, #2 \n"
"subs %2, #2 \n"
// check for 16*n+1 scenarios where segments_of_2 should not
// be run, but there is something left over.
"blt 4f \n"
// do this in neon registers as per
// http://blogs.arm.com/software-enablement/196-coding-for-neon-part-2-dealing-with-leftovers/
"3: \n"
"vld2.8 {d0[0], d1[0]}, [%0]! \n" // src += 2
"subs %2, #2 \n"
"vst1.8 {d1[0]}, [%1]! \n"
"vst1.8 {d0[0]}, [%1], r3 \n" // dst -= 2
"bge 3b \n"
"adds %2, #2 \n"
"beq 5f \n"
"4: \n"
"add %1, #1 \n"
"vld1.8 {d0[0]}, [%0] \n"
"vst1.8 {d0[0]}, [%1] \n"
"5: \n"
: "+r"(src), // %0
"+r"(dst), // %1
"+r"(width) // %2
:
: "memory", "cc", "r3", "q0"
"vld1.8 {q0}, [%0], r3 \n" // src -= 16
"subs %2, #16 \n" // 16 pixels per loop.
"vrev64.8 q0, q0 \n"
"vst1.8 {d1}, [%1]! \n" // dst += 16
"vst1.8 {d0}, [%1]! \n"
"bgt 1b \n"
: "+r"(src), // %0
"+r"(dst), // %1
"+r"(width) // %2
:
: "memory", "cc", "r3", "q0"
);
}
#endif // HAS_MIRRORROW_NEON
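
In the rewritten MirrorRow_NEON, r3 holds -16 so each vld1.8 fetches the next 16 bytes moving backwards through the source; vrev64.8 reverses the bytes within each 64-bit half, and storing d1 before d0 swaps the halves, which together reverse the whole 16-byte block. A standalone illustration of that net effect (hypothetical helper, one 16-byte block):

// Net effect of one loop iteration: dst16[0..15] = reverse of src16[0..15].
// Reversing each 8-byte half and then swapping the halves is equivalent.
void MirrorBlock16(const unsigned char* src16, unsigned char* dst16) {
  for (int i = 0; i < 16; ++i) {
    dst16[i] = src16[15 - i];
  }
}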
#ifdef HAS_MirrorUVRow_NEON
void MirrorUVRow_NEON(const uint8* src, uint8* dst_a, uint8* dst_b, int width) {
#ifdef HAS_MIRRORUVROW_NEON
void MirrorUVRow_NEON(const uint8* src_uv, uint8* dst_u, uint8* dst_v, int width) {
asm volatile (
// compute where to start writing destination
"add %1, %3 \n" // dst_a + width
"add %2, %3 \n" // dst_b + width
// work on input segments that are multiples of 16, but
// width that has been passed is output segments, half
// the size of input.
"lsrs r12, %3, #3 \n"
"beq 2f \n"
// the output is written in to two blocks.
"mov r12, #-8 \n"
// back of destination by the size of the register that is
// going to be mirrord
"sub %1, #8 \n"
"sub %2, #8 \n"
// the loop needs to run on blocks of 8. what will be left
// over is either a negative number, the residuals that need
// to be done, or 0. if this isn't subtracted off here the
// loop will run one extra time.
"sub %3, #8 \n"
// Start at end of source row.
"mov r3, #-16 \n"
"add %0, %0, %3, lsl #1 \n"
"sub %0, #16 \n"
// mirror the bytes in the 64 bit segments
".p2align 2 \n"
"1: \n"
"vld2.8 {d0, d1}, [%0]! \n" // src += 16
"subs %3, #8 \n"
"vrev64.8 q0, q0 \n"
"vst1.8 {d0}, [%1], r12 \n" // dst_a -= 8
"vst1.8 {d1}, [%2], r12 \n" // dst_b -= 8
"bge 1b \n"
// add 8 back to the counter. if the result is 0 there is no
// residuals so return
"adds %3, #8 \n"
"beq 4f \n"
"add %1, #8 \n"
"add %2, #8 \n"
"2: \n"
"mov r12, #-1 \n"
"sub %1, #1 \n"
"sub %2, #1 \n"
"3: \n"
"vld2.8 {d0[0], d1[0]}, [%0]! \n" // src += 2
"subs %3, %3, #1 \n"
"vst1.8 {d0[0]}, [%1], r12 \n" // dst_a -= 1
"vst1.8 {d1[0]}, [%2], r12 \n" // dst_b -= 1
"bgt 3b \n"
"4: \n"
: "+r"(src), // %0
"+r"(dst_a), // %1
"+r"(dst_b), // %2
"+r"(width) // %3
:
: "memory", "cc", "r12", "q0"
"vld2.8 {d0, d1}, [%0], r3 \n" // src -= 16
"subs %3, #8 \n" // 8 pixels per loop.
"vrev64.8 q0, q0 \n"
"vst1.8 {d0}, [%1]! \n" // dst += 8
"vst1.8 {d1}, [%2]! \n"
"bgt 1b \n"
: "+r"(src_uv), // %0
"+r"(dst_u), // %1
"+r"(dst_v), // %2
"+r"(width) // %3
:
: "memory", "cc", "r3", "q0"
);
}
#endif // HAS_MirrorUVRow_NEON
#endif // HAS_MIRRORUVROW_NEON
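
MirrorUVRow_NEON mirrors an interleaved UV row into separate U and V planes: vld2.8 de-interleaves 8 U bytes into d0 and 8 V bytes into d1 while walking backwards through the source, vrev64.8 reverses each half, and the results are stored forwards to dst_u and dst_v. A C sketch of the same operation (illustrative helper; uint8 as in the headers above; width counts UV pairs, as at the call sites):

// Sketch only: mirror and de-interleave one UV row.
// The NEON path handles 8 pairs per loop, hence the
// IS_ALIGNED(width, 8) check in RotateUV180's dispatch.
void MirrorUVRowSketch(const uint8* src_uv, uint8* dst_u, uint8* dst_v,
                       int width) {
  src_uv += (width - 1) * 2;         // last UV pair
  for (int x = 0; x < width; ++x) {
    dst_u[x] = src_uv[0];
    dst_v[x] = src_uv[1];
    src_uv -= 2;
  }
}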
#ifdef HAS_ARGBMIRRORROW_NEON
void ARGBMirrorRow_NEON(const uint8* src, uint8* dst, int width) {
asm volatile (
// Start at end of source row.
"mov r3, #-16 \n"
"add %0, %0, %2, lsl #2 \n"
"sub %0, #16 \n"
".p2align 2 \n"
"1: \n"
"vld1.8 {q0}, [%0], r3 \n" // src -= 16
"subs %2, #4 \n" // 4 pixels per loop.
"vrev64.32 q0, q0 \n"
"vst1.8 {d1}, [%1]! \n" // dst += 16
"vst1.8 {d0}, [%1]! \n"
"bgt 1b \n"
: "+r"(src), // %0
"+r"(dst), // %1
"+r"(width) // %2
:
: "memory", "cc", "r3", "q0"
);
}
#endif // HAS_ARGBMIRRORROW_NEON
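
ARGBMirrorRow_NEON mirrors whole 4-byte pixels rather than bytes: vrev64.32 swaps the two 32-bit pixels within each half of q0, and storing d1 before d0 reverses the order of the four pixels in the 16-byte block, which is why the dispatch only requires IS_ALIGNED(width, 4). An equivalent sketch over 32-bit pixels (illustrative helper; uint8/uint32 as in libyuv's headers):

// Sketch only: mirror one ARGB row, treating each pixel as a uint32.
void ARGBMirrorRowSketch(const uint8* src, uint8* dst, int width) {
  const uint32* s = reinterpret_cast<const uint32*>(src) + width - 1;
  uint32* d = reinterpret_cast<uint32*>(dst);
  for (int x = 0; x < width; ++x) {
    d[x] = s[-x];
  }
}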
#ifdef HAS_BGRATOARGBROW_NEON
void BGRAToARGBRow_NEON(const uint8* src_bgra, uint8* dst_argb, int pix) {
@ -1421,13 +1362,13 @@ void HalfRow_NEON(const uint8* src_uv, int src_uv_stride,
"vrhadd.u8 q0, q1 \n" // average row 1 and 2
"vst1.u8 {q0}, [%2]! \n"
"bgt 1b \n"
: "+r"(src_uv), // %0
"+r"(src_uv_stride), // %1
"+r"(dst_uv), // %2
"+r"(pix) // %3
:
: "memory", "cc", "q0", "q1" // Clobber List
);
: "+r"(src_uv), // %0
"+r"(src_uv_stride), // %1
"+r"(dst_uv), // %2
"+r"(pix) // %3
:
: "memory", "cc", "q0", "q1" // Clobber List
);
}
// Select 2 channels from ARGB on alternating pixels. e.g. BGBGBGBG
@ -1441,13 +1382,13 @@ void ARGBToBayerRow_NEON(const uint8* src_argb,
"vtbl.8 d3, {d0, d1}, d2 \n" // look up 4 pixels
"vst1.u32 {d3[0]}, [%1]! \n" // store 4.
"bgt 1b \n"
: "+r"(src_argb), // %0
"+r"(dst_bayer), // %1
"+r"(selector), // %2
"+r"(pix) // %3
:
: "memory", "cc", "q0", "q1" // Clobber List
);
: "+r"(src_argb), // %0
"+r"(dst_bayer), // %1
"+r"(selector), // %2
"+r"(pix) // %3
:
: "memory", "cc", "q0", "q1" // Clobber List
);
}
void I422ToYUY2Row_NEON(const uint8* src_y,
@ -1463,13 +1404,13 @@ void I422ToYUY2Row_NEON(const uint8* src_y,
"subs %4, %4, #16 \n" // 16 pixels
"vst4.u8 {d0, d1, d2, d3}, [%3]! \n" // Store 8 YUY2/16 pixels.
"bgt 1b \n"
: "+r"(src_y), // %0
"+r"(src_u), // %1
"+r"(src_v), // %2
"+r"(dst_yuy2), // %3
"+r"(width) // %4
:
: "cc", "memory", "d0", "d1", "d2", "d3"
: "+r"(src_y), // %0
"+r"(src_u), // %1
"+r"(src_v), // %2
"+r"(dst_yuy2), // %3
"+r"(width) // %4
:
: "cc", "memory", "d0", "d1", "d2", "d3"
);
}
@ -1486,13 +1427,13 @@ void I422ToUYVYRow_NEON(const uint8* src_y,
"subs %4, %4, #16 \n" // 16 pixels
"vst4.u8 {d0, d1, d2, d3}, [%3]! \n" // Store 8 UYVY/16 pixels.
"bgt 1b \n"
: "+r"(src_y), // %0
"+r"(src_u), // %1
"+r"(src_v), // %2
"+r"(dst_uyvy), // %3
"+r"(width) // %4
:
: "cc", "memory", "d0", "d1", "d2", "d3"
: "+r"(src_y), // %0
"+r"(src_u), // %1
"+r"(src_v), // %2
"+r"(dst_uyvy), // %3
"+r"(width) // %4
:
: "cc", "memory", "d0", "d1", "d2", "d3"
);
}


@ -32,7 +32,7 @@ static uint32 ReferenceHashDjb2(const uint8* src, uint64 count, uint32 seed) {
TEST_F(libyuvTest, BenchmakDjb2_OPT) {
const int kMaxTest = benchmark_width_ * benchmark_height_;
align_buffer_16(src_a, kMaxTest)
align_buffer_64(src_a, kMaxTest)
for (int i = 0; i < kMaxTest; ++i) {
src_a[i] = i;
@ -43,12 +43,12 @@ TEST_F(libyuvTest, BenchmakDjb2_OPT) {
h1 = HashDjb2(src_a, kMaxTest, 5381);
}
EXPECT_EQ(h1, h2);
free_aligned_buffer_16(src_a)
free_aligned_buffer_64(src_a)
}
TEST_F(libyuvTest, BenchmakDjb2_Unaligned_OPT) {
const int kMaxTest = benchmark_width_ * benchmark_height_;
align_buffer_16(src_a, kMaxTest + 1)
align_buffer_64(src_a, kMaxTest + 1)
for (int i = 0; i < kMaxTest; ++i) {
src_a[i + 1] = i;
}
@ -58,13 +58,13 @@ TEST_F(libyuvTest, BenchmakDjb2_Unaligned_OPT) {
h1 = HashDjb2(src_a + 1, kMaxTest, 5381);
}
EXPECT_EQ(h1, h2);
free_aligned_buffer_16(src_a)
free_aligned_buffer_64(src_a)
}
TEST_F(libyuvTest, BenchmarkSumSquareError_OPT) {
const int kMaxWidth = 4096 * 3;
align_buffer_16(src_a, kMaxWidth)
align_buffer_16(src_b, kMaxWidth)
align_buffer_64(src_a, kMaxWidth)
align_buffer_64(src_b, kMaxWidth)
memset(src_a, 0, kMaxWidth);
memset(src_b, 0, kMaxWidth);
@ -88,14 +88,14 @@ TEST_F(libyuvTest, BenchmarkSumSquareError_OPT) {
EXPECT_EQ(0, h1);
free_aligned_buffer_16(src_a)
free_aligned_buffer_16(src_b)
free_aligned_buffer_64(src_a)
free_aligned_buffer_64(src_b)
}
TEST_F(libyuvTest, SumSquareError) {
const int kMaxWidth = 4096 * 3;
align_buffer_16(src_a, kMaxWidth)
align_buffer_16(src_b, kMaxWidth)
align_buffer_64(src_a, kMaxWidth)
align_buffer_64(src_b, kMaxWidth)
memset(src_a, 0, kMaxWidth);
memset(src_b, 0, kMaxWidth);
@ -130,13 +130,13 @@ TEST_F(libyuvTest, SumSquareError) {
EXPECT_EQ(c_err, opt_err);
free_aligned_buffer_16(src_a)
free_aligned_buffer_16(src_b)
free_aligned_buffer_64(src_a)
free_aligned_buffer_64(src_b)
}
TEST_F(libyuvTest, BenchmarkPsnr_OPT) {
align_buffer_16(src_a, benchmark_width_ * benchmark_height_)
align_buffer_16(src_b, benchmark_width_ * benchmark_height_)
align_buffer_64(src_a, benchmark_width_ * benchmark_height_)
align_buffer_64(src_b, benchmark_width_ * benchmark_height_)
for (int i = 0; i < benchmark_width_ * benchmark_height_; ++i) {
src_a[i] = i;
src_b[i] = i;
@ -155,8 +155,8 @@ TEST_F(libyuvTest, BenchmarkPsnr_OPT) {
EXPECT_EQ(0, 0);
free_aligned_buffer_16(src_a)
free_aligned_buffer_16(src_b)
free_aligned_buffer_64(src_a)
free_aligned_buffer_64(src_b)
}
TEST_F(libyuvTest, Psnr) {
@ -165,8 +165,8 @@ TEST_F(libyuvTest, Psnr) {
const int b = 128;
const int kSrcPlaneSize = (kSrcWidth + b * 2) * (kSrcHeight + b * 2);
const int kSrcStride = 2 * b + kSrcWidth;
align_buffer_16(src_a, kSrcPlaneSize)
align_buffer_16(src_b, kSrcPlaneSize)
align_buffer_64(src_a, kSrcPlaneSize)
align_buffer_64(src_b, kSrcPlaneSize)
memset(src_a, 0, kSrcPlaneSize);
memset(src_b, 0, kSrcPlaneSize);
@ -232,13 +232,13 @@ TEST_F(libyuvTest, Psnr) {
EXPECT_EQ(opt_err, c_err);
free_aligned_buffer_16(src_a)
free_aligned_buffer_16(src_b)
free_aligned_buffer_64(src_a)
free_aligned_buffer_64(src_b)
}
TEST_F(libyuvTest, BenchmarkSsim_OPT) {
align_buffer_16(src_a, benchmark_width_ * benchmark_height_)
align_buffer_16(src_b, benchmark_width_ * benchmark_height_)
align_buffer_64(src_a, benchmark_width_ * benchmark_height_)
align_buffer_64(src_b, benchmark_width_ * benchmark_height_)
for (int i = 0; i < benchmark_width_ * benchmark_height_; ++i) {
src_a[i] = i;
src_b[i] = i;
@ -257,8 +257,8 @@ TEST_F(libyuvTest, BenchmarkSsim_OPT) {
EXPECT_EQ(0, 0); // Pass if we get this far.
free_aligned_buffer_16(src_a)
free_aligned_buffer_16(src_b)
free_aligned_buffer_64(src_a)
free_aligned_buffer_64(src_b)
}
TEST_F(libyuvTest, Ssim) {
@ -267,8 +267,8 @@ TEST_F(libyuvTest, Ssim) {
const int b = 128;
const int kSrcPlaneSize = (kSrcWidth + b * 2) * (kSrcHeight + b * 2);
const int kSrcStride = 2 * b + kSrcWidth;
align_buffer_16(src_a, kSrcPlaneSize)
align_buffer_16(src_b, kSrcPlaneSize)
align_buffer_64(src_a, kSrcPlaneSize)
align_buffer_64(src_b, kSrcPlaneSize)
memset(src_a, 0, kSrcPlaneSize);
memset(src_b, 0, kSrcPlaneSize);
@ -330,8 +330,8 @@ TEST_F(libyuvTest, Ssim) {
EXPECT_EQ(opt_err, c_err);
free_aligned_buffer_16(src_a)
free_aligned_buffer_16(src_b)
free_aligned_buffer_64(src_a)
free_aligned_buffer_64(src_b)
}
} // namespace libyuv


@ -35,17 +35,17 @@ namespace libyuv {
TEST_F(libyuvTest, SRC_FMT_PLANAR##To##FMT_PLANAR##N) { \
const int kWidth = W1280; \
const int kHeight = benchmark_height_; \
align_buffer_16(src_y, kWidth * kHeight + OFF); \
align_buffer_16(src_u, \
align_buffer_64(src_y, kWidth * kHeight + OFF); \
align_buffer_64(src_u, \
kWidth / SRC_SUBSAMP_X * kHeight / SRC_SUBSAMP_Y + OFF); \
align_buffer_16(src_v, \
align_buffer_64(src_v, \
kWidth / SRC_SUBSAMP_X * kHeight / SRC_SUBSAMP_Y + OFF); \
align_buffer_16(dst_y_c, kWidth * kHeight); \
align_buffer_16(dst_u_c, kWidth / SUBSAMP_X * kHeight / SUBSAMP_Y); \
align_buffer_16(dst_v_c, kWidth / SUBSAMP_X * kHeight / SUBSAMP_Y); \
align_buffer_16(dst_y_opt, kWidth * kHeight); \
align_buffer_16(dst_u_opt, kWidth / SUBSAMP_X * kHeight / SUBSAMP_Y); \
align_buffer_16(dst_v_opt, kWidth / SUBSAMP_X * kHeight / SUBSAMP_Y); \
align_buffer_64(dst_y_c, kWidth * kHeight); \
align_buffer_64(dst_u_c, kWidth / SUBSAMP_X * kHeight / SUBSAMP_Y); \
align_buffer_64(dst_v_c, kWidth / SUBSAMP_X * kHeight / SUBSAMP_Y); \
align_buffer_64(dst_y_opt, kWidth * kHeight); \
align_buffer_64(dst_u_opt, kWidth / SUBSAMP_X * kHeight / SUBSAMP_Y); \
align_buffer_64(dst_v_opt, kWidth / SUBSAMP_X * kHeight / SUBSAMP_Y); \
srandom(time(NULL)); \
for (int i = 0; i < kHeight; ++i) \
for (int j = 0; j < kWidth; ++j) \
@ -108,15 +108,15 @@ TEST_F(libyuvTest, SRC_FMT_PLANAR##To##FMT_PLANAR##N) { \
} \
} \
EXPECT_LE(max_diff, 1); \
free_aligned_buffer_16(dst_y_c) \
free_aligned_buffer_16(dst_u_c) \
free_aligned_buffer_16(dst_v_c) \
free_aligned_buffer_16(dst_y_opt) \
free_aligned_buffer_16(dst_u_opt) \
free_aligned_buffer_16(dst_v_opt) \
free_aligned_buffer_16(src_y) \
free_aligned_buffer_16(src_u) \
free_aligned_buffer_16(src_v) \
free_aligned_buffer_64(dst_y_c) \
free_aligned_buffer_64(dst_u_c) \
free_aligned_buffer_64(dst_v_c) \
free_aligned_buffer_64(dst_y_opt) \
free_aligned_buffer_64(dst_u_opt) \
free_aligned_buffer_64(dst_v_opt) \
free_aligned_buffer_64(src_y) \
free_aligned_buffer_64(src_u) \
free_aligned_buffer_64(src_v) \
}
#define TESTPLANARTOP(SRC_FMT_PLANAR, SRC_SUBSAMP_X, SRC_SUBSAMP_Y, \
@ -149,15 +149,15 @@ TESTPLANARTOP(I420, 2, 2, I420Mirror, 2, 2)
TEST_F(libyuvTest, SRC_FMT_PLANAR##To##FMT_PLANAR##N) { \
const int kWidth = W1280; \
const int kHeight = benchmark_height_; \
align_buffer_16(src_y, kWidth * kHeight + OFF); \
align_buffer_16(src_u, \
align_buffer_64(src_y, kWidth * kHeight + OFF); \
align_buffer_64(src_u, \
kWidth / SRC_SUBSAMP_X * kHeight / SRC_SUBSAMP_Y + OFF); \
align_buffer_16(src_v, \
align_buffer_64(src_v, \
kWidth / SRC_SUBSAMP_X * kHeight / SRC_SUBSAMP_Y + OFF); \
align_buffer_16(dst_y_c, kWidth * kHeight); \
align_buffer_16(dst_uv_c, kWidth * 2 / SUBSAMP_X * kHeight / SUBSAMP_Y); \
align_buffer_16(dst_y_opt, kWidth * kHeight); \
align_buffer_16(dst_uv_opt, kWidth * 2 / SUBSAMP_X * kHeight / SUBSAMP_Y); \
align_buffer_64(dst_y_c, kWidth * kHeight); \
align_buffer_64(dst_uv_c, kWidth * 2 / SUBSAMP_X * kHeight / SUBSAMP_Y); \
align_buffer_64(dst_y_opt, kWidth * kHeight); \
align_buffer_64(dst_uv_opt, kWidth * 2 / SUBSAMP_X * kHeight / SUBSAMP_Y); \
srandom(time(NULL)); \
for (int i = 0; i < kHeight; ++i) \
for (int j = 0; j < kWidth; ++j) \
@ -207,13 +207,13 @@ TEST_F(libyuvTest, SRC_FMT_PLANAR##To##FMT_PLANAR##N) { \
} \
} \
EXPECT_LE(max_diff, 1); \
free_aligned_buffer_16(dst_y_c) \
free_aligned_buffer_16(dst_uv_c) \
free_aligned_buffer_16(dst_y_opt) \
free_aligned_buffer_16(dst_uv_opt) \
free_aligned_buffer_16(src_y) \
free_aligned_buffer_16(src_u) \
free_aligned_buffer_16(src_v) \
free_aligned_buffer_64(dst_y_c) \
free_aligned_buffer_64(dst_uv_c) \
free_aligned_buffer_64(dst_y_opt) \
free_aligned_buffer_64(dst_uv_opt) \
free_aligned_buffer_64(src_y) \
free_aligned_buffer_64(src_u) \
free_aligned_buffer_64(src_v) \
}
#define TESTPLANARTOBP(SRC_FMT_PLANAR, SRC_SUBSAMP_X, SRC_SUBSAMP_Y, \
@ -239,15 +239,15 @@ TESTPLANARTOBP(I420, 2, 2, NV21, 2, 2)
TEST_F(libyuvTest, SRC_FMT_PLANAR##To##FMT_PLANAR##N) { \
const int kWidth = W1280; \
const int kHeight = benchmark_height_; \
align_buffer_16(src_y, kWidth * kHeight + OFF); \
align_buffer_16(src_uv, 2 * kWidth / SRC_SUBSAMP_X * \
align_buffer_64(src_y, kWidth * kHeight + OFF); \
align_buffer_64(src_uv, 2 * kWidth / SRC_SUBSAMP_X * \
kHeight / SRC_SUBSAMP_Y + OFF); \
align_buffer_16(dst_y_c, kWidth * kHeight); \
align_buffer_16(dst_u_c, kWidth / SUBSAMP_X * kHeight / SUBSAMP_Y); \
align_buffer_16(dst_v_c, kWidth / SUBSAMP_X * kHeight / SUBSAMP_Y); \
align_buffer_16(dst_y_opt, kWidth * kHeight); \
align_buffer_16(dst_u_opt, kWidth / SUBSAMP_X * kHeight / SUBSAMP_Y); \
align_buffer_16(dst_v_opt, kWidth / SUBSAMP_X * kHeight / SUBSAMP_Y); \
align_buffer_64(dst_y_c, kWidth * kHeight); \
align_buffer_64(dst_u_c, kWidth / SUBSAMP_X * kHeight / SUBSAMP_Y); \
align_buffer_64(dst_v_c, kWidth / SUBSAMP_X * kHeight / SUBSAMP_Y); \
align_buffer_64(dst_y_opt, kWidth * kHeight); \
align_buffer_64(dst_u_opt, kWidth / SUBSAMP_X * kHeight / SUBSAMP_Y); \
align_buffer_64(dst_v_opt, kWidth / SUBSAMP_X * kHeight / SUBSAMP_Y); \
srandom(time(NULL)); \
for (int i = 0; i < kHeight; ++i) \
for (int j = 0; j < kWidth; ++j) \
@ -307,14 +307,14 @@ TEST_F(libyuvTest, SRC_FMT_PLANAR##To##FMT_PLANAR##N) { \
} \
} \
EXPECT_LE(max_diff, 1); \
free_aligned_buffer_16(dst_y_c) \
free_aligned_buffer_16(dst_u_c) \
free_aligned_buffer_16(dst_v_c) \
free_aligned_buffer_16(dst_y_opt) \
free_aligned_buffer_16(dst_u_opt) \
free_aligned_buffer_16(dst_v_opt) \
free_aligned_buffer_16(src_y) \
free_aligned_buffer_16(src_uv) \
free_aligned_buffer_64(dst_y_c) \
free_aligned_buffer_64(dst_u_c) \
free_aligned_buffer_64(dst_v_c) \
free_aligned_buffer_64(dst_y_opt) \
free_aligned_buffer_64(dst_u_opt) \
free_aligned_buffer_64(dst_v_opt) \
free_aligned_buffer_64(src_y) \
free_aligned_buffer_64(src_uv) \
}
#define TESTBIPLANARTOP(SRC_FMT_PLANAR, SRC_SUBSAMP_X, SRC_SUBSAMP_Y, \
@ -342,11 +342,11 @@ TEST_F(libyuvTest, FMT_PLANAR##To##FMT_B##N) { \
const int kHeight = benchmark_height_; \
const int kStrideB = ((kWidth * 8 * BPP_B + 7) / 8 + ALIGN - 1) / \
ALIGN * ALIGN; \
align_buffer_16(src_y, kWidth * kHeight + OFF); \
align_buffer_16(src_u, kWidth / SUBSAMP_X * kHeight / SUBSAMP_Y + OFF); \
align_buffer_16(src_v, kWidth / SUBSAMP_X * kHeight / SUBSAMP_Y + OFF); \
align_buffer_16(dst_argb_c, kStrideB * kHeight); \
align_buffer_16(dst_argb_opt, kStrideB * kHeight); \
align_buffer_64(src_y, kWidth * kHeight + OFF); \
align_buffer_64(src_u, kWidth / SUBSAMP_X * kHeight / SUBSAMP_Y + OFF); \
align_buffer_64(src_v, kWidth / SUBSAMP_X * kHeight / SUBSAMP_Y + OFF); \
align_buffer_64(dst_argb_c, kStrideB * kHeight); \
align_buffer_64(dst_argb_opt, kStrideB * kHeight); \
memset(dst_argb_c, 0, kStrideB * kHeight); \
memset(dst_argb_opt, 0, kStrideB * kHeight); \
srandom(time(NULL)); \
@ -377,8 +377,8 @@ TEST_F(libyuvTest, FMT_PLANAR##To##FMT_B##N) { \
} \
int max_diff = 0; \
/* Convert to ARGB so 565 is expanded to bytes that can be compared. */ \
align_buffer_16(dst_argb32_c, kWidth * 4 * kHeight); \
align_buffer_16(dst_argb32_opt, kWidth * 4 * kHeight); \
align_buffer_64(dst_argb32_c, kWidth * 4 * kHeight); \
align_buffer_64(dst_argb32_opt, kWidth * 4 * kHeight); \
memset(dst_argb32_c, 0, kWidth * 4 * kHeight); \
memset(dst_argb32_opt, 0, kWidth * 4 * kHeight); \
FMT_B##ToARGB(dst_argb_c, kStrideB, \
@ -399,13 +399,13 @@ TEST_F(libyuvTest, FMT_PLANAR##To##FMT_B##N) { \
} \
\
EXPECT_LE(max_diff, DIFF); \
free_aligned_buffer_16(src_y) \
free_aligned_buffer_16(src_u) \
free_aligned_buffer_16(src_v) \
free_aligned_buffer_16(dst_argb_c) \
free_aligned_buffer_16(dst_argb_opt) \
free_aligned_buffer_16(dst_argb32_c) \
free_aligned_buffer_16(dst_argb32_opt) \
free_aligned_buffer_64(src_y) \
free_aligned_buffer_64(src_u) \
free_aligned_buffer_64(src_v) \
free_aligned_buffer_64(dst_argb_c) \
free_aligned_buffer_64(dst_argb_opt) \
free_aligned_buffer_64(dst_argb32_c) \
free_aligned_buffer_64(dst_argb32_opt) \
}
#define TESTPLANARTOB(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN, \
@ -451,10 +451,10 @@ TEST_F(libyuvTest, FMT_PLANAR##To##FMT_B##N) { \
const int kWidth = W1280; \
const int kHeight = benchmark_height_; \
const int kStrideB = kWidth * BPP_B; \
align_buffer_16(src_y, kWidth * kHeight + OFF); \
align_buffer_16(src_uv, kWidth / SUBSAMP_X * kHeight / SUBSAMP_Y * 2 + OFF); \
align_buffer_16(dst_argb_c, kStrideB * kHeight); \
align_buffer_16(dst_argb_opt, kStrideB * kHeight); \
align_buffer_64(src_y, kWidth * kHeight + OFF); \
align_buffer_64(src_uv, kWidth / SUBSAMP_X * kHeight / SUBSAMP_Y * 2 + OFF); \
align_buffer_64(dst_argb_c, kStrideB * kHeight); \
align_buffer_64(dst_argb_opt, kStrideB * kHeight); \
srandom(time(NULL)); \
for (int i = 0; i < kHeight; ++i) \
for (int j = 0; j < kWidth; ++j) \
@ -476,8 +476,8 @@ TEST_F(libyuvTest, FMT_PLANAR##To##FMT_B##N) { \
kWidth, NEG kHeight); \
} \
/* Convert to ARGB so 565 is expanded to bytes that can be compared. */ \
align_buffer_16(dst_argb32_c, kWidth * 4 * kHeight); \
align_buffer_16(dst_argb32_opt, kWidth * 4 * kHeight); \
align_buffer_64(dst_argb32_c, kWidth * 4 * kHeight); \
align_buffer_64(dst_argb32_opt, kWidth * 4 * kHeight); \
memset(dst_argb32_c, 0, kWidth * 4 * kHeight); \
memset(dst_argb32_opt, 0, kWidth * 4 * kHeight); \
FMT_B##ToARGB(dst_argb_c, kStrideB, \
@ -498,12 +498,12 @@ TEST_F(libyuvTest, FMT_PLANAR##To##FMT_B##N) { \
} \
} \
EXPECT_LE(max_diff, DIFF); \
free_aligned_buffer_16(src_y) \
free_aligned_buffer_16(src_uv) \
free_aligned_buffer_16(dst_argb_c) \
free_aligned_buffer_16(dst_argb_opt) \
free_aligned_buffer_16(dst_argb32_c) \
free_aligned_buffer_16(dst_argb32_opt) \
free_aligned_buffer_64(src_y) \
free_aligned_buffer_64(src_uv) \
free_aligned_buffer_64(dst_argb_c) \
free_aligned_buffer_64(dst_argb_opt) \
free_aligned_buffer_64(dst_argb32_c) \
free_aligned_buffer_64(dst_argb32_opt) \
}
#define TESTBIPLANARTOB(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, DIFF) \
@ -527,13 +527,13 @@ TEST_F(libyuvTest, FMT_A##To##FMT_PLANAR##N) { \
const int kWidth = W1280; \
const int kHeight = benchmark_height_; \
const int kStride = (kWidth * 8 * BPP_A + 7) / 8; \
align_buffer_16(src_argb, kStride * kHeight + OFF); \
align_buffer_16(dst_y_c, kWidth * kHeight); \
align_buffer_16(dst_u_c, kWidth / SUBSAMP_X * kHeight / SUBSAMP_Y); \
align_buffer_16(dst_v_c, kWidth / SUBSAMP_X * kHeight / SUBSAMP_Y); \
align_buffer_16(dst_y_opt, kWidth * kHeight); \
align_buffer_16(dst_u_opt, kWidth / SUBSAMP_X * kHeight / SUBSAMP_Y); \
align_buffer_16(dst_v_opt, kWidth / SUBSAMP_X * kHeight / SUBSAMP_Y); \
align_buffer_64(src_argb, kStride * kHeight + OFF); \
align_buffer_64(dst_y_c, kWidth * kHeight); \
align_buffer_64(dst_u_c, kWidth / SUBSAMP_X * kHeight / SUBSAMP_Y); \
align_buffer_64(dst_v_c, kWidth / SUBSAMP_X * kHeight / SUBSAMP_Y); \
align_buffer_64(dst_y_opt, kWidth * kHeight); \
align_buffer_64(dst_u_opt, kWidth / SUBSAMP_X * kHeight / SUBSAMP_Y); \
align_buffer_64(dst_v_opt, kWidth / SUBSAMP_X * kHeight / SUBSAMP_Y); \
srandom(time(NULL)); \
for (int i = 0; i < kHeight; ++i) \
for (int j = 0; j < kStride; ++j) \
@ -586,13 +586,13 @@ TEST_F(libyuvTest, FMT_A##To##FMT_PLANAR##N) { \
} \
} \
EXPECT_LE(max_diff, DIFF); \
free_aligned_buffer_16(dst_y_c) \
free_aligned_buffer_16(dst_u_c) \
free_aligned_buffer_16(dst_v_c) \
free_aligned_buffer_16(dst_y_opt) \
free_aligned_buffer_16(dst_u_opt) \
free_aligned_buffer_16(dst_v_opt) \
free_aligned_buffer_16(src_argb) \
free_aligned_buffer_64(dst_y_c) \
free_aligned_buffer_64(dst_u_c) \
free_aligned_buffer_64(dst_v_c) \
free_aligned_buffer_64(dst_y_opt) \
free_aligned_buffer_64(dst_u_opt) \
free_aligned_buffer_64(dst_v_opt) \
free_aligned_buffer_64(src_argb) \
}
#define TESTATOPLANAR(FMT_A, BPP_A, FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, DIFF) \
@ -635,11 +635,11 @@ TEST_F(libyuvTest, FMT_A##To##FMT_PLANAR##N) { \
const int kWidth = W1280; \
const int kHeight = benchmark_height_; \
const int kStride = (kWidth * 8 * BPP_A + 7) / 8; \
align_buffer_16(src_argb, kStride * kHeight + OFF); \
align_buffer_16(dst_y_c, kWidth * kHeight); \
align_buffer_16(dst_uv_c, kWidth / SUBSAMP_X * 2 * kHeight / SUBSAMP_Y); \
align_buffer_16(dst_y_opt, kWidth * kHeight); \
align_buffer_16(dst_uv_opt, kWidth / SUBSAMP_X * 2 * kHeight / SUBSAMP_Y); \
align_buffer_64(src_argb, kStride * kHeight + OFF); \
align_buffer_64(dst_y_c, kWidth * kHeight); \
align_buffer_64(dst_uv_c, kWidth / SUBSAMP_X * 2 * kHeight / SUBSAMP_Y); \
align_buffer_64(dst_y_opt, kWidth * kHeight); \
align_buffer_64(dst_uv_opt, kWidth / SUBSAMP_X * 2 * kHeight / SUBSAMP_Y); \
srandom(time(NULL)); \
for (int i = 0; i < kHeight; ++i) \
for (int j = 0; j < kStride; ++j) \
@ -679,11 +679,11 @@ TEST_F(libyuvTest, FMT_A##To##FMT_PLANAR##N) { \
} \
} \
EXPECT_LE(max_diff, 4); \
free_aligned_buffer_16(dst_y_c) \
free_aligned_buffer_16(dst_uv_c) \
free_aligned_buffer_16(dst_y_opt) \
free_aligned_buffer_16(dst_uv_opt) \
free_aligned_buffer_16(src_argb) \
free_aligned_buffer_64(dst_y_c) \
free_aligned_buffer_64(dst_uv_c) \
free_aligned_buffer_64(dst_y_opt) \
free_aligned_buffer_64(dst_uv_opt) \
free_aligned_buffer_64(src_argb) \
}
#define TESTATOBIPLANAR(FMT_A, BPP_A, FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y) \
@ -707,9 +707,9 @@ TEST_F(libyuvTest, FMT_A##To##FMT_B##N) { \
const int kHeight = benchmark_height_; \
const int kStrideA = (kWidth * BPP_A + STRIDE_A - 1) / STRIDE_A * STRIDE_A; \
const int kStrideB = (kWidth * BPP_B + STRIDE_B - 1) / STRIDE_B * STRIDE_B; \
align_buffer_16(src_argb, kStrideA * kHeight + OFF); \
align_buffer_16(dst_argb_c, kStrideB * kHeight); \
align_buffer_16(dst_argb_opt, kStrideB * kHeight); \
align_buffer_64(src_argb, kStrideA * kHeight + OFF); \
align_buffer_64(dst_argb_c, kStrideB * kHeight); \
align_buffer_64(dst_argb_opt, kStrideB * kHeight); \
srandom(time(NULL)); \
for (int i = 0; i < kStrideA * kHeight; ++i) { \
src_argb[i + OFF] = (random() & 0xff); \
@ -734,9 +734,9 @@ TEST_F(libyuvTest, FMT_A##To##FMT_B##N) { \
} \
} \
EXPECT_LE(max_diff, DIFF); \
free_aligned_buffer_16(src_argb) \
free_aligned_buffer_16(dst_argb_c) \
free_aligned_buffer_16(dst_argb_opt) \
free_aligned_buffer_64(src_argb) \
free_aligned_buffer_64(dst_argb_c) \
free_aligned_buffer_64(dst_argb_opt) \
}
#define TESTATOBRANDOM(FMT_A, BPP_A, STRIDE_A, HEIGHT_A, \
@ -850,4 +850,3 @@ TEST_F(libyuvTest, Test565) {
}
} // namespace libyuv


@ -639,9 +639,9 @@ TEST_F(libyuvTest, TestCopyPlane) {
int y_plane_size = (yw + b * 2) * (yh + b * 2);
srandom(time(NULL));
align_buffer_16(orig_y, y_plane_size)
align_buffer_16(dst_c, y_plane_size)
align_buffer_16(dst_opt, y_plane_size);
align_buffer_64(orig_y, y_plane_size)
align_buffer_64(dst_c, y_plane_size)
align_buffer_64(dst_opt, y_plane_size);
memset(orig_y, 0, y_plane_size);
memset(dst_c, 0, y_plane_size);
@ -689,9 +689,9 @@ TEST_F(libyuvTest, TestCopyPlane) {
++err;
}
free_aligned_buffer_16(orig_y)
free_aligned_buffer_16(dst_c)
free_aligned_buffer_16(dst_opt)
free_aligned_buffer_64(orig_y)
free_aligned_buffer_64(dst_c)
free_aligned_buffer_64(dst_opt)
EXPECT_EQ(0, err);
}


@ -24,7 +24,7 @@ static int ARGBTestRotate(int src_width, int src_height,
int src_argb_plane_size = (src_width + b * 2) * (src_height + b * 2) * 4;
int src_stride_argb = (b * 2 + src_width) * 4;
align_buffer_16(src_argb, src_argb_plane_size)
align_buffer_64(src_argb, src_argb_plane_size)
memset(src_argb, 1, src_argb_plane_size);
int dst_argb_plane_size = (dst_width + b * 2) * (dst_height + b * 2) * 4;
@ -39,8 +39,8 @@ static int ARGBTestRotate(int src_width, int src_height,
}
}
align_buffer_16(dst_argb_c, dst_argb_plane_size)
align_buffer_16(dst_argb_opt, dst_argb_plane_size)
align_buffer_64(dst_argb_c, dst_argb_plane_size)
align_buffer_64(dst_argb_opt, dst_argb_plane_size)
memset(dst_argb_c, 2, dst_argb_plane_size);
memset(dst_argb_opt, 3, dst_argb_plane_size);
@ -91,9 +91,9 @@ static int ARGBTestRotate(int src_width, int src_height,
}
}
free_aligned_buffer_16(dst_argb_c)
free_aligned_buffer_16(dst_argb_opt)
free_aligned_buffer_16(src_argb)
free_aligned_buffer_64(dst_argb_c)
free_aligned_buffer_64(dst_argb_opt)
free_aligned_buffer_64(src_argb)
return max_diff;
}


@ -35,9 +35,9 @@ TEST_F(libyuvTest, Transpose) {
ow = ih;
oh = iw;
align_buffer_16(input, iw * ih)
align_buffer_16(output_1, ow * oh)
align_buffer_16(output_2, iw * ih)
align_buffer_64(input, iw * ih)
align_buffer_64(output_1, ow * oh)
align_buffer_64(output_2, iw * ih)
for (i = 0; i < iw * ih; ++i) {
input[i] = i;
@ -63,9 +63,9 @@ TEST_F(libyuvTest, Transpose) {
PrintArray(output_2, iw, ih);
}
free_aligned_buffer_16(input)
free_aligned_buffer_16(output_1)
free_aligned_buffer_16(output_2)
free_aligned_buffer_64(input)
free_aligned_buffer_64(output_1)
free_aligned_buffer_64(output_2)
EXPECT_EQ(0, err);
}
@ -80,11 +80,11 @@ TEST_F(libyuvTest, TransposeUV) {
ow = ih;
oh = iw >> 1;
align_buffer_16(input, iw * ih)
align_buffer_16(output_a1, ow * oh)
align_buffer_16(output_b1, ow * oh)
align_buffer_16(output_a2, iw * ih)
align_buffer_16(output_b2, iw * ih)
align_buffer_64(input, iw * ih)
align_buffer_64(output_a1, ow * oh)
align_buffer_64(output_b1, ow * oh)
align_buffer_64(output_a2, iw * ih)
align_buffer_64(output_b2, iw * ih)
for (i = 0; i < iw * ih; i += 2) {
input[i] = i >> 1;
@ -118,11 +118,11 @@ TEST_F(libyuvTest, TransposeUV) {
PrintArray(output_b2, oh, ow);
}
free_aligned_buffer_16(input)
free_aligned_buffer_16(output_a1)
free_aligned_buffer_16(output_b1)
free_aligned_buffer_16(output_a2)
free_aligned_buffer_16(output_b2)
free_aligned_buffer_64(input)
free_aligned_buffer_64(output_a1)
free_aligned_buffer_64(output_b1)
free_aligned_buffer_64(output_a2)
free_aligned_buffer_64(output_b2)
EXPECT_EQ(0, err);
}
@ -137,11 +137,11 @@ TEST_F(libyuvTest, RotatePlane90) {
ow = ih;
oh = iw;
align_buffer_16(input, iw * ih)
align_buffer_16(output_0, iw * ih)
align_buffer_16(output_90, ow * oh)
align_buffer_16(output_180, iw * ih)
align_buffer_16(output_270, ow * oh)
align_buffer_64(input, iw * ih)
align_buffer_64(output_0, iw * ih)
align_buffer_64(output_90, ow * oh)
align_buffer_64(output_180, iw * ih)
align_buffer_64(output_270, ow * oh)
for (i = 0; i < iw * ih; ++i) {
input[i] = i;
@ -175,11 +175,11 @@ TEST_F(libyuvTest, RotatePlane90) {
PrintArray(output_0, iw, ih);
}
free_aligned_buffer_16(input)
free_aligned_buffer_16(output_0)
free_aligned_buffer_16(output_90)
free_aligned_buffer_16(output_180)
free_aligned_buffer_16(output_270)
free_aligned_buffer_64(input)
free_aligned_buffer_64(output_0)
free_aligned_buffer_64(output_90)
free_aligned_buffer_64(output_180)
free_aligned_buffer_64(output_270)
EXPECT_EQ(0, err);
}
@ -194,13 +194,13 @@ TEST_F(libyuvTest, RotateUV90) {
ow = ih;
oh = iw >> 1;
align_buffer_16(input, iw * ih)
align_buffer_16(output_0_u, ow * oh)
align_buffer_16(output_0_v, ow * oh)
align_buffer_16(output_90_u, ow * oh)
align_buffer_16(output_90_v, ow * oh)
align_buffer_16(output_180_u, ow * oh)
align_buffer_16(output_180_v, ow * oh)
align_buffer_64(input, iw * ih)
align_buffer_64(output_0_u, ow * oh)
align_buffer_64(output_0_v, ow * oh)
align_buffer_64(output_90_u, ow * oh)
align_buffer_64(output_90_v, ow * oh)
align_buffer_64(output_180_u, ow * oh)
align_buffer_64(output_180_v, ow * oh)
for (i = 0; i < iw * ih; i += 2) {
input[i] = i >> 1;
@ -247,13 +247,13 @@ TEST_F(libyuvTest, RotateUV90) {
PrintArray(output_0_v, oh, ow);
}
free_aligned_buffer_16(input)
free_aligned_buffer_16(output_0_u)
free_aligned_buffer_16(output_0_v)
free_aligned_buffer_16(output_90_u)
free_aligned_buffer_16(output_90_v)
free_aligned_buffer_16(output_180_u)
free_aligned_buffer_16(output_180_v)
free_aligned_buffer_64(input)
free_aligned_buffer_64(output_0_u)
free_aligned_buffer_64(output_0_v)
free_aligned_buffer_64(output_90_u)
free_aligned_buffer_64(output_90_v)
free_aligned_buffer_64(output_180_u)
free_aligned_buffer_64(output_180_v)
EXPECT_EQ(0, err);
}
@ -268,13 +268,13 @@ TEST_F(libyuvTest, RotateUV180) {
ow = iw >> 1;
oh = ih;
align_buffer_16(input, iw * ih)
align_buffer_16(output_0_u, ow * oh)
align_buffer_16(output_0_v, ow * oh)
align_buffer_16(output_90_u, ow * oh)
align_buffer_16(output_90_v, ow * oh)
align_buffer_16(output_180_u, ow * oh)
align_buffer_16(output_180_v, ow * oh)
align_buffer_64(input, iw * ih)
align_buffer_64(output_0_u, ow * oh)
align_buffer_64(output_0_v, ow * oh)
align_buffer_64(output_90_u, ow * oh)
align_buffer_64(output_90_v, ow * oh)
align_buffer_64(output_180_u, ow * oh)
align_buffer_64(output_180_v, ow * oh)
for (i = 0; i < iw * ih; i += 2) {
input[i] = i >> 1;
@ -321,13 +321,13 @@ TEST_F(libyuvTest, RotateUV180) {
PrintArray(output_0_v, ow, oh);
}
free_aligned_buffer_16(input)
free_aligned_buffer_16(output_0_u)
free_aligned_buffer_16(output_0_v)
free_aligned_buffer_16(output_90_u)
free_aligned_buffer_16(output_90_v)
free_aligned_buffer_16(output_180_u)
free_aligned_buffer_16(output_180_v)
free_aligned_buffer_64(input)
free_aligned_buffer_64(output_0_u)
free_aligned_buffer_64(output_0_v)
free_aligned_buffer_64(output_90_u)
free_aligned_buffer_64(output_90_v)
free_aligned_buffer_64(output_180_u)
free_aligned_buffer_64(output_180_v)
EXPECT_EQ(0, err);
}
@ -342,13 +342,13 @@ TEST_F(libyuvTest, RotateUV270) {
ow = ih;
oh = iw >> 1;
align_buffer_16(input, iw * ih)
align_buffer_16(output_0_u, ow * oh)
align_buffer_16(output_0_v, ow * oh)
align_buffer_16(output_270_u, ow * oh)
align_buffer_16(output_270_v, ow * oh)
align_buffer_16(output_180_u, ow * oh)
align_buffer_16(output_180_v, ow * oh)
align_buffer_64(input, iw * ih)
align_buffer_64(output_0_u, ow * oh)
align_buffer_64(output_0_v, ow * oh)
align_buffer_64(output_270_u, ow * oh)
align_buffer_64(output_270_v, ow * oh)
align_buffer_64(output_180_u, ow * oh)
align_buffer_64(output_180_v, ow * oh)
for (i = 0; i < iw * ih; i += 2) {
input[i] = i >> 1;
@ -396,13 +396,13 @@ TEST_F(libyuvTest, RotateUV270) {
PrintArray(output_0_v, oh, ow);
}
free_aligned_buffer_16(input)
free_aligned_buffer_16(output_0_u)
free_aligned_buffer_16(output_0_v)
free_aligned_buffer_16(output_270_u)
free_aligned_buffer_16(output_270_v)
free_aligned_buffer_16(output_180_u)
free_aligned_buffer_16(output_180_v)
free_aligned_buffer_64(input)
free_aligned_buffer_64(output_0_u)
free_aligned_buffer_64(output_0_v)
free_aligned_buffer_64(output_270_u)
free_aligned_buffer_64(output_270_v)
free_aligned_buffer_64(output_180_u)
free_aligned_buffer_64(output_180_v)
EXPECT_EQ(0, err);
}
@ -417,9 +417,9 @@ TEST_F(libyuvTest, RotatePlane180) {
ow = iw;
oh = ih;
align_buffer_16(input, iw * ih)
align_buffer_16(output_0, iw * ih)
align_buffer_16(output_180, iw * ih)
align_buffer_64(input, iw * ih)
align_buffer_64(output_0, iw * ih)
align_buffer_64(output_180, iw * ih)
for (i = 0; i < iw * ih; ++i) {
input[i] = i;
@ -445,9 +445,9 @@ TEST_F(libyuvTest, RotatePlane180) {
PrintArray(output_0, iw, ih);
}
free_aligned_buffer_16(input)
free_aligned_buffer_16(output_0)
free_aligned_buffer_16(output_180)
free_aligned_buffer_64(input)
free_aligned_buffer_64(output_0)
free_aligned_buffer_64(output_180)
EXPECT_EQ(0, err);
}
@ -462,11 +462,11 @@ TEST_F(libyuvTest, RotatePlane270) {
ow = ih;
oh = iw;
align_buffer_16(input, iw * ih)
align_buffer_16(output_0, iw * ih)
align_buffer_16(output_90, ow * oh)
align_buffer_16(output_180, iw * ih)
align_buffer_16(output_270, ow * oh)
align_buffer_64(input, iw * ih)
align_buffer_64(output_0, iw * ih)
align_buffer_64(output_90, ow * oh)
align_buffer_64(output_180, iw * ih)
align_buffer_64(output_270, ow * oh)
for (i = 0; i < iw * ih; ++i)
input[i] = i;
@ -499,11 +499,11 @@ TEST_F(libyuvTest, RotatePlane270) {
PrintArray(output_0, iw, ih);
}
free_aligned_buffer_16(input)
free_aligned_buffer_16(output_0)
free_aligned_buffer_16(output_90)
free_aligned_buffer_16(output_180)
free_aligned_buffer_16(output_270)
free_aligned_buffer_64(input)
free_aligned_buffer_64(output_0)
free_aligned_buffer_64(output_90)
free_aligned_buffer_64(output_180)
free_aligned_buffer_64(output_270)
EXPECT_EQ(0, err);
}
@ -518,9 +518,9 @@ TEST_F(libyuvTest, RotatePlane90and270) {
ow = ih;
oh = iw;
align_buffer_16(input, iw * ih)
align_buffer_16(output_0, iw * ih)
align_buffer_16(output_90, ow * oh)
align_buffer_64(input, iw * ih)
align_buffer_64(output_0, iw * ih)
align_buffer_64(output_90, ow * oh)
for (i = 0; i < iw * ih; ++i) {
input[i] = i;
@ -546,9 +546,9 @@ TEST_F(libyuvTest, RotatePlane90and270) {
PrintArray(output_0, iw, ih);
}
free_aligned_buffer_16(input)
free_aligned_buffer_16(output_0)
free_aligned_buffer_16(output_90)
free_aligned_buffer_64(input)
free_aligned_buffer_64(output_0)
free_aligned_buffer_64(output_90)
EXPECT_EQ(0, err);
}
@ -563,9 +563,9 @@ TEST_F(libyuvTest, RotatePlane90Pitch) {
int ow = ih;
int oh = iw;
align_buffer_16(input, iw * ih)
align_buffer_16(output_0, iw * ih)
align_buffer_16(output_90, ow * oh)
align_buffer_64(input, iw * ih)
align_buffer_64(output_0, iw * ih)
align_buffer_64(output_90, ow * oh)
for (i = 0; i < iw * ih; ++i) {
input[i] = i;
@ -603,9 +603,9 @@ TEST_F(libyuvTest, RotatePlane90Pitch) {
PrintArray(output_0, iw, ih);
}
free_aligned_buffer_16(input)
free_aligned_buffer_16(output_0)
free_aligned_buffer_16(output_90)
free_aligned_buffer_64(input)
free_aligned_buffer_64(output_0)
free_aligned_buffer_64(output_90)
EXPECT_EQ(0, err);
}
@ -620,9 +620,9 @@ TEST_F(libyuvTest, RotatePlane270Pitch) {
ow = ih;
oh = iw;
align_buffer_16(input, iw * ih)
align_buffer_16(output_0, iw * ih)
align_buffer_16(output_270, ow * oh)
align_buffer_64(input, iw * ih)
align_buffer_64(output_0, iw * ih)
align_buffer_64(output_270, ow * oh)
for (i = 0; i < iw * ih; ++i) {
input[i] = i;
@ -660,9 +660,9 @@ TEST_F(libyuvTest, RotatePlane270Pitch) {
PrintArray(output_0, iw, ih);
}
free_aligned_buffer_16(input)
free_aligned_buffer_16(output_0)
free_aligned_buffer_16(output_270)
free_aligned_buffer_64(input)
free_aligned_buffer_64(output_0)
free_aligned_buffer_64(output_270)
EXPECT_EQ(0, err);
}
@ -683,18 +683,18 @@ TEST_F(libyuvTest, I420Rotate90) {
srandom(time(NULL));
align_buffer_16(orig_y, y_plane_size)
align_buffer_16(orig_u, uv_plane_size)
align_buffer_16(orig_v, uv_plane_size)
align_buffer_16(ro0_y, y_plane_size)
align_buffer_16(ro0_u, uv_plane_size)
align_buffer_16(ro0_v, uv_plane_size)
align_buffer_16(ro90_y, y_plane_size)
align_buffer_16(ro90_u, uv_plane_size)
align_buffer_16(ro90_v, uv_plane_size)
align_buffer_16(ro270_y, y_plane_size)
align_buffer_16(ro270_u, uv_plane_size)
align_buffer_16(ro270_v, uv_plane_size)
align_buffer_64(orig_y, y_plane_size)
align_buffer_64(orig_u, uv_plane_size)
align_buffer_64(orig_v, uv_plane_size)
align_buffer_64(ro0_y, y_plane_size)
align_buffer_64(ro0_u, uv_plane_size)
align_buffer_64(ro0_v, uv_plane_size)
align_buffer_64(ro90_y, y_plane_size)
align_buffer_64(ro90_u, uv_plane_size)
align_buffer_64(ro90_v, uv_plane_size)
align_buffer_64(ro270_y, y_plane_size)
align_buffer_64(ro270_u, uv_plane_size)
align_buffer_64(ro270_v, uv_plane_size)
memset(orig_y, 0, y_plane_size);
memset(orig_u, 0, uv_plane_size);
memset(orig_v, 0, uv_plane_size);
@ -774,18 +774,18 @@ TEST_F(libyuvTest, I420Rotate90) {
}
}
free_aligned_buffer_16(orig_y)
free_aligned_buffer_16(orig_u)
free_aligned_buffer_16(orig_v)
free_aligned_buffer_16(ro0_y)
free_aligned_buffer_16(ro0_u)
free_aligned_buffer_16(ro0_v)
free_aligned_buffer_16(ro90_y)
free_aligned_buffer_16(ro90_u)
free_aligned_buffer_16(ro90_v)
free_aligned_buffer_16(ro270_y)
free_aligned_buffer_16(ro270_u)
free_aligned_buffer_16(ro270_v)
free_aligned_buffer_64(orig_y)
free_aligned_buffer_64(orig_u)
free_aligned_buffer_64(orig_v)
free_aligned_buffer_64(ro0_y)
free_aligned_buffer_64(ro0_u)
free_aligned_buffer_64(ro0_v)
free_aligned_buffer_64(ro90_y)
free_aligned_buffer_64(ro90_u)
free_aligned_buffer_64(ro90_v)
free_aligned_buffer_64(ro270_y)
free_aligned_buffer_64(ro270_u)
free_aligned_buffer_64(ro270_v)
EXPECT_EQ(0, err);
}
@ -806,18 +806,18 @@ TEST_F(libyuvTest, I420Rotate270) {
srandom(time(NULL));
align_buffer_16(orig_y, y_plane_size)
align_buffer_16(orig_u, uv_plane_size)
align_buffer_16(orig_v, uv_plane_size)
align_buffer_16(ro0_y, y_plane_size)
align_buffer_16(ro0_u, uv_plane_size)
align_buffer_16(ro0_v, uv_plane_size)
align_buffer_16(ro90_y, y_plane_size)
align_buffer_16(ro90_u, uv_plane_size)
align_buffer_16(ro90_v, uv_plane_size)
align_buffer_16(ro270_y, y_plane_size)
align_buffer_16(ro270_u, uv_plane_size)
align_buffer_16(ro270_v, uv_plane_size)
align_buffer_64(orig_y, y_plane_size)
align_buffer_64(orig_u, uv_plane_size)
align_buffer_64(orig_v, uv_plane_size)
align_buffer_64(ro0_y, y_plane_size)
align_buffer_64(ro0_u, uv_plane_size)
align_buffer_64(ro0_v, uv_plane_size)
align_buffer_64(ro90_y, y_plane_size)
align_buffer_64(ro90_u, uv_plane_size)
align_buffer_64(ro90_v, uv_plane_size)
align_buffer_64(ro270_y, y_plane_size)
align_buffer_64(ro270_u, uv_plane_size)
align_buffer_64(ro270_v, uv_plane_size)
memset(orig_y, 0, y_plane_size);
memset(orig_u, 0, uv_plane_size);
memset(orig_v, 0, uv_plane_size);
@ -897,18 +897,18 @@ TEST_F(libyuvTest, I420Rotate270) {
}
}
free_aligned_buffer_16(orig_y)
free_aligned_buffer_16(orig_u)
free_aligned_buffer_16(orig_v)
free_aligned_buffer_16(ro0_y)
free_aligned_buffer_16(ro0_u)
free_aligned_buffer_16(ro0_v)
free_aligned_buffer_16(ro90_y)
free_aligned_buffer_16(ro90_u)
free_aligned_buffer_16(ro90_v)
free_aligned_buffer_16(ro270_y)
free_aligned_buffer_16(ro270_u)
free_aligned_buffer_16(ro270_v)
free_aligned_buffer_64(orig_y)
free_aligned_buffer_64(orig_u)
free_aligned_buffer_64(orig_v)
free_aligned_buffer_64(ro0_y)
free_aligned_buffer_64(ro0_u)
free_aligned_buffer_64(ro0_v)
free_aligned_buffer_64(ro90_y)
free_aligned_buffer_64(ro90_u)
free_aligned_buffer_64(ro90_v)
free_aligned_buffer_64(ro270_y)
free_aligned_buffer_64(ro270_u)
free_aligned_buffer_64(ro270_v)
EXPECT_EQ(0, err);
}
@ -929,14 +929,14 @@ TEST_F(libyuvTest, NV12ToI420Rotate90) {
srandom(time(NULL));
align_buffer_16(orig_y, y_plane_size)
align_buffer_16(orig_uv, nv_uv_plane_size)
align_buffer_16(ro0_y, y_plane_size)
align_buffer_16(ro0_u, uv_plane_size)
align_buffer_16(ro0_v, uv_plane_size)
align_buffer_16(ro90_y, y_plane_size)
align_buffer_16(ro90_u, uv_plane_size)
align_buffer_16(ro90_v, uv_plane_size)
align_buffer_64(orig_y, y_plane_size)
align_buffer_64(orig_uv, nv_uv_plane_size)
align_buffer_64(ro0_y, y_plane_size)
align_buffer_64(ro0_u, uv_plane_size)
align_buffer_64(ro0_v, uv_plane_size)
align_buffer_64(ro90_y, y_plane_size)
align_buffer_64(ro90_u, uv_plane_size)
align_buffer_64(ro90_v, uv_plane_size)
memset(orig_y, 0, y_plane_size);
memset(orig_uv, 0, uv_plane_size);
memset(ro0_y, 0, y_plane_size);
@ -1008,14 +1008,14 @@ TEST_F(libyuvTest, NV12ToI420Rotate90) {
++err;
}
free_aligned_buffer_16(orig_y)
free_aligned_buffer_16(orig_uv)
free_aligned_buffer_16(ro0_y)
free_aligned_buffer_16(ro0_u)
free_aligned_buffer_16(ro0_v)
free_aligned_buffer_16(ro90_y)
free_aligned_buffer_16(ro90_u)
free_aligned_buffer_16(ro90_v)
free_aligned_buffer_64(orig_y)
free_aligned_buffer_64(orig_uv)
free_aligned_buffer_64(ro0_y)
free_aligned_buffer_64(ro0_u)
free_aligned_buffer_64(ro0_v)
free_aligned_buffer_64(ro90_y)
free_aligned_buffer_64(ro90_u)
free_aligned_buffer_64(ro90_v)
EXPECT_EQ(0, err);
}
@ -1037,14 +1037,14 @@ TEST_F(libyuvTest, NV12ToI420Rotate270) {
srandom(time(NULL));
align_buffer_16(orig_y, y_plane_size)
align_buffer_16(orig_uv, nv_uv_plane_size)
align_buffer_16(ro0_y, y_plane_size)
align_buffer_16(ro0_u, uv_plane_size)
align_buffer_16(ro0_v, uv_plane_size)
align_buffer_16(ro270_y, y_plane_size)
align_buffer_16(ro270_u, uv_plane_size)
align_buffer_16(ro270_v, uv_plane_size)
align_buffer_64(orig_y, y_plane_size)
align_buffer_64(orig_uv, nv_uv_plane_size)
align_buffer_64(ro0_y, y_plane_size)
align_buffer_64(ro0_u, uv_plane_size)
align_buffer_64(ro0_v, uv_plane_size)
align_buffer_64(ro270_y, y_plane_size)
align_buffer_64(ro270_u, uv_plane_size)
align_buffer_64(ro270_v, uv_plane_size)
memset(orig_y, 0, y_plane_size);
memset(orig_uv, 0, nv_uv_plane_size);
memset(ro0_y, 0, y_plane_size);
@ -1116,14 +1116,14 @@ TEST_F(libyuvTest, NV12ToI420Rotate270) {
++err;
}
free_aligned_buffer_16(orig_y)
free_aligned_buffer_16(orig_uv)
free_aligned_buffer_16(ro0_y)
free_aligned_buffer_16(ro0_u)
free_aligned_buffer_16(ro0_v)
free_aligned_buffer_16(ro270_y)
free_aligned_buffer_16(ro270_u)
free_aligned_buffer_16(ro270_v)
free_aligned_buffer_64(orig_y)
free_aligned_buffer_64(orig_uv)
free_aligned_buffer_64(ro0_y)
free_aligned_buffer_64(ro0_u)
free_aligned_buffer_64(ro0_v)
free_aligned_buffer_64(ro270_y)
free_aligned_buffer_64(ro270_u)
free_aligned_buffer_64(ro270_v)
EXPECT_EQ(0, err);
}
@ -1145,14 +1145,14 @@ TEST_F(libyuvTest, NV12ToI420Rotate180) {
srandom(time(NULL));
align_buffer_16(orig_y, y_plane_size)
align_buffer_16(orig_uv, nv_uv_plane_size)
align_buffer_16(ro0_y, y_plane_size)
align_buffer_16(ro0_u, uv_plane_size)
align_buffer_16(ro0_v, uv_plane_size)
align_buffer_16(ro180_y, y_plane_size)
align_buffer_16(ro180_u, uv_plane_size)
align_buffer_16(ro180_v, uv_plane_size)
align_buffer_64(orig_y, y_plane_size)
align_buffer_64(orig_uv, nv_uv_plane_size)
align_buffer_64(ro0_y, y_plane_size)
align_buffer_64(ro0_u, uv_plane_size)
align_buffer_64(ro0_v, uv_plane_size)
align_buffer_64(ro180_y, y_plane_size)
align_buffer_64(ro180_u, uv_plane_size)
align_buffer_64(ro180_v, uv_plane_size)
memset(orig_y, 0, y_plane_size);
memset(orig_uv, 0, nv_uv_plane_size);
memset(ro0_y, 0, y_plane_size);
@ -1221,14 +1221,14 @@ TEST_F(libyuvTest, NV12ToI420Rotate180) {
++err;
}
free_aligned_buffer_16(orig_y)
free_aligned_buffer_16(orig_uv)
free_aligned_buffer_16(ro0_y)
free_aligned_buffer_16(ro0_u)
free_aligned_buffer_16(ro0_v)
free_aligned_buffer_16(ro180_y)
free_aligned_buffer_16(ro180_u)
free_aligned_buffer_16(ro180_v)
free_aligned_buffer_64(orig_y)
free_aligned_buffer_64(orig_uv)
free_aligned_buffer_64(ro0_y)
free_aligned_buffer_64(ro0_u)
free_aligned_buffer_64(ro0_v)
free_aligned_buffer_64(ro180_y)
free_aligned_buffer_64(ro180_u)
free_aligned_buffer_64(ro180_v)
EXPECT_EQ(0, err);
}
@ -1249,17 +1249,17 @@ TEST_F(libyuvTest, NV12ToI420RotateNegHeight90) {
srandom(time(NULL));
align_buffer_16(orig_y, y_plane_size)
align_buffer_16(orig_uv, nv_uv_plane_size)
align_buffer_16(roa_y, y_plane_size)
align_buffer_16(roa_u, uv_plane_size)
align_buffer_16(roa_v, uv_plane_size)
align_buffer_16(rob_y, y_plane_size)
align_buffer_16(rob_u, uv_plane_size)
align_buffer_16(rob_v, uv_plane_size)
align_buffer_16(roc_y, y_plane_size)
align_buffer_16(roc_u, uv_plane_size)
align_buffer_16(roc_v, uv_plane_size)
align_buffer_64(orig_y, y_plane_size)
align_buffer_64(orig_uv, nv_uv_plane_size)
align_buffer_64(roa_y, y_plane_size)
align_buffer_64(roa_u, uv_plane_size)
align_buffer_64(roa_v, uv_plane_size)
align_buffer_64(rob_y, y_plane_size)
align_buffer_64(rob_u, uv_plane_size)
align_buffer_64(rob_v, uv_plane_size)
align_buffer_64(roc_y, y_plane_size)
align_buffer_64(roc_u, uv_plane_size)
align_buffer_64(roc_v, uv_plane_size)
memset(orig_y, 0, y_plane_size);
memset(orig_uv, 0, nv_uv_plane_size);
memset(roa_y, 0, y_plane_size);
@ -1375,17 +1375,17 @@ TEST_F(libyuvTest, NV12ToI420RotateNegHeight90) {
PrintArray(roc_v, uv_st_0, uv_st_90);
}
free_aligned_buffer_16(orig_y)
free_aligned_buffer_16(orig_uv)
free_aligned_buffer_16(roa_y)
free_aligned_buffer_16(roa_u)
free_aligned_buffer_16(roa_v)
free_aligned_buffer_16(rob_y)
free_aligned_buffer_16(rob_u)
free_aligned_buffer_16(rob_v)
free_aligned_buffer_16(roc_y)
free_aligned_buffer_16(roc_u)
free_aligned_buffer_16(roc_v)
free_aligned_buffer_64(orig_y)
free_aligned_buffer_64(orig_uv)
free_aligned_buffer_64(roa_y)
free_aligned_buffer_64(roa_u)
free_aligned_buffer_64(roa_v)
free_aligned_buffer_64(rob_y)
free_aligned_buffer_64(rob_u)
free_aligned_buffer_64(rob_v)
free_aligned_buffer_64(roc_y)
free_aligned_buffer_64(roc_u)
free_aligned_buffer_64(roc_v)
EXPECT_EQ(0, y_err + uv_err);
}
@ -1406,14 +1406,14 @@ TEST_F(libyuvTest, NV12ToI420RotateNegHeight180) {
srandom(time(NULL));
align_buffer_16(orig_y, y_plane_size)
align_buffer_16(orig_uv, nv_uv_plane_size)
align_buffer_16(roa_y, y_plane_size)
align_buffer_16(roa_u, uv_plane_size)
align_buffer_16(roa_v, uv_plane_size)
align_buffer_16(rob_y, y_plane_size)
align_buffer_16(rob_u, uv_plane_size)
align_buffer_16(rob_v, uv_plane_size)
align_buffer_64(orig_y, y_plane_size)
align_buffer_64(orig_uv, nv_uv_plane_size)
align_buffer_64(roa_y, y_plane_size)
align_buffer_64(roa_u, uv_plane_size)
align_buffer_64(roa_v, uv_plane_size)
align_buffer_64(rob_y, y_plane_size)
align_buffer_64(rob_u, uv_plane_size)
align_buffer_64(rob_v, uv_plane_size)
memset(orig_y, 0, y_plane_size);
memset(orig_uv, 0, nv_uv_plane_size);
memset(roa_y, 0, y_plane_size);
@ -1505,14 +1505,14 @@ TEST_F(libyuvTest, NV12ToI420RotateNegHeight180) {
PrintArray(rob_v, uv_st, uvh + b * 2);
}
free_aligned_buffer_16(orig_y)
free_aligned_buffer_16(orig_uv)
free_aligned_buffer_16(roa_y)
free_aligned_buffer_16(roa_u)
free_aligned_buffer_16(roa_v)
free_aligned_buffer_16(rob_y)
free_aligned_buffer_16(rob_u)
free_aligned_buffer_16(rob_v)
free_aligned_buffer_64(orig_y)
free_aligned_buffer_64(orig_uv)
free_aligned_buffer_64(roa_y)
free_aligned_buffer_64(roa_u)
free_aligned_buffer_64(roa_v)
free_aligned_buffer_64(rob_y)
free_aligned_buffer_64(rob_u)
free_aligned_buffer_64(rob_v)
EXPECT_EQ(0, y_err + uv_err);
}
@ -1531,14 +1531,14 @@ TEST_F(libyuvTest, NV12ToI420SplitUV) {
int uv_plane_size = (uvw + b * 2) * (uvh + b * 2);
int nv_uv_plane_size = (uvw * 2 + b * 2) * (uvh + b * 2);
align_buffer_16(src_y, y_plane_size)
align_buffer_16(src_uv, nv_uv_plane_size)
align_buffer_16(dst_y_c, y_plane_size)
align_buffer_16(dst_u_c, uv_plane_size)
align_buffer_16(dst_v_c, uv_plane_size)
align_buffer_16(dst_y_opt, y_plane_size)
align_buffer_16(dst_u_opt, uv_plane_size)
align_buffer_16(dst_v_opt, uv_plane_size)
align_buffer_64(src_y, y_plane_size)
align_buffer_64(src_uv, nv_uv_plane_size)
align_buffer_64(dst_y_c, y_plane_size)
align_buffer_64(dst_u_c, uv_plane_size)
align_buffer_64(dst_v_c, uv_plane_size)
align_buffer_64(dst_y_opt, y_plane_size)
align_buffer_64(dst_u_opt, uv_plane_size)
align_buffer_64(dst_v_opt, uv_plane_size)
memset(src_y, 0, y_plane_size);
memset(src_uv, 0, nv_uv_plane_size);
@ -1599,14 +1599,14 @@ TEST_F(libyuvTest, NV12ToI420SplitUV) {
++err;
}
}
free_aligned_buffer_16(src_y)
free_aligned_buffer_16(src_uv)
free_aligned_buffer_16(dst_y_c)
free_aligned_buffer_16(dst_u_c)
free_aligned_buffer_16(dst_v_c)
free_aligned_buffer_16(dst_y_opt)
free_aligned_buffer_16(dst_u_opt)
free_aligned_buffer_16(dst_v_opt)
free_aligned_buffer_64(src_y)
free_aligned_buffer_64(src_uv)
free_aligned_buffer_64(dst_y_c)
free_aligned_buffer_64(dst_u_c)
free_aligned_buffer_64(dst_v_c)
free_aligned_buffer_64(dst_y_opt)
free_aligned_buffer_64(dst_u_opt)
free_aligned_buffer_64(dst_v_opt)
EXPECT_EQ(0, err);
}


@ -24,7 +24,7 @@ static int ARGBTestFilter(int src_width, int src_height,
int src_argb_plane_size = (src_width + b * 2) * (src_height + b * 2) * 4;
int src_stride_argb = (b * 2 + src_width) * 4;
align_buffer_16(src_argb, src_argb_plane_size)
align_buffer_64(src_argb, src_argb_plane_size)
memset(src_argb, 1, src_argb_plane_size);
int dst_argb_plane_size = (dst_width + b * 2) * (dst_height + b * 2) * 4;
@ -39,8 +39,8 @@ static int ARGBTestFilter(int src_width, int src_height,
}
}
align_buffer_16(dst_argb_c, dst_argb_plane_size)
align_buffer_16(dst_argb_opt, dst_argb_plane_size)
align_buffer_64(dst_argb_c, dst_argb_plane_size)
align_buffer_64(dst_argb_opt, dst_argb_plane_size)
memset(dst_argb_c, 2, dst_argb_plane_size);
memset(dst_argb_opt, 3, dst_argb_plane_size);
@ -95,9 +95,9 @@ static int ARGBTestFilter(int src_width, int src_height,
}
}
free_aligned_buffer_16(dst_argb_c)
free_aligned_buffer_16(dst_argb_opt)
free_aligned_buffer_16(src_argb)
free_aligned_buffer_64(dst_argb_c)
free_aligned_buffer_64(dst_argb_opt)
free_aligned_buffer_64(src_argb)
return max_diff;
}


@ -13,14 +13,14 @@
#include <gtest/gtest.h>
#define align_buffer_16(var, size) \
#define align_buffer_64(var, size) \
uint8* var; \
uint8* var##_mem; \
var##_mem = reinterpret_cast<uint8*>(malloc((size) + 15)); \
var##_mem = reinterpret_cast<uint8*>(malloc((size) + 63)); \
var = reinterpret_cast<uint8*> \
((reinterpret_cast<intptr_t>(var##_mem) + 15) & ~15);
((reinterpret_cast<intptr_t>(var##_mem) + 63) & ~63);
#define free_aligned_buffer_16(var) \
#define free_aligned_buffer_64(var) \
free(var##_mem); \
var = 0;
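
The renamed macro over-allocates by 63 bytes and rounds the raw malloc pointer up to the next 64-byte boundary, so every test buffer starts 64-byte aligned regardless of what malloc returns. A worked example of the rounding plus a standalone equivalent of the pointer math (illustrative; uses the same uint8/intptr_t casts as the macro itself):

// Example: if malloc returns 0x10000041, then
//   0x10000041 + 63 = 0x10000080
//   0x10000080 & ~63 = 0x10000080   (64-byte aligned, 63 bytes into the slack)
uint8* AlignUp64(uint8* p) {
  return reinterpret_cast<uint8*>(
      (reinterpret_cast<intptr_t>(p) + 63) & ~63);
}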