diff --git a/README.chromium b/README.chromium index b2cfe7fb4..903df484f 100644 --- a/README.chromium +++ b/README.chromium @@ -1,6 +1,6 @@ Name: libyuv URL: http://code.google.com/p/libyuv/ -Version: 284 +Version: 285 License: BSD License File: LICENSE diff --git a/include/libyuv/planar_functions.h b/include/libyuv/planar_functions.h index 671fb0384..00f84d8b1 100644 --- a/include/libyuv/planar_functions.h +++ b/include/libyuv/planar_functions.h @@ -231,7 +231,7 @@ typedef void (*ARGBBlendRow)(const uint8* src_argb0, uint8* dst_argb, int width); // Get function to Alpha Blend ARGB pixels and store to destination. -ARGBBlendRow GetARGBBlend(uint8* dst_argb, int dst_stride_argb, int width); +ARGBBlendRow GetARGBBlend(); // Alpha Blend ARGB images and store to destination. int ARGBBlend(const uint8* src_argb0, int src_stride_argb0, diff --git a/include/libyuv/version.h b/include/libyuv/version.h index 799c1b08e..d5883e68b 100644 --- a/include/libyuv/version.h +++ b/include/libyuv/version.h @@ -11,7 +11,7 @@ #ifndef INCLUDE_LIBYUV_VERSION_H_ #define INCLUDE_LIBYUV_VERSION_H_ -#define LIBYUV_VERSION 284 +#define LIBYUV_VERSION 285 #endif // INCLUDE_LIBYUV_VERSION_H_ diff --git a/source/planar_functions.cc b/source/planar_functions.cc index 680404b62..1a697e201 100644 --- a/source/planar_functions.cc +++ b/source/planar_functions.cc @@ -163,36 +163,18 @@ int ARGBCopy(const uint8* src_argb, int src_stride_argb, // Get a blender that optimized for the CPU, alignment and pixel count. // As there are 6 blenders to choose from, the caller should try to use // the same blend function for all pixels if possible. -ARGBBlendRow GetARGBBlend(uint8* dst_argb, int dst_stride_argb, int width) { +ARGBBlendRow GetARGBBlend() { void (*ARGBBlendRow)(const uint8* src_argb, const uint8* src_argb1, uint8* dst_argb, int width) = ARGBBlendRow_C; -#if defined(HAS_ARGBBLENDROW1_SSSE3) - if (TestCpuFlag(kCpuHasSSSE3)) { - ARGBBlendRow = ARGBBlendRow1_SSSE3; #if defined(HAS_ARGBBLENDROW_SSSE3) - if (width >= 4) { - ARGBBlendRow = ARGBBlendRow_Any_SSSE3; - if (IS_ALIGNED(width, 4) && - IS_ALIGNED(dst_argb, 16) && IS_ALIGNED(dst_stride_argb, 16)) { - ARGBBlendRow = ARGBBlendRow_Aligned_SSSE3; - } - } -#endif + if (TestCpuFlag(kCpuHasSSSE3)) { + ARGBBlendRow = ARGBBlendRow_SSSE3; return ARGBBlendRow; } #endif -#if defined(HAS_ARGBBLENDROW1_SSE2) - if (TestCpuFlag(kCpuHasSSE2)) { - ARGBBlendRow = ARGBBlendRow1_SSE2; #if defined(HAS_ARGBBLENDROW_SSE2) - if (width >= 4) { - ARGBBlendRow = ARGBBlendRow_Any_SSE2; - if (IS_ALIGNED(width, 4) && - IS_ALIGNED(dst_argb, 16) && IS_ALIGNED(dst_stride_argb, 16)) { - ARGBBlendRow = ARGBBlendRow_Aligned_SSE2; - } - } -#endif + if (TestCpuFlag(kCpuHasSSE2)) { + ARGBBlendRow = ARGBBlendRow_SSE2; } #endif return ARGBBlendRow; @@ -213,8 +195,7 @@ int ARGBBlend(const uint8* src_argb0, int src_stride_argb0, dst_stride_argb = -dst_stride_argb; } void (*ARGBBlendRow)(const uint8* src_argb, const uint8* src_argb1, - uint8* dst_argb, int width) = - GetARGBBlend(dst_argb, dst_stride_argb, width); + uint8* dst_argb, int width) = GetARGBBlend(); for (int y = 0; y < height; ++y) { ARGBBlendRow(src_argb0, src_argb1, dst_argb, width); @@ -626,8 +607,8 @@ int ARGB1555ToARGB(const uint8* src_argb1555, int src_stride_argb1555, src_argb1555 = src_argb1555 + (height - 1) * src_stride_argb1555; src_stride_argb1555 = -src_stride_argb1555; } - void (*ARGB1555ToARGBRow)(const uint8* src_argb1555, uint8* dst_argb, int pix) = - ARGB1555ToARGBRow_C; + void (*ARGB1555ToARGBRow)(const uint8* src_argb1555, uint8* 
dst_argb, + int pix) = ARGB1555ToARGBRow_C; #if defined(HAS_ARGB1555TOARGBROW_SSE2) if (TestCpuFlag(kCpuHasSSE2) && IS_ALIGNED(width, 8) && @@ -653,8 +634,8 @@ int ARGB4444ToARGB(const uint8* src_argb4444, int src_stride_argb4444, src_argb4444 = src_argb4444 + (height - 1) * src_stride_argb4444; src_stride_argb4444 = -src_stride_argb4444; } - void (*ARGB4444ToARGBRow)(const uint8* src_argb4444, uint8* dst_argb, int pix) = - ARGB4444ToARGBRow_C; + void (*ARGB4444ToARGBRow)(const uint8* src_argb4444, uint8* dst_argb, + int pix) = ARGB4444ToARGBRow_C; #if defined(HAS_ARGB4444TOARGBROW_SSE2) if (TestCpuFlag(kCpuHasSSE2) && IS_ALIGNED(width, 8) && @@ -1691,7 +1672,7 @@ int ARGBComputeCumulativeSum(const uint8* src_argb, int src_stride_argb, ComputeCumulativeSumRow = ComputeCumulativeSumRow_SSE2; } #endif - memset(dst_cumsum, 0, width * sizeof(dst_cumsum[0]) * 4); // 4 ints per pixel. + memset(dst_cumsum, 0, width * sizeof(dst_cumsum[0]) * 4); // 4 int per pixel. int32* previous_cumsum = dst_cumsum; for (int y = 0; y < height; ++y) { ComputeCumulativeSumRow(src_argb, dst_cumsum, previous_cumsum, width); diff --git a/source/row.h b/source/row.h index 11175c6a3..295d593c1 100644 --- a/source/row.h +++ b/source/row.h @@ -41,7 +41,6 @@ extern "C" { #define HAS_ARGB4444TOARGBROW_SSE2 #define HAS_ARGBATTENUATE_SSSE3 #define HAS_ARGBBLENDROW_SSSE3 -#define HAS_ARGBBLENDROW1_SSSE3 #define HAS_ARGBTOARGB1555ROW_SSE2 #define HAS_ARGBTOARGB4444ROW_SSE2 #define HAS_ARGBTORAWROW_SSSE3 @@ -88,7 +87,6 @@ extern "C" { #define HAS_MIRRORROW_SSE2 #define HAS_ARGBATTENUATE_SSE2 #define HAS_ARGBBLENDROW_SSE2 -#define HAS_ARGBBLENDROW1_SSE2 #endif // The following are available on Neon platforms @@ -404,18 +402,10 @@ void YToARGBRow_SSE2(const uint8* y_buf, int width); // ARGB preattenuated alpha blend. -void ARGBBlendRow_Aligned_SSSE3(const uint8* src_argb0, const uint8* src_argb1, - uint8* dst_argb, int width); -void ARGBBlendRow_Aligned_SSE2(const uint8* src_argb0, const uint8* src_argb1, - uint8* dst_argb, int width); -void ARGBBlendRow1_SSSE3(const uint8* src_argb0, const uint8* src_argb1, - uint8* dst_argb, int width); -void ARGBBlendRow1_SSE2(const uint8* src_argb0, const uint8* src_argb1, +void ARGBBlendRow_SSSE3(const uint8* src_argb0, const uint8* src_argb1, uint8* dst_argb, int width); -void ARGBBlendRow_Any_SSSE3(const uint8* src_argb0, const uint8* src_argb1, - uint8* dst_argb, int width); -void ARGBBlendRow_Any_SSE2(const uint8* src_argb0, const uint8* src_argb1, - uint8* dst_argb, int width); +void ARGBBlendRow_SSE2(const uint8* src_argb0, const uint8* src_argb1, + uint8* dst_argb, int width); void ARGBBlendRow_C(const uint8* src_argb0, const uint8* src_argb1, uint8* dst_argb, int width); diff --git a/source/row_common.cc b/source/row_common.cc index 686c5c8da..a1993f485 100644 --- a/source/row_common.cc +++ b/source/row_common.cc @@ -685,66 +685,6 @@ void ARGBBlendRow_C(const uint8* src_argb0, const uint8* src_argb1, } } -#ifdef HAS_ARGBBLENDROW_SSE2 -void ARGBBlendRow_Any_SSE2(const uint8* src_argb0, const uint8* src_argb1, - uint8* dst_argb, int width) { - // Do 1 to 3 pixels to get destination aligned. 
- if ((uintptr_t)(dst_argb) & 15) { - int count = width; - if (count > 4 && ((intptr_t)(dst_argb) & 3) == 0) { - count = (-(intptr_t)(dst_argb) >> 2) & 3; - } - ARGBBlendRow1_SSE2(src_argb0, src_argb1, dst_argb, count); - src_argb0 += count * 4; - src_argb1 += count * 4; - dst_argb += count * 4; - width -= count; - } - // Do multiple of 4 pixels - if (width & ~3) { - ARGBBlendRow_Aligned_SSE2(src_argb0, src_argb1, dst_argb, width & ~3); - } - // Do remaining 1 to 3 pixels - if (width & 3) { - src_argb0 += (width & ~3) * 4; - src_argb1 += (width & ~3) * 4; - dst_argb += (width & ~3) * 4; - width &= 3; - ARGBBlendRow1_SSE2(src_argb0, src_argb1, dst_argb, width); - } -} -#endif // HAS_ARGBBLENDROW_SSE2 - -#ifdef HAS_ARGBBLENDROW_SSSE3 -void ARGBBlendRow_Any_SSSE3(const uint8* src_argb0, const uint8* src_argb1, - uint8* dst_argb, int width) { - // Do 1 to 3 pixels to get destination aligned. - if ((uintptr_t)(dst_argb) & 15) { - int count = width; - if (count > 4 && ((intptr_t)(dst_argb) & 3) == 0) { - count = (-(intptr_t)(dst_argb) >> 2) & 3; - } - ARGBBlendRow1_SSSE3(src_argb0, src_argb1, dst_argb, count); - src_argb0 += count * 4; - src_argb1 += count * 4; - dst_argb += count * 4; - width -= count; - } - // Do multiple of 4 pixels. - if (width & ~3) { - ARGBBlendRow_Aligned_SSSE3(src_argb0, src_argb1, dst_argb, width & ~3); - } - // Do remaining 1 to 3 pixels - if (width & 3) { - src_argb0 += (width & ~3) * 4; - src_argb1 += (width & ~3) * 4; - dst_argb += (width & ~3) * 4; - width &= 3; - ARGBBlendRow1_SSSE3(src_argb0, src_argb1, dst_argb, width); - } -} -#endif // HAS_ARGBBLENDROW_SSSE3 - // Wrappers to handle odd width #define YANY(NAMEANY, I420TORGB_SSE, I420TORGB_C, UV_SHIFT) \ void NAMEANY(const uint8* y_buf, \ diff --git a/source/row_posix.cc b/source/row_posix.cc index c111f60ae..19f4e60f5 100644 --- a/source/row_posix.cc +++ b/source/row_posix.cc @@ -2391,11 +2391,8 @@ void UYVYToUVRow_Unaligned_SSE2(const uint8* src_uyvy, int stride_uyvy, #ifdef HAS_ARGBBLENDROW_SSE2 // Blend 8 pixels at a time. -// src_argb0 unaligned. -// src_argb1 and dst_argb aligned to 16 bytes. -// width must be multiple of 4 pixels. 
-void ARGBBlendRow_Aligned_SSE2(const uint8* src_argb0, const uint8* src_argb1, - uint8* dst_argb, int width) { +void ARGBBlendRow_SSE2(const uint8* src_argb0, const uint8* src_argb1, + uint8* dst_argb, int width) { asm volatile ( "pcmpeqb %%xmm7,%%xmm7 \n" "psrlw $0xf,%%xmm7 \n" @@ -2405,86 +2402,14 @@ void ARGBBlendRow_Aligned_SSE2(const uint8* src_argb0, const uint8* src_argb1, "psllw $0x8,%%xmm5 \n" "pcmpeqb %%xmm4,%%xmm4 \n" "pslld $0x18,%%xmm4 \n" + "sub $0x1,%3 \n" + "je 91f \n" + "jl 99f \n" - // 8 pixel loop - ".p2align 4 \n" - "1: \n" - "movdqu (%0),%%xmm3 \n" - "movdqa %%xmm3,%%xmm0 \n" - "pxor %%xmm4,%%xmm3 \n" - "movdqu (%1),%%xmm2 \n" - "psrlw $0x8,%%xmm3 \n" - "pshufhw $0xf5,%%xmm3,%%xmm3 \n" - "pshuflw $0xf5,%%xmm3,%%xmm3 \n" - "pand %%xmm6,%%xmm2 \n" - "paddw %%xmm7,%%xmm3 \n" - "pmullw %%xmm3,%%xmm2 \n" - "movdqu (%1),%%xmm1 \n" - "psrlw $0x8,%%xmm1 \n" - "por %%xmm4,%%xmm0 \n" - "pmullw %%xmm3,%%xmm1 \n" - "movdqu 0x10(%0),%%xmm3 \n" - "lea 0x20(%0),%0 \n" - "psrlw $0x8,%%xmm2 \n" - "paddusb %%xmm2,%%xmm0 \n" - "pand %%xmm5,%%xmm1 \n" - "paddusb %%xmm1,%%xmm0 \n" - "sub $0x4,%3 \n" - "movdqa %%xmm0,(%2) \n" - "jle 9f \n" - "movdqa %%xmm3,%%xmm0 \n" - "pxor %%xmm4,%%xmm3 \n" - "movdqu 0x10(%1),%%xmm2 \n" - "psrlw $0x8,%%xmm3 \n" - "pshufhw $0xf5,%%xmm3,%%xmm3 \n" - "pshuflw $0xf5,%%xmm3,%%xmm3 \n" - "pand %%xmm6,%%xmm2 \n" - "paddw %%xmm7,%%xmm3 \n" - "pmullw %%xmm3,%%xmm2 \n" - "movdqu 0x10(%1),%%xmm1 \n" - "lea 0x20(%1),%1 \n" - "psrlw $0x8,%%xmm1 \n" - "por %%xmm4,%%xmm0 \n" - "pmullw %%xmm3,%%xmm1 \n" - "psrlw $0x8,%%xmm2 \n" - "paddusb %%xmm2,%%xmm0 \n" - "pand %%xmm5,%%xmm1 \n" - "paddusb %%xmm1,%%xmm0 \n" - "sub $0x4,%3 \n" - "movdqa %%xmm0,0x10(%2) \n" - "lea 0x20(%2),%2 \n" - "jg 1b \n" - "9: \n" - : "+r"(src_argb0), // %0 - "+r"(src_argb1), // %1 - "+r"(dst_argb), // %2 - "+r"(width) // %3 - : - : "memory", "cc" -#if defined(__SSE2__) - , "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7" -#endif - ); -} -#endif // HAS_ARGBBLENDROW_SSE2 - -#ifdef HAS_ARGBBLENDROW1_SSE2 -// Blend 1 pixel at a time, unaligned -void ARGBBlendRow1_SSE2(const uint8* src_argb0, const uint8* src_argb1, - uint8* dst_argb, int width) { - asm volatile ( - "pcmpeqb %%xmm7,%%xmm7 \n" - "psrlw $0xf,%%xmm7 \n" - "pcmpeqb %%xmm6,%%xmm6 \n" - "psrlw $0x8,%%xmm6 \n" - "pcmpeqb %%xmm5,%%xmm5 \n" - "psllw $0x8,%%xmm5 \n" - "pcmpeqb %%xmm4,%%xmm4 \n" - "pslld $0x18,%%xmm4 \n" - - // 1 pixel loop - ".p2align 4 \n" - "1: \n" + // 1 pixel loop until destination pointer is aligned. + "10: \n" + "test $0xf,%2 \n" + "je 19f \n" "movd (%0),%%xmm3 \n" "lea 0x4(%0),%0 \n" "movdqa %%xmm3,%%xmm0 \n" @@ -2508,11 +2433,96 @@ void ARGBBlendRow1_SSE2(const uint8* src_argb0, const uint8* src_argb1, "sub $0x1,%3 \n" "movd %%xmm0,(%2) \n" "lea 0x4(%2),%2 \n" - "jg 1b \n" - : "+r"(src_argb0), // %0 - "+r"(src_argb1), // %1 - "+r"(dst_argb), // %2 - "+r"(width) // %3 + "jge 10b \n" + + "19: \n" + "add $1-4,%3 \n" + "jl 49f \n" + + // 8 pixel loop. 
+ ".p2align 2 \n" + "41: \n" + "movdqu (%0),%%xmm3 \n" + "movdqa %%xmm3,%%xmm0 \n" + "pxor %%xmm4,%%xmm3 \n" + "psrlw $0x8,%%xmm3 \n" + "pshufhw $0xf5,%%xmm3,%%xmm3 \n" + "pshuflw $0xf5,%%xmm3,%%xmm3 \n" + "movdqu (%1),%%xmm2 \n" + "pand %%xmm6,%%xmm2 \n" + "paddw %%xmm7,%%xmm3 \n" + "pmullw %%xmm3,%%xmm2 \n" + "movdqu (%1),%%xmm1 \n" + "psrlw $0x8,%%xmm1 \n" + "por %%xmm4,%%xmm0 \n" + "pmullw %%xmm3,%%xmm1 \n" + "movdqu 0x10(%0),%%xmm3 \n" + "lea 0x20(%0),%0 \n" + "psrlw $0x8,%%xmm2 \n" + "paddusb %%xmm2,%%xmm0 \n" + "pand %%xmm5,%%xmm1 \n" + "paddusb %%xmm1,%%xmm0 \n" + "sub $0x4,%3 \n" + "movdqa %%xmm0,(%2) \n" + "jl 49f \n" + "movdqa %%xmm3,%%xmm0 \n" + "pxor %%xmm4,%%xmm3 \n" + "movdqu 0x10(%1),%%xmm2 \n" + "psrlw $0x8,%%xmm3 \n" + "pshufhw $0xf5,%%xmm3,%%xmm3 \n" + "pshuflw $0xf5,%%xmm3,%%xmm3 \n" + "pand %%xmm6,%%xmm2 \n" + "paddw %%xmm7,%%xmm3 \n" + "pmullw %%xmm3,%%xmm2 \n" + "movdqu 0x10(%1),%%xmm1 \n" + "lea 0x20(%1),%1 \n" + "psrlw $0x8,%%xmm1 \n" + "por %%xmm4,%%xmm0 \n" + "pmullw %%xmm3,%%xmm1 \n" + "psrlw $0x8,%%xmm2 \n" + "paddusb %%xmm2,%%xmm0 \n" + "pand %%xmm5,%%xmm1 \n" + "paddusb %%xmm1,%%xmm0 \n" + "sub $0x4,%3 \n" + "movdqa %%xmm0,0x10(%2) \n" + "lea 0x20(%2),%2 \n" + "jge 41b \n" + + "49: \n" + "add $0x3,%3 \n" + "jl 99f \n" + + // 1 pixel loop. + "91: \n" + "movd (%0),%%xmm3 \n" + "lea 0x4(%0),%0 \n" + "movdqa %%xmm3,%%xmm0 \n" + "pxor %%xmm4,%%xmm3 \n" + "movd (%1),%%xmm2 \n" + "psrlw $0x8,%%xmm3 \n" + "pshufhw $0xf5,%%xmm3,%%xmm3 \n" + "pshuflw $0xf5,%%xmm3,%%xmm3 \n" + "pand %%xmm6,%%xmm2 \n" + "paddw %%xmm7,%%xmm3 \n" + "pmullw %%xmm3,%%xmm2 \n" + "movd (%1),%%xmm1 \n" + "lea 0x4(%1),%1 \n" + "psrlw $0x8,%%xmm1 \n" + "por %%xmm4,%%xmm0 \n" + "pmullw %%xmm3,%%xmm1 \n" + "psrlw $0x8,%%xmm2 \n" + "paddusb %%xmm2,%%xmm0 \n" + "pand %%xmm5,%%xmm1 \n" + "paddusb %%xmm1,%%xmm0 \n" + "sub $0x1,%3 \n" + "movd %%xmm0,(%2) \n" + "lea 0x4(%2),%2 \n" + "jge 91b \n" + "99: \n" + : "+r"(src_argb0), // %0 + "+r"(src_argb1), // %1 + "+r"(dst_argb), // %2 + "+r"(width) // %3 : : "memory", "cc" #if defined(__SSE2__) @@ -2520,16 +2530,27 @@ void ARGBBlendRow1_SSE2(const uint8* src_argb0, const uint8* src_argb1, #endif ); } -#endif // HAS_ARGBBLENDROW1_SSE2 +#endif // HAS_ARGBBLENDROW_SSE2 #ifdef HAS_ARGBBLENDROW_SSSE3 -// Shuffle table for reversing the bytes. +// Shuffle table for isolating alpha. CONST uvec8 kShuffleAlpha = { 3u, 0x80, 3u, 0x80, 7u, 0x80, 7u, 0x80, 11u, 0x80, 11u, 0x80, 15u, 0x80, 15u, 0x80 }; -void ARGBBlendRow_Aligned_SSSE3(const uint8* src_argb0, const uint8* src_argb1, - uint8* dst_argb, int width) { + +// Blend 8 pixels at a time +// Shuffle table for reversing the bytes. + +// Same as SSE2, but replaces +// psrlw xmm3, 8 // alpha +// pshufhw xmm3, xmm3,0F5h // 8 alpha words +// pshuflw xmm3, xmm3,0F5h +// with.. +// pshufb xmm3, kShuffleAlpha // alpha + +void ARGBBlendRow_SSSE3(const uint8* src_argb0, const uint8* src_argb1, + uint8* dst_argb, int width) { asm volatile ( "pcmpeqb %%xmm7,%%xmm7 \n" "psrlw $0xf,%%xmm7 \n" @@ -2539,10 +2560,44 @@ void ARGBBlendRow_Aligned_SSSE3(const uint8* src_argb0, const uint8* src_argb1, "psllw $0x8,%%xmm5 \n" "pcmpeqb %%xmm4,%%xmm4 \n" "pslld $0x18,%%xmm4 \n" + "sub $0x1,%3 \n" + "je 91f \n" + "jl 99f \n" - // 8 pixel loop - ".p2align 4 \n" - "1: \n" + // 1 pixel loop until destination pointer is aligned. 
+ "10: \n" + "test $0xf,%2 \n" + "je 19f \n" + "movd (%0),%%xmm3 \n" + "lea 0x4(%0),%0 \n" + "movdqa %%xmm3,%%xmm0 \n" + "pxor %%xmm4,%%xmm3 \n" + "movd (%1),%%xmm2 \n" + "pshufb %4,%%xmm3 \n" + "pand %%xmm6,%%xmm2 \n" + "paddw %%xmm7,%%xmm3 \n" + "pmullw %%xmm3,%%xmm2 \n" + "movd (%1),%%xmm1 \n" + "lea 0x4(%1),%1 \n" + "psrlw $0x8,%%xmm1 \n" + "por %%xmm4,%%xmm0 \n" + "pmullw %%xmm3,%%xmm1 \n" + "psrlw $0x8,%%xmm2 \n" + "paddusb %%xmm2,%%xmm0 \n" + "pand %%xmm5,%%xmm1 \n" + "paddusb %%xmm1,%%xmm0 \n" + "sub $0x1,%3 \n" + "movd %%xmm0,(%2) \n" + "lea 0x4(%2),%2 \n" + "jge 10b \n" + + "19: \n" + "add $1-4,%3 \n" + "jl 49f \n" + + // 8 pixel loop. + ".p2align 2 \n" + "41: \n" "movdqu (%0),%%xmm3 \n" "movdqa %%xmm3,%%xmm0 \n" "pxor %%xmm4,%%xmm3 \n" @@ -2558,12 +2613,12 @@ void ARGBBlendRow_Aligned_SSSE3(const uint8* src_argb0, const uint8* src_argb1, "movdqu 0x10(%0),%%xmm3 \n" "lea 0x20(%0),%0 \n" "psrlw $0x8,%%xmm2 \n" - "paddusb %%xmm2,%%xmm0 \n" + "paddusb %%xmm2,%%xmm0 \n" "pand %%xmm5,%%xmm1 \n" - "paddusb %%xmm1,%%xmm0 \n" + "paddusb %%xmm1,%%xmm0 \n" "sub $0x4,%3 \n" "movdqa %%xmm0,(%2) \n" - "jle 9f \n" + "jl 49f \n" "movdqa %%xmm3,%%xmm0 \n" "pxor %%xmm4,%%xmm3 \n" "movdqu 0x10(%1),%%xmm2 \n" @@ -2583,8 +2638,37 @@ void ARGBBlendRow_Aligned_SSSE3(const uint8* src_argb0, const uint8* src_argb1, "sub $0x4,%3 \n" "movdqa %%xmm0,0x10(%2) \n" "lea 0x20(%2),%2 \n" - "jg 1b \n" - "9: \n" + "jge 41b \n" + + "49: \n" + "add $0x3,%3 \n" + "jl 99f \n" + + // 1 pixel loop. + "91: \n" + "movd (%0),%%xmm3 \n" + "lea 0x4(%0),%0 \n" + "movdqa %%xmm3,%%xmm0 \n" + "pxor %%xmm4,%%xmm3 \n" + "movd (%1),%%xmm2 \n" + "pshufb %4,%%xmm3 \n" + "pand %%xmm6,%%xmm2 \n" + "paddw %%xmm7,%%xmm3 \n" + "pmullw %%xmm3,%%xmm2 \n" + "movd (%1),%%xmm1 \n" + "lea 0x4(%1),%1 \n" + "psrlw $0x8,%%xmm1 \n" + "por %%xmm4,%%xmm0 \n" + "pmullw %%xmm3,%%xmm1 \n" + "psrlw $0x8,%%xmm2 \n" + "paddusb %%xmm2,%%xmm0 \n" + "pand %%xmm5,%%xmm1 \n" + "paddusb %%xmm1,%%xmm0 \n" + "sub $0x1,%3 \n" + "movd %%xmm0,(%2) \n" + "lea 0x4(%2),%2 \n" + "jge 91b \n" + "99: \n" : "+r"(src_argb0), // %0 "+r"(src_argb1), // %1 "+r"(dst_argb), // %2 @@ -2598,59 +2682,6 @@ void ARGBBlendRow_Aligned_SSSE3(const uint8* src_argb0, const uint8* src_argb1, } #endif // HAS_ARGBBLENDROW_SSSE3 - -#ifdef HAS_ARGBBLENDROW1_SSSE3 -// Blend 1 pixel at a time, unaligned -void ARGBBlendRow1_SSSE3(const uint8* src_argb0, const uint8* src_argb1, - uint8* dst_argb, int width) { - asm volatile ( - "pcmpeqb %%xmm7,%%xmm7 \n" - "psrlw $0xf,%%xmm7 \n" - "pcmpeqb %%xmm6,%%xmm6 \n" - "psrlw $0x8,%%xmm6 \n" - "pcmpeqb %%xmm5,%%xmm5 \n" - "psllw $0x8,%%xmm5 \n" - "pcmpeqb %%xmm4,%%xmm4 \n" - "pslld $0x18,%%xmm4 \n" - - // 1 pixel loop - ".p2align 4 \n" - "1: \n" - "movd (%0),%%xmm3 \n" - "lea 0x4(%0),%0 \n" - "movdqa %%xmm3,%%xmm0 \n" - "pxor %%xmm4,%%xmm3 \n" - "movd (%1),%%xmm2 \n" - "pshufb %4,%%xmm3 \n" - "pand %%xmm6,%%xmm2 \n" - "paddw %%xmm7,%%xmm3 \n" - "pmullw %%xmm3,%%xmm2 \n" - "movd (%1),%%xmm1 \n" - "lea 0x4(%1),%1 \n" - "psrlw $0x8,%%xmm1 \n" - "por %%xmm4,%%xmm0 \n" - "pmullw %%xmm3,%%xmm1 \n" - "psrlw $0x8,%%xmm2 \n" - "paddusb %%xmm2,%%xmm0 \n" - "pand %%xmm5,%%xmm1 \n" - "paddusb %%xmm1,%%xmm0 \n" - "sub $0x1,%3 \n" - "movd %%xmm0,(%2) \n" - "lea 0x4(%2),%2 \n" - "jg 1b \n" - : "+r"(src_argb0), // %0 - "+r"(src_argb1), // %1 - "+r"(dst_argb), // %2 - "+r"(width) // %3 - : "m"(kShuffleAlpha) // %4 - : "memory", "cc" -#if defined(__SSE2__) - , "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7" -#endif - ); -} -#endif // HAS_ARGBBLENDROW1_SSSE3 - 
#ifdef HAS_ARGBATTENUATE_SSE2 // Attenuate 4 pixels at a time. // aligned to 16 bytes diff --git a/source/row_win.cc b/source/row_win.cc index 30735a91b..88bc808bc 100644 --- a/source/row_win.cc +++ b/source/row_win.cc @@ -1252,8 +1252,8 @@ static const vec16 kUVBiasR = { BR, BR, BR, BR, BR, BR, BR, BR }; // Read 8 UV from 411 #define READYUV444 __asm { \ - __asm movq xmm0, qword ptr [esi] /* U */ \ - __asm movq xmm1, qword ptr [esi + edi] /* V */ \ + __asm movq xmm0, qword ptr [esi] /* U */ /* NOLINT */ \ + __asm movq xmm1, qword ptr [esi + edi] /* V */ /* NOLINT */ \ __asm lea esi, [esi + 8] \ __asm punpcklbw xmm0, xmm1 /* UV */ \ } @@ -1279,7 +1279,7 @@ static const vec16 kUVBiasR = { BR, BR, BR, BR, BR, BR, BR, BR }; // Read 4 UV from NV12, upsample to 8 UV #define READNV12 __asm { \ - __asm movq xmm0, qword ptr [esi] /* UV */ \ + __asm movq xmm0, qword ptr [esi] /* UV */ /* NOLINT */ \ __asm lea esi, [esi + 8] \ __asm punpcklwd xmm0, xmm0 /* UVUV (upsample) */ \ } @@ -2478,13 +2478,9 @@ void UYVYToUVRow_Unaligned_SSE2(const uint8* src_uyvy, int stride_uyvy, #ifdef HAS_ARGBBLENDROW_SSE2 // Blend 8 pixels at a time. -// src_argb0 unaligned. -// src_argb1 and dst_argb aligned to 16 bytes. -// width must be multiple of 4 pixels. -// TODO(fbarchard): handle less than 4 pixels and unaligned pointer __declspec(naked) __declspec(align(16)) -void ARGBBlendRow_Aligned_SSE2(const uint8* src_argb0, const uint8* src_argb1, - uint8* dst_argb, int width) { +void ARGBBlendRow_SSE2(const uint8* src_argb0, const uint8* src_argb1, + uint8* dst_argb, int width) { __asm { push esi mov eax, [esp + 4 + 4] // src_argb0 @@ -2500,18 +2496,56 @@ void ARGBBlendRow_Aligned_SSE2(const uint8* src_argb0, const uint8* src_argb1, pcmpeqb xmm4, xmm4 // generate mask 0xff000000 pslld xmm4, 24 - align 16 - convertloop: - movdqu xmm3, [eax] + sub ecx, 1 + je convertloop1 // only 1 pixel? + jl convertloop1b + + // 1 pixel loop until destination pointer is aligned. + alignloop1: + test edx, 15 // aligned? + je alignloop1b + movd xmm3, [eax] + lea eax, [eax + 4] movdqa xmm0, xmm3 // src argb pxor xmm3, xmm4 // ~alpha - movdqu xmm2, [esi] // _r_b + movd xmm2, [esi] // _r_b psrlw xmm3, 8 // alpha pshufhw xmm3, xmm3,0F5h // 8 alpha words pshuflw xmm3, xmm3,0F5h pand xmm2, xmm6 // _r_b paddw xmm3, xmm7 // 256 - alpha pmullw xmm2, xmm3 // _r_b * alpha + movd xmm1, [esi] // _a_g + lea esi, [esi + 4] + psrlw xmm1, 8 // _a_g + por xmm0, xmm4 // set alpha to 255 + pmullw xmm1, xmm3 // _a_g * alpha + psrlw xmm2, 8 // _r_b convert to 8 bits again + paddusb xmm0, xmm2 // + src argb + pand xmm1, xmm5 // a_g_ convert to 8 bits again + paddusb xmm0, xmm1 // + src argb + sub ecx, 1 + movd [edx], xmm0 + lea edx, [edx + 4] + jge alignloop1 + + alignloop1b: + add ecx, 1 - 4 + jl convertloop4b + + // 8 pixel loop. 
+ align 4 + convertloop4: + movdqu xmm3, [eax] + movdqa xmm0, xmm3 // src argb + pxor xmm3, xmm4 // ~alpha + psrlw xmm3, 8 // alpha + pshufhw xmm3, xmm3,0F5h // 8 alpha words + pshuflw xmm3, xmm3,0F5h + movdqu xmm2, [esi] // _r_b + pand xmm2, xmm6 // _r_b + paddw xmm3, xmm7 // 256 - alpha + pmullw xmm2, xmm3 // _r_b * alpha movdqu xmm1, [esi] // _a_g psrlw xmm1, 8 // _a_g por xmm0, xmm4 // set alpha to 255 @@ -2524,7 +2558,7 @@ void ARGBBlendRow_Aligned_SSE2(const uint8* src_argb0, const uint8* src_argb1, paddusb xmm0, xmm1 // + src argb sub ecx, 4 movdqa [edx], xmm0 - jle done + jl convertloop4b movdqa xmm0, xmm3 // src argb pxor xmm3, xmm4 // ~alpha @@ -2547,19 +2581,62 @@ void ARGBBlendRow_Aligned_SSE2(const uint8* src_argb0, const uint8* src_argb1, sub ecx, 4 movdqa [edx + 16], xmm0 lea edx, [edx + 32] - jg convertloop + jge convertloop4 - done: + convertloop4b: + add ecx, 4 - 1 + jl convertloop1b + + // 1 pixel loop. + convertloop1: + movd xmm3, [eax] + lea eax, [eax + 4] + movdqa xmm0, xmm3 // src argb + pxor xmm3, xmm4 // ~alpha + movd xmm2, [esi] // _r_b + psrlw xmm3, 8 // alpha + pshufhw xmm3, xmm3,0F5h // 8 alpha words + pshuflw xmm3, xmm3,0F5h + pand xmm2, xmm6 // _r_b + paddw xmm3, xmm7 // 256 - alpha + pmullw xmm2, xmm3 // _r_b * alpha + movd xmm1, [esi] // _a_g + lea esi, [esi + 4] + psrlw xmm1, 8 // _a_g + por xmm0, xmm4 // set alpha to 255 + pmullw xmm1, xmm3 // _a_g * alpha + psrlw xmm2, 8 // _r_b convert to 8 bits again + paddusb xmm0, xmm2 // + src argb + pand xmm1, xmm5 // a_g_ convert to 8 bits again + paddusb xmm0, xmm1 // + src argb + sub ecx, 1 + movd [edx], xmm0 + lea edx, [edx + 4] + jge convertloop1 + + convertloop1b: pop esi ret } } #endif // HAS_ARGBBLENDROW_SSE2 -#ifdef HAS_ARGBBLENDROW1_SSE2 -// Blend 1 pixel at a time, unaligned. +#ifdef HAS_ARGBBLENDROW_SSSE3 +// Shuffle table for isolating alpha. +static const uvec8 kShuffleAlpha = { + 3u, 0x80, 3u, 0x80, 7u, 0x80, 7u, 0x80, + 11u, 0x80, 11u, 0x80, 15u, 0x80, 15u, 0x80 +}; +// Same as SSE2, but replaces +// psrlw xmm3, 8 // alpha +// pshufhw xmm3, xmm3,0F5h // 8 alpha words +// pshuflw xmm3, xmm3,0F5h +// with.. +// pshufb xmm3, kShuffleAlpha // alpha +// Blend 8 pixels at a time + __declspec(naked) __declspec(align(16)) -void ARGBBlendRow1_SSE2(const uint8* src_argb0, const uint8* src_argb1, +void ARGBBlendRow_SSSE3(const uint8* src_argb0, const uint8* src_argb1, uint8* dst_argb, int width) { __asm { push esi @@ -2576,16 +2653,20 @@ void ARGBBlendRow1_SSE2(const uint8* src_argb0, const uint8* src_argb1, pcmpeqb xmm4, xmm4 // generate mask 0xff000000 pslld xmm4, 24 - align 16 - convertloop: + sub ecx, 1 + je convertloop1 // only 1 pixel? + jl convertloop1b + + // 1 pixel loop until destination pointer is aligned. + alignloop1: + test edx, 15 // aligned? + je alignloop1b movd xmm3, [eax] lea eax, [eax + 4] movdqa xmm0, xmm3 // src argb pxor xmm3, xmm4 // ~alpha movd xmm2, [esi] // _r_b - psrlw xmm3, 8 // alpha - pshufhw xmm3, xmm3,0F5h // 8 alpha words - pshuflw xmm3, xmm3,0F5h + pshufb xmm3, kShuffleAlpha // alpha pand xmm2, xmm6 // _r_b paddw xmm3, xmm7 // 256 - alpha pmullw xmm2, xmm3 // _r_b * alpha @@ -2601,52 +2682,15 @@ void ARGBBlendRow1_SSE2(const uint8* src_argb0, const uint8* src_argb1, sub ecx, 1 movd [edx], xmm0 lea edx, [edx + 4] - jg convertloop + jge alignloop1 - pop esi - ret - } -} -#endif // HAS_ARGBBLENDROW1_SSE2 + alignloop1b: + add ecx, 1 - 4 + jl convertloop4b -#ifdef HAS_ARGBBLENDROW_SSSE3 -// Shuffle table for reversing the bytes. 
-static const uvec8 kShuffleAlpha = { - 3u, 0x80, 3u, 0x80, 7u, 0x80, 7u, 0x80, - 11u, 0x80, 11u, 0x80, 15u, 0x80, 15u, 0x80 -}; - -// Blend 8 pixels at a time -// Shuffle table for reversing the bytes. - -// Same as SSE2, but replaces -// psrlw xmm3, 8 // alpha -// pshufhw xmm3, xmm3,0F5h // 8 alpha words -// pshuflw xmm3, xmm3,0F5h -// with.. -// pshufb xmm3, kShuffleAlpha // alpha - -// Destination aligned to 16 bytes, multiple of 4 pixels. -__declspec(naked) __declspec(align(16)) -void ARGBBlendRow_Aligned_SSSE3(const uint8* src_argb0, const uint8* src_argb1, - uint8* dst_argb, int width) { - __asm { - push esi - mov eax, [esp + 4 + 4] // src_argb0 - mov esi, [esp + 4 + 8] // src_argb1 - mov edx, [esp + 4 + 12] // dst_argb - mov ecx, [esp + 4 + 16] // width - pcmpeqb xmm7, xmm7 // generate constant 1 - psrlw xmm7, 15 - pcmpeqb xmm6, xmm6 // generate mask 0x00ff00ff - psrlw xmm6, 8 - pcmpeqb xmm5, xmm5 // generate mask 0xff00ff00 - psllw xmm5, 8 - pcmpeqb xmm4, xmm4 // generate mask 0xff000000 - pslld xmm4, 24 - - align 16 - convertloop: + // 8 pixel loop. + align 4 + convertloop4: movdqu xmm3, [eax] movdqa xmm0, xmm3 // src argb pxor xmm3, xmm4 // ~alpha @@ -2667,7 +2711,7 @@ void ARGBBlendRow_Aligned_SSSE3(const uint8* src_argb0, const uint8* src_argb1, paddusb xmm0, xmm1 // + src argb sub ecx, 4 movdqa [edx], xmm0 - jle done + jl convertloop4b movdqa xmm0, xmm3 // src argb pxor xmm3, xmm4 // ~alpha @@ -2688,37 +2732,14 @@ void ARGBBlendRow_Aligned_SSSE3(const uint8* src_argb0, const uint8* src_argb1, sub ecx, 4 movdqa [edx + 16], xmm0 lea edx, [edx + 32] - jg convertloop + jge convertloop4 - done: - pop esi - ret - } -} -#endif // HAS_ARGBBLENDROW_SSSE3 + convertloop4b: + add ecx, 4 - 1 + jl convertloop1b -#ifdef HAS_ARGBBLENDROW1_SSSE3 -// Blend 1 pixel at a time, unaligned. -__declspec(naked) __declspec(align(16)) -void ARGBBlendRow1_SSSE3(const uint8* src_argb0, const uint8* src_argb1, - uint8* dst_argb, int width) { - __asm { - push esi - mov eax, [esp + 4 + 4] // src_argb0 - mov esi, [esp + 4 + 8] // src_argb1 - mov edx, [esp + 4 + 12] // dst_argb - mov ecx, [esp + 4 + 16] // width - pcmpeqb xmm7, xmm7 // generate constant 1 - psrlw xmm7, 15 - pcmpeqb xmm6, xmm6 // generate mask 0x00ff00ff - psrlw xmm6, 8 - pcmpeqb xmm5, xmm5 // generate mask 0xff00ff00 - psllw xmm5, 8 - pcmpeqb xmm4, xmm4 // generate mask 0xff000000 - pslld xmm4, 24 - - align 16 - convertloop: + // 1 pixel loop. + convertloop1: movd xmm3, [eax] lea eax, [eax + 4] movdqa xmm0, xmm3 // src argb @@ -2740,13 +2761,14 @@ void ARGBBlendRow1_SSSE3(const uint8* src_argb0, const uint8* src_argb1, sub ecx, 1 movd [edx], xmm0 lea edx, [edx + 4] - jg convertloop + jge convertloop1 + convertloop1b: pop esi ret } } -#endif // HAS_ARGBBLENDROW1_SSSE3 +#endif // HAS_ARGBBLENDROW_SSSE3 #ifdef HAS_ARGBATTENUATE_SSE2 // Attenuate 4 pixels at a time.
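Reviewer note (not part of the patch): every ARGBBlendRow variant touched above, C, SSE2 and SSSE3, computes the same "preattenuated" per-pixel alpha blend that the asm comments spell out: each destination channel becomes src + ((dst * (256 - src_alpha)) >> 8) with unsigned saturation (paddusb), and the result alpha is forced to 255 (the "por xmm0, xmm4 / set alpha to 255" step). Because the foreground is assumed premultiplied, it is added without scaling. A minimal scalar sketch of that math, assuming little-endian B,G,R,A byte order; BlendChannel and ARGBBlendRowSketch are illustrative names, not libyuv functions:

// Reviewer sketch only; not part of libyuv.
#include <stdint.h>
#include <stdio.h>

// One channel of the blend: dst = src + dst * (256 - src_alpha) / 256,
// saturated to 255, mirroring "paddw xmm7" (256 - alpha), "pmullw",
// "psrlw 8" and "paddusb" in the row functions above.
static uint8_t BlendChannel(uint32_t f, uint32_t b, uint32_t a) {
  uint32_t v = f + (((256u - a) * b) >> 8);
  return (uint8_t)(v > 255u ? 255u : v);
}

// Scalar equivalent of one blend row; GetARGBBlend() picks the SSSE3,
// SSE2 or C row function and ARGBBlend() calls it once per row.
static void ARGBBlendRowSketch(const uint8_t* src_argb0,
                               const uint8_t* src_argb1,
                               uint8_t* dst_argb, int width) {
  for (int x = 0; x < width; ++x) {
    uint32_t a = src_argb0[3];                                  // src alpha
    dst_argb[0] = BlendChannel(src_argb0[0], src_argb1[0], a);  // B
    dst_argb[1] = BlendChannel(src_argb0[1], src_argb1[1], a);  // G
    dst_argb[2] = BlendChannel(src_argb0[2], src_argb1[2], a);  // R
    dst_argb[3] = 255u;                 // "por xmm0, xmm4": alpha set to 255
    src_argb0 += 4;
    src_argb1 += 4;
    dst_argb += 4;
  }
}

int main(void) {
  uint8_t src[4] = {0, 0, 255, 128};  // foreground pixel: B,G,R,A
  uint8_t bkg[4] = {255, 0, 0, 255};  // background pixel
  uint8_t dst[4];
  ARGBBlendRowSketch(src, bkg, dst, 1);
  printf("B=%d G=%d R=%d A=%d\n", dst[0], dst[1], dst[2], dst[3]);
  // prints B=127 G=0 R=255 A=255
  return 0;
}

Since GetARGBBlend() no longer takes the destination pointer, stride or width, a caller fetches the row function once per image and reuses it for every row; the new 10/19/41/49/91/99 label structure inside the SSE2 and SSSE3 rows now handles destination alignment and the 1 to 3 leftover pixels that the removed _Any_/_Aligned_/Row1 wrappers used to split out.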