diff --git a/README.chromium b/README.chromium
index 5a5e728d9..59b47dc93 100644
--- a/README.chromium
+++ b/README.chromium
@@ -1,6 +1,6 @@
 Name: libyuv
 URL: http://code.google.com/p/libyuv/
-Version: 669
+Version: 670
 License File: LICENSE
 
 Description:
diff --git a/include/libyuv/version.h b/include/libyuv/version.h
index dd1da19a4..a784b222b 100644
--- a/include/libyuv/version.h
+++ b/include/libyuv/version.h
@@ -11,6 +11,6 @@
 #ifndef INCLUDE_LIBYUV_VERSION_H_  // NOLINT
 #define INCLUDE_LIBYUV_VERSION_H_
 
-#define LIBYUV_VERSION 669
+#define LIBYUV_VERSION 670
 
 #endif  // INCLUDE_LIBYUV_VERSION_H_  NOLINT
diff --git a/source/planar_functions.cc b/source/planar_functions.cc
index a096d74ab..2133f4e2b 100644
--- a/source/planar_functions.cc
+++ b/source/planar_functions.cc
@@ -558,10 +558,7 @@ int ARGBMultiply(const uint8* src_argb0, int src_stride_argb0,
   void (*ARGBMultiplyRow)(const uint8* src0, const uint8* src1, uint8* dst,
                           int width) = ARGBMultiplyRow_C;
 #if defined(HAS_ARGBMULTIPLYROW_SSE2)
-  if (TestCpuFlag(kCpuHasSSE2) && width >= 4 &&
-      IS_ALIGNED(src_argb0, 16) && IS_ALIGNED(src_stride_argb0, 16) &&
-      IS_ALIGNED(src_argb1, 16) && IS_ALIGNED(src_stride_argb1, 16) &&
-      IS_ALIGNED(dst_argb, 16) && IS_ALIGNED(dst_stride_argb, 16)) {
+  if (TestCpuFlag(kCpuHasSSE2) && width >= 4) {
     ARGBMultiplyRow = ARGBMultiplyRow_Any_SSE2;
     if (IS_ALIGNED(width, 4)) {
       ARGBMultiplyRow = ARGBMultiplyRow_SSE2;
@@ -622,11 +619,13 @@ int ARGBAdd(const uint8* src_argb0, int src_stride_argb0,
   void (*ARGBAddRow)(const uint8* src0, const uint8* src1, uint8* dst,
                      int width) = ARGBAddRow_C;
 
-#if defined(HAS_ARGBADDROW_SSE2)
-  if (TestCpuFlag(kCpuHasSSE2) && width >= 4 &&
-      IS_ALIGNED(src_argb0, 16) && IS_ALIGNED(src_stride_argb0, 16) &&
-      IS_ALIGNED(src_argb1, 16) && IS_ALIGNED(src_stride_argb1, 16) &&
-      IS_ALIGNED(dst_argb, 16) && IS_ALIGNED(dst_stride_argb, 16)) {
+#if defined(HAS_ARGBADDROW_SSE2) && defined(_MSC_VER)
+  if (TestCpuFlag(kCpuHasSSE2)) {
+    ARGBAddRow = ARGBAddRow_SSE2;
+  }
+#endif
+#if defined(HAS_ARGBADDROW_SSE2) && !defined(_MSC_VER)
+  if (TestCpuFlag(kCpuHasSSE2) && width >= 4) {
     ARGBAddRow = ARGBAddRow_Any_SSE2;
     if (IS_ALIGNED(width, 4)) {
       ARGBAddRow = ARGBAddRow_SSE2;
@@ -688,10 +687,7 @@ int ARGBSubtract(const uint8* src_argb0, int src_stride_argb0,
   void (*ARGBSubtractRow)(const uint8* src0, const uint8* src1, uint8* dst,
                           int width) = ARGBSubtractRow_C;
 #if defined(HAS_ARGBSUBTRACTROW_SSE2)
-  if (TestCpuFlag(kCpuHasSSE2) && width >= 4 &&
-      IS_ALIGNED(src_argb0, 16) && IS_ALIGNED(src_stride_argb0, 16) &&
-      IS_ALIGNED(src_argb1, 16) && IS_ALIGNED(src_stride_argb1, 16) &&
-      IS_ALIGNED(dst_argb, 16) && IS_ALIGNED(dst_stride_argb, 16)) {
+  if (TestCpuFlag(kCpuHasSSE2) && width >= 4) {
    ARGBSubtractRow = ARGBSubtractRow_Any_SSE2;
     if (IS_ALIGNED(width, 4)) {
       ARGBSubtractRow = ARGBSubtractRow_SSE2;
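The three dispatcher hunks above share one idea: once the row kernels switch to unaligned loads and stores (see the row_posix.cc and row_win.cc hunks below), the SSE2 path no longer needs 16-byte alignment of the pointers and strides, so selection reduces to a CPU-flag test plus a minimum width. ARGBAdd additionally gains an MSVC-only branch that picks ARGBAddRow_SSE2 unconditionally, because the rewritten Visual C++ kernel below handles any width. A minimal C sketch of the reduced selection pattern; PickAddRow and the stub row functions are illustrative stand-ins, not libyuv API:

    #include <stdint.h>

    typedef void (*ARGBAddRowFn)(const uint8_t* src0, const uint8_t* src1,
                                 uint8_t* dst, int width);

    /* Portable fallback: any width (width is in pixels, 4 bytes each). */
    static void AddRow_C(const uint8_t* s0, const uint8_t* s1,
                         uint8_t* d, int width) {
      for (int i = 0; i < width * 4; ++i) {
        int sum = s0[i] + s1[i];
        d[i] = (uint8_t)(sum > 255 ? 255 : sum);  /* saturate, like paddusb */
      }
    }

    /* Stubs: in libyuv the _Any_ variant runs the SIMD body on the first
     * width & ~3 pixels and a C tail on the rest, while the plain _SSE2
     * variant assumes an exact multiple of 4 pixels. Both defer to the
     * C loop here so the sketch stays self-contained. */
    static void AddRow_Any_SSE2(const uint8_t* s0, const uint8_t* s1,
                                uint8_t* d, int w) { AddRow_C(s0, s1, d, w); }
    static void AddRow_SSE2(const uint8_t* s0, const uint8_t* s1,
                            uint8_t* d, int w) { AddRow_C(s0, s1, d, w); }

    /* The reduced dispatch: CPU flag and width only, no pointer alignment. */
    static ARGBAddRowFn PickAddRow(int has_sse2, int width) {
      ARGBAddRowFn fn = AddRow_C;
      if (has_sse2 && width >= 4) {
        fn = AddRow_Any_SSE2;        /* any width >= 4 */
        if ((width & 3) == 0) {
          fn = AddRow_SSE2;          /* exact multiple of 4 pixels */
        }
      }
      return fn;
    }

The _Any_ tier exists so that rows whose width is not a multiple of 4 still get SIMD for the bulk of the pixels.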
diff --git a/source/row_common.cc b/source/row_common.cc
index e336d6557..34ec3f2dd 100644
--- a/source/row_common.cc
+++ b/source/row_common.cc
@@ -732,7 +732,14 @@ void ARGBMultiplyRow_C(const uint8* src_argb0, const uint8* src_argb1,
 #undef REPEAT8
 #undef SHADE
 
-#define SHADE(f, v) ((v + f) > 255) ? 255 : (v + f)
+#ifdef __llvm__
+#define min0(v) ((-(v) >> 31) & (v))
+#define max255(v) (((256 - (v)) >> 31) | (v))
+#else
+#define min0(v) (((v) < 0) ? 0 : v)
+#define max255(v) (((v) > 255) ? 255 : (v))
+#endif
+#define SHADE(f, v) max255(v + f)
 
 void ARGBAddRow_C(const uint8* src_argb0, const uint8* src_argb1,
                   uint8* dst_argb, int width) {
@@ -756,7 +763,7 @@ void ARGBAddRow_C(const uint8* src_argb0, const uint8* src_argb1,
 }
 #undef SHADE
 
-#define SHADE(f, v) ((f - v) < 0) ? 0 : (f - v)
+#define SHADE(f, v) min0(f - v)
 
 void ARGBSubtractRow_C(const uint8* src_argb0, const uint8* src_argb1,
                        uint8* dst_argb, int width) {
@@ -779,6 +786,8 @@ void ARGBSubtractRow_C(const uint8* src_argb0, const uint8* src_argb1,
   }
 }
 #undef SHADE
+#undef min0
+#undef max255
 
 // Sobel functions which mimics SSSE3.
 void SobelXRow_C(const uint8* src_y0, const uint8* src_y1, const uint8* src_y2,
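The __llvm__ branch above swaps the ternary clamps for branchless bit tricks, presumably because llvm's codegen for the ternary form was worse here. min0(v) uses the sign of -(v) to build an all-ones or all-zero mask, and max255(v) ORs v with the sign of 256 - v, producing an all-ones value that truncates to 255 when stored into a uint8. Both assume an arithmetic (sign-extending) right shift of a negative int, which is implementation-defined in C but holds for the compilers libyuv targets. One edge case worth noting: 256 - v is zero, not negative, at v == 256, so max255(256) evaluates to 256 and the byte store truncates it to 0 rather than 255. A small self-contained harness (not part of the patch) that checks both macros against their ternary forms over the ranges the C kernels actually produce:

    #include <stdint.h>
    #include <stdio.h>

    /* Branchless clamps from the __llvm__ path above; both rely on an
     * arithmetic right shift of a negative 32-bit int. */
    #define min0(v) ((-(v) >> 31) & (v))          /* 0 if v < 0, else v  */
    #define max255(v) (((256 - (v)) >> 31) | (v)) /* all-ones if v > 256 */

    int main(void) {
      /* ARGBSubtractRow_C feeds differences in [-255, 255] to min0. */
      for (int v = -255; v <= 255; ++v) {
        if ((uint8_t)min0(v) != (uint8_t)((v < 0) ? 0 : v)) {
          printf("min0 mismatch at %d\n", v);
        }
      }
      /* ARGBAddRow_C feeds sums in [0, 510] to max255. For v > 256 the
       * shift yields -1 (all bits set), which truncates to 255 on the
       * uint8 store, matching the ternary. */
      for (int v = 0; v <= 510; ++v) {
        if ((uint8_t)max255(v) != (uint8_t)((v > 255) ? 255 : v)) {
          printf("max255 mismatch at %d\n", v);
        }
      }
      return 0;
    }

On a two's-complement compiler with arithmetic right shift, this reports exactly one mismatch, max255 at v == 256, confirming the edge case described above; min0 matches everywhere.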
diff --git a/source/row_posix.cc b/source/row_posix.cc
index 59979a7ff..6acdba9ce 100644
--- a/source/row_posix.cc
+++ b/source/row_posix.cc
@@ -4201,7 +4201,6 @@ void ARGBShadeRow_SSE2(const uint8* src_argb, uint8* dst_argb, int width,
 
 #ifdef HAS_ARGBMULTIPLYROW_SSE2
 // Multiply 2 rows of ARGB pixels together, 4 pixels at a time.
-// Aligned to 16 bytes.
 void ARGBMultiplyRow_SSE2(const uint8* src_argb0, const uint8* src_argb1,
                           uint8* dst_argb, int width) {
   asm volatile (
@@ -4212,10 +4211,10 @@ void ARGBMultiplyRow_SSE2(const uint8* src_argb0, const uint8* src_argb1,
     // 4 pixel loop.
     ".p2align  4                               \n"
   "1:                                          \n"
-    "movdqa    (%0),%%xmm0                     \n"
-    "movdqa    (%0,%1),%%xmm2                  \n"
-    "movdqa    %%xmm0,%%xmm1                   \n"
-    "movdqa    %%xmm2,%%xmm3                   \n"
+    "movdqu    (%0),%%xmm0                     \n"
+    "movdqu    (%0,%1),%%xmm2                  \n"
+    "movdqu    %%xmm0,%%xmm1                   \n"
+    "movdqu    %%xmm2,%%xmm3                   \n"
     "punpcklbw %%xmm0,%%xmm0                   \n"
     "punpckhbw %%xmm1,%%xmm1                   \n"
     "punpcklbw %%xmm5,%%xmm2                   \n"
@@ -4224,7 +4223,7 @@ void ARGBMultiplyRow_SSE2(const uint8* src_argb0, const uint8* src_argb1,
     "pmulhuw   %%xmm3,%%xmm1                   \n"
     "packuswb  %%xmm1,%%xmm0                   \n"
     "sub       $0x4,%3                         \n"
-    "movdqa    %%xmm0,(%0,%2,1)                \n"
+    "movdqu    %%xmm0,(%0,%2,1)                \n"
     "lea       0x10(%0),%0                     \n"
     "jg        1b                              \n"
   : "+r"(src_argb0),    // %0
@@ -4242,7 +4241,6 @@ void ARGBMultiplyRow_SSE2(const uint8* src_argb0, const uint8* src_argb1,
 
 #ifdef HAS_ARGBADDROW_SSE2
 // Add 2 rows of ARGB pixels together, 4 pixels at a time.
-// Aligned to 16 bytes.
 void ARGBAddRow_SSE2(const uint8* src_argb0, const uint8* src_argb1,
                      uint8* dst_argb, int width) {
   asm volatile (
@@ -4252,11 +4250,11 @@ void ARGBAddRow_SSE2(const uint8* src_argb0, const uint8* src_argb1,
     // 4 pixel loop.
     ".p2align  4                               \n"
   "1:                                          \n"
-    "movdqa    (%0),%%xmm0                     \n"
-    "movdqa    (%0,%1),%%xmm1                  \n"
+    "movdqu    (%0),%%xmm0                     \n"
+    "movdqu    (%0,%1),%%xmm1                  \n"
     "paddusb   %%xmm1,%%xmm0                   \n"
     "sub       $0x4,%3                         \n"
-    "movdqa    %%xmm0,(%0,%2,1)                \n"
+    "movdqu    %%xmm0,(%0,%2,1)                \n"
     "lea       0x10(%0),%0                     \n"
     "jg        1b                              \n"
   : "+r"(src_argb0),    // %0
@@ -4274,7 +4272,6 @@ void ARGBAddRow_SSE2(const uint8* src_argb0, const uint8* src_argb1,
 
 #ifdef HAS_ARGBSUBTRACTROW_SSE2
 // Subtract 2 rows of ARGB pixels, 4 pixels at a time.
-// Aligned to 16 bytes.
 void ARGBSubtractRow_SSE2(const uint8* src_argb0, const uint8* src_argb1,
                           uint8* dst_argb, int width) {
   asm volatile (
@@ -4284,11 +4281,11 @@ void ARGBSubtractRow_SSE2(const uint8* src_argb0, const uint8* src_argb1,
     // 4 pixel loop.
     ".p2align  4                               \n"
   "1:                                          \n"
-    "movdqa    (%0),%%xmm0                     \n"
-    "movdqa    (%0,%1),%%xmm1                  \n"
+    "movdqu    (%0),%%xmm0                     \n"
+    "movdqu    (%0,%1),%%xmm1                  \n"
     "psubusb   %%xmm1,%%xmm0                   \n"
     "sub       $0x4,%3                         \n"
-    "movdqa    %%xmm0,(%0,%2,1)                \n"
+    "movdqu    %%xmm0,(%0,%2,1)                \n"
     "lea       0x10(%0),%0                     \n"
     "jg        1b                              \n"
   : "+r"(src_argb0),    // %0
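The row_posix.cc hunks are mechanical: every movdqa (aligned access, faults on an unaligned address) becomes movdqu (unaligned access), which is what lets the dispatchers above drop their IS_ALIGNED checks; on Nehalem-class and newer cores movdqu on data that happens to be aligned costs about the same as movdqa. For reference, a standalone intrinsics rendering of what the rewritten ARGBAddRow_SSE2 loop now does; this is an illustrative equivalent, not libyuv's code, and like the assembly it assumes width is a positive multiple of 4 pixels:

    #include <emmintrin.h>  /* SSE2 intrinsics */
    #include <stdint.h>

    /* Unaligned 16-byte loads/stores (movdqu) plus a saturating unsigned
     * byte add (_mm_adds_epu8 compiles to paddusb). width is in pixels,
     * 4 bytes per pixel. */
    static void ARGBAddRow_SSE2_sketch(const uint8_t* src_argb0,
                                       const uint8_t* src_argb1,
                                       uint8_t* dst_argb, int width) {
      for (int i = 0; i < width * 4; i += 16) {  /* 4 ARGB pixels per pass */
        __m128i a = _mm_loadu_si128((const __m128i*)(src_argb0 + i));
        __m128i b = _mm_loadu_si128((const __m128i*)(src_argb1 + i));
        _mm_storeu_si128((__m128i*)(dst_argb + i), _mm_adds_epu8(a, b));
      }
    }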
diff --git a/source/row_win.cc b/source/row_win.cc
index 3e3c2666d..59a58d726 100644
--- a/source/row_win.cc
+++ b/source/row_win.cc
@@ -5223,7 +5223,6 @@ void ARGBShadeRow_SSE2(const uint8* src_argb, uint8* dst_argb, int width,
 
 #ifdef HAS_ARGBMULTIPLYROW_SSE2
 // Multiply 2 rows of ARGB pixels together, 4 pixels at a time.
-// Aligned to 16 bytes.
 __declspec(naked) __declspec(align(16))
 void ARGBMultiplyRow_SSE2(const uint8* src_argb0, const uint8* src_argb1,
                           uint8* dst_argb, int width) {
@@ -5239,10 +5238,10 @@ void ARGBMultiplyRow_SSE2(const uint8* src_argb0, const uint8* src_argb1,
 
     align      16
  convertloop:
-    movdqa     xmm0, [eax]        // read 4 pixels from src_argb0
-    movdqa     xmm2, [eax + esi]  // read 4 pixels from src_argb1
-    movdqa     xmm1, xmm0
-    movdqa     xmm3, xmm2
+    movdqu     xmm0, [eax]        // read 4 pixels from src_argb0
+    movdqu     xmm2, [eax + esi]  // read 4 pixels from src_argb1
+    movdqu     xmm1, xmm0
+    movdqu     xmm3, xmm2
     punpcklbw  xmm0, xmm0         // first 2
     punpckhbw  xmm1, xmm1         // next 2
     punpcklbw  xmm2, xmm5         // first 2
@@ -5251,7 +5250,7 @@ void ARGBMultiplyRow_SSE2(const uint8* src_argb0, const uint8* src_argb1,
     pmulhuw    xmm1, xmm3         // src_argb0 * src_argb1 next 2
     packuswb   xmm0, xmm1
     sub        ecx, 4
-    movdqa     [eax + edx], xmm0
+    movdqu     [eax + edx], xmm0
     lea        eax, [eax + 16]
     jg         convertloop
 
@@ -5263,7 +5262,7 @@ void ARGBMultiplyRow_SSE2(const uint8* src_argb0, const uint8* src_argb1,
 
 #ifdef HAS_ARGBADDROW_SSE2
 // Add 2 rows of ARGB pixels together, 4 pixels at a time.
-// Aligned to 16 bytes.
+// TODO(fbarchard): Port this to posix, neon and other math functions.
 __declspec(naked) __declspec(align(16))
 void ARGBAddRow_SSE2(const uint8* src_argb0, const uint8* src_argb1,
                      uint8* dst_argb, int width) {
@@ -5273,20 +5272,36 @@ void ARGBAddRow_SSE2(const uint8* src_argb0, const uint8* src_argb1,
     mov        esi, [esp + 4 + 8]   // src_argb1
     mov        edx, [esp + 4 + 12]  // dst_argb
     mov        ecx, [esp + 4 + 16]  // width
-    pxor       xmm5, xmm5  // constant 0
     sub        esi, eax
     sub        edx, eax
+    sub        ecx, 4
+    jl         convertloop49
+
 
     align      16
- convertloop:
-    movdqa     xmm0, [eax]        // read 4 pixels from src_argb0
-    movdqa     xmm1, [eax + esi]  // read 4 pixels from src_argb1
+ convertloop4:
+    movdqu     xmm0, [eax]        // read 4 pixels from src_argb0
+    movdqu     xmm1, [eax + esi]  // read 4 pixels from src_argb1
     paddusb    xmm0, xmm1         // src_argb0 + src_argb1
     sub        ecx, 4
-    movdqa     [eax + edx], xmm0
+    movdqu     [eax + edx], xmm0
     lea        eax, [eax + 16]
-    jg         convertloop
+    jge        convertloop4
 
+ convertloop49:
+    add        ecx, 4 - 1
+    jl         convertloop19
+
+ convertloop1:
+    movd       xmm0, [eax]        // read 1 pixel from src_argb0
+    movd       xmm1, [eax + esi]  // read 1 pixel from src_argb1
+    paddusb    xmm0, xmm1         // src_argb0 + src_argb1
+    sub        ecx, 1
+    movd       [eax + edx], xmm0
+    lea        eax, [eax + 4]
+    jge        convertloop1
+
+ convertloop19:
     pop        esi
     ret
   }
@@ -5295,7 +5310,6 @@ void ARGBAddRow_SSE2(const uint8* src_argb0, const uint8* src_argb1,
 
 #ifdef HAS_ARGBSUBTRACTROW_SSE2
 // Subtract 2 rows of ARGB pixels together, 4 pixels at a time.
-// Aligned to 16 bytes.
 __declspec(naked) __declspec(align(16))
 void ARGBSubtractRow_SSE2(const uint8* src_argb0, const uint8* src_argb1,
                           uint8* dst_argb, int width) {
@@ -5310,11 +5324,11 @@ void ARGBSubtractRow_SSE2(const uint8* src_argb0, const uint8* src_argb1,
 
     align      16
  convertloop:
-    movdqa     xmm0, [eax]        // read 4 pixels from src_argb0
-    movdqa     xmm1, [eax + esi]  // read 4 pixels from src_argb1
+    movdqu     xmm0, [eax]        // read 4 pixels from src_argb0
+    movdqu     xmm1, [eax + esi]  // read 4 pixels from src_argb1
     psubusb    xmm0, xmm1         // src_argb0 - src_argb1
     sub        ecx, 4
-    movdqa     [eax + edx], xmm0
+    movdqu     [eax + edx], xmm0
     lea        eax, [eax + 16]
     jg         convertloop
 
@@ -5373,7 +5387,6 @@ void ARGBAddRow_AVX2(const uint8* src_argb0, const uint8* src_argb1,
     mov        esi, [esp + 4 + 8]   // src_argb1
     mov        edx, [esp + 4 + 12]  // dst_argb
     mov        ecx, [esp + 4 + 16]  // width
-    vpxor      ymm5, ymm5, ymm5     // constant 0
     sub        esi, eax
     sub        edx, eax
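Beyond the movdqa-to-movdqu conversion, the Visual C++ ARGBAddRow_SSE2 is restructured to handle any width, which is what permits the unconditional _MSC_VER dispatch in planar_functions.cc: ecx is pre-decremented by 4 so convertloop4 keeps running while at least 4 pixels remain (jge instead of jg), then "add ecx, 4 - 1" converts the remaining deficit into a count for a movd-based single-pixel tail. The pxor/vpxor deletions simply drop a zeroed register the add kernels never used. A scalar C sketch of that control flow; sat_add_u8 and AddRowAnyWidth are illustrative names, not libyuv functions:

    #include <stdint.h>

    /* Per-byte saturating add: what paddusb does to each of its bytes. */
    static uint8_t sat_add_u8(uint8_t a, uint8_t b) {
      int sum = a + b;
      return (uint8_t)(sum > 255 ? 255 : sum);
    }

    /* Control flow of the rewritten MSVC ARGBAddRow_SSE2: a 4-pixel main
     * loop (16 bytes per pass, the movdqu/paddusb block) followed by a
     * 1-pixel tail (4 bytes per pass, the movd/paddusb block). */
    static void AddRowAnyWidth(const uint8_t* s0, const uint8_t* s1,
                               uint8_t* d, int width) {
      int i = 0;
      for (; i + 4 <= width; i += 4) {        /* convertloop4 */
        for (int b = 0; b < 16; ++b) {
          d[i * 4 + b] = sat_add_u8(s0[i * 4 + b], s1[i * 4 + b]);
        }
      }
      for (; i < width; ++i) {                /* convertloop1 */
        for (int b = 0; b < 4; ++b) {
          d[i * 4 + b] = sat_add_u8(s0[i * 4 + b], s1[i * 4 + b]);
        }
      }
    }

Because the tail loop handles widths 1 through 3 (and the main loop simply never runs for them), the kernel no longer needs the width >= 4 and IS_ALIGNED(width, 4) guards in its dispatcher; the TODO above notes that the posix and NEON kernels have not yet received the same treatment.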