/*
 *  Copyright (c) 2011 The LibYuv project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "libyuv/compare.h"

#include <float.h>  // For DBL_MAX.
#include <math.h>   // For log10.
#ifdef _OPENMP
#include <omp.h>
#endif

#include "libyuv/basic_types.h"
#include "libyuv/cpu_id.h"
#include "source/row.h"

#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif

// hash seed of 5381 recommended.
// Internal C version of HashDjb2 with int sized count for efficiency.
static uint32 HashDjb2_C(const uint8* src, int count, uint32 seed) {
  uint32 hash = seed;
  for (int i = 0; i < count; ++i) {
    hash += (hash << 5) + src[i];  // hash = hash * 33 + src[i], mod 2^32.
  }
  return hash;
}

// This module is for Visual C x86.
#if !defined(YUV_DISABLE_ASM) && defined(_M_IX86)
#define HAS_HASHDJB2_SSE41
static const vec32 kMulL33 = {
  0xEC41D4E1,  // 33 * 33 * 33 * 33 * 33 * 33 * 33
  33 * 33 * 33 * 33 * 33 * 33,
  33 * 33 * 33 * 33 * 33,
  33 * 33 * 33 * 33 * 1
};
static const vec32 kMulH33 = {
  33 * 33 * 33,
  33 * 33,
  33,
  1
};
static const vec32 kHash4x33 = {
  33 * 33 * 33 * 33, 0, 0, 0
};
static const vec32 kHash8x33 = {
  0x747C7101,  // 33 * 33 * 33 * 33 * 33 * 33 * 33 * 33
  0, 0, 0
};

// hash0 = initial state
// hash1 = hash0 * 33 + src[0]
// hash2 = hash1 * 33 + src[1] = (hash0 * 33 + src[0]) * 33 + src[1]
// hash3 = hash2 * 33 + src[2] = (hash1 * 33 + src[1]) * 33 + src[2] =
//     ((hash0 * 33 + src[0]) * 33 + src[1]) * 33 + src[2]
// hash4 = hash3 * 33 + src[3] = (hash2 * 33 + src[2]) * 33 + src[3] =
//     ((hash1 * 33 + src[1]) * 33 + src[2]) * 33 + src[3] =
//     (((hash0 * 33 + src[0]) * 33 + src[1]) * 33 + src[2]) * 33 + src[3]

// movzxbd xmm1, [eax]  // SSE4.1 requires VS2010
// pmulld requires Studio2008
// does 8 at a time, unaligned
__declspec(naked) __declspec(align(16))
static uint32 HashDjb2_SSE41(const uint8* src, int count, uint32 seed) {
  __asm {
    mov        eax, [esp + 4]    // src
    mov        ecx, [esp + 8]    // count
    movd       xmm0, [esp + 12]  // seed
    pxor       xmm7, xmm7        // constant 0 for unpck
    movdqa     xmm4, kHash8x33
    movdqa     xmm5, kMulL33
    movdqa     xmm6, kMulH33

    align      16
  wloop:
    movq       xmm1, qword ptr [eax]  // src[0-7]
    lea        eax, [eax + 8]
    punpcklbw  xmm1, xmm7
    movdqa     xmm3, xmm1
    punpcklwd  xmm1, xmm7
    // pmulld  xmm1, xmm5
    _asm _emit 0x66 _asm _emit 0x0F _asm _emit 0x38 _asm _emit 0x40 _asm _emit 0xCD
    punpckhwd  xmm3, xmm7
    // pmulld  xmm3, xmm6
    _asm _emit 0x66 _asm _emit 0x0F _asm _emit 0x38 _asm _emit 0x40 _asm _emit 0xDE
    sub        ecx, 8
    // pmulld  xmm0, xmm4  // hash *= 33 ^ 8
    _asm _emit 0x66 _asm _emit 0x0F _asm _emit 0x38 _asm _emit 0x40 _asm _emit 0xC4
    paddd      xmm1, xmm3      // add 2nd 4 to first 4
    pshufd     xmm2, xmm1, 14  // upper 2 dwords
    paddd      xmm1, xmm2
    pshufd     xmm2, xmm1, 1
    paddd      xmm1, xmm2
    paddd      xmm0, xmm1
    jg         wloop

    movd       eax, xmm0  // return hash
    ret
  }
}

#define HAS_HASHDJB2_ALIGNED_SSE41
static const vec32 kHash16x33 = { -1831214591, 0, 0, 0 };  // 33 ^ 16
static const vec32 kHashMul0 = {
  204809697,    // 33 ^ 15
  -1555599935,  // 33 ^ 14
  994064801,    // 33 ^ 13
  1331628417,   // 33 ^ 12
};
static const vec32 kHashMul1 = {
  821255521,    // 33 ^ 11
  -2057521855,  // 33 ^ 10
  67801377,     // 33 ^ 9
  1954312449,   // 33 ^ 8
};
static const vec32 kHashMul2 = {
  -331229983,   // 33 ^ 7
  1291467969,   // 33 ^ 6
  39135393,     // 33 ^ 5
  1185921,      // 33 ^ 4
};
static const vec32 kHashMul3 = {
  35937,  // 33 ^ 3
  1089,   // 33 ^ 2
  33,     // 33 ^ 1
  1,      // 33 ^ 0
};
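// The constants above let the SSE4.1 kernels advance the same djb2 recurrence
// 8 or 16 bytes per iteration: unrolling HashDjb2_C over 16 bytes gives
//   hash' = hash * 33^16 + src[0] * 33^15 + src[1] * 33^14 + ... + src[15]
// (all modulo 2^32), so kHash16x33 holds 33^16 and kHashMul0..kHashMul3 hold
// the descending powers 33^15 .. 33^0. A scalar sketch of that unrolled form
// is shown below for illustration only (hypothetical helper, not part of the
// library; it assumes count is a multiple of 16).
#if 0
static uint32 HashDjb2_16_Reference(const uint8* src, int count, uint32 seed) {
  uint32 hash = seed;
  for (int i = 0; i < count; i += 16) {
    uint32 block = 0;
    uint32 mul = 1;
    for (int j = 15; j >= 0; --j) {
      block += src[i + j] * mul;  // src[i + j] * 33^(15 - j), mod 2^32.
      mul *= 33;
    }
    hash = hash * mul + block;  // mul is now 33^16 (mod 2^32).
  }
  return hash;
}
#endif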
// movzxbd xmm1, [eax]  // SSE4.1 requires VS2010
// pmulld requires Studio2008
// does 16 at a time, aligned
// TODO(fbarchard): For SSE2 version use pmuludq
//   pmulld xmm1, xmm5
// becomes
//   movdqa xmm2, xmm1
//   pmuludq xmm1, [33*33*33, 0, 33, 0]
//   psrldq xmm2, 8
//   pmuludq xmm2, [33*33, 0, 1, 0]
//   paddd xmm1, xmm2
//   pshufd xmm2, xmm1, 2
//   paddd xmm1, xmm2

// Opcodes emitted by the pmulld macro below:
// 27: 66 0F 38 40 C6  pmulld xmm0, xmm6
// 44: 66 0F 38 40 DD  pmulld xmm3, xmm5
// 59: 66 0F 38 40 E5  pmulld xmm4, xmm5
// 72: 66 0F 38 40 D5  pmulld xmm2, xmm5
// 83: 66 0F 38 40 CD  pmulld xmm1, xmm5
#define pmulld(reg) _asm _emit 0x66 _asm _emit 0x0F _asm _emit 0x38 \
    _asm _emit 0x40 _asm _emit reg

__declspec(naked) __declspec(align(16))
static uint32 HashDjb2_Aligned_SSE41(const uint8* src, int count, uint32 seed) {
  __asm {
    mov        eax, [esp + 4]    // src
    mov        ecx, [esp + 8]    // count
    movd       xmm0, [esp + 12]  // seed
    pxor       xmm7, xmm7        // constant 0 for unpck
    movdqa     xmm6, kHash16x33

    align      16
  wloop:
    movdqa     xmm1, [eax]  // src[0-15]
    lea        eax, [eax + 16]
    pmulld(0xc6)            // pmulld xmm0, xmm6  // hash *= 33 ^ 16
    movdqa     xmm5, kHashMul0
    movdqa     xmm2, xmm1
    punpcklbw  xmm2, xmm7   // src[0-7]
    movdqa     xmm3, xmm2
    punpcklwd  xmm3, xmm7   // src[0-3]
    pmulld(0xdd)            // pmulld xmm3, xmm5
    movdqa     xmm5, kHashMul1
    movdqa     xmm4, xmm2
    punpckhwd  xmm4, xmm7   // src[4-7]
    pmulld(0xe5)            // pmulld xmm4, xmm5
    movdqa     xmm5, kHashMul2
    punpckhbw  xmm1, xmm7   // src[8-15]
    movdqa     xmm2, xmm1
    punpcklwd  xmm2, xmm7   // src[8-11]
    pmulld(0xd5)            // pmulld xmm2, xmm5
    movdqa     xmm5, kHashMul3
    punpckhwd  xmm1, xmm7   // src[12-15]
    pmulld(0xcd)            // pmulld xmm1, xmm5
    paddd      xmm3, xmm4   // add 16 results
    paddd      xmm1, xmm2
    sub        ecx, 16
    paddd      xmm1, xmm3
    pshufd     xmm2, xmm1, 14  // upper 2 dwords
    paddd      xmm1, xmm2
    pshufd     xmm2, xmm1, 1
    paddd      xmm1, xmm2
    paddd      xmm0, xmm1
    jg         wloop

    movd       eax, xmm0  // return hash
    ret
  }
}

#if 0  // The following works but is slower than the movdqa version.
// 66 0f 38 31 08     pmovzxbd xmm1, [eax]
// 66 0f 38 31 50 04  pmovzxbd xmm2, [eax + 4]
// 66 0f 38 31 58 08  pmovzxbd xmm3, [eax + 8]
// 66 0f 38 31 60 0c  pmovzxbd xmm4, [eax + 12]
#define pmovzxbd0(rmem) _asm _emit 0x66 _asm _emit 0x0f _asm _emit 0x38 \
    _asm _emit 0x31 _asm _emit rmem
#define pmovzxbd(rmem0, rmem1) _asm _emit 0x66 _asm _emit 0x0f _asm _emit 0x38 \
    _asm _emit 0x31 _asm _emit rmem0 _asm _emit rmem1

__declspec(naked) __declspec(align(16))
static uint32 HashDjb2_Unaligned_SSE41(const uint8* src, int count,
                                       uint32 seed) {
  __asm {
    mov        eax, [esp + 4]    // src
    mov        ecx, [esp + 8]    // count
    movd       xmm0, [esp + 12]  // seed
    movdqa     xmm5, kHash16x33

    align      16
  wloop:
    pmovzxbd0(0x08)         // pmovzxbd xmm1, [eax]       // src[0-3]
    pmulld     xmm1, kHashMul0
    pmovzxbd(0x50, 0x04)    // pmovzxbd xmm2, [eax + 4]   // src[4-7]
    pmulld     xmm2, kHashMul1
    pmovzxbd(0x58, 0x08)    // pmovzxbd xmm3, [eax + 8]   // src[8-11]
    pmulld     xmm3, kHashMul2
    pmovzxbd(0x60, 0x0c)    // pmovzxbd xmm4, [eax + 12]  // src[12-15]
    pmulld     xmm4, kHashMul3
    lea        eax, [eax + 16]
    pmulld     xmm0, xmm5   // hash *= 33 ^ 16
    paddd      xmm1, xmm2   // add 16 results
    paddd      xmm3, xmm4
    sub        ecx, 16
    paddd      xmm1, xmm3
    pshufd     xmm2, xmm1, 14  // upper 2 dwords
    paddd      xmm1, xmm2
    pshufd     xmm2, xmm1, 1
    paddd      xmm1, xmm2
    paddd      xmm0, xmm1
    jg         wloop

    movd       eax, xmm0  // return hash
    ret
  }
}
#endif  // 0
#endif  // !defined(YUV_DISABLE_ASM) && defined(_M_IX86)
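// Minimal usage sketch for the public entry point defined below: hashing a
// contiguous buffer with the recommended seed of 5381, and chaining planes by
// feeding the previous result back in as the seed. Hypothetical helpers for
// illustration only; ExampleHashFrame/ExampleHashI420 and their parameters
// are not part of the library API.
#if 0
static uint32 ExampleHashFrame(const uint8* frame, uint64 size) {
  return HashDjb2(frame, size, 5381);  // djb2 over 'size' bytes.
}
static uint32 ExampleHashI420(const uint8* y, uint64 y_size,
                              const uint8* u, uint64 u_size,
                              const uint8* v, uint64 v_size) {
  uint32 hash = HashDjb2(y, y_size, 5381);  // Seed the Y plane.
  hash = HashDjb2(u, u_size, hash);         // Chain U using Y's hash as seed.
  return HashDjb2(v, v_size, hash);         // Chain V.
}
#endif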
// hash seed of 5381 recommended.
uint32 HashDjb2(const uint8* src, uint64 count, uint32 seed) {
  uint32 (*Hash)(const uint8* src, int count, uint32 seed) = HashDjb2_C;
#if defined(HAS_HASHDJB2_SSE41)
  if (TestCpuFlag(kCpuHasSSE41) && IS_ALIGNED(count, 8)) {
    Hash = HashDjb2_SSE41;
    // The aligned version uses movdqa, so src must be 16 byte aligned.
    if (IS_ALIGNED(src, 16) && IS_ALIGNED(count, 16)) {
      Hash = HashDjb2_Aligned_SSE41;
    }
  }
#endif
  const int kBlockSize = 1 << 15;  // 32768
  while (count >= static_cast<uint64>(kBlockSize)) {
    seed = Hash(src, kBlockSize, seed);
    src += kBlockSize;
    count -= kBlockSize;
  }
  int remainder = static_cast<int>(count) & ~15;
  if (remainder) {
    seed = Hash(src, remainder, seed);
    src += remainder;
    count -= remainder;
  }
  remainder = static_cast<int>(count) & 15;
  if (remainder) {
    seed = HashDjb2_C(src, remainder, seed);
  }
  return seed;
}

#if !defined(YUV_DISABLE_ASM) && defined(__ARM_NEON__)
#define HAS_SUMSQUAREERROR_NEON
static uint32 SumSquareError_NEON(const uint8* src_a, const uint8* src_b,
                                  int count) {
  volatile uint32 sse;
  asm volatile (
    "vmov.u8    q7, #0                         \n"
    "vmov.u8    q9, #0                         \n"
    "vmov.u8    q8, #0                         \n"
    "vmov.u8    q10, #0                        \n"

  "1:                                          \n"
    "vld1.u8    {q0}, [%0]!                    \n"
    "vld1.u8    {q1}, [%1]!                    \n"
    "vsubl.u8   q2, d0, d2                     \n"
    "vsubl.u8   q3, d1, d3                     \n"
    "vmlal.s16  q7, d4, d4                     \n"
    "vmlal.s16  q8, d6, d6                     \n"
    "vmlal.s16  q8, d5, d5                     \n"
    "vmlal.s16  q10, d7, d7                    \n"
    "subs       %2, %2, #16                    \n"
    "bgt        1b                             \n"

    "vadd.u32   q7, q7, q8                     \n"
    "vadd.u32   q9, q9, q10                    \n"
    "vadd.u32   q10, q7, q9                    \n"
    "vpaddl.u32 q1, q10                        \n"
    "vadd.u64   d0, d2, d3                     \n"
    "vmov.32    %3, d0[0]                      \n"
    : "+r"(src_a),
      "+r"(src_b),
      "+r"(count),
      "=r"(sse)
    :
    : "memory", "cc", "q0", "q1", "q2", "q3", "q7", "q8", "q9", "q10");
  return sse;
}

#elif !defined(YUV_DISABLE_ASM) && defined(_M_IX86)
#define HAS_SUMSQUAREERROR_SSE2
__declspec(naked) __declspec(align(16))
static uint32 SumSquareError_SSE2(const uint8* src_a, const uint8* src_b,
                                  int count) {
  __asm {
    mov        eax, [esp + 4]   // src_a
    mov        edx, [esp + 8]   // src_b
    mov        ecx, [esp + 12]  // count
    pxor       xmm0, xmm0
    pxor       xmm5, xmm5
    sub        edx, eax

    align      16
  wloop:
    movdqa     xmm1, [eax]
    movdqa     xmm2, [eax + edx]
    lea        eax, [eax + 16]
    sub        ecx, 16
    movdqa     xmm3, xmm1  // abs trick
    psubusb    xmm1, xmm2
    psubusb    xmm2, xmm3
    por        xmm1, xmm2
    movdqa     xmm2, xmm1
    punpcklbw  xmm1, xmm5
    punpckhbw  xmm2, xmm5
    pmaddwd    xmm1, xmm1
    pmaddwd    xmm2, xmm2
    paddd      xmm0, xmm1
    paddd      xmm0, xmm2
    jg         wloop

    pshufd     xmm1, xmm0, 0EEh
    paddd      xmm0, xmm1
    pshufd     xmm1, xmm0, 01h
    paddd      xmm0, xmm1
    movd       eax, xmm0
    ret
  }
}

#elif !defined(YUV_DISABLE_ASM) && (defined(__x86_64__) || defined(__i386__))
#define HAS_SUMSQUAREERROR_SSE2
static uint32 SumSquareError_SSE2(const uint8* src_a, const uint8* src_b,
                                  int count) {
  uint32 sse;
  asm volatile (
    "pxor      %%xmm0,%%xmm0                   \n"
    "pxor      %%xmm5,%%xmm5                   \n"
    "sub       %0,%1                           \n"

  "1:                                          \n"
    "movdqa    (%0),%%xmm1                     \n"
    "movdqa    (%0,%1,1),%%xmm2                \n"
    "lea       0x10(%0),%0                     \n"
    "sub       $0x10,%2                        \n"
    "movdqa    %%xmm1,%%xmm3                   \n"
    "psubusb   %%xmm2,%%xmm1                   \n"
    "psubusb   %%xmm3,%%xmm2                   \n"
    "por       %%xmm2,%%xmm1                   \n"
    "movdqa    %%xmm1,%%xmm2                   \n"
    "punpcklbw %%xmm5,%%xmm1                   \n"
    "punpckhbw %%xmm5,%%xmm2                   \n"
    "pmaddwd   %%xmm1,%%xmm1                   \n"
    "pmaddwd   %%xmm2,%%xmm2                   \n"
    "paddd     %%xmm1,%%xmm0                   \n"
    "paddd     %%xmm2,%%xmm0                   \n"
    "jg        1b                              \n"

    "pshufd    $0xee,%%xmm0,%%xmm1             \n"
    "paddd     %%xmm1,%%xmm0                   \n"
    "pshufd    $0x1,%%xmm0,%%xmm1              \n"
    "paddd     %%xmm1,%%xmm0                   \n"
    "movd      %%xmm0,%3                       \n"
    : "+r"(src_a),  // %0
      "+r"(src_b),  // %1
      "+r"(count),  // %2
      "=g"(sse)     // %3
    :
    : "memory", "cc"
#if defined(__SSE2__)
    , "xmm0", "xmm1", "xmm2", "xmm3", "xmm5"
#endif
  );
  return sse;
}
#endif

static uint32 SumSquareError_C(const uint8* src_a, const uint8* src_b,
                               int count) {
  uint32 sse = 0u;
  for (int i = 0; i < count; ++i) {
    int diff = src_a[i] - src_b[i];
    sse += static_cast<uint32>(diff * diff);
  }
  return sse;
}
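// Why the 32K block size used below is safe for the uint32 accumulators in
// the kernels above: each squared byte difference is at most 255 * 255 =
// 65025, and 32768 * 65025 = 2,130,739,200, which is still below 2^31, so a
// single block sum cannot overflow the 32 bit return value.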
uint64 ComputeSumSquareError(const uint8* src_a, const uint8* src_b,
                             int count) {
  uint32 (*SumSquareError)(const uint8* src_a, const uint8* src_b,
                           int count) = SumSquareError_C;
#if defined(HAS_SUMSQUAREERROR_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    SumSquareError = SumSquareError_NEON;
  }
#elif defined(HAS_SUMSQUAREERROR_SSE2)
  if (TestCpuFlag(kCpuHasSSE2) &&
      IS_ALIGNED(src_a, 16) && IS_ALIGNED(src_b, 16)) {
    // Note only used for multiples of 16, so count is not checked.
    SumSquareError = SumSquareError_SSE2;
  }
#endif
  // 32K values will fit a 32bit int return value from SumSquareError.
  // After each block of 32K, accumulate into a 64 bit int.
  const int kBlockSize = 1 << 15;  // 32768
  uint64 sse = 0;
#ifdef _OPENMP
#pragma omp parallel for reduction(+: sse)
#endif
  for (int i = 0; i < (count - (kBlockSize - 1)); i += kBlockSize) {
    sse += SumSquareError(src_a + i, src_b + i, kBlockSize);
  }
  src_a += count & ~(kBlockSize - 1);
  src_b += count & ~(kBlockSize - 1);
  int remainder = count & (kBlockSize - 1) & ~15;
  if (remainder) {
    sse += SumSquareError(src_a, src_b, remainder);
    src_a += remainder;
    src_b += remainder;
  }
  remainder = count & 15;
  if (remainder) {
    sse += SumSquareError_C(src_a, src_b, remainder);
  }
  return sse;
}

uint64 ComputeSumSquareErrorPlane(const uint8* src_a, int stride_a,
                                  const uint8* src_b, int stride_b,
                                  int width, int height) {
  uint32 (*SumSquareError)(const uint8* src_a, const uint8* src_b,
                           int count) = SumSquareError_C;
#if defined(HAS_SUMSQUAREERROR_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    SumSquareError = SumSquareError_NEON;
  }
#elif defined(HAS_SUMSQUAREERROR_SSE2)
  if (TestCpuFlag(kCpuHasSSE2) && IS_ALIGNED(width, 16) &&
      IS_ALIGNED(src_a, 16) && IS_ALIGNED(stride_a, 16) &&
      IS_ALIGNED(src_b, 16) && IS_ALIGNED(stride_b, 16)) {
    SumSquareError = SumSquareError_SSE2;
  }
#endif
  uint64 sse = 0;
  for (int h = 0; h < height; ++h) {
    sse += SumSquareError(src_a, src_b, width);
    src_a += stride_a;
    src_b += stride_b;
  }
  return sse;
}

double SumSquareErrorToPsnr(uint64 sse, uint64 count) {
  double psnr;
  if (sse > 0) {
    // mse holds count / sse, the reciprocal of the mean squared error, so
    // psnr = 10 * log10(255^2 * count / sse).
    double mse = static_cast<double>(count) / static_cast<double>(sse);
    psnr = 10.0 * log10(255.0 * 255.0 * mse);
  } else {
    psnr = kMaxPsnr;  // Limit to prevent divide by 0.
  }
  if (psnr > kMaxPsnr)
    psnr = kMaxPsnr;
  return psnr;
}

double CalcFramePsnr(const uint8* src_a, int stride_a,
                     const uint8* src_b, int stride_b,
                     int width, int height) {
  const uint64 samples = width * height;
  const uint64 sse = ComputeSumSquareErrorPlane(src_a, stride_a,
                                                src_b, stride_b,
                                                width, height);
  return SumSquareErrorToPsnr(sse, samples);
}

double I420Psnr(const uint8* src_y_a, int stride_y_a,
                const uint8* src_u_a, int stride_u_a,
                const uint8* src_v_a, int stride_v_a,
                const uint8* src_y_b, int stride_y_b,
                const uint8* src_u_b, int stride_u_b,
                const uint8* src_v_b, int stride_v_b,
                int width, int height) {
  const uint64 sse_y = ComputeSumSquareErrorPlane(src_y_a, stride_y_a,
                                                  src_y_b, stride_y_b,
                                                  width, height);
  const int width_uv = (width + 1) >> 1;
  const int height_uv = (height + 1) >> 1;
  const uint64 sse_u = ComputeSumSquareErrorPlane(src_u_a, stride_u_a,
                                                  src_u_b, stride_u_b,
                                                  width_uv, height_uv);
  const uint64 sse_v = ComputeSumSquareErrorPlane(src_v_a, stride_v_a,
                                                  src_v_b, stride_v_b,
                                                  width_uv, height_uv);
  const uint64 samples = width * height + 2 * (width_uv * height_uv);
  const uint64 sse = sse_y + sse_u + sse_v;
  return SumSquareErrorToPsnr(sse, samples);
}

static const int64 cc1 =  26634;  // 64^2 * (.01 * 255)^2
static const int64 cc2 = 239708;  // 64^2 * (.03 * 255)^2
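// Ssim8x8_C below evaluates the standard per-window SSIM expression
//   SSIM = (2*mu_a*mu_b + C1) * (2*cov_ab + C2) /
//          ((mu_a^2 + mu_b^2 + C1) * (var_a + var_b + C2))
// in integer arithmetic. With N = 64 pixels per window, multiplying both the
// numerator and the denominator by N^2 turns the means and (co)variances into
// the raw sums accumulated in the loop; that is why c1 and c2 are the SSIM
// constants C1 = (.01*255)^2 and C2 = (.03*255)^2 scaled by count * count
// (cc1 and cc2 store them pre-multiplied by 64^2, and the >> 12 divides that
// factor of 4096 back out).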
static double Ssim8x8_C(const uint8* src_a, int stride_a,
                        const uint8* src_b, int stride_b) {
  int64 sum_a = 0;
  int64 sum_b = 0;
  int64 sum_sq_a = 0;
  int64 sum_sq_b = 0;
  int64 sum_axb = 0;

  for (int i = 0; i < 8; ++i) {
    for (int j = 0; j < 8; ++j) {
      sum_a += src_a[j];
      sum_b += src_b[j];
      sum_sq_a += src_a[j] * src_a[j];
      sum_sq_b += src_b[j] * src_b[j];
      sum_axb += src_a[j] * src_b[j];
    }
    src_a += stride_a;
    src_b += stride_b;
  }

  const int64 count = 64;
  // Scale the constants by the number of pixels.
  const int64 c1 = (cc1 * count * count) >> 12;
  const int64 c2 = (cc2 * count * count) >> 12;

  const int64 sum_a_x_sum_b = sum_a * sum_b;

  const int64 ssim_n = (2 * sum_a_x_sum_b + c1) *
                       (2 * count * sum_axb - 2 * sum_a_x_sum_b + c2);

  const int64 sum_a_sq = sum_a * sum_a;
  const int64 sum_b_sq = sum_b * sum_b;

  const int64 ssim_d = (sum_a_sq + sum_b_sq + c1) *
                       (count * sum_sq_a - sum_a_sq +
                        count * sum_sq_b - sum_b_sq + c2);

  if (ssim_d == 0.0)
    return DBL_MAX;
  return ssim_n * 1.0 / ssim_d;
}

// We are using an 8x8 moving window, with the starting location of each 8x8
// window on a 4x4 pixel grid. Such an arrangement allows the windows to
// overlap block boundaries to penalize blocking artifacts.
double CalcFrameSsim(const uint8* src_a, int stride_a,
                     const uint8* src_b, int stride_b,
                     int width, int height) {
  int samples = 0;
  double ssim_total = 0;

  double (*Ssim8x8)(const uint8* src_a, int stride_a,
                    const uint8* src_b, int stride_b);
  Ssim8x8 = Ssim8x8_C;

  // Sample points start at each 4x4 location.
  for (int i = 0; i < height - 8; i += 4) {
    for (int j = 0; j < width - 8; j += 4) {
      ssim_total += Ssim8x8(src_a + j, stride_a, src_b + j, stride_b);
      samples++;
    }
    src_a += stride_a * 4;
    src_b += stride_b * 4;
  }

  ssim_total /= samples;
  return ssim_total;
}

double I420Ssim(const uint8* src_y_a, int stride_y_a,
                const uint8* src_u_a, int stride_u_a,
                const uint8* src_v_a, int stride_v_a,
                const uint8* src_y_b, int stride_y_b,
                const uint8* src_u_b, int stride_u_b,
                const uint8* src_v_b, int stride_v_b,
                int width, int height) {
  const double ssim_y = CalcFrameSsim(src_y_a, stride_y_a,
                                      src_y_b, stride_y_b, width, height);
  const int width_uv = (width + 1) >> 1;
  const int height_uv = (height + 1) >> 1;
  const double ssim_u = CalcFrameSsim(src_u_a, stride_u_a,
                                      src_u_b, stride_u_b,
                                      width_uv, height_uv);
  const double ssim_v = CalcFrameSsim(src_v_a, stride_v_a,
                                      src_v_b, stride_v_b,
                                      width_uv, height_uv);

  // Weight the Y plane 0.8 and each chroma plane 0.1.
  return ssim_y * 0.8 + 0.1 * (ssim_u + ssim_v);
}

#ifdef __cplusplus
}  // extern "C"
}  // namespace libyuv
#endif