diff --git a/include/libyuv/row.h b/include/libyuv/row.h
index 5e0d2ae9f..5c110dd2b 100644
--- a/include/libyuv/row.h
+++ b/include/libyuv/row.h
@@ -6679,6 +6679,7 @@ void GaussCol_F32_C(const float* src0,
                     int width);
 void GaussRow_C(const uint32_t* src, uint16_t* dst, int width);
+void GaussRow_NEON(const uint32_t* src, uint16_t* dst, int width);
 void GaussCol_C(const uint16_t* src0,
                 const uint16_t* src1,
                 const uint16_t* src2,
@@ -6686,6 +6687,13 @@ void GaussCol_C(const uint16_t* src0,
                 const uint16_t* src4,
                 uint32_t* dst,
                 int width);
+void GaussCol_NEON(const uint16_t* src0,
+                   const uint16_t* src1,
+                   const uint16_t* src2,
+                   const uint16_t* src3,
+                   const uint16_t* src4,
+                   uint32_t* dst,
+                   int width);
 
 void ClampFloatToZero_SSE2(const float* src_x, float* dst_y, int width);
diff --git a/include/libyuv/scale_row.h b/include/libyuv/scale_row.h
index 40f728b79..3d9a11b02 100644
--- a/include/libyuv/scale_row.h
+++ b/include/libyuv/scale_row.h
@@ -347,6 +347,10 @@ void ScaleRowDown2Box_16_C(const uint16_t* src_ptr,
                            ptrdiff_t src_stride,
                            uint16_t* dst,
                            int dst_width);
+void ScaleRowDown2Box_16_NEON(const uint16_t* src_ptr,
+                              ptrdiff_t src_stride,
+                              uint16_t* dst,
+                              int dst_width);
 void ScaleRowDown2Box_16To8_C(const uint16_t* src_ptr,
                               ptrdiff_t src_stride,
                               uint8_t* dst,
diff --git a/source/row_neon.cc b/source/row_neon.cc
index d99b13727..1211a3727 100644
--- a/source/row_neon.cc
+++ b/source/row_neon.cc
@@ -1833,11 +1833,12 @@ struct RgbUVConstants {
 };
 
 // 8x1 pixels.
-void ARGBToUV444MatrixRow_NEON(const uint8_t* src_argb,
-                               uint8_t* dst_u,
-                               uint8_t* dst_v,
-                               int width,
-                               const struct RgbUVConstants* rgbuvconstants) {
+static void ARGBToUV444MatrixRow_NEON(
+    const uint8_t* src_argb,
+    uint8_t* dst_u,
+    uint8_t* dst_v,
+    int width,
+    const struct RgbUVConstants* rgbuvconstants) {
   asm volatile (
       "vld1.8     {d0}, [%4]                     \n"  // load rgbuvconstants
@@ -2752,10 +2753,10 @@ static const struct RgbConstants kRgb24I601Constants = {{25, 129, 66, 0},
 static const struct RgbConstants kRawI601Constants = {{66, 129, 25, 0},
                                                       0x1080};
 
 // ARGB expects first 3 values to contain RGB and 4th value is ignored.
-void ARGBToYMatrixRow_NEON(const uint8_t* src_argb,
-                           uint8_t* dst_y,
-                           int width,
-                           const struct RgbConstants* rgbconstants) {
+static void ARGBToYMatrixRow_NEON(const uint8_t* src_argb,
+                                  uint8_t* dst_y,
+                                  int width,
+                                  const struct RgbConstants* rgbconstants) {
   asm volatile (
       "vld1.8     {d0}, [%3]                     \n"  // load rgbconstants
       "vdup.u8    d20, d0[0]                     \n"
@@ -2802,10 +2803,10 @@ void ABGRToYJRow_NEON(const uint8_t* src_abgr, uint8_t* dst_yj, int width) {
 
 // RGBA expects first value to be A and ignored, then 3 values to contain RGB.
 // Same code as ARGB, except the LD4
-void RGBAToYMatrixRow_NEON(const uint8_t* src_rgba,
-                           uint8_t* dst_y,
-                           int width,
-                           const struct RgbConstants* rgbconstants) {
+static void RGBAToYMatrixRow_NEON(const uint8_t* src_rgba,
+                                  uint8_t* dst_y,
+                                  int width,
+                                  const struct RgbConstants* rgbconstants) {
   asm volatile (
       "vld1.8     {d0}, [%3]                     \n"  // load rgbconstants
       "vdup.u8    d20, d0[0]                     \n"
@@ -2846,10 +2847,10 @@ void BGRAToYRow_NEON(const uint8_t* src_bgra, uint8_t* dst_y, int width) {
   RGBAToYMatrixRow_NEON(src_bgra, dst_y, width, &kRawI601Constants);
 }
 
-void RGBToYMatrixRow_NEON(const uint8_t* src_rgb,
-                          uint8_t* dst_y,
-                          int width,
-                          const struct RgbConstants* rgbconstants) {
+static void RGBToYMatrixRow_NEON(const uint8_t* src_rgb,
+                                 uint8_t* dst_y,
+                                 int width,
+                                 const struct RgbConstants* rgbconstants) {
   asm volatile (
       "vld1.8     {d0}, [%3]                     \n"  // load rgbconstants
       "vdup.u8    d20, d0[0]                     \n"
diff --git a/source/row_neon64.cc b/source/row_neon64.cc
index 8320bcb0b..fae634234 100644
--- a/source/row_neon64.cc
+++ b/source/row_neon64.cc
@@ -2722,11 +2722,12 @@ struct RgbUVConstantsI8 {
 };
 
 // 8x1 pixels.
-void ARGBToUV444MatrixRow_NEON(const uint8_t* src_argb,
-                               uint8_t* dst_u,
-                               uint8_t* dst_v,
-                               int width,
-                               const struct RgbUVConstantsU8* rgbuvconstants) {
+static void ARGBToUV444MatrixRow_NEON(
+    const uint8_t* src_argb,
+    uint8_t* dst_u,
+    uint8_t* dst_v,
+    int width,
+    const struct RgbUVConstantsU8* rgbuvconstants) {
   asm volatile(
       "ldr        d0, [%4]                       \n"  // load rgbuvconstants
       "dup        v24.16b, v0.b[0]               \n"  // UB 0.875 coefficient
@@ -2763,7 +2764,7 @@ void ARGBToUV444MatrixRow_NEON(const uint8_t* src_argb,
         "v27", "v28", "v29");
 }
 
-void ARGBToUV444MatrixRow_NEON_I8MM(
+static void ARGBToUV444MatrixRow_NEON_I8MM(
     const uint8_t* src_argb,
     uint8_t* dst_u,
     uint8_t* dst_v,
@@ -3519,10 +3520,10 @@ struct RgbConstants {
 };
 
 // ARGB expects first 3 values to contain RGB and 4th value is ignored.
-void ARGBToYMatrixRow_NEON(const uint8_t* src_argb,
-                           uint8_t* dst_y,
-                           int width,
-                           const struct RgbConstants* rgbconstants) {
+static void ARGBToYMatrixRow_NEON(const uint8_t* src_argb,
+                                  uint8_t* dst_y,
+                                  int width,
+                                  const struct RgbConstants* rgbconstants) {
   asm volatile (
       "ldr        d0, [%3]                       \n"  // load rgbconstants
       "dup        v6.16b, v0.b[0]                \n"
@@ -3552,11 +3553,11 @@ void ARGBToYMatrixRow_NEON(const uint8_t* src_argb,
       "v17");
 }
 
-void
-ARGBToYMatrixRow_NEON_DotProd(const uint8_t* src_argb,
-                              uint8_t* dst_y,
-                              int width,
-                              const struct RgbConstants* rgbconstants) {
+static void ARGBToYMatrixRow_NEON_DotProd(
+    const uint8_t* src_argb,
+    uint8_t* dst_y,
+    int width,
+    const struct RgbConstants* rgbconstants) {
   asm volatile (
       "ldr        d0, [%3]                       \n"  // load rgbconstants
       "dup        v16.4s, v0.s[0]                \n"
@@ -3655,10 +3656,10 @@ void ABGRToYJRow_NEON_DotProd(const uint8_t* src_abgr,
 
 // RGBA expects first value to be A and ignored, then 3 values to contain RGB.
 // Same code as ARGB, except the LD4
-void RGBAToYMatrixRow_NEON(const uint8_t* src_rgba,
-                           uint8_t* dst_y,
-                           int width,
-                           const struct RgbConstants* rgbconstants) {
+static void RGBAToYMatrixRow_NEON(const uint8_t* src_rgba,
+                                  uint8_t* dst_y,
+                                  int width,
+                                  const struct RgbConstants* rgbconstants) {
   asm volatile (
       "ldr        d0, [%3]                       \n"  // load rgbconstants
       "dup        v6.16b, v0.b[0]                \n"
@@ -3727,10 +3728,10 @@ void BGRAToYRow_NEON_DotProd(const uint8_t* src_bgra,
                              &kRawI601DotProdConstants);
 }
 
-void RGBToYMatrixRow_NEON(const uint8_t* src_rgb,
-                          uint8_t* dst_y,
-                          int width,
-                          const struct RgbConstants* rgbconstants) {
+static void RGBToYMatrixRow_NEON(const uint8_t* src_rgb,
+                                 uint8_t* dst_y,
+                                 int width,
+                                 const struct RgbConstants* rgbconstants) {
   asm volatile (
       "ldr        d0, [%3]                       \n"  // load rgbconstants
       "dup        v5.16b, v0.b[0]                \n"
diff --git a/source/row_rvv.cc b/source/row_rvv.cc
index 0533866c0..62c6b2631 100644
--- a/source/row_rvv.cc
+++ b/source/row_rvv.cc
@@ -2079,10 +2079,10 @@ static const struct RgbConstants kRawI601Constants = {{66, 129, 25, 0},
 // ARGB expects first 3 values to contain RGB and 4th value is ignored
 #ifdef HAS_ARGBTOYMATRIXROW_RVV
 #ifdef LIBYUV_RVV_HAS_TUPLE_TYPE
-void ARGBToYMatrixRow_RVV(const uint8_t* src_argb,
-                          uint8_t* dst_y,
-                          int width,
-                          const struct RgbConstants* rgbconstants) {
+static void ARGBToYMatrixRow_RVV(const uint8_t* src_argb,
+                                 uint8_t* dst_y,
+                                 int width,
+                                 const struct RgbConstants* rgbconstants) {
   assert(width != 0);
   size_t w = (size_t)width;
   vuint8m2_t v_by, v_gy, v_ry;  // vectors are to store RGBToY constant
@@ -2112,10 +2112,10 @@ void ARGBToYMatrixRow_RVV(const uint8_t* src_argb,
   } while (w > 0);
 }
 #else
-void ARGBToYMatrixRow_RVV(const uint8_t* src_argb,
-                          uint8_t* dst_y,
-                          int width,
-                          const struct RgbConstants* rgbconstants) {
+static void ARGBToYMatrixRow_RVV(const uint8_t* src_argb,
+                                 uint8_t* dst_y,
+                                 int width,
+                                 const struct RgbConstants* rgbconstants) {
   assert(width != 0);
   size_t w = (size_t)width;
   vuint8m2_t v_by, v_gy, v_ry;  // vectors are to store RGBToY constant
@@ -2171,10 +2171,10 @@ void ABGRToYJRow_RVV(const uint8_t* src_abgr, uint8_t* dst_yj, int width) {
 
 // RGBA expects first value to be A and ignored, then 3 values to contain RGB.
 #ifdef HAS_RGBATOYMATRIXROW_RVV
 #ifdef LIBYUV_RVV_HAS_TUPLE_TYPE
-void RGBAToYMatrixRow_RVV(const uint8_t* src_rgba,
-                          uint8_t* dst_y,
-                          int width,
-                          const struct RgbConstants* rgbconstants) {
+static void RGBAToYMatrixRow_RVV(const uint8_t* src_rgba,
+                                 uint8_t* dst_y,
+                                 int width,
+                                 const struct RgbConstants* rgbconstants) {
   assert(width != 0);
   size_t w = (size_t)width;
   vuint8m2_t v_by, v_gy, v_ry;  // vectors are to store RGBToY constant
@@ -2204,10 +2204,10 @@ void RGBAToYMatrixRow_RVV(const uint8_t* src_rgba,
   } while (w > 0);
 }
 #else
-void RGBAToYMatrixRow_RVV(const uint8_t* src_rgba,
-                          uint8_t* dst_y,
-                          int width,
-                          const struct RgbConstants* rgbconstants) {
+static void RGBAToYMatrixRow_RVV(const uint8_t* src_rgba,
+                                 uint8_t* dst_y,
+                                 int width,
+                                 const struct RgbConstants* rgbconstants) {
   assert(width != 0);
   size_t w = (size_t)width;
   vuint8m2_t v_by, v_gy, v_ry;  // vectors are to store RGBToY constant
@@ -2256,10 +2256,10 @@ void BGRAToYRow_RVV(const uint8_t* src_bgra, uint8_t* dst_y, int width) {
 
 #ifdef HAS_RGBTOYMATRIXROW_RVV
 #ifdef LIBYUV_RVV_HAS_TUPLE_TYPE
-void RGBToYMatrixRow_RVV(const uint8_t* src_rgb,
-                         uint8_t* dst_y,
-                         int width,
-                         const struct RgbConstants* rgbconstants) {
+static void RGBToYMatrixRow_RVV(const uint8_t* src_rgb,
+                                uint8_t* dst_y,
+                                int width,
+                                const struct RgbConstants* rgbconstants) {
   assert(width != 0);
   size_t w = (size_t)width;
   vuint8m2_t v_by, v_gy, v_ry;  // vectors are to store RGBToY constant
@@ -2289,10 +2289,10 @@ void RGBToYMatrixRow_RVV(const uint8_t* src_rgb,
   } while (w > 0);
 }
 #else
-void RGBToYMatrixRow_RVV(const uint8_t* src_rgb,
-                         uint8_t* dst_y,
-                         int width,
-                         const struct RgbConstants* rgbconstants) {
+static void RGBToYMatrixRow_RVV(const uint8_t* src_rgb,
+                                uint8_t* dst_y,
+                                int width,
+                                const struct RgbConstants* rgbconstants) {
   assert(width != 0);
   size_t w = (size_t)width;
   vuint8m2_t v_by, v_gy, v_ry;  // vectors are to store RGBToY constant
diff --git a/source/row_sve.cc b/source/row_sve.cc
index f847f4b92..6ab5e6884 100644
--- a/source/row_sve.cc
+++ b/source/row_sve.cc
@@ -588,12 +588,12 @@ static const int16_t kABGRToUVJCoefficients[] = {
     -21, 63, -42, 0, 63, -10, -53, 0,
 };
 
-void ARGBToUVMatrixRow_SVE2(const uint8_t* src_argb,
-                            int src_stride_argb,
-                            uint8_t* dst_u,
-                            uint8_t* dst_v,
-                            int width,
-                            const int16_t* uvconstants) {
+static void ARGBToUVMatrixRow_SVE2(const uint8_t* src_argb,
+                                   int src_stride_argb,
+                                   uint8_t* dst_u,
+                                   uint8_t* dst_v,
+                                   int width,
+                                   const int16_t* uvconstants) {
   const uint8_t* src_argb_1 = src_argb + src_stride_argb;
   uint64_t vl;
   asm volatile (
diff --git a/source/scale_neon.cc b/source/scale_neon.cc
index 309d7b0bf..c1148b6a5 100644
--- a/source/scale_neon.cc
+++ b/source/scale_neon.cc
@@ -1080,91 +1080,6 @@ void ScaleFilterCols_NEON(uint8_t* dst_ptr,
 
 #undef LOAD2_DATA8_LANE
 
-// 16x2 -> 16x1
-void ScaleFilterRows_NEON(uint8_t* dst_ptr,
-                          const uint8_t* src_ptr,
-                          ptrdiff_t src_stride,
-                          int dst_width,
-                          int source_y_fraction) {
-  asm volatile (
-      "cmp        %4, #0                         \n"
-      "beq        100f                           \n"
-      "add        %2, %1                         \n"
-      "cmp        %4, #64                        \n"
-      "beq        75f                            \n"
-      "cmp        %4, #128                       \n"
-      "beq        50f                            \n"
-      "cmp        %4, #192                       \n"
-      "beq        25f                            \n"
-
-      "vdup.8     d5, %4                         \n"
-      "rsb        %4, #256                       \n"
-      "vdup.8     d4, %4                         \n"
-      // General purpose row blend.
-      "1:                                        \n"
-      "vld1.8     {q0}, [%1]!                    \n"
-      "vld1.8     {q1}, [%2]!                    \n"
-      "subs       %3, %3, #16                    \n"
-      "vmull.u8   q13, d0, d4                    \n"
-      "vmull.u8   q14, d1, d4                    \n"
-      "vmlal.u8   q13, d2, d5                    \n"
-      "vmlal.u8   q14, d3, d5                    \n"
-      "vrshrn.u16 d0, q13, #8                    \n"
-      "vrshrn.u16 d1, q14, #8                    \n"
-      "vst1.8     {q0}, [%0]!                    \n"
-      "bgt        1b                             \n"
-      "b          99f                            \n"
-
-      // Blend 25 / 75.
-      "25:                                       \n"
-      "vld1.8     {q0}, [%1]!                    \n"
-      "vld1.8     {q1}, [%2]!                    \n"
-      "subs       %3, %3, #16                    \n"
-      "vrhadd.u8  q0, q1                         \n"
-      "vrhadd.u8  q0, q1                         \n"
-      "vst1.8     {q0}, [%0]!                    \n"
-      "bgt        25b                            \n"
-      "b          99f                            \n"
-
-      // Blend 50 / 50.
-      "50:                                       \n"
-      "vld1.8     {q0}, [%1]!                    \n"
-      "vld1.8     {q1}, [%2]!                    \n"
-      "subs       %3, %3, #16                    \n"
-      "vrhadd.u8  q0, q1                         \n"
-      "vst1.8     {q0}, [%0]!                    \n"
-      "bgt        50b                            \n"
-      "b          99f                            \n"
-
-      // Blend 75 / 25.
-      "75:                                       \n"
-      "vld1.8     {q1}, [%1]!                    \n"
-      "vld1.8     {q0}, [%2]!                    \n"
-      "subs       %3, %3, #16                    \n"
-      "vrhadd.u8  q0, q1                         \n"
-      "vrhadd.u8  q0, q1                         \n"
-      "vst1.8     {q0}, [%0]!                    \n"
-      "bgt        75b                            \n"
-      "b          99f                            \n"
-
-      // Blend 100 / 0 - Copy row unchanged.
-      "100:                                      \n"
-      "vld1.8     {q0}, [%1]!                    \n"
-      "subs       %3, %3, #16                    \n"
-      "vst1.8     {q0}, [%0]!                    \n"
-      "bgt        100b                           \n"
-
-      "99:                                       \n"
-      "vst1.8     {d1[7]}, [%0]                  \n"
-      : "+r"(dst_ptr),            // %0
-        "+r"(src_ptr),            // %1
-        "+r"(src_stride),         // %2
-        "+r"(dst_width),          // %3
-        "+r"(source_y_fraction)   // %4
-      :
-      : "q0", "q1", "d4", "d5", "q13", "q14", "memory", "cc");
-}
-
 void ScaleARGBRowDown2_NEON(const uint8_t* src_ptr,
                             ptrdiff_t src_stride,
                             uint8_t* dst,
diff --git a/source/scale_neon64.cc b/source/scale_neon64.cc
index d440c28c9..51f65d68d 100644
--- a/source/scale_neon64.cc
+++ b/source/scale_neon64.cc
@@ -1458,58 +1458,6 @@ void ScaleRowDown2Box_16_NEON(const uint16_t* src_ptr,
   );
 }
 
-// Read 8x2 upsample with filtering and write 16x1.
-// Actually reads an extra pixel, so 9x2.
-void ScaleRowUp2_16_NEON(const uint16_t* src_ptr,
-                         ptrdiff_t src_stride,
-                         uint16_t* dst,
-                         int dst_width) {
-  asm volatile (
-      "add        %1, %0, %1, lsl #1             \n"  // ptr + stide * 2
-      "movi       v0.8h, #9                      \n"  // constants
-      "movi       v1.4s, #3                      \n"
-
-      "1:                                        \n"
-      "ld1        {v3.8h}, [%0], %4              \n"  // TL read first 8
-      "ld1        {v4.8h}, [%0], %5              \n"  // TR read 8 offset by 1
-      "ld1        {v5.8h}, [%1], %4              \n"  // BL read 8 from next row
-      "ld1        {v6.8h}, [%1], %5              \n"  // BR offset by 1
-      "subs       %w3, %w3, #16                  \n"  // 16 dst pixels per loop
-      "umull      v16.4s, v3.4h, v0.4h           \n"
-      "umull2     v7.4s, v3.8h, v0.8h            \n"
-      "umull      v18.4s, v4.4h, v0.4h           \n"
-      "umull2     v17.4s, v4.8h, v0.8h           \n"
-      "prfm       pldl1keep, [%0, 448]           \n"  // prefetch 7 lines ahead
-      "uaddw      v16.4s, v16.4s, v6.4h          \n"
-      "uaddl2     v19.4s, v6.8h, v3.8h           \n"
-      "uaddl      v3.4s, v6.4h, v3.4h            \n"
-      "uaddw2     v6.4s, v7.4s, v6.8h            \n"
-      "uaddl2     v7.4s, v5.8h, v4.8h            \n"
-      "uaddl      v4.4s, v5.4h, v4.4h            \n"
-      "uaddw      v18.4s, v18.4s, v5.4h          \n"
-      "prfm       pldl1keep, [%1, 448]           \n"
-      "mla        v16.4s, v4.4s, v1.4s           \n"
-      "mla        v18.4s, v3.4s, v1.4s           \n"
-      "mla        v6.4s, v7.4s, v1.4s            \n"
-      "uaddw2     v4.4s, v17.4s, v5.8h           \n"
-      "uqrshrn    v16.4h, v16.4s, #4             \n"
-      "mla        v4.4s, v19.4s, v1.4s           \n"
-      "uqrshrn2   v16.8h, v6.4s, #4              \n"
-      "uqrshrn    v17.4h, v18.4s, #4             \n"
-      "uqrshrn2   v17.8h, v4.4s, #4              \n"
-      "st2        {v16.8h-v17.8h}, [%2], #32     \n"
-      "b.gt       1b                             \n"
-      : "+r"(src_ptr),     // %0
-        "+r"(src_stride),  // %1
-        "+r"(dst),         // %2
-        "+r"(dst_width)    // %3
-      : "r"(2LL),          // %4
-        "r"(14LL)          // %5
-      : "memory", "cc", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16",
-        "v17", "v18", "v19"  // Clobber List
-  );
-}
-
 void ScaleUVRowDown2_NEON(const uint8_t* src_ptr,
                           ptrdiff_t src_stride,
                           uint8_t* dst,
diff --git a/source/scale_uv.cc b/source/scale_uv.cc
index 31c27e913..b9db64eef 100644
--- a/source/scale_uv.cc
+++ b/source/scale_uv.cc
@@ -8,7 +8,7 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#include "libyuv/scale.h"
+#include "libyuv/scale_uv.h"
 
 #include <assert.h>
 #include <string.h>
diff --git a/unit_test/convert_test.cc b/unit_test/convert_test.cc
index 84b2e722c..ef30b12b5 100644
--- a/unit_test/convert_test.cc
+++ b/unit_test/convert_test.cc
@@ -326,18 +326,18 @@ TESTAPLANARTOP(Android420, NV21, 2, 1, 0, 2, 2, I420, 2, 2)
 #undef TESTAPLANARTOPI
 
 // wrapper to keep API the same
-int I400ToNV21(const uint8_t* src_y,
-               int src_stride_y,
-               const uint8_t* /* src_u */,
-               int /* src_stride_u */,
-               const uint8_t* /* src_v */,
-               int /* src_stride_v */,
-               uint8_t* dst_y,
-               int dst_stride_y,
-               uint8_t* dst_vu,
-               int dst_stride_vu,
-               int width,
-               int height) {
+static int I400ToNV21(const uint8_t* src_y,
+                      int src_stride_y,
+                      const uint8_t* /* src_u */,
+                      int /* src_stride_u */,
+                      const uint8_t* /* src_v */,
+                      int /* src_stride_v */,
+                      uint8_t* dst_y,
+                      int dst_stride_y,
+                      uint8_t* dst_vu,
+                      int dst_stride_vu,
+                      int width,
+                      int height) {
   return I400ToNV21(src_y, src_stride_y, dst_y, dst_stride_y, dst_vu,
                     dst_stride_vu, width, height);
 }
diff --git a/unit_test/cpu_thread_test.cc b/unit_test/cpu_thread_test.cc
index 69aab74e7..b6c0fa066 100644
--- a/unit_test/cpu_thread_test.cc
+++ b/unit_test/cpu_thread_test.cc
@@ -27,7 +27,7 @@ namespace libyuv {
 
 #ifdef LIBYUV_HAVE_PTHREAD
-void* ThreadMain(void* arg) {
+static void* ThreadMain(void* arg) {
   int* flags = static_cast<int*>(arg);
 
   *flags = TestCpuFlag(kCpuInitialized);
diff --git a/unit_test/planar_test.cc b/unit_test/planar_test.cc
index 9973318f5..36cfb7a5b 100644
--- a/unit_test/planar_test.cc
+++ b/unit_test/planar_test.cc
@@ -2496,13 +2496,13 @@ TEST_F(LibYUVPlanarTest, DISABLED_ARM(TestARGBPolynomial)) {
   }
 }
 
-int TestHalfFloatPlane(int benchmark_width,
-                       int benchmark_height,
-                       int benchmark_iterations,
-                       int disable_cpu_flags,
-                       int benchmark_cpu_info,
-                       float scale,
-                       int mask) {
+static int TestHalfFloatPlane(int benchmark_width,
+                              int benchmark_height,
+                              int benchmark_iterations,
+                              int disable_cpu_flags,
+                              int benchmark_cpu_info,
+                              float scale,
+                              int mask) {
   int i, j;
   const int y_plane_size = benchmark_width * benchmark_height * 2;
 
@@ -2631,12 +2631,12 @@ TEST_F(LibYUVPlanarTest, TestHalfFloatPlane_12bit_One) {
   EXPECT_LE(diff, 1);
 }
 
-float TestByteToFloat(int benchmark_width,
-                      int benchmark_height,
-                      int benchmark_iterations,
-                      int disable_cpu_flags,
-                      int benchmark_cpu_info,
-                      float scale) {
+static float TestByteToFloat(int benchmark_width,
+                             int benchmark_height,
+                             int benchmark_iterations,
+                             int disable_cpu_flags,
+                             int benchmark_cpu_info,
+                             float scale) {
   int i, j;
   const int y_plane_size = benchmark_width * benchmark_height;
 
diff --git a/unit_test/rotate_argb_test.cc b/unit_test/rotate_argb_test.cc
index 74952c4e6..4c7b0b250 100644
--- a/unit_test/rotate_argb_test.cc
+++ b/unit_test/rotate_argb_test.cc
@@ -16,15 +16,15 @@
 
 namespace libyuv {
 
-void TestRotateBpp(int src_width,
-                   int src_height,
-                   int dst_width,
-                   int dst_height,
-                   libyuv::RotationMode mode,
-                   int benchmark_iterations,
-                   int disable_cpu_flags,
-                   int benchmark_cpu_info,
-                   const int kBpp) {
+static void TestRotateBpp(int src_width,
+                          int src_height,
+                          int dst_width,
+                          int dst_height,
+                          libyuv::RotationMode mode,
+                          int benchmark_iterations,
+                          int disable_cpu_flags,
+                          int benchmark_cpu_info,
+                          const int kBpp) {
   if (src_width < 1) {
     src_width = 1;
   }
diff --git a/unit_test/scale_argb_test.cc b/unit_test/scale_argb_test.cc
index f54a68f11..71d1bd4db 100644
--- a/unit_test/scale_argb_test.cc
+++ b/unit_test/scale_argb_test.cc
@@ -369,26 +369,25 @@ TEST_SCALESWAPXY1(ARGBScale, Bilinear, 0)
 
 // Scale with YUV conversion to ARGB and clipping.
 // TODO(fbarchard): Add fourcc support. All 4 ARGB formats is easy to support.
-LIBYUV_API
-int YUVToARGBScaleReference2(const uint8_t* src_y,
-                             int src_stride_y,
-                             const uint8_t* src_u,
-                             int src_stride_u,
-                             const uint8_t* src_v,
-                             int src_stride_v,
-                             uint32_t /* src_fourcc */,
-                             int src_width,
-                             int src_height,
-                             uint8_t* dst_argb,
-                             int dst_stride_argb,
-                             uint32_t /* dst_fourcc */,
-                             int dst_width,
-                             int dst_height,
-                             int clip_x,
-                             int clip_y,
-                             int clip_width,
-                             int clip_height,
-                             enum FilterMode filtering) {
+static int YUVToARGBScaleReference2(const uint8_t* src_y,
+                                    int src_stride_y,
+                                    const uint8_t* src_u,
+                                    int src_stride_u,
+                                    const uint8_t* src_v,
+                                    int src_stride_v,
+                                    uint32_t /* src_fourcc */,
+                                    int src_width,
+                                    int src_height,
+                                    uint8_t* dst_argb,
+                                    int dst_stride_argb,
+                                    uint32_t /* dst_fourcc */,
+                                    int dst_width,
+                                    int dst_height,
+                                    int clip_x,
+                                    int clip_y,
+                                    int clip_width,
+                                    int clip_height,
+                                    enum FilterMode filtering) {
   uint8_t* argb_buffer =
       static_cast<uint8_t*>(malloc(src_width * src_height * 4));
   int r;
diff --git a/unit_test/scale_plane_test.cc b/unit_test/scale_plane_test.cc
index 9ce47a02c..3df71a59e 100644
--- a/unit_test/scale_plane_test.cc
+++ b/unit_test/scale_plane_test.cc
@@ -128,11 +128,6 @@ TEST_F(LibYUVScaleTest, TestScaleRowDown2Box_Odd_SSSE3) {
 }
 #endif  // HAS_SCALEROWDOWN2_SSSE3
 
-extern "C" void ScaleRowDown2Box_16_NEON(const uint16_t* src_ptr,
-                                         ptrdiff_t src_stride,
-                                         uint16_t* dst,
-                                         int dst_width);
-
 TEST_F(LibYUVScaleTest, TestScaleRowDown2Box_16) {
   SIMD_ALIGNED(uint16_t orig_pixels[2560 * 2]);
   SIMD_ALIGNED(uint16_t dst_pixels_c[1280]);
diff --git a/unit_test/unit_test.cc b/unit_test/unit_test.cc
index 6e2b539c3..3fcbd4fac 100644
--- a/unit_test/unit_test.cc
+++ b/unit_test/unit_test.cc
@@ -67,7 +67,7 @@ static LIBYUV_BOOL TestEnv(const char*) {
 }
 #endif
 
-int TestCpuEnv(int cpu_info) {
+static int TestCpuEnv(int cpu_info) {
 #if defined(__arm__) || defined(__aarch64__)
   if (TestEnv("LIBYUV_DISABLE_NEON")) {
     cpu_info &= ~libyuv::kCpuHasNEON;
diff --git a/util/psnr_main.cc b/util/psnr_main.cc
index 8b9fd9724..a11cd3fc4 100644
--- a/util/psnr_main.cc
+++ b/util/psnr_main.cc
@@ -65,9 +65,9 @@ int num_threads = 0;
 #endif
 
 // Parse PYUV format. ie name.1920x800_24Hz_P420.yuv
-bool ExtractResolutionFromFilename(const char* name,
-                                   int* width_ptr,
-                                   int* height_ptr) {
+static bool ExtractResolutionFromFilename(const char* name,
+                                          int* width_ptr,
+                                          int* height_ptr) {
   // Isolate the .width_height. section of the filename by searching for a
   // dot or underscore followed by a digit.
   for (int i = 0; name[i]; ++i) {
@@ -105,25 +105,12 @@ bool ExtractResolutionFromFilename(const char* name,
   return false;
 }
 
-// Scale Y channel from 16..240 to 0..255.
-// This can be useful when comparing codecs that are inconsistant about Y
-uint8_t ScaleY(uint8_t y) {
-  int ny = (y - 16) * 256 / 224;
-  if (ny < 0) {
-    ny = 0;
-  }
-  if (ny > 255) {
-    ny = 255;
-  }
-  return static_cast<uint8_t>(ny);
-}
-
 // MSE = Mean Square Error
-double GetMSE(double sse, double size) {
+static double GetMSE(double sse, double size) {
   return sse / size;
 }
 
-void PrintHelp(const char* program) {
+static void PrintHelp(const char* program) {
   printf("%s [-options] org_seq rec_seq [rec_seq2.. etc]\n", program);
 #ifdef HAVE_JPEG
   printf("jpeg or raw YUV 420 supported.\n");
@@ -151,7 +138,7 @@ void PrintHelp(const char* program) {
   exit(0);
 }
 
-void ParseOptions(int argc, const char* argv[]) {
+static void ParseOptions(int argc, const char* argv[]) {
   if (argc <= 1) {
     PrintHelp(argv[0]);
   }
@@ -240,15 +227,15 @@ void ParseOptions(int argc, const char* argv[]) {
   }
 }
 
-bool UpdateMetrics(uint8_t* ch_org,
-                   uint8_t* ch_rec,
-                   const int y_size,
-                   const int uv_size,
-                   const size_t total_size,
-                   int number_of_frames,
-                   metric* cur_distortion_psnr,
-                   metric* distorted_frame,
-                   bool compute_psnr) {
+static bool UpdateMetrics(uint8_t* ch_org,
+                          uint8_t* ch_rec,
+                          const int y_size,
+                          const int uv_size,
+                          const size_t total_size,
+                          int number_of_frames,
+                          metric* cur_distortion_psnr,
+                          metric* distorted_frame,
+                          bool compute_psnr) {
   const int uv_offset = (do_swap_uv ? uv_size : 0);
   const uint8_t* const u_org = ch_org + y_size + uv_offset;
   const uint8_t* const u_rec = ch_rec + y_size;