diff --git a/include/libyuv/row.h b/include/libyuv/row.h
index ba3919980..0a82aad31 100644
--- a/include/libyuv/row.h
+++ b/include/libyuv/row.h
@@ -195,6 +195,7 @@ extern "C" {
 #define HAS_ARGBTOYROW_NEON
 #define HAS_ARGBTOUV444ROW_NEON
 #define HAS_ARGBTOUV422ROW_NEON
+#define HAS_ARGBTOUV411ROW_NEON
 #define HAS_BGRATOYROW_NEON
 #define HAS_ABGRTOYROW_NEON
 #define HAS_RGBATOYROW_NEON
@@ -348,6 +349,8 @@ void ARGBToUV444Row_NEON(const uint8* src_argb, uint8* dst_u, uint8* dst_v,
                          int pix);
 void ARGBToUV422Row_NEON(const uint8* src_argb, uint8* dst_u, uint8* dst_v,
                          int pix);
+void ARGBToUV411Row_NEON(const uint8* src_argb, uint8* dst_u, uint8* dst_v,
+                         int pix);
 void BGRAToYRow_NEON(const uint8* src_bgra, uint8* dst_y, int pix);
 void ABGRToYRow_NEON(const uint8* src_abgr, uint8* dst_y, int pix);
 void RGBAToYRow_NEON(const uint8* src_rgba, uint8* dst_y, int pix);
diff --git a/source/convert_from_argb.cc b/source/convert_from_argb.cc
index 5603cf645..af9e97eb1 100644
--- a/source/convert_from_argb.cc
+++ b/source/convert_from_argb.cc
@@ -153,6 +153,8 @@ int ARGBToI411(const uint8* src_argb, int src_stride_argb,
     src_argb = src_argb + (height - 1) * src_stride_argb;
     src_stride_argb = -src_stride_argb;
   }
+  void (*ARGBToUV411Row)(const uint8* src_argb, uint8* dst_u, uint8* dst_v,
+                         int pix) = ARGBToUV411Row_C;
   void (*ARGBToYRow)(const uint8* src_argb, uint8* dst_y, int pix) =
       ARGBToYRow_C;
 #if defined(HAS_ARGBTOYROW_SSSE3)
@@ -171,12 +173,15 @@ int ARGBToI411(const uint8* src_argb, int src_stride_argb,
     ARGBToYRow = ARGBToYRow_Any_NEON;
     if (IS_ALIGNED(width, 8)) {
       ARGBToYRow = ARGBToYRow_NEON;
+      if (IS_ALIGNED(width, 32)) {
+        ARGBToUV411Row = ARGBToUV411Row_NEON;
+      }
     }
   }
 #endif

   for (int y = 0; y < height; ++y) {
-    ARGBToUV411Row_C(src_argb, dst_u, dst_v, width);
+    ARGBToUV411Row(src_argb, dst_u, dst_v, width);
     ARGBToYRow(src_argb, dst_y, width);
     src_argb += src_stride_argb;
     dst_y += dst_stride_y;
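For reference, the row function the new `ARGBToUV411Row` pointer defaults to averages each horizontal group of four ARGB pixels (memory order B, G, R, A) and converts the average to one U and one V sample. A minimal C++ sketch of that behavior, assuming the BT.601 constants that appear as NEON immediates below; the real `ARGBToUV411Row_C` lives in `source/row_common.cc`, and handling of a trailing 1-3 pixels is elided here:

```cpp
typedef unsigned char uint8;  // mirrors libyuv/basic_types.h

// Fixed-point BT.601 chroma transforms; same constants as the NEON
// immediates (112, -74, -38) and (112, -94, -18). The +0x8080 supplies
// the +128 bias in the high byte and rounding in the low byte.
static int RGBToU(int r, int g, int b) {
  return (112 * b - 74 * g - 38 * r + 0x8080) >> 8;
}
static int RGBToV(int r, int g, int b) {
  return (112 * r - 94 * g - 18 * b + 0x8080) >> 8;
}

void ARGBToUV411Row_C_sketch(const uint8* src_argb,
                             uint8* dst_u, uint8* dst_v, int pix) {
  for (int x = 0; x < pix; x += 4) {
    // Average 4 adjacent pixels per channel, truncating (>> 2).
    int ab = (src_argb[0] + src_argb[4] + src_argb[8] + src_argb[12]) >> 2;
    int ag = (src_argb[1] + src_argb[5] + src_argb[9] + src_argb[13]) >> 2;
    int ar = (src_argb[2] + src_argb[6] + src_argb[10] + src_argb[14]) >> 2;
    dst_u[0] = static_cast<uint8>(RGBToU(ar, ag, ab));
    dst_v[0] = static_cast<uint8>(RGBToV(ar, ag, ab));
    src_argb += 16;  // advance 4 pixels * 4 bytes.
    dst_u += 1;
    dst_v += 1;
  }
}
```

Note that this patch adds no `_Any` wrapper for the 411 kernel: the NEON path is taken only when `IS_ALIGNED(width, 32)` holds, and any other width runs the C row for the whole image.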
diff --git a/source/row_neon.cc b/source/row_neon.cc
index e953155fb..2cb39ca7f 100644
--- a/source/row_neon.cc
+++ b/source/row_neon.cc
@@ -1622,7 +1622,7 @@ void ARGBToUV422Row_NEON(const uint8* src_argb, uint8* dst_u, uint8* dst_v,
     "vmov.s16   q11, #74 / 2                   \n"  // UG -0.5781 coefficient
     "vmov.s16   q12, #38 / 2                   \n"  // UR -0.2969 coefficient
     "vmov.s16   q13, #18 / 2                   \n"  // VB -0.1406 coefficient
-    "vmov.s16   q14, #94 /2                    \n"  // VG -0.7344 coefficient
+    "vmov.s16   q14, #94 / 2                   \n"  // VG -0.7344 coefficient
     "vmov.u16   q15, #0x8080                   \n"  // 128.5
     ".p2align   2                              \n"
   "1:                                          \n"
@@ -1661,6 +1661,115 @@ void ARGBToUV422Row_NEON(const uint8* src_argb, uint8* dst_u, uint8* dst_v,
 }
 #endif  // HAS_ARGBTOUV422ROW_NEON

+// 32x1 pixels -> 8x1.  pix is number of argb pixels. e.g. 32.
+#ifdef HAS_ARGBTOUV411ROW_NEON
+void ARGBToUV411Row_NEON(const uint8* src_argb, uint8* dst_u, uint8* dst_v,
+                         int pix) {
+  asm volatile (
+    "vmov.s16   q10, #112 / 4                  \n"  // UB / VR 0.875 coefficient
+    "vmov.s16   q11, #74 / 4                   \n"  // UG -0.5781 coefficient
+    "vmov.s16   q12, #38 / 4                   \n"  // UR -0.2969 coefficient
+    "vmov.s16   q13, #18 / 4                   \n"  // VB -0.1406 coefficient
+    "vmov.s16   q14, #94 / 4                   \n"  // VG -0.7344 coefficient
+    "vmov.u16   q15, #0x8080                   \n"  // 128.5
+    ".p2align   2                              \n"
+  "1:                                          \n"
+    "vld4.8     {d0, d2, d4, d6}, [%0]!        \n"  // load 8 ARGB pixels.
+    "vld4.8     {d1, d3, d5, d7}, [%0]!        \n"  // load next 8 ARGB pixels.
+    "vpaddl.u8  q0, q0                         \n"  // B 16 bytes -> 8 shorts.
+    "vpaddl.u8  q1, q1                         \n"  // G 16 bytes -> 8 shorts.
+    "vpaddl.u8  q2, q2                         \n"  // R 16 bytes -> 8 shorts.
+    "vld4.8     {d8, d10, d12, d14}, [%0]!     \n"  // load 8 more ARGB pixels.
+    "vld4.8     {d9, d11, d13, d15}, [%0]!     \n"  // load last 8 ARGB pixels.
+    "vpaddl.u8  q4, q4                         \n"  // B 16 bytes -> 8 shorts.
+    "vpaddl.u8  q5, q5                         \n"  // G 16 bytes -> 8 shorts.
+    "vpaddl.u8  q6, q6                         \n"  // R 16 bytes -> 8 shorts.
+    "vpadd.u16  d0, d0, d1                     \n"  // B 16 shorts -> 8 shorts.
+    "vpadd.u16  d1, d8, d9                     \n"  // B
+    "vpadd.u16  d2, d2, d3                     \n"  // G 16 shorts -> 8 shorts.
+    "vpadd.u16  d3, d10, d11                   \n"  // G
+    "vpadd.u16  d4, d4, d5                     \n"  // R 16 shorts -> 8 shorts.
+    "vpadd.u16  d5, d12, d13                   \n"  // R
+    "subs       %3, %3, #32                    \n"  // 32 processed per loop.
+    "vmul.s16   q8, q0, q10                    \n"  // B
+    "vmls.s16   q8, q1, q11                    \n"  // G
+    "vmls.s16   q8, q2, q12                    \n"  // R
+    "vadd.u16   q8, q8, q15                    \n"  // +128 -> unsigned
+    "vmul.s16   q9, q2, q10                    \n"  // R
+    "vmls.s16   q9, q1, q14                    \n"  // G
+    "vmls.s16   q9, q0, q13                    \n"  // B
+    "vadd.u16   q9, q9, q15                    \n"  // +128 -> unsigned
+    "vqshrn.u16 d0, q8, #8                     \n"  // 16 bit to 8 bit U
+    "vqshrn.u16 d1, q9, #8                     \n"  // 16 bit to 8 bit V
+    "vst1.8     {d0}, [%1]!                    \n"  // store 8 pixels U.
+    "vst1.8     {d1}, [%2]!                    \n"  // store 8 pixels V.
+    "bgt        1b                             \n"
+  : "+r"(src_argb),  // %0
+    "+r"(dst_u),     // %1
+    "+r"(dst_v),     // %2
+    "+r"(pix)        // %3
+  :
+  : "memory", "cc", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7",
+    "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
+  );
+}
+#endif  // HAS_ARGBTOUV411ROW_NEON
+
+// 32x1 pixels -> 8x1.  pix is number of argb pixels. e.g. 32.
+#ifdef HAS_ARGBTOUV411ROW_NEON_ALT
+void ARGBToUV411Row_Alt_NEON(const uint8* src_argb, uint8* dst_u, uint8* dst_v,
+                             int pix) {
+  asm volatile (
+    "vmov.s16   q10, #112 / 4                  \n"  // UB / VR 0.875 coefficient
+    "vmov.s16   q11, #74 / 4                   \n"  // UG -0.5781 coefficient
+    "vmov.s16   q12, #38 / 4                   \n"  // UR -0.2969 coefficient
+    "vmov.s16   q13, #18 / 4                   \n"  // VB -0.1406 coefficient
+    "vmov.s16   q14, #94 / 4                   \n"  // VG -0.7344 coefficient
+    "vmov.u16   q15, #0x8080                   \n"  // 128.5
+    ".p2align   2                              \n"
+  "1:                                          \n"
+    "vld4.32    {d0, d2, d4, d6}, [%0]!        \n"  // load 8 ARGB pixels.
+    "vld4.32    {d1, d3, d5, d7}, [%0]!        \n"  // load next 8 ARGB pixels.
+    "vaddl.u8   q0, q0, q1                     \n"
+    "vaddl.u8   q2, q2, q3                     \n"
+    "vadd.u16   q4, q0, q2                     \n"  // 4 pixels <- 16
+    "vld4.32    {d0, d2, d4, d6}, [%0]!        \n"  // load 8 ARGB pixels.
+    "vld4.32    {d1, d3, d5, d7}, [%0]!        \n"  // load next 8 ARGB pixels.
+    "vaddl.u8   q0, q0, q1                     \n"
+    "vaddl.u8   q2, q2, q3                     \n"
+    "vadd.u16   q3, q0, q2                     \n"  // 4 more pixels <- 16
+    "vtbl.u16   q0, {q3, q4}, q5               \n"  // REQUIRES SETUP
+    "vtbl.u16   q1, {q3, q4}, q6               \n"
+    "vtbl.u16   q2, {q3, q4}, q7               \n"  // 8 pixels
+
+    "subs       %3, %3, #32                    \n"  // 32 processed per loop.
+    "vmul.s16   q8, q0, q10                    \n"  // B
+    "vmls.s16   q8, q1, q11                    \n"  // G
+    "vmls.s16   q8, q2, q12                    \n"  // R
+    "vadd.u16   q8, q8, q15                    \n"  // +128 -> unsigned
+
+    "vmul.s16   q9, q2, q10                    \n"  // R
+    "vmls.s16   q9, q1, q14                    \n"  // G
+    "vmls.s16   q9, q0, q13                    \n"  // B
+    "vadd.u16   q9, q9, q15                    \n"  // +128 -> unsigned
+
+    "vqshrn.u16 d0, q8, #8                     \n"  // 16 bit to 8 bit U
+    "vqshrn.u16 d1, q9, #8                     \n"  // 16 bit to 8 bit V
+
+    "vst1.8     {d0}, [%1]!                    \n"  // store 8 pixels U.
+    "vst1.8     {d1}, [%2]!                    \n"  // store 8 pixels V.
+    "bgt        1b                             \n"
+  : "+r"(src_argb),  // %0
+    "+r"(dst_u),     // %1
+    "+r"(dst_v),     // %2
+    "+r"(pix)        // %3
+  :
+  : "memory", "cc", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7",
+    "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
+  );
+}
+#endif  // HAS_ARGBTOUV411ROW_NEON_ALT
+
 #ifdef HAS_RGB565TOYROW_NEON
 void RGB565ToYRow_NEON(const uint8* src_rgb565, uint8* dst_y, int pix) {
   asm volatile (
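A note on the `#112 / 4`-style immediates: unlike the 422 kernel above, which halves two-pixel sums with `/ 2` constants, the 411 kernel keeps the raw sum of four pixels (`vpaddl.u8` then `vpadd.u16`, no shift), so the divide-by-4 is folded into the coefficients. 112 divides by 4 exactly, but 74, 38, 18, and 94 all truncate, so the NEON result can drift a couple of LSBs from the C reference; that is a plausible reason the ARGB-to-I411 tolerance is widened to 4 in the test changes below. A standalone arithmetic check of the truncation loss:

```cpp
#include <cstdio>

// For four equal samples, the C path applies the full coefficient to the
// average, while the NEON path applies (coeff / 4) to the sum. The
// effective per-coefficient loss is coeff - (coeff / 4) * 4.
int main() {
  const int coeffs[] = {112, 74, 38, 18, 94};
  for (int i = 0; i < 5; ++i) {
    int folded = (coeffs[i] / 4) * 4;  // what "#coeff / 4" effectively applies
    std::printf("coeff %3d -> folded %3d (loss %d)\n",
                coeffs[i], folded, coeffs[i] - folded);
  }
  return 0;  // losses: 0, 2, 2, 2, 2 (only 112 is a multiple of 4)
}
```

The second, `_Alt` variant is guarded by a macro nothing defines; as its `REQUIRES SETUP` comment notes, q5-q7 would need to be preloaded with table indices before it could run, so it reads as kept-for-reference experimentation rather than a live code path.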
diff --git a/unit_test/convert_test.cc b/unit_test/convert_test.cc
index d8a04c71e..82ff8d063 100644
--- a/unit_test/convert_test.cc
+++ b/unit_test/convert_test.cc
@@ -123,7 +123,7 @@ TEST_F(libyuvTest, SRC_FMT_PLANAR##To##FMT_PLANAR##N) {                       \
                     FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y)                        \
     TESTPLANARTOPI(SRC_FMT_PLANAR, SRC_SUBSAMP_X, SRC_SUBSAMP_Y,             \
                    FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y,                         \
-                   benchmark_width_ - 4, _Any, +, 0)                          \
+                   benchmark_width_ - 4, _Any, +, 0)                         \
     TESTPLANARTOPI(SRC_FMT_PLANAR, SRC_SUBSAMP_X, SRC_SUBSAMP_Y,             \
                    FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y,                         \
                    benchmark_width_, _Unaligned, +, 1)                       \
@@ -321,7 +321,7 @@ TEST_F(libyuvTest, SRC_FMT_PLANAR##To##FMT_PLANAR##N) {                       \
                      FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y)                       \
     TESTBIPLANARTOPI(SRC_FMT_PLANAR, SRC_SUBSAMP_X, SRC_SUBSAMP_Y,           \
                      FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y,                       \
-                     benchmark_width_ - 4, _Any, +, 0)                        \
+                     benchmark_width_ - 4, _Any, +, 0)                       \
     TESTBIPLANARTOPI(SRC_FMT_PLANAR, SRC_SUBSAMP_X, SRC_SUBSAMP_Y,           \
                      FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y,                       \
                      benchmark_width_, _Unaligned, +, 1)                     \
@@ -411,7 +411,7 @@ TEST_F(libyuvTest, FMT_PLANAR##To##FMT_B##N) {                                \
 #define TESTPLANARTOB(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN, \
                       DIFF)                                                  \
     TESTPLANARTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN,    \
-                   benchmark_width_ - 4, DIFF, _Any, +, 0)                    \
+                   benchmark_width_ - 4, DIFF, _Any, +, 0)                   \
     TESTPLANARTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN,    \
                    benchmark_width_, DIFF, _Unaligned, +, 1)                 \
     TESTPLANARTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN,    \
@@ -508,7 +508,7 @@ TEST_F(libyuvTest, FMT_PLANAR##To##FMT_B##N) {                                \

 #define TESTBIPLANARTOB(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, DIFF) \
     TESTBIPLANARTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B,         \
-                     benchmark_width_ - 4, DIFF, _Any, +, 0)                  \
+                     benchmark_width_ - 4, DIFF, _Any, +, 0)                 \
     TESTBIPLANARTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B,         \
                      benchmark_width_, DIFF, _Unaligned, +, 1)               \
     TESTBIPLANARTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B,         \
@@ -522,7 +522,7 @@ TESTBIPLANARTOB(NV12, 2, 2, RGB565, 2, 9)
 TESTBIPLANARTOB(NV21, 2, 2, RGB565, 2, 9)

 #define TESTATOPLANARI(FMT_A, BPP_A, FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y,       \
-                       W1280, N, NEG, OFF)                                   \
+                       W1280, DIFF, N, NEG, OFF)                             \
 TEST_F(libyuvTest, FMT_A##To##FMT_PLANAR##N) {                               \
   const int kWidth = W1280;                                                  \
   const int kHeight = benchmark_height_;                                     \
@@ -563,7 +563,7 @@ TEST_F(libyuvTest, FMT_A##To##FMT_PLANAR##N) {                                \
       }                                                                      \
     }                                                                        \
   }                                                                          \
-  EXPECT_LE(max_diff, 2);                                                    \
+  EXPECT_LE(max_diff, DIFF);                                                 \
   for (int i = 0; i < kHeight / SUBSAMP_Y; ++i) {                            \
     for (int j = 0; j < kWidth / SUBSAMP_X; ++j) {                           \
       int abs_diff =                                                         \
@@ -574,7 +574,7 @@ TEST_F(libyuvTest, FMT_A##To##FMT_PLANAR##N) {                                \
       }                                                                      \
     }                                                                        \
   }                                                                          \
-  EXPECT_LE(max_diff, 2);                                                    \
+  EXPECT_LE(max_diff, DIFF);                                                 \
   for (int i = 0; i < kHeight / SUBSAMP_Y; ++i) {                            \
     for (int j = 0; j < kWidth / SUBSAMP_X; ++j) {                           \
       int abs_diff =                                                         \
@@ -585,7 +585,7 @@ TEST_F(libyuvTest, FMT_A##To##FMT_PLANAR##N) {                                \
       }                                                                      \
     }                                                                        \
   }                                                                          \
-  EXPECT_LE(max_diff, 2);                                                    \
+  EXPECT_LE(max_diff, DIFF);                                                 \
   free_aligned_buffer_16(dst_y_c)                                            \
   free_aligned_buffer_16(dst_u_c)                                            \
   free_aligned_buffer_16(dst_v_c)                                            \
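Each `EXPECT_LE(max_diff, DIFF)` above guards the same computation: the maximum absolute per-sample difference between the C-generated plane and the optimized plane. Pulled out as a plain function for clarity (a hypothetical helper for illustration, not part of the test file):

```cpp
#include <cstdlib>  // abs()

typedef unsigned char uint8;

// Worst-case |c - opt| over a width x height plane with the given stride;
// the test macros inline this loop per plane, then EXPECT_LE(..., DIFF).
static int MaxPlaneDiff(const uint8* plane_c, const uint8* plane_opt,
                        int width, int height, int stride) {
  int max_diff = 0;
  for (int i = 0; i < height; ++i) {
    for (int j = 0; j < width; ++j) {
      int abs_diff = abs(static_cast<int>(plane_c[i * stride + j]) -
                         static_cast<int>(plane_opt[i * stride + j]));
      if (abs_diff > max_diff) {
        max_diff = abs_diff;
      }
    }
  }
  return max_diff;
}
```

Threading `DIFF` through `TESTATOPLANARI` lets each format pick its own bound: 2 stays the norm, while ARGB-to-I411 gets 4 to absorb the coarser NEON coefficients.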
@@ -595,38 +595,38 @@ TEST_F(libyuvTest, FMT_A##To##FMT_PLANAR##N) {                                \
   free_aligned_buffer_16(src_argb)                                           \
 }

-#define TESTATOPLANAR(FMT_A, BPP_A, FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y)        \
+#define TESTATOPLANAR(FMT_A, BPP_A, FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, DIFF)  \
     TESTATOPLANARI(FMT_A, BPP_A, FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y,           \
-                   benchmark_width_ - 4, _Any, +, 0)                         \
+                   benchmark_width_ - 4, DIFF, _Any, +, 0)                   \
     TESTATOPLANARI(FMT_A, BPP_A, FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y,           \
-                   benchmark_width_, _Unaligned, +, 1)                       \
+                   benchmark_width_, DIFF, _Unaligned, +, 1)                 \
     TESTATOPLANARI(FMT_A, BPP_A, FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y,           \
-                   benchmark_width_, _Invert, -, 0)                          \
+                   benchmark_width_, DIFF, _Invert, -, 0)                    \
     TESTATOPLANARI(FMT_A, BPP_A, FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y,           \
-                   benchmark_width_, _Opt, +, 0)
+                   benchmark_width_, DIFF, _Opt, +, 0)

-TESTATOPLANAR(ARGB, 4, I420, 2, 2)
-TESTATOPLANAR(BGRA, 4, I420, 2, 2)
-TESTATOPLANAR(ABGR, 4, I420, 2, 2)
-TESTATOPLANAR(RGBA, 4, I420, 2, 2)
-TESTATOPLANAR(RAW, 3, I420, 2, 2)
-TESTATOPLANAR(RGB24, 3, I420, 2, 2)
-TESTATOPLANAR(RGB565, 2, I420, 2, 2)
-TESTATOPLANAR(ARGB1555, 2, I420, 2, 2)
-TESTATOPLANAR(ARGB4444, 2, I420, 2, 2)
-TESTATOPLANAR(ARGB, 4, I411, 4, 1)
-TESTATOPLANAR(ARGB, 4, I422, 2, 1)
-TESTATOPLANAR(ARGB, 4, I444, 1, 1)
-TESTATOPLANAR(V210, 16 / 6, I420, 2, 2)
-TESTATOPLANAR(YUY2, 2, I420, 2, 2)
-TESTATOPLANAR(UYVY, 2, I420, 2, 2)
-TESTATOPLANAR(YUY2, 2, I422, 2, 1)
-TESTATOPLANAR(UYVY, 2, I422, 2, 1)
-TESTATOPLANAR(I400, 1, I420, 2, 2)
-TESTATOPLANAR(BayerBGGR, 1, I420, 2, 2)
-TESTATOPLANAR(BayerRGGB, 1, I420, 2, 2)
-TESTATOPLANAR(BayerGBRG, 1, I420, 2, 2)
-TESTATOPLANAR(BayerGRBG, 1, I420, 2, 2)
+TESTATOPLANAR(ARGB, 4, I420, 2, 2, 2)
+TESTATOPLANAR(BGRA, 4, I420, 2, 2, 2)
+TESTATOPLANAR(ABGR, 4, I420, 2, 2, 2)
+TESTATOPLANAR(RGBA, 4, I420, 2, 2, 2)
+TESTATOPLANAR(RAW, 3, I420, 2, 2, 2)
+TESTATOPLANAR(RGB24, 3, I420, 2, 2, 2)
+TESTATOPLANAR(RGB565, 2, I420, 2, 2, 2)
+TESTATOPLANAR(ARGB1555, 2, I420, 2, 2, 2)
+TESTATOPLANAR(ARGB4444, 2, I420, 2, 2, 2)
+TESTATOPLANAR(ARGB, 4, I411, 4, 1, 4)
+TESTATOPLANAR(ARGB, 4, I422, 2, 1, 2)
+TESTATOPLANAR(ARGB, 4, I444, 1, 1, 2)
+TESTATOPLANAR(V210, 16 / 6, I420, 2, 2, 2)
+TESTATOPLANAR(YUY2, 2, I420, 2, 2, 2)
+TESTATOPLANAR(UYVY, 2, I420, 2, 2, 2)
+TESTATOPLANAR(YUY2, 2, I422, 2, 1, 2)
+TESTATOPLANAR(UYVY, 2, I422, 2, 1, 2)
+TESTATOPLANAR(I400, 1, I420, 2, 2, 2)
+TESTATOPLANAR(BayerBGGR, 1, I420, 2, 2, 2)
+TESTATOPLANAR(BayerRGGB, 1, I420, 2, 2, 2)
+TESTATOPLANAR(BayerGBRG, 1, I420, 2, 2, 2)
+TESTATOPLANAR(BayerGRBG, 1, I420, 2, 2, 2)

 #define TESTATOBIPLANARI(FMT_A, BPP_A, FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y,     \
                          W1280, N, NEG, OFF)                                 \
@@ -670,8 +670,8 @@ TEST_F(libyuvTest, FMT_A##To##FMT_PLANAR##N) {                                \
   for (int i = 0; i < kHeight / SUBSAMP_Y; ++i) {                            \
     for (int j = 0; j < kWidth / SUBSAMP_X * 2; ++j) {                       \
       int abs_diff =                                                         \
-          abs(static_cast<int>(dst_uv_c[i * kWidth / SUBSAMP_X * 2 + j]) -    \
-              static_cast<int>(dst_uv_opt[i * kWidth / SUBSAMP_X * 2 + j]));  \
+          abs(static_cast<int>(dst_uv_c[i * kWidth / SUBSAMP_X * 2 + j]) -   \
+              static_cast<int>(dst_uv_opt[i * kWidth / SUBSAMP_X * 2 + j])); \
       if (abs_diff > max_diff) {                                             \
         max_diff = abs_diff;                                                 \
       }                                                                      \
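Caller-side, nothing changes except speed: `ARGBToI411` now selects the NEON UV kernel when the width is a multiple of 32 and silently falls back to the C row otherwise. A usage sketch, assuming the public declaration in `include/libyuv/convert_from_argb.h` and minimal tight-packed plane strides (the helper name and quarter-width chroma strides are illustrative):

```cpp
#include "libyuv/convert_from_argb.h"  // declares libyuv::ARGBToI411

// Convert one ARGB frame to I411 planes. With width % 32 == 0 the UV rows
// run ARGBToUV411Row_NEON; any other width uses ARGBToUV411Row_C throughout.
int ConvertFrameToI411(const uint8* argb, int width, int height,
                       uint8* dst_y, uint8* dst_u, uint8* dst_v) {
  int chroma_stride = (width + 3) / 4;  // 4:1:1 -> quarter-width chroma rows
  return libyuv::ARGBToI411(argb, width * 4,   // source stride in bytes
                            dst_y, width,      // full-resolution Y plane
                            dst_u, chroma_stride,
                            dst_v, chroma_stride,
                            width, height);    // returns 0 on success
}
```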