From 15449263c4bba75bc396dc3d60266efee6ab6c66 Mon Sep 17 00:00:00 2001
From: "fbarchard@google.com"
Date: Mon, 29 Oct 2012 16:24:53 +0000
Subject: [PATCH] NV12ToRGB565 use NV12ToARGB any row function

BUG=136
TEST=sudo LIBYUV_REPEAT=1000 nice --5 ./libyuv_unittest --gtest_filter=*565* | grep ms
Review URL: https://webrtc-codereview.appspot.com/965004

git-svn-id: http://libyuv.googlecode.com/svn/trunk@452 16f28f9a-4ce2-e073-06de-1de4eb20be90
---
 include/libyuv/row.h       |  47 ++++++++----
 source/convert_argb.cc     |   3 +-
 source/convert_from.cc     |  64 ++++++----------
 source/planar_functions.cc |  28 +++++--
 source/row_common.cc       |  41 +++++++++-
 source/row_neon.cc         |  42 +++++++++++
 source/row_posix.cc        |  20 +++++
 source/row_win.cc          | 148 ++++++++++++++++++++++++++++++++++++-
 8 files changed, 324 insertions(+), 69 deletions(-)

diff --git a/include/libyuv/row.h b/include/libyuv/row.h
index 77d2e10e9..7ea8bf088 100644
--- a/include/libyuv/row.h
+++ b/include/libyuv/row.h
@@ -87,6 +87,7 @@ extern "C" {
 #define HAS_I422TOYUY2ROW_SSE2
 #define HAS_I422TOUYVYROW_SSE2
 #define HAS_MERGEUV_SSE2
+#define HAS_I422TORGB565ROW_SSSE3
 
 // Effects
 #define HAS_ARGBAFFINEROW_SSE2
@@ -149,6 +150,7 @@ extern "C" {
 #define HAS_I422TOBGRAROW_NEON
 #define HAS_I422TORAWROW_NEON
 #define HAS_I422TORGB24ROW_NEON
+#define HAS_I422TORGB565ROW_NEON
 #define HAS_I422TORGBAROW_NEON
 #define HAS_MIRRORROW_NEON
 #define HAS_MIRRORROWUV_NEON
@@ -249,6 +251,11 @@ void I422ToRAWRow_NEON(const uint8* y_buf,
                        const uint8* v_buf,
                        uint8* rgb_buf,
                        int width);
+void I422ToRGB565Row_NEON(const uint8* y_buf,
+                          const uint8* u_buf,
+                          const uint8* v_buf,
+                          uint8* rgb_buf,
+                          int width);
 void NV12ToARGBRow_NEON(const uint8* y_buf,
                         const uint8* uv_buf,
                         uint8* rgb_buf,
@@ -474,6 +481,11 @@ void I422ToRAWRow_C(const uint8* y_buf,
                     const uint8* v_buf,
                     uint8* raw_buf,
                     int width);
+void I422ToRGB565Row_C(const uint8* y_buf,
+                       const uint8* u_buf,
+                       const uint8* v_buf,
+                       uint8* dst_rgb565,
+                       int width);
 
 void YToARGBRow_C(const uint8* y_buf,
                   uint8* rgb_buf,
@@ -524,6 +536,11 @@ void I422ToRGBARow_SSSE3(const uint8* y_buf,
                          const uint8* v_buf,
                          uint8* rgba_buf,
                          int width);
+void I422ToRGB565Row_SSSE3(const uint8* y_buf,
+                           const uint8* u_buf,
+                           const uint8* v_buf,
+                           uint8* rgb_buf,
+                           int width);
 
 // RGB24/RAW are unaligned.
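// Annotation (illustrative, not part of the patch): every new *RGB565* row
// declared here packs one pixel into 16 bits -- 5 bits blue, 6 bits green,
// 5 bits red, blue in the low bits -- i.e. the scalar equivalent of:
//   uint16 rgb565 = (uint16)((b >> 3) | ((g >> 2) << 5) | ((r >> 3) << 11));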
 void I422ToRGB24Row_SSSE3(const uint8* y_buf,
@@ -543,92 +560,87 @@ void I444ToARGBRow_Unaligned_SSSE3(const uint8* y_buf,
                                    const uint8* v_buf,
                                    uint8* argb_buf,
                                    int width);
-
 void I422ToARGBRow_Unaligned_SSSE3(const uint8* y_buf,
                                    const uint8* u_buf,
                                    const uint8* v_buf,
                                    uint8* argb_buf,
                                    int width);
-
 void I411ToARGBRow_Unaligned_SSSE3(const uint8* y_buf,
                                    const uint8* u_buf,
                                    const uint8* v_buf,
                                    uint8* rgb_buf,
                                    int width);
-
 void NV12ToARGBRow_Unaligned_SSSE3(const uint8* y_buf,
                                    const uint8* uv_buf,
                                    uint8* argb_buf,
                                    int width);
-
 void NV21ToARGBRow_Unaligned_SSSE3(const uint8* y_buf,
                                    const uint8* vu_buf,
                                    uint8* argb_buf,
                                    int width);
-
 void I422ToBGRARow_Unaligned_SSSE3(const uint8* y_buf,
                                    const uint8* u_buf,
                                    const uint8* v_buf,
                                    uint8* bgra_buf,
                                    int width);
-
 void I422ToABGRRow_Unaligned_SSSE3(const uint8* y_buf,
                                    const uint8* u_buf,
                                    const uint8* v_buf,
                                    uint8* abgr_buf,
                                    int width);
-
 void I422ToRGBARow_Unaligned_SSSE3(const uint8* y_buf,
                                    const uint8* u_buf,
                                    const uint8* v_buf,
                                    uint8* rgba_buf,
                                    int width);
-
+void I422ToRGB565Row_Unaligned_SSSE3(const uint8* y_buf,
+                                     const uint8* u_buf,
+                                     const uint8* v_buf,
+                                     uint8* rgb_buf,
+                                     int width);
 void I444ToARGBRow_Any_SSSE3(const uint8* y_buf,
                              const uint8* u_buf,
                              const uint8* v_buf,
                              uint8* argb_buf,
                              int width);
-
 void I422ToARGBRow_Any_SSSE3(const uint8* y_buf,
                              const uint8* u_buf,
                              const uint8* v_buf,
                              uint8* argb_buf,
                              int width);
-
 void I411ToARGBRow_Any_SSSE3(const uint8* y_buf,
                              const uint8* u_buf,
                              const uint8* v_buf,
                              uint8* rgb_buf,
                              int width);
-
 void NV12ToARGBRow_Any_SSSE3(const uint8* y_buf,
                              const uint8* uv_buf,
                              uint8* argb_buf,
                              int width);
-
 void NV21ToARGBRow_Any_SSSE3(const uint8* y_buf,
                              const uint8* vu_buf,
                              uint8* argb_buf,
                              int width);
-
 void I422ToBGRARow_Any_SSSE3(const uint8* y_buf,
                              const uint8* u_buf,
                              const uint8* v_buf,
                              uint8* bgra_buf,
                              int width);
-
 void I422ToABGRRow_Any_SSSE3(const uint8* y_buf,
                              const uint8* u_buf,
                              const uint8* v_buf,
                              uint8* abgr_buf,
                              int width);
-
 void I422ToRGBARow_Any_SSSE3(const uint8* y_buf,
                              const uint8* u_buf,
                              const uint8* v_buf,
                              uint8* rgba_buf,
                              int width);
+void I422ToRGB565Row_Any_SSSE3(const uint8* y_buf,
+                               const uint8* u_buf,
+                               const uint8* v_buf,
+                               uint8* rgb_buf,
+                               int width);
 
 // RGB24/RAW are unaligned.
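// Annotation (illustrative, not part of the patch): the declarations above
// complete the file's usual four-tier pattern, which I420ToRGB565 in
// convert_from.cc below selects from at run time:
//   I422ToRGB565Row_C                -- scalar fallback; any width, any alignment.
//   I422ToRGB565Row_SSSE3 / _NEON    -- SIMD fast path; width a multiple of 8
//                                       (the SSSE3 version also wants a 16-byte
//                                       aligned destination and stride).
//   I422ToRGB565Row_Unaligned_SSSE3  -- the same fast path with unaligned stores.
//   I422ToRGB565Row_Any_*            -- wrappers that run SIMD on the largest
//                                       multiple-of-8 prefix and finish the last
//                                       1-7 pixels with the C version.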
 void I422ToRGB24Row_Any_SSSE3(const uint8* y_buf,
@@ -711,6 +723,11 @@ void I422ToRAWRow_Any_NEON(const uint8* y_buf,
                            const uint8* v_buf,
                            uint8* rgb_buf,
                            int width);
+void I422ToRGB565Row_Any_NEON(const uint8* y_buf,
+                              const uint8* u_buf,
+                              const uint8* v_buf,
+                              uint8* rgb_buf,
+                              int width);
 void NV12ToARGBRow_Any_NEON(const uint8* y_buf,
                             const uint8* uv_buf,
                             uint8* argb_buf,
diff --git a/source/convert_argb.cc b/source/convert_argb.cc
index 5b8d285cd..5a0df8a42 100644
--- a/source/convert_argb.cc
+++ b/source/convert_argb.cc
@@ -575,8 +575,7 @@ int NV12ToARGB(const uint8* src_y, int src_stride_y,
       }
     }
   }
-#endif
-#if defined(HAS_NV12TOARGBROW_NEON)
+#elif defined(HAS_NV12TOARGBROW_NEON)
   if (TestCpuFlag(kCpuHasNEON) && width >= 8) {
     NV12ToARGBRow = NV12ToARGBRow_Any_NEON;
     if (IS_ALIGNED(width, 8)) {
diff --git a/source/convert_from.cc b/source/convert_from.cc
index 027006c7c..4b12a4cc1 100644
--- a/source/convert_from.cc
+++ b/source/convert_from.cc
@@ -921,65 +921,45 @@ LIBYUV_API
 int I420ToRGB565(const uint8* src_y, int src_stride_y,
                  const uint8* src_u, int src_stride_u,
                  const uint8* src_v, int src_stride_v,
-                 uint8* dst_rgb, int dst_stride_rgb,
+                 uint8* dst_rgb565, int dst_stride_rgb565,
                  int width, int height) {
-  if (!src_y || !src_u || !src_v ||
-      !dst_rgb ||
+  if (!src_y || !src_u || !src_v || !dst_rgb565 ||
       width <= 0 || height == 0) {
     return -1;
   }
   // Negative height means invert the image.
   if (height < 0) {
     height = -height;
-    dst_rgb = dst_rgb + (height - 1) * dst_stride_rgb;
-    dst_stride_rgb = -dst_stride_rgb;
+    dst_rgb565 = dst_rgb565 + (height - 1) * dst_stride_rgb565;
+    dst_stride_rgb565 = -dst_stride_rgb565;
   }
-  void (*I422ToARGBRow)(const uint8* y_buf,
-                        const uint8* u_buf,
-                        const uint8* v_buf,
-                        uint8* rgb_buf,
-                        int width) = I422ToARGBRow_C;
-#if defined(HAS_I422TOARGBROW_NEON)
-  if (TestCpuFlag(kCpuHasNEON)) {
-    I422ToARGBRow = I422ToARGBRow_Any_NEON;
-    if (IS_ALIGNED(width, 16)) {
-      I422ToARGBRow = I422ToARGBRow_NEON;
-    }
-  }
-#elif defined(HAS_I422TOARGBROW_SSSE3)
+  void (*I422ToRGB565Row)(const uint8* y_buf,
+                          const uint8* u_buf,
+                          const uint8* v_buf,
+                          uint8* rgb_buf,
+                          int width) = I422ToRGB565Row_C;
+#if defined(HAS_I422TORGB565ROW_SSSE3)
   if (TestCpuFlag(kCpuHasSSSE3) && width >= 8) {
-    I422ToARGBRow = I422ToARGBRow_Any_SSSE3;
+    I422ToRGB565Row = I422ToRGB565Row_Any_SSSE3;
     if (IS_ALIGNED(width, 8)) {
-      I422ToARGBRow = I422ToARGBRow_SSSE3;
+      I422ToRGB565Row = I422ToRGB565Row_Unaligned_SSSE3;
+      if (IS_ALIGNED(dst_rgb565, 16) && IS_ALIGNED(dst_stride_rgb565, 16)) {
+        I422ToRGB565Row = I422ToRGB565Row_SSSE3;
+      }
     }
   }
-#elif defined(HAS_I422TOARGBROW_MIPS_DSPR2)
-  if (TestCpuFlag(kCpuHasMIPS_DSPR2) && IS_ALIGNED(width, 4) &&
-      IS_ALIGNED(src_y, 4) && IS_ALIGNED(src_stride_y, 4) &&
-      IS_ALIGNED(src_u, 2) && IS_ALIGNED(src_stride_u, 2) &&
-      IS_ALIGNED(src_v, 2) && IS_ALIGNED(src_stride_v, 2)) {
-    I422ToARGBRow = I422ToARGBRow_MIPS_DSPR2;
-  }
-#endif
-
-  SIMD_ALIGNED(uint8 row[kMaxStride]);
-  void (*ARGBToRGB565Row)(const uint8* src_rgb, uint8* dst_rgb, int pix) =
-      ARGBToRGB565Row_C;
-#if defined(HAS_ARGBTORGB565ROW_SSE2)
-  if (TestCpuFlag(kCpuHasSSE2)) {
-    if (width * 2 <= kMaxStride) {
-      ARGBToRGB565Row = ARGBToRGB565Row_Any_SSE2;
-    }
-    if (IS_ALIGNED(width, 4)) {
-      ARGBToRGB565Row = ARGBToRGB565Row_SSE2;
+#elif defined(HAS_I422TORGB565ROW_NEON)
+  if (TestCpuFlag(kCpuHasNEON) && width >= 8) {
+    I422ToRGB565Row = I422ToRGB565Row_Any_NEON;
+    if (IS_ALIGNED(width, 8)) {
+      I422ToRGB565Row = I422ToRGB565Row_NEON;
     }
   }
 #endif
 
   for (int y = 0; y < height; ++y) {
-    I422ToARGBRow(src_y, src_u, src_v, row, width);
-    ARGBToRGB565Row(row, dst_rgb, width);
-    dst_rgb += dst_stride_rgb;
+    I422ToRGB565Row(src_y, src_u, src_v, dst_rgb565, width);
+    dst_rgb565 += dst_stride_rgb565;
     src_y += src_stride_y;
     if (y & 1) {
       src_u += src_stride_u;
diff --git a/source/planar_functions.cc b/source/planar_functions.cc
index 58b6ace2f..83efdad08 100644
--- a/source/planar_functions.cc
+++ b/source/planar_functions.cc
@@ -574,12 +574,18 @@ int NV12ToRGB565(const uint8* src_y, int src_stride_y,
                         uint8* rgb_buf,
                         int width) = NV12ToARGBRow_C;
 #if defined(HAS_NV12TOARGBROW_SSSE3)
-  if (TestCpuFlag(kCpuHasSSSE3) && IS_ALIGNED(width, 8)) {
-    NV12ToARGBRow = NV12ToARGBRow_SSSE3;
+  if (TestCpuFlag(kCpuHasSSSE3) && width >= 8) {
+    NV12ToARGBRow = NV12ToARGBRow_Any_SSSE3;
+    if (IS_ALIGNED(width, 8)) {
+      NV12ToARGBRow = NV12ToARGBRow_SSSE3;
+    }
   }
 #elif defined(HAS_NV12TOARGBROW_NEON)
-  if (TestCpuFlag(kCpuHasNEON) && IS_ALIGNED(width, 8)) {
-    NV12ToARGBRow = NV12ToARGBRow_NEON;
+  if (TestCpuFlag(kCpuHasNEON) && width >= 8) {
+    NV12ToARGBRow = NV12ToARGBRow_Any_NEON;
+    if (IS_ALIGNED(width, 8)) {
+      NV12ToARGBRow = NV12ToARGBRow_NEON;
+    }
   }
 #endif
   if (width * 4 > kMaxStride) {
@@ -629,12 +635,18 @@ int NV21ToRGB565(const uint8* src_y, int src_stride_y,
                         uint8* rgb_buf,
                         int width) = NV21ToARGBRow_C;
 #if defined(HAS_NV21TOARGBROW_SSSE3)
-  if (TestCpuFlag(kCpuHasSSSE3) && IS_ALIGNED(width, 8)) {
-    NV21ToARGBRow = NV21ToARGBRow_SSSE3;
+  if (TestCpuFlag(kCpuHasSSSE3) && width >= 8) {
+    NV21ToARGBRow = NV21ToARGBRow_Any_SSSE3;
+    if (IS_ALIGNED(width, 8)) {
+      NV21ToARGBRow = NV21ToARGBRow_SSSE3;
+    }
   }
 #elif defined(HAS_NV21TOARGBROW_NEON)
-  if (TestCpuFlag(kCpuHasNEON) && IS_ALIGNED(width, 8)) {
-    NV21ToARGBRow = NV21ToARGBRow_NEON;
+  if (TestCpuFlag(kCpuHasNEON) && width >= 8) {
+    NV21ToARGBRow = NV21ToARGBRow_Any_NEON;
+    if (IS_ALIGNED(width, 8)) {
+      NV21ToARGBRow = NV21ToARGBRow_NEON;
+    }
   }
 #endif
   if (width * 4 > kMaxStride) {
diff --git a/source/row_common.cc b/source/row_common.cc
index 3c353ff42..0268b7217 100644
--- a/source/row_common.cc
+++ b/source/row_common.cc
@@ -538,7 +538,43 @@ void I422ToRAWRow_C(const uint8* y_buf,
   }
   if (width & 1) {
     YuvPixel2(y_buf[0], u_buf[0], v_buf[0],
-              rgb_buf + 0, rgb_buf + 1, rgb_buf + 2);
+              rgb_buf + 2, rgb_buf + 1, rgb_buf + 0);
   }
 }
+
+void I422ToRGB565Row_C(const uint8* y_buf,
+                       const uint8* u_buf,
+                       const uint8* v_buf,
+                       uint8* dst_rgb565,
+                       int width) {
+  uint8 b0;
+  uint8 g0;
+  uint8 r0;
+  uint8 b1;
+  uint8 g1;
+  uint8 r1;
+  for (int x = 0; x < width - 1; x += 2) {
+    YuvPixel2(y_buf[0], u_buf[0], v_buf[0], &b0, &g0, &r0);
+    YuvPixel2(y_buf[1], u_buf[0], v_buf[0], &b1, &g1, &r1);
+    b0 = b0 >> 3;
+    g0 = g0 >> 2;
+    r0 = r0 >> 3;
+    b1 = b1 >> 3;
+    g1 = g1 >> 2;
+    r1 = r1 >> 3;
+    *reinterpret_cast<uint32*>(dst_rgb565) = b0 | (g0 << 5) | (r0 << 11) |
+        (b1 << 16) | (g1 << 21) | (r1 << 27);
+    y_buf += 2;
+    u_buf += 1;
+    v_buf += 1;
+    dst_rgb565 += 4;  // Advance 2 pixels.
+  }
+  if (width & 1) {
+    YuvPixel2(y_buf[0], u_buf[0], v_buf[0], &b0, &g0, &r0);
+    b0 = b0 >> 3;
+    g0 = g0 >> 2;
+    r0 = r0 >> 3;
+    *reinterpret_cast<uint16*>(dst_rgb565) = b0 | (g0 << 5) | (r0 << 11);
+  }
+}
 
@@ -1027,6 +1063,8 @@ YANY(I422ToABGRRow_Any_SSSE3, I422ToABGRRow_Unaligned_SSSE3,
      I422ToABGRRow_C, 1, 4, 7)
 YANY(I422ToRGBARow_Any_SSSE3, I422ToRGBARow_Unaligned_SSSE3,
      I422ToRGBARow_C, 1, 4, 7)
+YANY(I422ToRGB565Row_Any_SSSE3, I422ToRGB565Row_Unaligned_SSSE3,
+     I422ToRGB565Row_C, 1, 2, 7)
 // I422ToRGB24Row_SSSE3 is unaligned.
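// Annotation: a hedged sketch of what the YANY(...) line just above expands
// to, reading its arguments as UV_SHIFT = 1 (U and V advance at half the Y
// rate), BPP = 2 (RGB565 output bytes per pixel) and MASK = 7; shown for
// orientation only -- the authoritative body is the YANY macro in this file:
//
//   void I422ToRGB565Row_Any_SSSE3(const uint8* y_buf, const uint8* u_buf,
//                                  const uint8* v_buf, uint8* rgb_buf,
//                                  int width) {
//     int n = width & ~7;  // SIMD covers the multiple-of-8 body...
//     I422ToRGB565Row_Unaligned_SSSE3(y_buf, u_buf, v_buf, rgb_buf, n);
//     I422ToRGB565Row_C(y_buf + n, u_buf + (n >> 1), v_buf + (n >> 1),
//                       rgb_buf + n * 2, width & 7);  // ...C does the rest.
//   }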
 YANY(I422ToRGB24Row_Any_SSSE3, I422ToRGB24Row_SSSE3, I422ToRGB24Row_C, 1, 3, 7)
 YANY(I422ToRAWRow_Any_SSSE3, I422ToRAWRow_SSSE3, I422ToRAWRow_C, 1, 3, 7)
@@ -1040,6 +1078,7 @@ YANY(I422ToABGRRow_Any_NEON, I422ToABGRRow_NEON, I422ToABGRRow_C, 1, 4, 7)
 YANY(I422ToRGBARow_Any_NEON, I422ToRGBARow_NEON, I422ToRGBARow_C, 1, 4, 7)
 YANY(I422ToRGB24Row_Any_NEON, I422ToRGB24Row_NEON, I422ToRGB24Row_C, 1, 3, 7)
 YANY(I422ToRAWRow_Any_NEON, I422ToRAWRow_NEON, I422ToRAWRow_C, 1, 3, 7)
+YANY(I422ToRGB565Row_Any_NEON, I422ToRGB565Row_NEON, I422ToRGB565Row_C, 1, 2, 7)
 YANY(I422ToYUY2Row_Any_NEON, I422ToYUY2Row_NEON, I422ToYUY2Row_C, 1, 2, 15)
 YANY(I422ToUYVYRow_Any_NEON, I422ToUYVYRow_NEON, I422ToUYVYRow_C, 1, 2, 15)
 #endif  // HAS_I422TOARGBROW_NEON
diff --git a/source/row_neon.cc b/source/row_neon.cc
index 9df1a2c3a..a2fd9f43b 100644
--- a/source/row_neon.cc
+++ b/source/row_neon.cc
@@ -273,6 +273,48 @@ void I422ToRAWRow_NEON(const uint8* src_y,
 }
 #endif  // HAS_I422TORAWROW_NEON
 
+#ifdef HAS_I422TORGB565ROW_NEON
+void I422ToRGB565Row_NEON(const uint8* src_y,
+                          const uint8* src_u,
+                          const uint8* src_v,
+                          uint8* dst_rgb565,
+                          int width) {
+  asm volatile (
+    "vld1.u8    {d24}, [%5]                    \n"
+    "vld1.u8    {d25}, [%6]                    \n"
+    "vmov.u8    d26, #128                      \n"
+    "vmov.u16   q14, #74                       \n"
+    "vmov.u16   q15, #16                       \n"
+    ".p2align  2                               \n"
+  "1:                                          \n"
+    READYUV422
+    YUV422TORGB
+    "subs       %4, %4, #8                     \n"
+    "vshr.u8    d20, d20, #3                   \n"  // B
+    "vshr.u8    d21, d21, #2                   \n"  // G
+    "vshr.u8    d22, d22, #3                   \n"  // R
+    "vmovl.u8   q8, d20                        \n"  // B
+    "vmovl.u8   q9, d21                        \n"  // G
+    "vmovl.u8   q10, d22                       \n"  // R
+    "vshl.u16   q9, q9, #5                     \n"  // G
+    "vshl.u16   q10, q10, #11                  \n"  // R
+    "vorr       q0, q8, q9                     \n"  // BG
+    "vorr       q0, q0, q10                    \n"  // BGR
+    "vst1.8     {q0}, [%3]!                    \n"  // store 8 pixels RGB565.
+    "bgt        1b                             \n"
+    : "+r"(src_y),       // %0
+      "+r"(src_u),       // %1
+      "+r"(src_v),       // %2
+      "+r"(dst_rgb565),  // %3
+      "+r"(width)        // %4
+    : "r"(&kUVToRB),     // %5
+      "r"(&kUVToG)       // %6
+    : "cc", "memory", "q0", "q1", "q2", "q3",
+      "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
+  );
+}
+#endif  // HAS_I422TORGB565ROW_NEON
+
 #ifdef HAS_NV12TOARGBROW_NEON
 void NV12ToARGBRow_NEON(const uint8* src_y,
                         const uint8* src_uv,
diff --git a/source/row_posix.cc b/source/row_posix.cc
index 1078ed654..ca97f1af6 100644
--- a/source/row_posix.cc
+++ b/source/row_posix.cc
@@ -4276,6 +4276,26 @@ void I422ToUYVYRow_SSE2(const uint8* src_y,
   );
 }
 
+void I422ToRGB565Row_SSSE3(const uint8* y_buf,
+                           const uint8* u_buf,
+                           const uint8* v_buf,
+                           uint8* rgb_buf,
+                           int width) {
+  SIMD_ALIGNED(uint8 row[kMaxStride]);
+  I422ToARGBRow_SSSE3(y_buf, u_buf, v_buf, row, width);
+  ARGBToRGB565Row_SSE2(row, rgb_buf, width);
+}
+
+void I422ToRGB565Row_Unaligned_SSSE3(const uint8* y_buf,
+                                     const uint8* u_buf,
+                                     const uint8* v_buf,
+                                     uint8* rgb_buf,
+                                     int width) {
+  SIMD_ALIGNED(uint8 row[kMaxStride]);
+  I422ToARGBRow_SSSE3(y_buf, u_buf, v_buf, row, width);
+  ARGBToRGB565Row_SSE2(row, rgb_buf, width);
+}
+
 #endif  // defined(__x86_64__) || defined(__i386__)
 
 #ifdef __cplusplus
diff --git a/source/row_win.cc b/source/row_win.cc
index 086a73544..6268f83f7 100644
--- a/source/row_win.cc
+++ b/source/row_win.cc
@@ -581,7 +581,7 @@ __asm {
     por        xmm0, xmm1       // BGR
     packssdw   xmm0, xmm0
     lea        eax, [eax + 16]
-    movq       qword ptr [edx], xmm0  // store 4 pixels of ARGB1555
+    movq       qword ptr [edx], xmm0  // store 4 pixels of RGB565
     lea        edx, [edx + 8]
     sub        ecx, 4
     jg         convertloop
@@ -1757,6 +1757,79 @@ void I422ToRAWRow_SSSE3(const uint8* y_buf,
   }
 }
 
+// 8 pixels, dest aligned 16.
+// 4 UV values upsampled to 8 UV, mixed with 8 Y producing 8 ARGB (32 bytes).
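// Annotation for the function below: the three RGB565 field masks are built
// in registers rather than loaded from memory -- pcmpeqb xmmN, xmmN sets all
// 128 bits, then per-doubleword shifts carve out each field. Scalar
// equivalents of the constants:
//   0xffffffffu >> 27        == 0x0000001f  // 5-bit blue field
//   (0xffffffffu >> 26) << 5 == 0x000007e0  // 6-bit green field
//   0xffffffffu << 11        == 0xfffff800  // 5-bit red field, plus sign-fill
//       // bits that let packssdw keep each low 16 bits without saturating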
+__declspec(naked) __declspec(align(16))
+void I422ToRGB565Row_SSSE3(const uint8* y_buf,
+                           const uint8* u_buf,
+                           const uint8* v_buf,
+                           uint8* rgb565_buf,
+                           int width) {
+  __asm {
+    push       esi
+    push       edi
+    mov        eax, [esp + 8 + 4]   // Y
+    mov        esi, [esp + 8 + 8]   // U
+    mov        edi, [esp + 8 + 12]  // V
+    mov        edx, [esp + 8 + 16]  // rgb565
+    mov        ecx, [esp + 8 + 20]  // width
+    sub        edi, esi
+    pxor       xmm4, xmm4
+    pcmpeqb    xmm5, xmm5           // generate mask 0x0000001f
+    psrld      xmm5, 27
+    pcmpeqb    xmm6, xmm6           // generate mask 0x000007e0
+    psrld      xmm6, 26
+    pslld      xmm6, 5
+    pcmpeqb    xmm7, xmm7           // generate mask 0xfffff800
+    pslld      xmm7, 11
+
+    align      16
+ convertloop:
+    READYUV422
+    YUVTORGB
+
+    // Step 3: Weave into RRGB
+    punpcklbw  xmm0, xmm1           // BG
+    punpcklbw  xmm2, xmm2           // RR
+    movdqa     xmm1, xmm0
+    punpcklwd  xmm0, xmm2           // BGRR first 4 pixels
+    punpckhwd  xmm1, xmm2           // BGRR next 4 pixels
+
+    // Step 3b: RRGB -> RGB565
+    movdqa     xmm3, xmm0           // B  first 4 pixels of argb
+    movdqa     xmm2, xmm0           // G
+    pslld      xmm0, 8              // R
+    psrld      xmm3, 3              // B
+    psrld      xmm2, 5              // G
+    psrad      xmm0, 16             // R
+    pand       xmm3, xmm5           // B
+    pand       xmm2, xmm6           // G
+    pand       xmm0, xmm7           // R
+    por        xmm3, xmm2           // BG
+    por        xmm0, xmm3           // BGR
+    movdqa     xmm3, xmm1           // B  next 4 pixels of argb
+    movdqa     xmm2, xmm1           // G
+    pslld      xmm1, 8              // R
+    psrld      xmm3, 3              // B
+    psrld      xmm2, 5              // G
+    psrad      xmm1, 16             // R
+    pand       xmm3, xmm5           // B
+    pand       xmm2, xmm6           // G
+    pand       xmm1, xmm7           // R
+    por        xmm3, xmm2           // BG
+    por        xmm1, xmm3           // BGR
+    packssdw   xmm0, xmm1
+    sub        ecx, 8
+    movdqa     [edx], xmm0          // store 8 pixels of RGB565
+    lea        edx, [edx + 16]
+    jg         convertloop
+
+    pop        edi
+    pop        esi
+    ret
+  }
+}
+
 // 8 pixels, dest aligned 16.
 // 4 UV values upsampled to 8 UV, mixed with 8 Y producing 8 ARGB (32 bytes).
 __declspec(naked) __declspec(align(16))
@@ -2050,6 +2123,78 @@ void I411ToARGBRow_Unaligned_SSSE3(const uint8* y_buf,
   }
 }
 
+// 8 pixels, dest aligned 16.
+// 4 UV values upsampled to 8 UV, mixed with 8 Y producing 8 ARGB (32 bytes).
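// Annotation: the _Unaligned_ variant that follows is identical to
// I422ToRGB565Row_SSSE3 above except for its final store, which uses movdqu
// (no alignment requirement) in place of movdqa (16-byte aligned).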
+__declspec(naked) __declspec(align(16))
+void I422ToRGB565Row_Unaligned_SSSE3(const uint8* y_buf,
+                                     const uint8* u_buf,
+                                     const uint8* v_buf,
+                                     uint8* rgb565_buf,
+                                     int width) {
+  __asm {
+    push       esi
+    push       edi
+    mov        eax, [esp + 8 + 4]   // Y
+    mov        esi, [esp + 8 + 8]   // U
+    mov        edi, [esp + 8 + 12]  // V
+    mov        edx, [esp + 8 + 16]  // rgb565
+    mov        ecx, [esp + 8 + 20]  // width
+    sub        edi, esi
+    pxor       xmm4, xmm4
+    pcmpeqb    xmm5, xmm5           // generate mask 0x0000001f
+    psrld      xmm5, 27
+    pcmpeqb    xmm6, xmm6           // generate mask 0x000007e0
+    psrld      xmm6, 26
+    pslld      xmm6, 5
+    pcmpeqb    xmm7, xmm7           // generate mask 0xfffff800
+    pslld      xmm7, 11
+
+    align      16
+ convertloop:
+    READYUV422
+    YUVTORGB
+
+    // Step 3: Weave into RRGB
+    punpcklbw  xmm0, xmm1           // BG
+    punpcklbw  xmm2, xmm2           // RR
+    movdqa     xmm1, xmm0
+    punpcklwd  xmm0, xmm2           // BGRR first 4 pixels
+    punpckhwd  xmm1, xmm2           // BGRR next 4 pixels
+
+    // Step 3b: RRGB -> RGB565
+    movdqa     xmm3, xmm0           // B  first 4 pixels of argb
+    movdqa     xmm2, xmm0           // G
+    pslld      xmm0, 8              // R
+    psrld      xmm3, 3              // B
+    psrld      xmm2, 5              // G
+    psrad      xmm0, 16             // R
+    pand       xmm3, xmm5           // B
+    pand       xmm2, xmm6           // G
+    pand       xmm0, xmm7           // R
+    por        xmm3, xmm2           // BG
+    por        xmm0, xmm3           // BGR
+    movdqa     xmm3, xmm1           // B  next 4 pixels of argb
+    movdqa     xmm2, xmm1           // G
+    pslld      xmm1, 8              // R
+    psrld      xmm3, 3              // B
+    psrld      xmm2, 5              // G
+    psrad      xmm1, 16             // R
+    pand       xmm3, xmm5           // B
+    pand       xmm2, xmm6           // G
+    pand       xmm1, xmm7           // R
+    por        xmm3, xmm2           // BG
+    por        xmm1, xmm3           // BGR
+    packssdw   xmm0, xmm1
+    sub        ecx, 8
+    movdqu     [edx], xmm0          // store 8 pixels of RGB565
+    lea        edx, [edx + 16]
+    jg         convertloop
+
+    pop        edi
+    pop        esi
+    ret
+  }
+}
 
 // 8 pixels, dest aligned 16.
 // 4 UV values upsampled to 8 UV, mixed with 8 Y producing 8 ARGB (32 bytes).
@@ -4419,6 +4564,7 @@ void I422ToUYVYRow_SSE2(const uint8* src_y,
     ret
   }
 }
+
 #endif  // _M_IX86
 
 #ifdef __cplusplus
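Usage note (illustrative, not part of the patch): callers see no API change --
the public converters keep their signatures and simply run the new fused row
functions internally. A minimal sketch of exercising the rewritten path,
assuming the umbrella "libyuv.h" header and tightly packed I420 planes:

  #include "libyuv.h"

  // Convert one I420 frame to RGB565 (2 bytes per output pixel).
  // Returns 0 on success, per the I420ToRGB565 contract shown above.
  int ConvertFrame(const uint8* y, const uint8* u, const uint8* v,
                   uint8* dst_rgb565, int width, int height) {
    return libyuv::I420ToRGB565(y, width,      // Y plane and its stride
                                u, width / 2,  // U plane, half-width stride
                                v, width / 2,  // V plane, half-width stride
                                dst_rgb565, width * 2,  // 16-bit output rows
                                width, height);  // pass -height to flip
  }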