diff --git a/README.chromium b/README.chromium
index f68fdb4e5..c86ef0c85 100644
--- a/README.chromium
+++ b/README.chromium
@@ -1,6 +1,6 @@
 Name: libyuv
 URL: http://code.google.com/p/libyuv/
-Version: 1613
+Version: 1614
 License: BSD
 License File: LICENSE
diff --git a/include/libyuv/planar_functions.h b/include/libyuv/planar_functions.h
index 881b0c5c6..9662516c5 100644
--- a/include/libyuv/planar_functions.h
+++ b/include/libyuv/planar_functions.h
@@ -39,6 +39,20 @@ void SetPlane(uint8* dst_y, int dst_stride_y,
               int width, int height,
               uint32 value);
 
+// Split interleaved UV plane into separate U and V planes.
+LIBYUV_API
+void SplitUVPlane(const uint8* src_uv, int src_stride_uv,
+                  uint8* dst_u, int dst_stride_u,
+                  uint8* dst_v, int dst_stride_v,
+                  int width, int height);
+
+// Merge separate U and V planes into one interleaved UV plane.
+LIBYUV_API
+void MergeUVPlane(const uint8* src_u, int src_stride_u,
+                  const uint8* src_v, int src_stride_v,
+                  uint8* dst_uv, int dst_stride_uv,
+                  int width, int height);
+
 // Copy I400. Supports inverting.
 LIBYUV_API
 int I400ToI400(const uint8* src_y, int src_stride_y,
diff --git a/include/libyuv/version.h b/include/libyuv/version.h
index d4c83e29a..d4e89a235 100644
--- a/include/libyuv/version.h
+++ b/include/libyuv/version.h
@@ -11,6 +11,6 @@
 #ifndef INCLUDE_LIBYUV_VERSION_H_  // NOLINT
 #define INCLUDE_LIBYUV_VERSION_H_
 
-#define LIBYUV_VERSION 1613
+#define LIBYUV_VERSION 1614
 
 #endif  // INCLUDE_LIBYUV_VERSION_H_  NOLINT
diff --git a/source/convert.cc b/source/convert.cc
index cd667bf56..a33742d24 100644
--- a/source/convert.cc
+++ b/source/convert.cc
@@ -40,14 +40,14 @@ static int I4xxToI420(const uint8* src_y, int src_stride_y,
   const int dst_y_height = Abs(src_y_height);
   const int dst_uv_width = SUBSAMPLE(dst_y_width, 1, 1);
   const int dst_uv_height = SUBSAMPLE(dst_y_height, 1, 1);
-  if (src_y_width == 0 || src_y_height == 0 ||
-      src_uv_width == 0 || src_uv_height == 0) {
+  if (src_uv_width == 0 || src_uv_height == 0) {
     return -1;
   }
-  // TODO(fbarchard): support NULL for dst_y
-  ScalePlane(src_y, src_stride_y, src_y_width, src_y_height,
-             dst_y, dst_stride_y, dst_y_width, dst_y_height,
-             kFilterBilinear);
+  if (dst_y) {
+    ScalePlane(src_y, src_stride_y, src_y_width, src_y_height,
+               dst_y, dst_stride_y, dst_y_width, dst_y_height,
+               kFilterBilinear);
+  }
   ScalePlane(src_u, src_stride_u, src_uv_width, src_uv_height,
              dst_u, dst_stride_u, dst_uv_width, dst_uv_height,
              kFilterBilinear);
@@ -229,75 +229,6 @@ static void CopyPlane2(const uint8* src, int src_stride_0, int src_stride_1,
   }
 }
 
-// Support function for NV12 etc UV channels.
-// Width and height are plane sizes (typically half pixel width).
-static void SplitUVPlane(const uint8* src_uv, int src_stride_uv,
-                         uint8* dst_u, int dst_stride_u,
-                         uint8* dst_v, int dst_stride_v,
-                         int width, int height) {
-  int y;
-  void (*SplitUVRow)(const uint8* src_uv, uint8* dst_u, uint8* dst_v,
-                     int width) = SplitUVRow_C;
-  // Negative height means invert the image.
-  if (height < 0) {
-    height = -height;
-    dst_u = dst_u + (height - 1) * dst_stride_u;
-    dst_v = dst_v + (height - 1) * dst_stride_v;
-    dst_stride_u = -dst_stride_u;
-    dst_stride_v = -dst_stride_v;
-  }
-  // Coalesce rows.
-  if (src_stride_uv == width * 2 &&
-      dst_stride_u == width &&
-      dst_stride_v == width) {
-    width *= height;
-    height = 1;
-    src_stride_uv = dst_stride_u = dst_stride_v = 0;
-  }
-#if defined(HAS_SPLITUVROW_SSE2)
-  if (TestCpuFlag(kCpuHasSSE2)) {
-    SplitUVRow = SplitUVRow_Any_SSE2;
-    if (IS_ALIGNED(width, 16)) {
-      SplitUVRow = SplitUVRow_SSE2;
-    }
-  }
-#endif
-#if defined(HAS_SPLITUVROW_AVX2)
-  if (TestCpuFlag(kCpuHasAVX2)) {
-    SplitUVRow = SplitUVRow_Any_AVX2;
-    if (IS_ALIGNED(width, 32)) {
-      SplitUVRow = SplitUVRow_AVX2;
-    }
-  }
-#endif
-#if defined(HAS_SPLITUVROW_NEON)
-  if (TestCpuFlag(kCpuHasNEON)) {
-    SplitUVRow = SplitUVRow_Any_NEON;
-    if (IS_ALIGNED(width, 16)) {
-      SplitUVRow = SplitUVRow_NEON;
-    }
-  }
-#endif
-#if defined(HAS_SPLITUVROW_DSPR2)
-  if (TestCpuFlag(kCpuHasDSPR2) &&
-      IS_ALIGNED(dst_u, 4) && IS_ALIGNED(dst_stride_u, 4) &&
-      IS_ALIGNED(dst_v, 4) && IS_ALIGNED(dst_stride_v, 4)) {
-    SplitUVRow = SplitUVRow_Any_DSPR2;
-    if (IS_ALIGNED(width, 16)) {
-      SplitUVRow = SplitUVRow_DSPR2;
-    }
-  }
-#endif
-
-  for (y = 0; y < height; ++y) {
-    // Copy a row of UV.
-    SplitUVRow(src_uv, dst_u, dst_v, width);
-    dst_u += dst_stride_u;
-    dst_v += dst_stride_v;
-    src_uv += src_stride_uv;
-  }
-}
-
 // Support converting from FOURCC_M420
 // Useful for bandwidth constrained transports like USB 1.0 and 2.0 and for
 // easy conversion to I420.
@@ -324,7 +255,9 @@ static int X420ToI420(const uint8* src_y,
   if (height < 0) {
     height = -height;
     halfheight = (height + 1) >> 1;
-    dst_y = dst_y + (height - 1) * dst_stride_y;
+    if (dst_y) {
+      dst_y = dst_y + (height - 1) * dst_stride_y;
+    }
     dst_u = dst_u + (halfheight - 1) * dst_stride_u;
     dst_v = dst_v + (halfheight - 1) * dst_stride_v;
     dst_stride_y = -dst_stride_y;
diff --git a/source/convert_from.cc b/source/convert_from.cc
index 2341dca95..3b2dca816 100644
--- a/source/convert_from.cc
+++ b/source/convert_from.cc
@@ -361,6 +361,7 @@ int I420ToUYVY(const uint8* src_y, int src_stride_y,
   return 0;
 }
 
+// TODO(fbarchard): test negative height for invert.
 LIBYUV_API
 int I420ToNV12(const uint8* src_y, int src_stride_y,
                const uint8* src_u, int src_stride_u,
@@ -368,76 +369,19 @@ int I420ToNV12(const uint8* src_y, int src_stride_y,
                uint8* dst_y, int dst_stride_y,
                uint8* dst_uv, int dst_stride_uv,
                int width, int height) {
-  int y;
-  void (*MergeUVRow_)(const uint8* src_u, const uint8* src_v, uint8* dst_uv,
-                      int width) = MergeUVRow_C;
-  // Coalesce rows.
-  int halfwidth = (width + 1) >> 1;
-  int halfheight = (height + 1) >> 1;
-  if (!src_u || !src_v || !dst_uv ||
+  if (!src_y || !src_u || !src_v || !dst_y || !dst_uv ||
       width <= 0 || height == 0) {
     return -1;
   }
-  // Negative height means invert the image.
-  if (height < 0) {
-    height = -height;
-    halfheight = (height + 1) >> 1;
-    if (dst_y) {
-      dst_y = dst_y + (height - 1) * dst_stride_y;
-    }
-    dst_uv = dst_uv + (halfheight - 1) * dst_stride_uv;
-    dst_stride_y = -dst_stride_y;
-    dst_stride_uv = -dst_stride_uv;
-  }
-  if (src_stride_y == width &&
-      dst_stride_y == width) {
-    width *= height;
-    height = 1;
-    src_stride_y = dst_stride_y = 0;
-  }
-  // Coalesce rows.
-  if (src_stride_u == halfwidth &&
-      src_stride_v == halfwidth &&
-      dst_stride_uv == halfwidth * 2) {
-    halfwidth *= halfheight;
-    halfheight = 1;
-    src_stride_u = src_stride_v = dst_stride_uv = 0;
-  }
-#if defined(HAS_MERGEUVROW_SSE2)
-  if (TestCpuFlag(kCpuHasSSE2)) {
-    MergeUVRow_ = MergeUVRow_Any_SSE2;
-    if (IS_ALIGNED(halfwidth, 16)) {
-      MergeUVRow_ = MergeUVRow_SSE2;
-    }
-  }
-#endif
-#if defined(HAS_MERGEUVROW_AVX2)
-  if (TestCpuFlag(kCpuHasAVX2)) {
-    MergeUVRow_ = MergeUVRow_Any_AVX2;
-    if (IS_ALIGNED(halfwidth, 32)) {
-      MergeUVRow_ = MergeUVRow_AVX2;
-    }
-  }
-#endif
-#if defined(HAS_MERGEUVROW_NEON)
-  if (TestCpuFlag(kCpuHasNEON)) {
-    MergeUVRow_ = MergeUVRow_Any_NEON;
-    if (IS_ALIGNED(halfwidth, 16)) {
-      MergeUVRow_ = MergeUVRow_NEON;
-    }
-  }
-#endif
-
+  int halfwidth = (width + 1) / 2;
+  int halfheight = height > 0 ? (height + 1) / 2 : (height - 1) / 2;
   if (dst_y) {
     CopyPlane(src_y, src_stride_y, dst_y, dst_stride_y, width, height);
   }
-  for (y = 0; y < halfheight; ++y) {
-    // Merge a row of U and V into a row of UV.
-    MergeUVRow_(src_u, src_v, dst_uv, halfwidth);
-    src_u += src_stride_u;
-    src_v += src_stride_v;
-    dst_uv += dst_stride_uv;
-  }
+  MergeUVPlane(src_u, src_stride_u,
+               src_v, src_stride_v,
+               dst_uv, dst_stride_uv,
+               halfwidth, halfheight);
   return 0;
 }
 
diff --git a/source/planar_functions.cc b/source/planar_functions.cc
index 811ee5b72..a764f8da4 100644
--- a/source/planar_functions.cc
+++ b/source/planar_functions.cc
@@ -31,6 +31,12 @@ void CopyPlane(const uint8* src_y, int src_stride_y,
                int width, int height) {
   int y;
   void (*CopyRow)(const uint8* src, uint8* dst, int width) = CopyRow_C;
+  // Negative height means invert the image.
+  if (height < 0) {
+    height = -height;
+    dst_y = dst_y + (height - 1) * dst_stride_y;
+    dst_stride_y = -dst_stride_y;
+  }
   // Coalesce rows.
   if (src_stride_y == width &&
       dst_stride_y == width) {
@@ -76,6 +82,7 @@ void CopyPlane(const uint8* src_y, int src_stride_y,
   }
 }
 
+// TODO(fbarchard): Consider support for negative height.
 LIBYUV_API
 void CopyPlane_16(const uint16* src_y, int src_stride_y,
                   uint16* dst_y, int dst_stride_y,
@@ -224,6 +231,133 @@ int I420ToI400(const uint8* src_y, int src_stride_y,
   return 0;
 }
 
+// Support function for NV12 etc UV channels.
+// Width and height are plane sizes (typically half pixel width).
+LIBYUV_API
+void SplitUVPlane(const uint8* src_uv, int src_stride_uv,
+                  uint8* dst_u, int dst_stride_u,
+                  uint8* dst_v, int dst_stride_v,
+                  int width, int height) {
+  int y;
+  void (*SplitUVRow)(const uint8* src_uv, uint8* dst_u, uint8* dst_v,
+                     int width) = SplitUVRow_C;
+  // Negative height means invert the image.
+  if (height < 0) {
+    height = -height;
+    dst_u = dst_u + (height - 1) * dst_stride_u;
+    dst_v = dst_v + (height - 1) * dst_stride_v;
+    dst_stride_u = -dst_stride_u;
+    dst_stride_v = -dst_stride_v;
+  }
+  // Coalesce rows.
+  if (src_stride_uv == width * 2 &&
+      dst_stride_u == width &&
+      dst_stride_v == width) {
+    width *= height;
+    height = 1;
+    src_stride_uv = dst_stride_u = dst_stride_v = 0;
+  }
+#if defined(HAS_SPLITUVROW_SSE2)
+  if (TestCpuFlag(kCpuHasSSE2)) {
+    SplitUVRow = SplitUVRow_Any_SSE2;
+    if (IS_ALIGNED(width, 16)) {
+      SplitUVRow = SplitUVRow_SSE2;
+    }
+  }
+#endif
+#if defined(HAS_SPLITUVROW_AVX2)
+  if (TestCpuFlag(kCpuHasAVX2)) {
+    SplitUVRow = SplitUVRow_Any_AVX2;
+    if (IS_ALIGNED(width, 32)) {
+      SplitUVRow = SplitUVRow_AVX2;
+    }
+  }
+#endif
+#if defined(HAS_SPLITUVROW_NEON)
+  if (TestCpuFlag(kCpuHasNEON)) {
+    SplitUVRow = SplitUVRow_Any_NEON;
+    if (IS_ALIGNED(width, 16)) {
+      SplitUVRow = SplitUVRow_NEON;
+    }
+  }
+#endif
+#if defined(HAS_SPLITUVROW_DSPR2)
+  if (TestCpuFlag(kCpuHasDSPR2) &&
+      IS_ALIGNED(dst_u, 4) && IS_ALIGNED(dst_stride_u, 4) &&
+      IS_ALIGNED(dst_v, 4) && IS_ALIGNED(dst_stride_v, 4)) {
+    SplitUVRow = SplitUVRow_Any_DSPR2;
+    if (IS_ALIGNED(width, 16)) {
+      SplitUVRow = SplitUVRow_DSPR2;
+    }
+  }
+#endif
+
+  for (y = 0; y < height; ++y) {
+    // Copy a row of UV.
+    SplitUVRow(src_uv, dst_u, dst_v, width);
+    dst_u += dst_stride_u;
+    dst_v += dst_stride_v;
+    src_uv += src_stride_uv;
+  }
+}
+
+LIBYUV_API
+void MergeUVPlane(const uint8* src_u, int src_stride_u,
+                  const uint8* src_v, int src_stride_v,
+                  uint8* dst_uv, int dst_stride_uv,
+                  int width, int height) {
+  int y;
+  void (*MergeUVRow)(const uint8* src_u, const uint8* src_v, uint8* dst_uv,
+                     int width) = MergeUVRow_C;
+  // Coalesce rows.
+  // Negative height means invert the image.
+  if (height < 0) {
+    height = -height;
+    dst_uv = dst_uv + (height - 1) * dst_stride_uv;
+    dst_stride_uv = -dst_stride_uv;
+  }
+  // Coalesce rows.
+  if (src_stride_u == width &&
+      src_stride_v == width &&
+      dst_stride_uv == width * 2) {
+    width *= height;
+    height = 1;
+    src_stride_u = src_stride_v = dst_stride_uv = 0;
+  }
+#if defined(HAS_MERGEUVROW_SSE2)
+  if (TestCpuFlag(kCpuHasSSE2)) {
+    MergeUVRow = MergeUVRow_Any_SSE2;
+    if (IS_ALIGNED(width, 16)) {
+      MergeUVRow = MergeUVRow_SSE2;
+    }
+  }
+#endif
+#if defined(HAS_MERGEUVROW_AVX2)
+  if (TestCpuFlag(kCpuHasAVX2)) {
+    MergeUVRow = MergeUVRow_Any_AVX2;
+    if (IS_ALIGNED(width, 32)) {
+      MergeUVRow = MergeUVRow_AVX2;
+    }
+  }
+#endif
+#if defined(HAS_MERGEUVROW_NEON)
+  if (TestCpuFlag(kCpuHasNEON)) {
+    MergeUVRow = MergeUVRow_Any_NEON;
+    if (IS_ALIGNED(width, 16)) {
+      MergeUVRow = MergeUVRow_NEON;
+    }
+  }
+#endif
+
+  for (y = 0; y < height; ++y) {
+    // Merge a row of U and V into a row of UV.
+    MergeUVRow(src_u, src_v, dst_uv, width);
+    src_u += src_stride_u;
+    src_v += src_stride_v;
+    dst_uv += dst_stride_uv;
+  }
+}
+
 // Mirror a plane of data.
 void MirrorPlane(const uint8* src_y, int src_stride_y,
                  uint8* dst_y, int dst_stride_y,
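
For reviewers, a minimal usage sketch of the two functions this change exports from planar_functions.h. The helper name, buffers, and strides below are hypothetical and not part of the patch; they only illustrate how a caller might deinterleave an NV12 UV plane into planar U and V and interleave it again. Note that width and height here are the UV plane dimensions (half the Y plane size for NV12), and a negative height inverts the output, matching the implementations above. I420ToNV12 in this patch now delegates its UV interleave to MergeUVPlane in the same way.

  #include "libyuv/planar_functions.h"

  // Hypothetical helper (illustration only): split an NV12 UV plane into
  // planar U and V, then merge the planes back into one interleaved plane.
  void SplitAndRemergeUV(const uint8* src_uv, int src_stride_uv,
                         uint8* tmp_u, int tmp_stride_u,
                         uint8* tmp_v, int tmp_stride_v,
                         uint8* dst_uv, int dst_stride_uv,
                         int uv_width, int uv_height) {
    // Deinterleave UVUVUV... into separate U and V planes.
    libyuv::SplitUVPlane(src_uv, src_stride_uv,
                         tmp_u, tmp_stride_u,
                         tmp_v, tmp_stride_v,
                         uv_width, uv_height);
    // Interleave the planes back into UVUVUV...
    libyuv::MergeUVPlane(tmp_u, tmp_stride_u,
                         tmp_v, tmp_stride_v,
                         dst_uv, dst_stride_uv,
                         uv_width, uv_height);
  }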