mirror of https://chromium.googlesource.com/libyuv/libyuv (synced 2026-02-16 23:29:52 +08:00)

Add BT.709 Full Range yuv constants.

MAKEYUVCONSTANTS macro to generate struct for YUV to RGB
Fix I444AlphaToARGB unit test for ARM by adjusting C version to match Neon implementation.

Bug: libyuv:879, libyuv:878, libyuv:877, libyuv:862, b/178283356
Change-Id: Iedb171fbf668316e7d45ab9e3481de6205ed31e2
Reviewed-on: https://chromium-review.googlesource.com/c/libyuv/libyuv/+/2646472
Commit-Queue: Frank Barchard <fbarchard@chromium.org>
Reviewed-by: Wan-Teh Chang <wtc@google.com>

parent 08d0dce5fc
commit 37480f12c6
@@ -1,6 +1,6 @@
 Name: libyuv
 URL: http://code.google.com/p/libyuv/
-Version: 1770
+Version: 1772
 License: BSD
 License File: LICENSE
 
@@ -23,12 +23,14 @@ extern "C" {
 // Conversion matrix for YUV to RGB
 LIBYUV_API extern const struct YuvConstants kYuvI601Constants;  // BT.601
 LIBYUV_API extern const struct YuvConstants kYuvJPEGConstants;  // JPeg
+LIBYUV_API extern const struct YuvConstants kYuvF709Constants;  // BT.709 full
 LIBYUV_API extern const struct YuvConstants kYuvH709Constants;  // BT.709
 LIBYUV_API extern const struct YuvConstants kYuv2020Constants;  // BT.2020
 
 // Conversion matrix for YVU to BGR
 LIBYUV_API extern const struct YuvConstants kYvuI601Constants;  // BT.601
 LIBYUV_API extern const struct YuvConstants kYvuJPEGConstants;  // JPeg
+LIBYUV_API extern const struct YuvConstants kYvuF709Constants;  // BT.709 full
 LIBYUV_API extern const struct YuvConstants kYvuH709Constants;  // BT.709
 LIBYUV_API extern const struct YuvConstants kYvu2020Constants;  // BT.2020
 
@@ -37,6 +39,7 @@ LIBYUV_API extern const struct YuvConstants kYvu2020Constants;  // BT.2020
 // TODO(fbarchard): Add macro for each Matrix function.
 #define kYuvI601ConstantsVU kYvuI601Constants
 #define kYuvJPEGConstantsVU kYvuJPEGConstants
+#define kYuvF709ConstantsVU kYvuF709Constants
 #define kYuvH709ConstantsVU kYvuH709Constants
 #define kYuv2020ConstantsVU kYvu2020Constants
 #define NV12ToABGRMatrix(a, b, c, d, e, f, g, h, i) \
@@ -118,6 +121,32 @@ int J420ToABGR(const uint8_t* src_y,
                int width,
                int height);
 
+// Convert F420 to ARGB. BT.709 full range
+LIBYUV_API
+int F420ToARGB(const uint8_t* src_y,
+               int src_stride_y,
+               const uint8_t* src_u,
+               int src_stride_u,
+               const uint8_t* src_v,
+               int src_stride_v,
+               uint8_t* dst_argb,
+               int dst_stride_argb,
+               int width,
+               int height);
+
+// Convert F420 to ABGR. BT.709 full range
+LIBYUV_API
+int F420ToABGR(const uint8_t* src_y,
+               int src_stride_y,
+               const uint8_t* src_u,
+               int src_stride_u,
+               const uint8_t* src_v,
+               int src_stride_v,
+               uint8_t* dst_abgr,
+               int dst_stride_abgr,
+               int width,
+               int height);
+
 // Convert H420 to ARGB.
 LIBYUV_API
 int H420ToARGB(const uint8_t* src_y,
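(For context, a minimal caller sketch — not part of this change — showing how the new F420ToARGB entry point declared above would be used. The buffer and stride names are illustrative and assume tightly packed planes.)

#include "libyuv/convert_argb.h"

// Convert one BT.709 full-range I420 ("F420") frame to 32-bit ARGB.
int ConvertF420Frame(const uint8_t* y, const uint8_t* u, const uint8_t* v,
                     uint8_t* argb, int width, int height) {
  // Tightly packed planes: luma stride = width, chroma stride = (width + 1) / 2.
  return libyuv::F420ToARGB(y, width, u, (width + 1) / 2, v, (width + 1) / 2,
                            argb, width * 4, width, height);
}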
@@ -201,14 +201,28 @@ int I444Copy(const uint8_t* src_y,
              int height);
 
 // Copy NV12. Supports inverting.
-int NV12Copy(const uint8_t* src_y, int src_stride_y, const uint8_t* src_uv,
-             int src_stride_uv, uint8_t* dst_y, int dst_stride_y,
-             uint8_t* dst_uv, int dst_stride_uv, int width, int height);
+int NV12Copy(const uint8_t* src_y,
+             int src_stride_y,
+             const uint8_t* src_uv,
+             int src_stride_uv,
+             uint8_t* dst_y,
+             int dst_stride_y,
+             uint8_t* dst_uv,
+             int dst_stride_uv,
+             int width,
+             int height);
 
 // Copy NV21. Supports inverting.
-int NV21Copy(const uint8_t* src_y, int src_stride_y, const uint8_t* src_vu,
-             int src_stride_vu, uint8_t* dst_y, int dst_stride_y,
-             uint8_t* dst_vu, int dst_stride_vu, int width, int height);
+int NV21Copy(const uint8_t* src_y,
+             int src_stride_y,
+             const uint8_t* src_vu,
+             int src_stride_vu,
+             uint8_t* dst_y,
+             int dst_stride_y,
+             uint8_t* dst_vu,
+             int dst_stride_vu,
+             int width,
+             int height);
 
 // Convert YUY2 to I422.
 LIBYUV_API
@@ -11,6 +11,6 @@
 #ifndef INCLUDE_LIBYUV_VERSION_H_
 #define INCLUDE_LIBYUV_VERSION_H_
 
-#define LIBYUV_VERSION 1770
+#define LIBYUV_VERSION 1772
 
 #endif  // INCLUDE_LIBYUV_VERSION_H_
@@ -94,16 +94,21 @@ enum FourCC {
       FOURCC('J', '4', '4', '4'),  // jpeg (bt.601 full), unofficial fourcc
   FOURCC_J400 =
       FOURCC('J', '4', '0', '0'),  // jpeg (bt.601 full), unofficial fourcc
+  FOURCC_F420 = FOURCC('F', '4', '2', '0'),  // bt.709 full, unofficial fourcc
+  FOURCC_F422 = FOURCC('F', '4', '2', '2'),  // bt.709 full, unofficial fourcc
+  FOURCC_F444 = FOURCC('F', '4', '4', '4'),  // bt.709 full, unofficial fourcc
   FOURCC_H420 = FOURCC('H', '4', '2', '0'),  // bt.709, unofficial fourcc
   FOURCC_H422 = FOURCC('H', '4', '2', '2'),  // bt.709, unofficial fourcc
   FOURCC_H444 = FOURCC('H', '4', '4', '4'),  // bt.709, unofficial fourcc
   FOURCC_U420 = FOURCC('U', '4', '2', '0'),  // bt.2020, unofficial fourcc
   FOURCC_U422 = FOURCC('U', '4', '2', '2'),  // bt.2020, unofficial fourcc
   FOURCC_U444 = FOURCC('U', '4', '4', '4'),  // bt.2020, unofficial fourcc
+  FOURCC_F010 = FOURCC('F', '0', '1', '0'),  // bt.709 full range 10 bit 420
   FOURCC_H010 = FOURCC('H', '0', '1', '0'),  // bt.709 10 bit 420
   FOURCC_U010 = FOURCC('U', '0', '1', '0'),  // bt.2020 10 bit 420
-  FOURCC_H210 = FOURCC('H', '0', '1', '0'),  // bt.709 10 bit 422
-  FOURCC_U210 = FOURCC('U', '0', '1', '0'),  // bt.2020 10 bit 422
+  FOURCC_F210 = FOURCC('F', '2', '1', '0'),  // bt.709 full range 10 bit 422
+  FOURCC_H210 = FOURCC('H', '2', '1', '0'),  // bt.709 10 bit 422
+  FOURCC_U210 = FOURCC('U', '2', '1', '0'),  // bt.2020 10 bit 422
 
   // 14 Auxiliary aliases. CanonicalFourCC() maps these to canonical fourcc.
   FOURCC_IYUV = FOURCC('I', 'Y', 'U', 'V'),  // Alias for I420.
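(Aside: the FOURCC() packer used above is assumed to be the usual libyuv definition — one byte per character, least-significant byte first — shown here only to make the new codes concrete.)

#include <stdint.h>

// Assumed packer, in the spirit of libyuv/video_common.h:
#define FOURCC(a, b, c, d)                                        \
  ((uint32_t)(a) | ((uint32_t)(b) << 8) | ((uint32_t)(c) << 16) | \
   ((uint32_t)(d) << 24))

// With that definition the new code FOURCC_F420 packs to 0x30323446
// ('F' = 0x46, '4' = 0x34, '2' = 0x32, '0' = 0x30).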
@@ -350,9 +350,16 @@ int I420ToI400(const uint8_t* src_y,
 }
 
 // Copy NV12. Supports inverting.
-int NV12Copy(const uint8_t* src_y, int src_stride_y, const uint8_t* src_uv,
-             int src_stride_uv, uint8_t* dst_y, int dst_stride_y,
-             uint8_t* dst_uv, int dst_stride_uv, int width, int height) {
+int NV12Copy(const uint8_t* src_y,
+             int src_stride_y,
+             const uint8_t* src_uv,
+             int src_stride_uv,
+             uint8_t* dst_y,
+             int dst_stride_y,
+             uint8_t* dst_uv,
+             int dst_stride_uv,
+             int width,
+             int height) {
   if (!src_y || !dst_y || !src_uv || !dst_uv || width <= 0 || height == 0) {
     return -1;
   }
@@ -375,9 +382,16 @@ int NV12Copy(const uint8_t* src_y, int src_stride_y, const uint8_t* src_uv,
 }
 
 // Copy NV21. Supports inverting.
-int NV21Copy(const uint8_t* src_y, int src_stride_y, const uint8_t* src_vu,
-             int src_stride_vu, uint8_t* dst_y, int dst_stride_y,
-             uint8_t* dst_vu, int dst_stride_vu, int width, int height) {
+int NV21Copy(const uint8_t* src_y,
+             int src_stride_y,
+             const uint8_t* src_vu,
+             int src_stride_vu,
+             uint8_t* dst_y,
+             int dst_stride_y,
+             uint8_t* dst_vu,
+             int dst_stride_vu,
+             int width,
+             int height) {
   return NV12Copy(src_y, src_stride_y, src_vu, src_stride_vu, dst_y,
                   dst_stride_y, dst_vu, dst_stride_vu, width, height);
 }
@@ -1327,7 +1327,68 @@ void J400ToARGBRow_C(const uint8_t* src_y, uint8_t* dst_argb, int width) {
   }
 }
 
-// TODO(fbarchard): Unify these structures to be platform independent.
+// Macros to create SIMD specific yuv to rgb conversion constants.
+
+#if defined(__aarch64__)
+#define MAKEYUVCONSTANTS(name, YG, YGB, UB, UG, VG, VR, BB, BG, BR) \
+  const struct YuvConstants SIMD_ALIGNED(kYuv##name##Constants) = { \
+      {-UB, -VR, -UB, -VR, -UB, -VR, -UB, -VR}, \
+      {-UB, -VR, -UB, -VR, -UB, -VR, -UB, -VR}, \
+      {UG, VG, UG, VG, UG, VG, UG, VG}, \
+      {UG, VG, UG, VG, UG, VG, UG, VG}, \
+      {BB, BG, BR, YGB, 0, 0, 0, 0}, \
+      {0x0101 * YG, YG, 0, 0}}; \
+  const struct YuvConstants SIMD_ALIGNED(kYvu##name##Constants) = { \
+      {-VR, -UB, -VR, -UB, -VR, -UB, -VR, -UB}, \
+      {-VR, -UB, -VR, -UB, -VR, -UB, -VR, -UB}, \
+      {VG, UG, VG, UG, VG, UG, VG, UG}, \
+      {VG, UG, VG, UG, VG, UG, VG, UG}, \
+      {BR, BG, BB, YGB, 0, 0, 0, 0}, \
+      {0x0101 * YG, YG, 0, 0}};
+
+#elif defined(__arm__)
+#define MAKEYUVCONSTANTS(name, YG, YGB, UB, UG, VG, VR, BB, BG, BR) \
+  const struct YuvConstants SIMD_ALIGNED(kYuv##name##Constants) = { \
+      {-UB, -UB, -UB, -UB, -VR, -VR, -VR, -VR, 0, 0, 0, 0, 0, 0, 0, 0}, \
+      {UG, UG, UG, UG, VG, VG, VG, VG, 0, 0, 0, 0, 0, 0, 0, 0}, \
+      {BB, BG, BR, YGB, 0, 0, 0, 0}, \
+      {0x0101 * YG, YG, 0, 0}}; \
+  const struct YuvConstants SIMD_ALIGNED(kYvu##name##Constants) = { \
+      {-VR, -VR, -VR, -VR, -UB, -UB, -UB, -UB, 0, 0, 0, 0, 0, 0, 0, 0}, \
+      {VG, VG, VG, VG, UG, UG, UG, UG, 0, 0, 0, 0, 0, 0, 0, 0}, \
+      {BR, BG, BB, YGB, 0, 0, 0, 0}, \
+      {0x0101 * YG, YG, 0, 0}};
+
+#else
+#define MAKEYUVCONSTANTS(name, YG, YGB, UB, UG, VG, VR, BB, BG, BR) \
+  const struct YuvConstants SIMD_ALIGNED(kYuv##name##Constants) = { \
+      {UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, \
+       UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0}, \
+      {UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, \
+       UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG}, \
+      {0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, \
+       0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR}, \
+      {BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB}, \
+      {BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG}, \
+      {BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR}, \
+      {YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG}, \
+      {YGB, YGB, YGB, YGB, YGB, YGB, YGB, YGB, YGB, YGB, YGB, YGB, YGB, YGB, \
+       YGB, YGB}}; \
+  const struct YuvConstants SIMD_ALIGNED(kYvu##name##Constants) = { \
+      {VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, \
+       VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0}, \
+      {VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, \
+       VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG}, \
+      {0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, \
+       0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB}, \
+      {BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR}, \
+      {BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG}, \
+      {BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB}, \
+      {YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG}, \
+      {YGB, YGB, YGB, YGB, YGB, YGB, YGB, YGB, YGB, YGB, YGB, YGB, YGB, YGB, \
+       YGB, YGB}};
+#endif
+
 // TODO(fbarchard): Generate SIMD structures from float matrix.
 
 // BT.601 YUV to RGB reference
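(For readability: with the per-matrix UB/UG/VG/VR/YG/YGB/BB/BG/BR defines in scope, a single invocation such as the later MAKEYUVCONSTANTS(F709, ...) expands on AArch64 to roughly the following pair of tables — the same shape the old hand-written constants had.)

const struct YuvConstants SIMD_ALIGNED(kYuvF709Constants) = {
    {-UB, -VR, -UB, -VR, -UB, -VR, -UB, -VR},
    {-UB, -VR, -UB, -VR, -UB, -VR, -UB, -VR},
    {UG, VG, UG, VG, UG, VG, UG, VG},
    {UG, VG, UG, VG, UG, VG, UG, VG},
    {BB, BG, BR, YGB, 0, 0, 0, 0},
    {0x0101 * YG, YG, 0, 0}};
const struct YuvConstants SIMD_ALIGNED(kYvuF709Constants) = {
    {-VR, -UB, -VR, -UB, -VR, -UB, -VR, -UB},
    {-VR, -UB, -VR, -UB, -VR, -UB, -VR, -UB},
    {VG, UG, VG, UG, VG, UG, VG, UG},
    {VG, UG, VG, UG, VG, UG, VG, UG},
    {BR, BG, BB, YGB, 0, 0, 0, 0},
    {0x0101 * YG, YG, 0, 0}};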
@@ -1350,60 +1411,7 @@ void J400ToARGBRow_C(const uint8_t* src_y, uint8_t* dst_argb, int width) {
 #define BG (UG * 128 + VG * 128 + YGB)
 #define BR (VR * 128 + YGB)
 
-#if defined(__aarch64__)  // 64 bit arm
-const struct YuvConstants SIMD_ALIGNED(kYuvI601Constants) = {
-    {-UB, -VR, -UB, -VR, -UB, -VR, -UB, -VR},
-    {-UB, -VR, -UB, -VR, -UB, -VR, -UB, -VR},
-    {UG, VG, UG, VG, UG, VG, UG, VG},
-    {UG, VG, UG, VG, UG, VG, UG, VG},
-    {BB, BG, BR, YGB, 0, 0, 0, 0},
-    {0x0101 * YG, YG, 0, 0}};
-const struct YuvConstants SIMD_ALIGNED(kYvuI601Constants) = {
-    {-VR, -UB, -VR, -UB, -VR, -UB, -VR, -UB},
-    {-VR, -UB, -VR, -UB, -VR, -UB, -VR, -UB},
-    {VG, UG, VG, UG, VG, UG, VG, UG},
-    {VG, UG, VG, UG, VG, UG, VG, UG},
-    {BR, BG, BB, YGB, 0, 0, 0, 0},
-    {0x0101 * YG, YG, 0, 0}};
-#elif defined(__arm__)  // 32 bit arm
-const struct YuvConstants SIMD_ALIGNED(kYuvI601Constants) = {
-    {-UB, -UB, -UB, -UB, -VR, -VR, -VR, -VR, 0, 0, 0, 0, 0, 0, 0, 0},
-    {UG, UG, UG, UG, VG, VG, VG, VG, 0, 0, 0, 0, 0, 0, 0, 0},
-    {BB, BG, BR, YGB, 0, 0, 0, 0},
-    {0x0101 * YG, YG, 0, 0}};
-const struct YuvConstants SIMD_ALIGNED(kYvuI601Constants) = {
-    {-VR, -VR, -VR, -VR, -UB, -UB, -UB, -UB, 0, 0, 0, 0, 0, 0, 0, 0},
-    {VG, VG, VG, VG, UG, UG, UG, UG, 0, 0, 0, 0, 0, 0, 0, 0},
-    {BR, BG, BB, YGB, 0, 0, 0, 0},
-    {0x0101 * YG, YG, 0, 0}};
-#else
-const struct YuvConstants SIMD_ALIGNED(kYuvI601Constants) = {
-    {UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0,
-     UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0},
-    {UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG,
-     UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG},
-    {0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR,
-     0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR},
-    {BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB},
-    {BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG},
-    {BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR},
-    {YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG},
-    {YGB, YGB, YGB, YGB, YGB, YGB, YGB, YGB, YGB, YGB, YGB, YGB, YGB, YGB, YGB,
-     YGB}};
-const struct YuvConstants SIMD_ALIGNED(kYvuI601Constants) = {
-    {VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0,
-     VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0},
-    {VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG,
-     VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG},
-    {0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB,
-     0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB},
-    {BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR},
-    {BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG},
-    {BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB},
-    {YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG},
-    {YGB, YGB, YGB, YGB, YGB, YGB, YGB, YGB, YGB, YGB, YGB, YGB, YGB, YGB, YGB,
-     YGB}};
-#endif
+MAKEYUVCONSTANTS(I601, YG, YGB, UB, UG, VG, VR, BB, BG, BR)
 
 #undef BB
 #undef BG
@@ -1435,60 +1443,7 @@ const struct YuvConstants SIMD_ALIGNED(kYvuI601Constants) = {
 #define BG (UG * 128 + VG * 128 + YGB)
 #define BR (VR * 128 + YGB)
 
-#if defined(__aarch64__)
-const struct YuvConstants SIMD_ALIGNED(kYuvJPEGConstants) = {
-    {-UB, -VR, -UB, -VR, -UB, -VR, -UB, -VR},
-    {-UB, -VR, -UB, -VR, -UB, -VR, -UB, -VR},
-    {UG, VG, UG, VG, UG, VG, UG, VG},
-    {UG, VG, UG, VG, UG, VG, UG, VG},
-    {BB, BG, BR, YGB, 0, 0, 0, 0},
-    {0x0101 * YG, YG, 0, 0}};
-const struct YuvConstants SIMD_ALIGNED(kYvuJPEGConstants) = {
-    {-VR, -UB, -VR, -UB, -VR, -UB, -VR, -UB},
-    {-VR, -UB, -VR, -UB, -VR, -UB, -VR, -UB},
-    {VG, UG, VG, UG, VG, UG, VG, UG},
-    {VG, UG, VG, UG, VG, UG, VG, UG},
-    {BR, BG, BB, YGB, 0, 0, 0, 0},
-    {0x0101 * YG, YG, 0, 0}};
-#elif defined(__arm__)
-const struct YuvConstants SIMD_ALIGNED(kYuvJPEGConstants) = {
-    {-UB, -UB, -UB, -UB, -VR, -VR, -VR, -VR, 0, 0, 0, 0, 0, 0, 0, 0},
-    {UG, UG, UG, UG, VG, VG, VG, VG, 0, 0, 0, 0, 0, 0, 0, 0},
-    {BB, BG, BR, YGB, 0, 0, 0, 0},
-    {0x0101 * YG, YG, 0, 0}};
-const struct YuvConstants SIMD_ALIGNED(kYvuJPEGConstants) = {
-    {-VR, -VR, -VR, -VR, -UB, -UB, -UB, -UB, 0, 0, 0, 0, 0, 0, 0, 0},
-    {VG, VG, VG, VG, UG, UG, UG, UG, 0, 0, 0, 0, 0, 0, 0, 0},
-    {BR, BG, BB, YGB, 0, 0, 0, 0},
-    {0x0101 * YG, YG, 0, 0}};
-#else
-const struct YuvConstants SIMD_ALIGNED(kYuvJPEGConstants) = {
-    {UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0,
-     UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0},
-    {UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG,
-     UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG},
-    {0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR,
-     0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR},
-    {BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB},
-    {BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG},
-    {BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR},
-    {YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG},
-    {YGB, YGB, YGB, YGB, YGB, YGB, YGB, YGB, YGB, YGB, YGB, YGB, YGB, YGB, YGB,
-     YGB}};
-const struct YuvConstants SIMD_ALIGNED(kYvuJPEGConstants) = {
-    {VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0,
-     VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0},
-    {VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG,
-     VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG},
-    {0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB,
-     0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB},
-    {BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR},
-    {BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG},
-    {BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB},
-    {YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG},
-    {YGB, YGB, YGB, YGB, YGB, YGB, YGB, YGB, YGB, YGB, YGB, YGB, YGB, YGB, YGB,
-     YGB}};
-#endif
+MAKEYUVCONSTANTS(JPEG, YG, YGB, UB, UG, VG, VR, BB, BG, BR)
 
 #undef BB
 #undef BG
@@ -1504,7 +1459,6 @@ const struct YuvConstants SIMD_ALIGNED(kYvuJPEGConstants) = {
 // R = (Y - 16) * 1.164 - V * -1.793
 // G = (Y - 16) * 1.164 - U * 0.213 - V * 0.533
 // B = (Y - 16) * 1.164 - U * -2.112
-// See also http://www.equasys.de/colorconversion.html
 
 // Y contribution to R,G,B. Scale and bias.
 #define YG 18997 /* round(1.164 * 64 * 256 * 256 / 257) */
@@ -1522,60 +1476,45 @@ const struct YuvConstants SIMD_ALIGNED(kYvuJPEGConstants) = {
 #define BG (UG * 128 + VG * 128 + YGB)
 #define BR (VR * 128 + YGB)
 
-#if defined(__aarch64__)
-const struct YuvConstants SIMD_ALIGNED(kYuvH709Constants) = {
-    {-UB, -VR, -UB, -VR, -UB, -VR, -UB, -VR},
-    {-UB, -VR, -UB, -VR, -UB, -VR, -UB, -VR},
-    {UG, VG, UG, VG, UG, VG, UG, VG},
-    {UG, VG, UG, VG, UG, VG, UG, VG},
-    {BB, BG, BR, YGB, 0, 0, 0, 0},
-    {0x0101 * YG, YG, 0, 0}};
-const struct YuvConstants SIMD_ALIGNED(kYvuH709Constants) = {
-    {-VR, -UB, -VR, -UB, -VR, -UB, -VR, -UB},
-    {-VR, -UB, -VR, -UB, -VR, -UB, -VR, -UB},
-    {VG, UG, VG, UG, VG, UG, VG, UG},
-    {VG, UG, VG, UG, VG, UG, VG, UG},
-    {BR, BG, BB, YGB, 0, 0, 0, 0},
-    {0x0101 * YG, YG, 0, 0}};
-#elif defined(__arm__)
-const struct YuvConstants SIMD_ALIGNED(kYuvH709Constants) = {
-    {-UB, -UB, -UB, -UB, -VR, -VR, -VR, -VR, 0, 0, 0, 0, 0, 0, 0, 0},
-    {UG, UG, UG, UG, VG, VG, VG, VG, 0, 0, 0, 0, 0, 0, 0, 0},
-    {BB, BG, BR, YGB, 0, 0, 0, 0},
-    {0x0101 * YG, YG, 0, 0}};
-const struct YuvConstants SIMD_ALIGNED(kYvuH709Constants) = {
-    {-VR, -VR, -VR, -VR, -UB, -UB, -UB, -UB, 0, 0, 0, 0, 0, 0, 0, 0},
-    {VG, VG, VG, VG, UG, UG, UG, UG, 0, 0, 0, 0, 0, 0, 0, 0},
-    {BR, BG, BB, YGB, 0, 0, 0, 0},
-    {0x0101 * YG, YG, 0, 0}};
-#else
-const struct YuvConstants SIMD_ALIGNED(kYuvH709Constants) = {
-    {UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0,
-     UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0},
-    {UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG,
-     UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG},
-    {0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR,
-     0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR},
-    {BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB},
-    {BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG},
-    {BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR},
-    {YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG},
-    {YGB, YGB, YGB, YGB, YGB, YGB, YGB, YGB, YGB, YGB, YGB, YGB, YGB, YGB, YGB,
-     YGB}};
-const struct YuvConstants SIMD_ALIGNED(kYvuH709Constants) = {
-    {VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0,
-     VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0},
-    {VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG,
-     VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG},
-    {0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB,
-     0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB},
-    {BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR},
-    {BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG},
-    {BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB},
-    {YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG},
-    {YGB, YGB, YGB, YGB, YGB, YGB, YGB, YGB, YGB, YGB, YGB, YGB, YGB, YGB, YGB,
-     YGB}};
-#endif
+MAKEYUVCONSTANTS(H709, YG, YGB, UB, UG, VG, VR, BB, BG, BR)
+
+#undef BB
+#undef BG
+#undef BR
+#undef YGB
+#undef UB
+#undef UG
+#undef VG
+#undef VR
+#undef YG
+
+// BT.709 full range YUV to RGB reference
+// R = Y - V * -1.5748
+// G = Y - U * 0.18732 - V * 0.46812
+// B = Y - U * -1.8556
+// WR = 0.2126
+// WB = 0.0722
+// WR and WB given, the equations are:
+// R = Y + (2 * (1 - WR)) * V;
+// G = Y - ((2 * ((WR * (1 - WR) * V) + (WB * (1 - WB) * U))) / (1 - WB - WR));
+// B = Y + (2 * (1 - WB)) * U;
+
+// Y contribution to R,G,B. Scale and bias. (same as jpeg)
+#define YG 16320 /* round(1 * 64 * 256 * 256 / 257) */
+#define YGB 32 /* 64 / 2 */
+
+// U and V contributions to R,G,B.
+#define UB -119 /* round(-1.8556 * 64) */
+#define UG 12 /* round(0.18732 * 64) */
+#define VG 30 /* round(0.46812 * 64) */
+#define VR -101 /* round(-1.5748 * 64) */
+
+// Bias values to round, and subtract 128 from U and V.
+#define BB (UB * 128 + YGB)
+#define BG (UG * 128 + VG * 128 + YGB)
+#define BR (VR * 128 + YGB)
+
+MAKEYUVCONSTANTS(F709, YG, YGB, UB, UG, VG, VR, BB, BG, BR)
 
 #undef BB
 #undef BG
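(Arithmetic check on the new fixed-point constants above: they are the full-range BT.709 YUV-to-RGB coefficients scaled by 64, with YG compensating for the 0x0101 (×257) luma duplication used in YuvPixel.)

\begin{aligned}
UB &= \operatorname{round}(-1.8556 \cdot 64) = -119, & UG &= \operatorname{round}(0.18732 \cdot 64) = 12,\\
VG &= \operatorname{round}(0.46812 \cdot 64) = 30,   & VR &= \operatorname{round}(-1.5748 \cdot 64) = -101,\\
YG &= \operatorname{round}(64 \cdot 256 \cdot 256 / 257) = 16320, & YGB &= 64 / 2 = 32.
\end{aligned}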
@@ -1608,60 +1547,7 @@ const struct YuvConstants SIMD_ALIGNED(kYvuH709Constants) = {
 #define BG (UG * 128 + VG * 128 + YGB)
 #define BR (VR * 128 + YGB)
 
-#if defined(__aarch64__)
-const struct YuvConstants SIMD_ALIGNED(kYuv2020Constants) = {
-    {-UB, -VR, -UB, -VR, -UB, -VR, -UB, -VR},
-    {-UB, -VR, -UB, -VR, -UB, -VR, -UB, -VR},
-    {UG, VG, UG, VG, UG, VG, UG, VG},
-    {UG, VG, UG, VG, UG, VG, UG, VG},
-    {BB, BG, BR, YGB, 0, 0, 0, 0},
-    {0x0101 * YG, YG, 0, 0}};
-const struct YuvConstants SIMD_ALIGNED(kYvu2020Constants) = {
-    {-VR, -UB, -VR, -UB, -VR, -UB, -VR, -UB},
-    {-VR, -UB, -VR, -UB, -VR, -UB, -VR, -UB},
-    {VG, UG, VG, UG, VG, UG, VG, UG},
-    {VG, UG, VG, UG, VG, UG, VG, UG},
-    {BR, BG, BB, YGB, 0, 0, 0, 0},
-    {0x0101 * YG, YG, 0, 0}};
-#elif defined(__arm__)
-const struct YuvConstants SIMD_ALIGNED(kYuv2020Constants) = {
-    {-UB, -UB, -UB, -UB, -VR, -VR, -VR, -VR, 0, 0, 0, 0, 0, 0, 0, 0},
-    {UG, UG, UG, UG, VG, VG, VG, VG, 0, 0, 0, 0, 0, 0, 0, 0},
-    {BB, BG, BR, YGB, 0, 0, 0, 0},
-    {0x0101 * YG, YG, 0, 0}};
-const struct YuvConstants SIMD_ALIGNED(kYvu2020Constants) = {
-    {-VR, -VR, -VR, -VR, -UB, -UB, -UB, -UB, 0, 0, 0, 0, 0, 0, 0, 0},
-    {VG, VG, VG, VG, UG, UG, UG, UG, 0, 0, 0, 0, 0, 0, 0, 0},
-    {BR, BG, BB, YGB, 0, 0, 0, 0},
-    {0x0101 * YG, YG, 0, 0}};
-#else
-const struct YuvConstants SIMD_ALIGNED(kYuv2020Constants) = {
-    {UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0,
-     UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0},
-    {UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG,
-     UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG},
-    {0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR,
-     0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR},
-    {BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB},
-    {BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG},
-    {BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR},
-    {YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG},
-    {YGB, YGB, YGB, YGB, YGB, YGB, YGB, YGB, YGB, YGB, YGB, YGB, YGB, YGB, YGB,
-     YGB}};
-const struct YuvConstants SIMD_ALIGNED(kYvu2020Constants) = {
-    {VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0,
-     VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0},
-    {VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG,
-     VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG},
-    {0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB,
-     0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB},
-    {BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR},
-    {BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG},
-    {BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB},
-    {YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG},
-    {YGB, YGB, YGB, YGB, YGB, YGB, YGB, YGB, YGB, YGB, YGB, YGB, YGB, YGB, YGB,
-     YGB}};
-#endif
+MAKEYUVCONSTANTS(2020, YG, YGB, UB, UG, VG, VR, BB, BG, BR)
 
 #undef BB
 #undef BG
@@ -1673,6 +1559,8 @@ const struct YuvConstants SIMD_ALIGNED(kYvu2020Constants) = {
 #undef VR
 #undef YG
 
+#undef MAKEYUVCONSTANTS
+
 // C reference code that mimics the YUV assembly.
 // Reads 8 bit YUV and leaves result as 16 bit.
 static __inline void YuvPixel(uint8_t y,
@@ -1712,9 +1600,9 @@ static __inline void YuvPixel(uint8_t y,
 #endif
 
   uint32_t y1 = (uint32_t)(y * 0x0101 * yg) >> 16;
-  *b = Clamp((int32_t)(-(u * ub) + y1 + bb) >> 6);
-  *g = Clamp((int32_t)(-(u * ug + v * vg) + y1 + bg) >> 6);
-  *r = Clamp((int32_t)(-(v * vr) + y1 + br) >> 6);
+  *b = Clamp((int32_t)(y1 + -(u * ub) + bb) >> 6);
+  *g = Clamp((int32_t)(y1 + -(u * ug + v * vg) + bg) >> 6);
+  *r = Clamp((int32_t)(y1 + -(v * vr) + br) >> 6);
 }
 
 // Reads 8 bit YUV and leaves result as 16 bit.
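(Spelled out, the reordered expressions above compute, per channel — blue shown here, with BB = UB*128 + YGB folding the −128 chroma bias into a single constant:)

B = \mathrm{Clamp}\big((y_1 - UB\,(U - 128) + YGB) \gg 6\big),
\qquad y_1 = \frac{257 \cdot YG \cdot Y}{2^{16}} \approx 64 \cdot (\text{luma gain}) \cdot Y .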
@@ -2016,6 +1904,40 @@ void I422ToAR30Row_C(const uint8_t* src_y,
   }
 }
 
+#if !defined(LIBYUV_DISABLE_NEON) && \
+    (defined(__ARM_NEON__) || defined(__aarch64__) || defined(LIBYUV_NEON))
+// C mimic assembly.
+// TODO(fbarchard): Remove subsampling from Neon.
+void I444AlphaToARGBRow_C(const uint8_t* src_y,
+                          const uint8_t* src_u,
+                          const uint8_t* src_v,
+                          const uint8_t* src_a,
+                          uint8_t* rgb_buf,
+                          const struct YuvConstants* yuvconstants,
+                          int width) {
+  int x;
+  for (x = 0; x < width - 1; x += 2) {
+    uint8_t u = (src_u[0] + src_u[1] + 1) >> 1;
+    uint8_t v = (src_v[0] + src_v[1] + 1) >> 1;
+    YuvPixel(src_y[0], u, v, rgb_buf + 0, rgb_buf + 1, rgb_buf + 2,
+             yuvconstants);
+    rgb_buf[3] = src_a[0];
+    YuvPixel(src_y[1], u, v, rgb_buf + 4, rgb_buf + 5, rgb_buf + 6,
+             yuvconstants);
+    rgb_buf[7] = src_a[1];
+    src_y += 2;
+    src_u += 2;
+    src_v += 2;
+    src_a += 2;
+    rgb_buf += 8;  // Advance 2 pixels.
+  }
+  if (width & 1) {
+    YuvPixel(src_y[0], src_u[0], src_v[0], rgb_buf + 0, rgb_buf + 1,
+             rgb_buf + 2, yuvconstants);
+    rgb_buf[3] = src_a[0];
+  }
+}
+#else
 void I444AlphaToARGBRow_C(const uint8_t* src_y,
                           const uint8_t* src_u,
                           const uint8_t* src_v,
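(The pairwise chroma average used in the new C path is a round-half-up average, so the C reference sees the same subsampled chroma as the current Neon path:)

u = (U_0 + U_1 + 1) \gg 1 = \left\lfloor \tfrac{U_0 + U_1}{2} + \tfrac{1}{2} \right\rfloor .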
@@ -2035,6 +1957,7 @@ void I444AlphaToARGBRow_C(const uint8_t* src_y,
     rgb_buf += 4;  // Advance 1 pixel.
   }
 }
+#endif
 
 void I422AlphaToARGBRow_C(const uint8_t* src_y,
                           const uint8_t* src_u,
@@ -1278,7 +1278,7 @@ void RGBAToYJRow_AVX2(const uint8_t* src_rgba, uint8_t* dst_y, int width) {
       "vmovdqu %5,%%ymm6 \n"
 
       LABELALIGN RGBTOY_AVX2(
       ymm5) "vzeroupper \n"
       : "+r"(src_rgba),  // %0
         "+r"(dst_y),     // %1
         "+r"(width)      // %2
@@ -2161,15 +2161,15 @@ void OMITFP I444AlphaToARGBRow_SSSE3(const uint8_t* y_buf,
   // clang-format off
   asm volatile (
     YUVTORGB_SETUP(yuvconstants)
     "sub %[u_buf],%[v_buf] \n"
 
     LABELALIGN
     "1: \n"
     READYUVA444
     YUVTORGB(yuvconstants)
     STOREARGB
     "subl $0x8,%[width] \n"
     "jg 1b \n"
   : [y_buf]"+r"(y_buf),  // %[y_buf]
     [u_buf]"+r"(u_buf),  // %[u_buf]
     [v_buf]"+r"(v_buf),  // %[v_buf]
@@ -2947,16 +2947,16 @@ void OMITFP I444AlphaToARGBRow_AVX2(const uint8_t* y_buf,
   // clang-format off
   asm volatile (
     YUVTORGB_SETUP_AVX2(yuvconstants)
     "sub %[u_buf],%[v_buf] \n"
 
     LABELALIGN
     "1: \n"
     READYUVA444_AVX2
     YUVTORGB_AVX2(yuvconstants)
     STOREARGB_AVX2
     "subl $0x10,%[width] \n"
     "jg 1b \n"
     "vzeroupper \n"
   : [y_buf]"+r"(y_buf),  // %[y_buf]
     [u_buf]"+r"(u_buf),  // %[u_buf]
     [v_buf]"+r"(v_buf),  // %[v_buf]
@@ -168,8 +168,8 @@ void I444AlphaToARGBRow_NEON(const uint8_t* src_y,
   asm volatile(
       YUVTORGB_SETUP
       "1: \n" READYUV444 YUVTORGB
-      "subs %5, %5, #8 \n"
       "vld1.8 {d23}, [%3]! \n"
+      "subs %5, %5, #8 \n"
       "vst4.8 {d20, d21, d22, d23}, [%4]! \n"
       "bgt 1b \n"
       : "+r"(src_y),  // %0
@@ -415,11 +415,11 @@ void NV12ToARGBRow_NEON(const uint8_t* src_y,
                         const struct YuvConstants* yuvconstants,
                         int width) {
   asm volatile(YUVTORGB_SETUP
       "vmov.u8 d23, #255 \n"
       "1: \n" READNV12 YUVTORGB
       "subs %3, %3, #8 \n"
       "vst4.8 {d20, d21, d22, d23}, [%2]! \n"
       "bgt 1b \n"
       : "+r"(src_y),     // %0
         "+r"(src_uv),    // %1
         "+r"(dst_argb),  // %2
@@ -438,11 +438,11 @@ void NV21ToARGBRow_NEON(const uint8_t* src_y,
                         const struct YuvConstants* yuvconstants,
                         int width) {
   asm volatile(YUVTORGB_SETUP
       "vmov.u8 d23, #255 \n"
       "1: \n" READNV21 YUVTORGB
       "subs %3, %3, #8 \n"
       "vst4.8 {d20, d21, d22, d23}, [%2]! \n"
       "bgt 1b \n"
       : "+r"(src_y),     // %0
         "+r"(src_vu),    // %1
         "+r"(dst_argb),  // %2
@@ -537,11 +537,11 @@ void YUY2ToARGBRow_NEON(const uint8_t* src_yuy2,
                         const struct YuvConstants* yuvconstants,
                         int width) {
   asm volatile(YUVTORGB_SETUP
       "vmov.u8 d23, #255 \n"
       "1: \n" READYUY2 YUVTORGB
       "subs %2, %2, #8 \n"
       "vst4.8 {d20, d21, d22, d23}, [%1]! \n"
       "bgt 1b \n"
       : "+r"(src_yuy2),  // %0
         "+r"(dst_argb),  // %1
         "+r"(width)      // %2
@@ -558,11 +558,11 @@ void UYVYToARGBRow_NEON(const uint8_t* src_uyvy,
                         const struct YuvConstants* yuvconstants,
                         int width) {
   asm volatile(YUVTORGB_SETUP
       "vmov.u8 d23, #255 \n"
       "1: \n" READUYVY YUVTORGB
       "subs %2, %2, #8 \n"
       "vst4.8 {d20, d21, d22, d23}, [%1]! \n"
       "bgt 1b \n"
       : "+r"(src_uyvy),  // %0
         "+r"(dst_argb),  // %1
         "+r"(width)      // %2
|
|||||||
@ -595,7 +595,7 @@ void NV12ToRGB565Row_NEON(const uint8_t* src_y,
|
|||||||
int width) {
|
int width) {
|
||||||
asm volatile(
|
asm volatile(
|
||||||
YUVTORGB_SETUP "1: \n" READNV12
|
YUVTORGB_SETUP "1: \n" READNV12
|
||||||
"prfm pldl1keep, [%0, 448] \n" YUVTORGB(
|
"prfm pldl1keep, [%0, 448] \n" YUVTORGB(
|
||||||
v22, v21, v20) ARGBTORGB565
|
v22, v21, v20) ARGBTORGB565
|
||||||
"prfm pldl1keep, [%1, 256] \n"
|
"prfm pldl1keep, [%1, 256] \n"
|
||||||
"subs %w3, %w3, #8 \n"
|
"subs %w3, %w3, #8 \n"
|
||||||
|
|||||||
@@ -28,25 +28,25 @@ extern "C" {
 #if defined(_M_X64)
 
 // Read 8 UV from 444
 #define READYUV444 \
   xmm0 = _mm_loadl_epi64((__m128i*)u_buf); \
   xmm1 = _mm_loadl_epi64((__m128i*)(u_buf + offset)); \
   xmm0 = _mm_unpacklo_epi8(xmm0, xmm1); \
   u_buf += 8; \
   xmm4 = _mm_loadl_epi64((__m128i*)y_buf); \
   xmm4 = _mm_unpacklo_epi8(xmm4, xmm4); \
   y_buf += 8;
 
 // Read 8 UV from 444, With 8 Alpha.
 #define READYUVA444 \
   xmm0 = _mm_loadl_epi64((__m128i*)u_buf); \
   xmm1 = _mm_loadl_epi64((__m128i*)(u_buf + offset)); \
   xmm0 = _mm_unpacklo_epi8(xmm0, xmm1); \
   u_buf += 8; \
   xmm4 = _mm_loadl_epi64((__m128i*)y_buf); \
   xmm4 = _mm_unpacklo_epi8(xmm4, xmm4); \
   y_buf += 8; \
   xmm5 = _mm_loadl_epi64((__m128i*)a_buf); \
   a_buf += 8;
 
 // Read 4 UV from 422, upsample to 8 UV.
@@ -991,20 +991,20 @@ void ScaleUVRowDownEven_NEON(const uint8_t* src_ptr,
   (void)src_stride;
   asm volatile(
       "1: \n"
       "vld1.16 {d0[0]}, [%0], %6 \n"
       "vld1.16 {d0[1]}, [%1], %6 \n"
       "vld1.16 {d0[2]}, [%2], %6 \n"
       "vld1.16 {d0[3]}, [%3], %6 \n"
       "subs %5, %5, #4 \n"  // 4 pixels per loop.
       "vst1.8 {d0}, [%4]! \n"
       "bgt 1b \n"
       : "+r"(src_ptr),    // %0
         "+r"(src1_ptr),   // %1
         "+r"(src2_ptr),   // %2
         "+r"(src3_ptr),   // %3
         "+r"(dst_ptr),    // %4
         "+r"(dst_width)   // %5
       : "r"(src_stepx * 8)  // %6
       : "memory", "cc", "d0");
 }
 
@@ -305,7 +305,7 @@ static void ScaleUVDownEven(int src_width,
       ScaleUVRowDownEven = ScaleUVRowDownEven_NEON;
     }
   }
-#endif// TODO(fbarchard): Enable Box filter
+#endif  // TODO(fbarchard): Enable Box filter
 #if defined(HAS_SCALEUVROWDOWNEVENBOX_NEON)
   if (TestCpuFlag(kCpuHasNEON)) {
     ScaleUVRowDownEven = filtering ? ScaleUVRowDownEvenBox_Any_NEON
@@ -208,7 +208,33 @@ static void YUVHToRGB(int y, int u, int v, int* r, int* g, int* b) {
   *r = orig_pixels[2];
 }
 
-static void YUVRec2020ToRGB(int y, int u, int v, int* r, int* g, int* b) {
+#define F422ToARGB(a, b, c, d, e, f, g, h, i, j) \
+  I422ToARGBMatrix(a, b, c, d, e, f, g, h, &kYuvF709Constants, i, j)
+
+static void YUVFToRGB(int y, int u, int v, int* r, int* g, int* b) {
+  const int kWidth = 16;
+  const int kHeight = 1;
+  const int kPixels = kWidth * kHeight;
+  const int kHalfPixels = ((kWidth + 1) / 2) * ((kHeight + 1) / 2);
+
+  SIMD_ALIGNED(uint8_t orig_y[16]);
+  SIMD_ALIGNED(uint8_t orig_u[8]);
+  SIMD_ALIGNED(uint8_t orig_v[8]);
+  SIMD_ALIGNED(uint8_t orig_pixels[16 * 4]);
+  memset(orig_y, y, kPixels);
+  memset(orig_u, u, kHalfPixels);
+  memset(orig_v, v, kHalfPixels);
+
+  /* YUV converted to ARGB. */
+  F422ToARGB(orig_y, kWidth, orig_u, (kWidth + 1) / 2, orig_v, (kWidth + 1) / 2,
+             orig_pixels, kWidth * 4, kWidth, kHeight);
+
+  *b = orig_pixels[0];
+  *g = orig_pixels[1];
+  *r = orig_pixels[2];
+}
+
+static void YUVUToRGB(int y, int u, int v, int* r, int* g, int* b) {
   const int kWidth = 16;
   const int kHeight = 1;
   const int kPixels = kWidth * kHeight;
@@ -401,13 +427,15 @@ static void YUVHToRGBReference(int y, int u, int v, int* r, int* g, int* b) {
   *b = RoundToByte((y - 16) * 1.164 - (u - 128) * -2.112);
 }
 
+// BT.709 full range YUV to RGB reference
+static void YUVFToRGBReference(int y, int u, int v, int* r, int* g, int* b) {
+  *r = RoundToByte(y - (v - 128) * -1.5748);
+  *g = RoundToByte(y - (u - 128) * 0.18732 - (v - 128) * 0.46812);
+  *b = RoundToByte(y - (u - 128) * -1.8556);
+}
+
 // BT.2020 YUV to RGB reference
-static void YUVRec2020ToRGBReference(int y,
-                                     int u,
-                                     int v,
-                                     int* r,
-                                     int* g,
-                                     int* b) {
+static void YUVUToRGBReference(int y, int u, int v, int* r, int* g, int* b) {
   *r = RoundToByte((y - 16) * 1.164384 - (v - 128) * -1.67867);
   *g = RoundToByte((y - 16) * 1.164384 - (u - 128) * 0.187326 -
                    (v - 128) * 0.65042);
@@ -633,7 +661,7 @@ TEST_F(LibYUVColorTest, TestFullYUVH) {
   PrintHistogram(rh, gh, bh);
 }
 
-TEST_F(LibYUVColorTest, TestFullYUVRec2020) {
+TEST_F(LibYUVColorTest, TestFullYUVF) {
   int rh[256] = {
       0,
   };
@@ -648,8 +676,37 @@ TEST_F(LibYUVColorTest, TestFullYUVRec2020) {
       for (int y2 = 0; y2 < 256; y2 += FASTSTEP) {
         int r0, g0, b0, r1, g1, b1;
         int y = RANDOM256(y2);
-        YUVRec2020ToRGBReference(y, u, v, &r0, &g0, &b0);
-        YUVRec2020ToRGB(y, u, v, &r1, &g1, &b1);
+        YUVFToRGBReference(y, u, v, &r0, &g0, &b0);
+        YUVFToRGB(y, u, v, &r1, &g1, &b1);
+        EXPECT_NEAR(r0, r1, 5);
+        EXPECT_NEAR(g0, g1, 5);
+        EXPECT_NEAR(b0, b1, 5);
+        ++rh[r1 - r0 + 128];
+        ++gh[g1 - g0 + 128];
+        ++bh[b1 - b0 + 128];
+      }
+    }
+  }
+  PrintHistogram(rh, gh, bh);
+}
+
+TEST_F(LibYUVColorTest, TestFullYUVU) {
+  int rh[256] = {
+      0,
+  };
+  int gh[256] = {
+      0,
+  };
+  int bh[256] = {
+      0,
+  };
+  for (int u = 0; u < 256; ++u) {
+    for (int v = 0; v < 256; ++v) {
+      for (int y2 = 0; y2 < 256; y2 += FASTSTEP) {
+        int r0, g0, b0, r1, g1, b1;
+        int y = RANDOM256(y2);
+        YUVUToRGBReference(y, u, v, &r0, &g0, &b0);
+        YUVUToRGB(y, u, v, &r1, &g1, &b1);
         EXPECT_NEAR(r0, r1, ERROR_R);
         EXPECT_NEAR(g0, g1, ERROR_G);
         // TODO(crbug.com/libyuv/863): Reduce the errors in the B channel.
@@ -546,6 +546,20 @@ TESTBIPLANARTOBP(NV12, 2, 2, NV12Mirror, 2, 2)
 TESTBIPLANARTOP(NV12, 2, 2, I420, 2, 2)
 TESTBIPLANARTOP(NV21, 2, 2, I420, 2, 2)
 
+// Provide matrix wrappers
+#define F420ToABGR(a, b, c, d, e, f, g, h, i, j) \
+  I420ToARGBMatrix(a, b, e, f, c, d, g, h, &kYvuF709Constants, i, j)
+#define F420ToARGB(a, b, c, d, e, f, g, h, i, j) \
+  I420ToARGBMatrix(a, b, c, d, e, f, g, h, &kYuvF709Constants, i, j)
+#define F422ToABGR(a, b, c, d, e, f, g, h, i, j) \
+  I422ToARGBMatrix(a, b, e, f, c, d, g, h, &kYvuF709Constants, i, j)
+#define F422ToARGB(a, b, c, d, e, f, g, h, i, j) \
+  I422ToARGBMatrix(a, b, c, d, e, f, g, h, &kYuvF709Constants, i, j)
+#define F444ToABGR(a, b, c, d, e, f, g, h, i, j) \
+  I444ToARGBMatrix(a, b, e, f, c, d, g, h, &kYvuF709Constants, i, j)
+#define F444ToARGB(a, b, c, d, e, f, g, h, i, j) \
+  I444ToARGBMatrix(a, b, c, d, e, f, g, h, &kYuvF709Constants, i, j)
+
 #define ALIGNINT(V, ALIGN) (((V) + (ALIGN)-1) / (ALIGN) * (ALIGN))
 
 #define TESTPLANARTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN, \
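(Equivalent direct calls, for reference: this is what the F420ToARGB / F420ToABGR wrappers above expand to. A sketch with illustrative names, assuming the public headers; ABGR output falls out of the same entry point by swapping the U and V planes and using the mirrored VU table.)

#include "libyuv/convert_argb.h"

void ConvertF420(const uint8_t* y, int sy, const uint8_t* u, int su,
                 const uint8_t* v, int sv, uint8_t* argb, uint8_t* abgr,
                 int width, int height) {
  // ARGB via the YUV-ordered full-range table.
  libyuv::I420ToARGBMatrix(y, sy, u, su, v, sv, argb, width * 4,
                           &libyuv::kYuvF709Constants, width, height);
  // ABGR: swap U/V planes and use the VU-ordered table, as the macro does.
  libyuv::I420ToARGBMatrix(y, sy, v, sv, u, su, abgr, width * 4,
                           &libyuv::kYvuF709Constants, width, height);
}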
@@ -611,6 +625,8 @@ TESTPLANARTOB(I420, 2, 2, ARGB, 4, 4, 1)
 TESTPLANARTOB(I420, 2, 2, ABGR, 4, 4, 1)
 TESTPLANARTOB(J420, 2, 2, ARGB, 4, 4, 1)
 TESTPLANARTOB(J420, 2, 2, ABGR, 4, 4, 1)
+TESTPLANARTOB(F420, 2, 2, ARGB, 4, 4, 1)
+TESTPLANARTOB(F420, 2, 2, ABGR, 4, 4, 1)
 TESTPLANARTOB(H420, 2, 2, ARGB, 4, 4, 1)
 TESTPLANARTOB(H420, 2, 2, ABGR, 4, 4, 1)
 TESTPLANARTOB(U420, 2, 2, ARGB, 4, 4, 1)
@@ -726,6 +742,12 @@ TESTPLANARTOB(H420, 2, 2, AR30, 4, 4, 1)
 #define J420AlphaToABGR(a, b, c, d, e, f, g, h, i, j, k, l, m) \
   I420AlphaToABGRMatrix(a, b, c, d, e, f, g, h, i, j, &kYuvJPEGConstants, k, \
                         l, m)
+#define F420AlphaToARGB(a, b, c, d, e, f, g, h, i, j, k, l, m) \
+  I420AlphaToARGBMatrix(a, b, c, d, e, f, g, h, i, j, &kYuvF709Constants, k, \
+                        l, m)
+#define F420AlphaToABGR(a, b, c, d, e, f, g, h, i, j, k, l, m) \
+  I420AlphaToABGRMatrix(a, b, c, d, e, f, g, h, i, j, &kYuvF709Constants, k, \
+                        l, m)
 #define H420AlphaToARGB(a, b, c, d, e, f, g, h, i, j, k, l, m) \
   I420AlphaToARGBMatrix(a, b, c, d, e, f, g, h, i, j, &kYuvH709Constants, k, \
                         l, m)
@@ -744,6 +766,12 @@ TESTPLANARTOB(H420, 2, 2, AR30, 4, 4, 1)
 #define J422AlphaToABGR(a, b, c, d, e, f, g, h, i, j, k, l, m) \
   I422AlphaToABGRMatrix(a, b, c, d, e, f, g, h, i, j, &kYuvJPEGConstants, k, \
                         l, m)
+#define F422AlphaToARGB(a, b, c, d, e, f, g, h, i, j, k, l, m) \
+  I422AlphaToARGBMatrix(a, b, c, d, e, f, g, h, i, j, &kYuvF709Constants, k, \
+                        l, m)
+#define F422AlphaToABGR(a, b, c, d, e, f, g, h, i, j, k, l, m) \
+  I422AlphaToABGRMatrix(a, b, c, d, e, f, g, h, i, j, &kYuvF709Constants, k, \
+                        l, m)
 #define H422AlphaToARGB(a, b, c, d, e, f, g, h, i, j, k, l, m) \
   I422AlphaToARGBMatrix(a, b, c, d, e, f, g, h, i, j, &kYuvH709Constants, k, \
                         l, m)
@@ -762,6 +790,12 @@ TESTPLANARTOB(H420, 2, 2, AR30, 4, 4, 1)
 #define J444AlphaToABGR(a, b, c, d, e, f, g, h, i, j, k, l, m) \
   I444AlphaToABGRMatrix(a, b, c, d, e, f, g, h, i, j, &kYuvJPEGConstants, k, \
                         l, m)
+#define F444AlphaToARGB(a, b, c, d, e, f, g, h, i, j, k, l, m) \
+  I444AlphaToARGBMatrix(a, b, c, d, e, f, g, h, i, j, &kYuvF709Constants, k, \
+                        l, m)
+#define F444AlphaToABGR(a, b, c, d, e, f, g, h, i, j, k, l, m) \
+  I444AlphaToABGRMatrix(a, b, c, d, e, f, g, h, i, j, &kYuvF709Constants, k, \
+                        l, m)
 #define H444AlphaToARGB(a, b, c, d, e, f, g, h, i, j, k, l, m) \
   I444AlphaToARGBMatrix(a, b, c, d, e, f, g, h, i, j, &kYuvH709Constants, k, \
                         l, m)
@@ -2812,12 +2846,16 @@ TESTQPLANARTOE(J420Alpha, 2, 2, ARGB, 1, 4, ABGR, 4)
 TESTQPLANARTOE(J420Alpha, 2, 2, ABGR, 1, 4, ARGB, 4)
 TESTQPLANARTOE(H420Alpha, 2, 2, ARGB, 1, 4, ABGR, 4)
 TESTQPLANARTOE(H420Alpha, 2, 2, ABGR, 1, 4, ARGB, 4)
+TESTQPLANARTOE(F420Alpha, 2, 2, ARGB, 1, 4, ABGR, 4)
+TESTQPLANARTOE(F420Alpha, 2, 2, ABGR, 1, 4, ARGB, 4)
 TESTQPLANARTOE(U420Alpha, 2, 2, ARGB, 1, 4, ABGR, 4)
 TESTQPLANARTOE(U420Alpha, 2, 2, ABGR, 1, 4, ARGB, 4)
 TESTQPLANARTOE(I422Alpha, 2, 1, ARGB, 1, 4, ABGR, 4)
 TESTQPLANARTOE(I422Alpha, 2, 1, ABGR, 1, 4, ARGB, 4)
 TESTQPLANARTOE(J422Alpha, 2, 1, ARGB, 1, 4, ABGR, 4)
 TESTQPLANARTOE(J422Alpha, 2, 1, ABGR, 1, 4, ARGB, 4)
+TESTQPLANARTOE(F422Alpha, 2, 1, ARGB, 1, 4, ABGR, 4)
+TESTQPLANARTOE(F422Alpha, 2, 1, ABGR, 1, 4, ARGB, 4)
 TESTQPLANARTOE(H422Alpha, 2, 1, ARGB, 1, 4, ABGR, 4)
 TESTQPLANARTOE(H422Alpha, 2, 1, ABGR, 1, 4, ARGB, 4)
 TESTQPLANARTOE(U422Alpha, 2, 1, ARGB, 1, 4, ABGR, 4)