Rename MergeUVRow_ variable to MergeUVRow

Bug: libyuv:42280902
Change-Id: I9935bf958b901ddf84cf91b2097c8cd5d6efadde
Reviewed-on: https://chromium-review.googlesource.com/c/libyuv/libyuv/+/7683070
Commit-Queue: Frank Barchard <fbarchard@chromium.org>
Reviewed-by: Dale Curtis <dalecurtis@chromium.org>
This commit is contained in:
Frank Barchard 2026-03-18 16:31:08 -07:00 committed by libyuv LUCI CQ
parent b1cacfb38f
commit 4183733af5

View File

@@ -454,7 +454,7 @@ int ARGBToNV12(const uint8_t* src_argb,
ARGBToUVRow_C;
void (*ARGBToYRow)(const uint8_t* src_argb, uint8_t* dst_y, int width) =
ARGBToYRow_C;
void (*MergeUVRow_)(const uint8_t* src_u, const uint8_t* src_v,
void (*MergeUVRow)(const uint8_t* src_u, const uint8_t* src_v,
uint8_t* dst_uv, int width) = MergeUVRow_C;
if (!src_argb || !dst_y || !dst_uv || width <= 0 || height == 0) {
return -1;
@@ -570,52 +570,52 @@ int ARGBToNV12(const uint8_t* src_argb,
#endif
#if defined(HAS_MERGEUVROW_SSE2)
if (TestCpuFlag(kCpuHasSSE2)) {
MergeUVRow_ = MergeUVRow_Any_SSE2;
MergeUVRow = MergeUVRow_Any_SSE2;
if (IS_ALIGNED(halfwidth, 16)) {
MergeUVRow_ = MergeUVRow_SSE2;
MergeUVRow = MergeUVRow_SSE2;
}
}
#endif
#if defined(HAS_MERGEUVROW_AVX2)
if (TestCpuFlag(kCpuHasAVX2)) {
MergeUVRow_ = MergeUVRow_Any_AVX2;
MergeUVRow = MergeUVRow_Any_AVX2;
if (IS_ALIGNED(halfwidth, 16)) {
MergeUVRow_ = MergeUVRow_AVX2;
MergeUVRow = MergeUVRow_AVX2;
}
}
#endif
#if defined(HAS_MERGEUVROW_AVX512BW)
if (TestCpuFlag(kCpuHasAVX512BW)) {
MergeUVRow_ = MergeUVRow_Any_AVX512BW;
MergeUVRow = MergeUVRow_Any_AVX512BW;
if (IS_ALIGNED(halfwidth, 32)) {
MergeUVRow_ = MergeUVRow_AVX512BW;
MergeUVRow = MergeUVRow_AVX512BW;
}
}
#endif
#if defined(HAS_MERGEUVROW_NEON)
if (TestCpuFlag(kCpuHasNEON)) {
MergeUVRow_ = MergeUVRow_Any_NEON;
MergeUVRow = MergeUVRow_Any_NEON;
if (IS_ALIGNED(halfwidth, 16)) {
MergeUVRow_ = MergeUVRow_NEON;
MergeUVRow = MergeUVRow_NEON;
}
}
#endif
#if defined(HAS_MERGEUVROW_SME)
if (TestCpuFlag(kCpuHasSME)) {
MergeUVRow_ = MergeUVRow_SME;
MergeUVRow = MergeUVRow_SME;
}
#endif
#if defined(HAS_MERGEUVROW_LSX)
if (TestCpuFlag(kCpuHasLSX)) {
MergeUVRow_ = MergeUVRow_Any_LSX;
MergeUVRow = MergeUVRow_Any_LSX;
if (IS_ALIGNED(halfwidth, 16)) {
MergeUVRow_ = MergeUVRow_LSX;
MergeUVRow = MergeUVRow_LSX;
}
}
#endif
#if defined(HAS_MERGEUVROW_RVV)
if (TestCpuFlag(kCpuHasRVV)) {
MergeUVRow_ = MergeUVRow_RVV;
MergeUVRow = MergeUVRow_RVV;
}
#endif
{
@@ -627,7 +627,7 @@ int ARGBToNV12(const uint8_t* src_argb,
for (y = 0; y < height - 1; y += 2) {
ARGBToUVRow(src_argb, src_stride_argb, row_u, row_v, width);
MergeUVRow_(row_u, row_v, dst_uv, halfwidth);
MergeUVRow(row_u, row_v, dst_uv, halfwidth);
ARGBToYRow(src_argb, dst_y, width);
ARGBToYRow(src_argb + src_stride_argb, dst_y + dst_stride_y, width);
src_argb += src_stride_argb * 2;
@@ -636,7 +636,7 @@ int ARGBToNV12(const uint8_t* src_argb,
}
if (height & 1) {
ARGBToUVRow(src_argb, 0, row_u, row_v, width);
MergeUVRow_(row_u, row_v, dst_uv, halfwidth);
MergeUVRow(row_u, row_v, dst_uv, halfwidth);
ARGBToYRow(src_argb, dst_y, width);
}
free_aligned_buffer_64(row_u);
@@ -784,7 +784,7 @@ int ARGBToNV21(const uint8_t* src_argb,
ARGBToUVRow_C;
void (*ARGBToYRow)(const uint8_t* src_argb, uint8_t* dst_y, int width) =
ARGBToYRow_C;
void (*MergeUVRow_)(const uint8_t* src_u, const uint8_t* src_v,
void (*MergeUVRow)(const uint8_t* src_u, const uint8_t* src_v,
uint8_t* dst_vu, int width) = MergeUVRow_C;
if (!src_argb || !dst_y || !dst_vu || width <= 0 || height == 0) {
return -1;
@@ -910,52 +910,52 @@ int ARGBToNV21(const uint8_t* src_argb,
#endif
#if defined(HAS_MERGEUVROW_SSE2)
if (TestCpuFlag(kCpuHasSSE2)) {
MergeUVRow_ = MergeUVRow_Any_SSE2;
MergeUVRow = MergeUVRow_Any_SSE2;
if (IS_ALIGNED(halfwidth, 16)) {
MergeUVRow_ = MergeUVRow_SSE2;
MergeUVRow = MergeUVRow_SSE2;
}
}
#endif
#if defined(HAS_MERGEUVROW_AVX2)
if (TestCpuFlag(kCpuHasAVX2)) {
MergeUVRow_ = MergeUVRow_Any_AVX2;
MergeUVRow = MergeUVRow_Any_AVX2;
if (IS_ALIGNED(halfwidth, 16)) {
MergeUVRow_ = MergeUVRow_AVX2;
MergeUVRow = MergeUVRow_AVX2;
}
}
#endif
#if defined(HAS_MERGEUVROW_AVX512BW)
if (TestCpuFlag(kCpuHasAVX512BW)) {
MergeUVRow_ = MergeUVRow_Any_AVX512BW;
MergeUVRow = MergeUVRow_Any_AVX512BW;
if (IS_ALIGNED(halfwidth, 64)) {
MergeUVRow_ = MergeUVRow_AVX512BW;
MergeUVRow = MergeUVRow_AVX512BW;
}
}
#endif
#if defined(HAS_MERGEUVROW_NEON)
if (TestCpuFlag(kCpuHasNEON)) {
MergeUVRow_ = MergeUVRow_Any_NEON;
MergeUVRow = MergeUVRow_Any_NEON;
if (IS_ALIGNED(halfwidth, 16)) {
MergeUVRow_ = MergeUVRow_NEON;
MergeUVRow = MergeUVRow_NEON;
}
}
#endif
#if defined(HAS_MERGEUVROW_SME)
if (TestCpuFlag(kCpuHasSME)) {
MergeUVRow_ = MergeUVRow_SME;
MergeUVRow = MergeUVRow_SME;
}
#endif
#if defined(HAS_MERGEUVROW_LSX)
if (TestCpuFlag(kCpuHasLSX)) {
MergeUVRow_ = MergeUVRow_Any_LSX;
MergeUVRow = MergeUVRow_Any_LSX;
if (IS_ALIGNED(halfwidth, 16)) {
MergeUVRow_ = MergeUVRow_LSX;
MergeUVRow = MergeUVRow_LSX;
}
}
#endif
#if defined(HAS_MERGEUVROW_RVV)
if (TestCpuFlag(kCpuHasRVV)) {
MergeUVRow_ = MergeUVRow_RVV;
MergeUVRow = MergeUVRow_RVV;
}
#endif
{
@@ -967,7 +967,7 @@ int ARGBToNV21(const uint8_t* src_argb,
for (y = 0; y < height - 1; y += 2) {
ARGBToUVRow(src_argb, src_stride_argb, row_u, row_v, width);
MergeUVRow_(row_v, row_u, dst_vu, halfwidth);
MergeUVRow(row_v, row_u, dst_vu, halfwidth);
ARGBToYRow(src_argb, dst_y, width);
ARGBToYRow(src_argb + src_stride_argb, dst_y + dst_stride_y, width);
src_argb += src_stride_argb * 2;
@@ -976,7 +976,7 @@ int ARGBToNV21(const uint8_t* src_argb,
}
if (height & 1) {
ARGBToUVRow(src_argb, 0, row_u, row_v, width);
MergeUVRow_(row_v, row_u, dst_vu, halfwidth);
MergeUVRow(row_v, row_u, dst_vu, halfwidth);
ARGBToYRow(src_argb, dst_y, width);
}
free_aligned_buffer_64(row_u);
@@ -1000,7 +1000,7 @@ int ABGRToNV12(const uint8_t* src_abgr,
ABGRToUVRow_C;
void (*ABGRToYRow)(const uint8_t* src_abgr, uint8_t* dst_y, int width) =
ABGRToYRow_C;
void (*MergeUVRow_)(const uint8_t* src_u, const uint8_t* src_v,
void (*MergeUVRow)(const uint8_t* src_u, const uint8_t* src_v,
uint8_t* dst_uv, int width) = MergeUVRow_C;
if (!src_abgr || !dst_y || !dst_uv || width <= 0 || height == 0) {
return -1;
@@ -1114,52 +1114,52 @@ int ABGRToNV12(const uint8_t* src_abgr,
#endif
#if defined(HAS_MERGEUVROW_SSE2)
if (TestCpuFlag(kCpuHasSSE2)) {
MergeUVRow_ = MergeUVRow_Any_SSE2;
MergeUVRow = MergeUVRow_Any_SSE2;
if (IS_ALIGNED(halfwidth, 16)) {
MergeUVRow_ = MergeUVRow_SSE2;
MergeUVRow = MergeUVRow_SSE2;
}
}
#endif
#if defined(HAS_MERGEUVROW_AVX2)
if (TestCpuFlag(kCpuHasAVX2)) {
MergeUVRow_ = MergeUVRow_Any_AVX2;
MergeUVRow = MergeUVRow_Any_AVX2;
if (IS_ALIGNED(halfwidth, 16)) {
MergeUVRow_ = MergeUVRow_AVX2;
MergeUVRow = MergeUVRow_AVX2;
}
}
#endif
#if defined(HAS_MERGEUVROW_AVX512BW)
if (TestCpuFlag(kCpuHasAVX512BW)) {
MergeUVRow_ = MergeUVRow_Any_AVX512BW;
MergeUVRow = MergeUVRow_Any_AVX512BW;
if (IS_ALIGNED(halfwidth, 64)) {
MergeUVRow_ = MergeUVRow_AVX512BW;
MergeUVRow = MergeUVRow_AVX512BW;
}
}
#endif
#if defined(HAS_MERGEUVROW_NEON)
if (TestCpuFlag(kCpuHasNEON)) {
MergeUVRow_ = MergeUVRow_Any_NEON;
MergeUVRow = MergeUVRow_Any_NEON;
if (IS_ALIGNED(halfwidth, 16)) {
MergeUVRow_ = MergeUVRow_NEON;
MergeUVRow = MergeUVRow_NEON;
}
}
#endif
#if defined(HAS_MERGEUVROW_SME)
if (TestCpuFlag(kCpuHasSME)) {
MergeUVRow_ = MergeUVRow_SME;
MergeUVRow = MergeUVRow_SME;
}
#endif
#if defined(HAS_MERGEUVROW_LSX)
if (TestCpuFlag(kCpuHasLSX)) {
MergeUVRow_ = MergeUVRow_Any_LSX;
MergeUVRow = MergeUVRow_Any_LSX;
if (IS_ALIGNED(halfwidth, 16)) {
MergeUVRow_ = MergeUVRow_LSX;
MergeUVRow = MergeUVRow_LSX;
}
}
#endif
#if defined(HAS_MERGEUVROW_RVV)
if (TestCpuFlag(kCpuHasRVV)) {
MergeUVRow_ = MergeUVRow_RVV;
MergeUVRow = MergeUVRow_RVV;
}
#endif
{
@@ -1171,7 +1171,7 @@ int ABGRToNV12(const uint8_t* src_abgr,
for (y = 0; y < height - 1; y += 2) {
ABGRToUVRow(src_abgr, src_stride_abgr, row_u, row_v, width);
MergeUVRow_(row_u, row_v, dst_uv, halfwidth);
MergeUVRow(row_u, row_v, dst_uv, halfwidth);
ABGRToYRow(src_abgr, dst_y, width);
ABGRToYRow(src_abgr + src_stride_abgr, dst_y + dst_stride_y, width);
src_abgr += src_stride_abgr * 2;
@@ -1180,7 +1180,7 @@ int ABGRToNV12(const uint8_t* src_abgr,
}
if (height & 1) {
ABGRToUVRow(src_abgr, 0, row_u, row_v, width);
MergeUVRow_(row_u, row_v, dst_uv, halfwidth);
MergeUVRow(row_u, row_v, dst_uv, halfwidth);
ABGRToYRow(src_abgr, dst_y, width);
}
free_aligned_buffer_64(row_u);
@@ -1205,7 +1205,7 @@ int ABGRToNV21(const uint8_t* src_abgr,
ABGRToUVRow_C;
void (*ABGRToYRow)(const uint8_t* src_abgr, uint8_t* dst_y, int width) =
ABGRToYRow_C;
void (*MergeUVRow_)(const uint8_t* src_u, const uint8_t* src_v,
void (*MergeUVRow)(const uint8_t* src_u, const uint8_t* src_v,
uint8_t* dst_vu, int width) = MergeUVRow_C;
if (!src_abgr || !dst_y || !dst_vu || width <= 0 || height == 0) {
return -1;
@@ -1319,52 +1319,52 @@ int ABGRToNV21(const uint8_t* src_abgr,
#endif
#if defined(HAS_MERGEUVROW_SSE2)
if (TestCpuFlag(kCpuHasSSE2)) {
MergeUVRow_ = MergeUVRow_Any_SSE2;
MergeUVRow = MergeUVRow_Any_SSE2;
if (IS_ALIGNED(halfwidth, 16)) {
MergeUVRow_ = MergeUVRow_SSE2;
MergeUVRow = MergeUVRow_SSE2;
}
}
#endif
#if defined(HAS_MERGEUVROW_AVX2)
if (TestCpuFlag(kCpuHasAVX2)) {
MergeUVRow_ = MergeUVRow_Any_AVX2;
MergeUVRow = MergeUVRow_Any_AVX2;
if (IS_ALIGNED(halfwidth, 16)) {
MergeUVRow_ = MergeUVRow_AVX2;
MergeUVRow = MergeUVRow_AVX2;
}
}
#endif
#if defined(HAS_MERGEUVROW_AVX512BW)
if (TestCpuFlag(kCpuHasAVX512BW)) {
MergeUVRow_ = MergeUVRow_Any_AVX512BW;
MergeUVRow = MergeUVRow_Any_AVX512BW;
if (IS_ALIGNED(halfwidth, 64)) {
MergeUVRow_ = MergeUVRow_AVX512BW;
MergeUVRow = MergeUVRow_AVX512BW;
}
}
#endif
#if defined(HAS_MERGEUVROW_NEON)
if (TestCpuFlag(kCpuHasNEON)) {
MergeUVRow_ = MergeUVRow_Any_NEON;
MergeUVRow = MergeUVRow_Any_NEON;
if (IS_ALIGNED(halfwidth, 16)) {
MergeUVRow_ = MergeUVRow_NEON;
MergeUVRow = MergeUVRow_NEON;
}
}
#endif
#if defined(HAS_MERGEUVROW_SME)
if (TestCpuFlag(kCpuHasSME)) {
MergeUVRow_ = MergeUVRow_SME;
MergeUVRow = MergeUVRow_SME;
}
#endif
#if defined(HAS_MERGEUVROW_LSX)
if (TestCpuFlag(kCpuHasLSX)) {
MergeUVRow_ = MergeUVRow_Any_LSX;
MergeUVRow = MergeUVRow_Any_LSX;
if (IS_ALIGNED(halfwidth, 16)) {
MergeUVRow_ = MergeUVRow_LSX;
MergeUVRow = MergeUVRow_LSX;
}
}
#endif
#if defined(HAS_MERGEUVROW_RVV)
if (TestCpuFlag(kCpuHasRVV)) {
MergeUVRow_ = MergeUVRow_RVV;
MergeUVRow = MergeUVRow_RVV;
}
#endif
{
@@ -1376,7 +1376,7 @@ int ABGRToNV21(const uint8_t* src_abgr,
for (y = 0; y < height - 1; y += 2) {
ABGRToUVRow(src_abgr, src_stride_abgr, row_u, row_v, width);
MergeUVRow_(row_v, row_u, dst_vu, halfwidth);
MergeUVRow(row_v, row_u, dst_vu, halfwidth);
ABGRToYRow(src_abgr, dst_y, width);
ABGRToYRow(src_abgr + src_stride_abgr, dst_y + dst_stride_y, width);
src_abgr += src_stride_abgr * 2;
@@ -1385,7 +1385,7 @@ int ABGRToNV21(const uint8_t* src_abgr,
}
if (height & 1) {
ABGRToUVRow(src_abgr, 0, row_u, row_v, width);
MergeUVRow_(row_v, row_u, dst_vu, halfwidth);
MergeUVRow(row_v, row_u, dst_vu, halfwidth);
ABGRToYRow(src_abgr, dst_y, width);
}
free_aligned_buffer_64(row_u);
@@ -3700,7 +3700,7 @@ int RAWToJNV21(const uint8_t* src_raw,
void (*ARGBToYJRow)(const uint8_t* src_argb, uint8_t* dst_y, int width) =
ARGBToYJRow_C;
#endif
void (*MergeUVRow_)(const uint8_t* src_uj, const uint8_t* src_vj,
void (*MergeUVRow)(const uint8_t* src_uj, const uint8_t* src_vj,
uint8_t* dst_vu, int width) = MergeUVRow_C;
if (!src_raw || !dst_y || !dst_vu || width <= 0 || height == 0) {
return -1;
@@ -3801,52 +3801,52 @@ int RAWToJNV21(const uint8_t* src_raw,
#endif // HAS_RAWTOYJROW
#if defined(HAS_MERGEUVROW_SSE2)
if (TestCpuFlag(kCpuHasSSE2)) {
MergeUVRow_ = MergeUVRow_Any_SSE2;
MergeUVRow = MergeUVRow_Any_SSE2;
if (IS_ALIGNED(halfwidth, 16)) {
MergeUVRow_ = MergeUVRow_SSE2;
MergeUVRow = MergeUVRow_SSE2;
}
}
#endif
#if defined(HAS_MERGEUVROW_AVX2)
if (TestCpuFlag(kCpuHasAVX2)) {
MergeUVRow_ = MergeUVRow_Any_AVX2;
MergeUVRow = MergeUVRow_Any_AVX2;
if (IS_ALIGNED(halfwidth, 16)) {
MergeUVRow_ = MergeUVRow_AVX2;
MergeUVRow = MergeUVRow_AVX2;
}
}
#endif
#if defined(HAS_MERGEUVROW_AVX512BW)
if (TestCpuFlag(kCpuHasAVX512BW)) {
MergeUVRow_ = MergeUVRow_Any_AVX512BW;
MergeUVRow = MergeUVRow_Any_AVX512BW;
if (IS_ALIGNED(halfwidth, 64)) {
MergeUVRow_ = MergeUVRow_AVX512BW;
MergeUVRow = MergeUVRow_AVX512BW;
}
}
#endif
#if defined(HAS_MERGEUVROW_NEON)
if (TestCpuFlag(kCpuHasNEON)) {
MergeUVRow_ = MergeUVRow_Any_NEON;
MergeUVRow = MergeUVRow_Any_NEON;
if (IS_ALIGNED(halfwidth, 16)) {
MergeUVRow_ = MergeUVRow_NEON;
MergeUVRow = MergeUVRow_NEON;
}
}
#endif
#if defined(HAS_MERGEUVROW_SME)
if (TestCpuFlag(kCpuHasSME)) {
MergeUVRow_ = MergeUVRow_SME;
MergeUVRow = MergeUVRow_SME;
}
#endif
#if defined(HAS_MERGEUVROW_LSX)
if (TestCpuFlag(kCpuHasLSX)) {
MergeUVRow_ = MergeUVRow_Any_LSX;
MergeUVRow = MergeUVRow_Any_LSX;
if (IS_ALIGNED(halfwidth, 16)) {
MergeUVRow_ = MergeUVRow_LSX;
MergeUVRow = MergeUVRow_LSX;
}
}
#endif
#if defined(HAS_MERGEUVROW_RVV)
if (TestCpuFlag(kCpuHasRVV)) {
MergeUVRow_ = MergeUVRow_RVV;
MergeUVRow = MergeUVRow_RVV;
}
#endif
{
@@ -3869,14 +3869,14 @@ int RAWToJNV21(const uint8_t* src_raw,
for (y = 0; y < height - 1; y += 2) {
#if defined(HAS_RAWTOYJROW)
RAWToUVJRow(src_raw, src_stride_raw, row_uj, row_vj, width);
MergeUVRow_(row_vj, row_uj, dst_vu, halfwidth);
MergeUVRow(row_vj, row_uj, dst_vu, halfwidth);
RAWToYJRow(src_raw, dst_y, width);
RAWToYJRow(src_raw + src_stride_raw, dst_y + dst_stride_y, width);
#else
RAWToARGBRow(src_raw, row, width);
RAWToARGBRow(src_raw + src_stride_raw, row + row_size, width);
ARGBToUVJRow(row, row_size, row_uj, row_vj, width);
MergeUVRow_(row_vj, row_uj, dst_vu, halfwidth);
MergeUVRow(row_vj, row_uj, dst_vu, halfwidth);
ARGBToYJRow(row, dst_y, width);
ARGBToYJRow(row + row_size, dst_y + dst_stride_y, width);
#endif
@@ -3887,12 +3887,12 @@ int RAWToJNV21(const uint8_t* src_raw,
if (height & 1) {
#if defined(HAS_RAWTOYJROW)
RAWToUVJRow(src_raw, 0, row_uj, row_vj, width);
MergeUVRow_(row_vj, row_uj, dst_vu, halfwidth);
MergeUVRow(row_vj, row_uj, dst_vu, halfwidth);
RAWToYJRow(src_raw, dst_y, width);
#else
RAWToARGBRow(src_raw, row, width);
ARGBToUVJRow(row, 0, row_uj, row_vj, width);
MergeUVRow_(row_vj, row_uj, dst_vu, halfwidth);
MergeUVRow(row_vj, row_uj, dst_vu, halfwidth);
ARGBToYJRow(row, dst_y, width);
#endif
}