[AArch64] Add SVE2 implementations of NV{12,21}ToRGB24Row

Now that we have the `_2X` versions of the macros, we can use them to
implement the `ToRGB24` kernels. These cannot use the bottom/top
approach previously used by other SVE kernels, since each pixel is
stored as three elements rather than two or four.
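
For context, a minimal scalar sketch of the per-row conversion these
kernels vectorize. The fixed-point BT.601-style coefficients are
illustrative stand-ins (the real kernels take coefficients from the
YuvConstants table) and the helper name is made up for this sketch;
note the three-byte B, G, R group written per pixel, which is what the
ST3B store handles in the SVE2 version:

#include <stdint.h>

static uint8_t clamp255(int v) {
  return (uint8_t)(v < 0 ? 0 : v > 255 ? 255 : v);
}

// Illustrative NV12 -> RGB24 for one row; libyuv's RGB24 is B, G, R in
// memory, three bytes per pixel.
void NV12ToRGB24Row_C_sketch(const uint8_t* src_y, const uint8_t* src_uv,
                             uint8_t* dst_rgb24, int width) {
  for (int x = 0; x < width; ++x) {
    int y = (src_y[x] - 16) * 298;
    int u = src_uv[x & ~1] - 128;        // U: even bytes of interleaved UV
    int v = src_uv[(x & ~1) + 1] - 128;  // V: odd bytes
    dst_rgb24[3 * x + 0] = clamp255((y + 516 * u + 128) >> 8);            // B
    dst_rgb24[3 * x + 1] = clamp255((y - 100 * u - 208 * v + 128) >> 8);  // G
    dst_rgb24[3 * x + 2] = clamp255((y + 409 * v + 128) >> 8);            // R
  }
}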

Reduction in runtimes observed compared to the existing Neon
implementations:

            | NV12ToRGB24Row | NV21ToRGB24Row
Cortex-A510 |         -60.7% |         -60.7%
Cortex-A520 |         -46.0% |         -46.0%
Cortex-A715 |         -25.2% |         -25.2%
Cortex-A720 |         -25.2% |         -25.2%
  Cortex-X2 |         -28.9% |         -29.0%
  Cortex-X3 |         -28.2% |         -28.1%
  Cortex-X4 |         -30.8% |         -30.7%
Cortex-X925 |         -28.8% |         -28.9%

Change-Id: I39853d124bfdcac38584109870b398b8ecd5b632
Reviewed-on: https://chromium-review.googlesource.com/c/libyuv/libyuv/+/6067149
Reviewed-by: Frank Barchard <fbarchard@chromium.org>
commit 8f659daffd
parent 233f859e3c
Author: George Steed, 2024-09-09 17:53:24 +01:00
Committer: Frank Barchard
3 changed files with 142 additions and 0 deletions


@@ -572,7 +572,9 @@ extern "C" {
#define HAS_I444ALPHATOARGBROW_SVE2
#define HAS_I444TOARGBROW_SVE2
#define HAS_NV12TOARGBROW_SVE2
#define HAS_NV12TORGB24ROW_SVE2
#define HAS_NV21TOARGBROW_SVE2
#define HAS_NV21TORGB24ROW_SVE2
#define HAS_P210TOAR30ROW_SVE2
#define HAS_P210TOARGBROW_SVE2
#define HAS_P410TOAR30ROW_SVE2
@@ -1332,11 +1334,21 @@ void NV12ToRGB24Row_NEON(const uint8_t* src_y,
                         uint8_t* dst_rgb24,
                         const struct YuvConstants* yuvconstants,
                         int width);
void NV12ToRGB24Row_SVE2(const uint8_t* src_y,
                         const uint8_t* src_uv,
                         uint8_t* dst_rgb24,
                         const struct YuvConstants* yuvconstants,
                         int width);
void NV21ToRGB24Row_NEON(const uint8_t* src_y,
                         const uint8_t* src_vu,
                         uint8_t* dst_rgb24,
                         const struct YuvConstants* yuvconstants,
                         int width);
void NV21ToRGB24Row_SVE2(const uint8_t* src_y,
                         const uint8_t* src_vu,
                         uint8_t* dst_rgb24,
                         const struct YuvConstants* yuvconstants,
                         int width);
void NV21ToYUV24Row_NEON(const uint8_t* src_y,
                         const uint8_t* src_vu,
                         uint8_t* dst_yuv24,


@@ -4610,6 +4610,11 @@ int NV12ToRGB24Matrix(const uint8_t* src_y,
    }
  }
#endif
#if defined(HAS_NV12TORGB24ROW_SVE2)
  if (TestCpuFlag(kCpuHasSVE2)) {
    NV12ToRGB24Row = NV12ToRGB24Row_SVE2;
  }
#endif
#if defined(HAS_NV12TORGB24ROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    NV12ToRGB24Row = NV12ToRGB24Row_Any_SSSE3;
@@ -4676,6 +4681,11 @@ int NV21ToRGB24Matrix(const uint8_t* src_y,
    }
  }
#endif
#if defined(HAS_NV21TORGB24ROW_SVE2)
  if (TestCpuFlag(kCpuHasSVE2)) {
    NV21ToRGB24Row = NV21ToRGB24Row_SVE2;
  }
#endif
#if defined(HAS_NV21TORGB24ROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    NV21ToRGB24Row = NV21ToRGB24Row_Any_SSSE3;

@@ -925,6 +925,126 @@ void NV21ToARGBRow_SVE2(const uint8_t* src_y,
      : "cc", "memory", YUVTORGB_SVE_REGS, "p2");
}
void NV12ToRGB24Row_SVE2(const uint8_t* src_y,
                         const uint8_t* src_uv,
                         uint8_t* dst_rgb24,
                         const struct YuvConstants* yuvconstants,
                         int width) {
  // Index vectors for de-interleaving the UV plane: in NV12, U occupies the
  // even bytes and V the odd bytes.
  uint32_t nv_u_start = 0xff00U;
  uint32_t nv_u_step = 0x0002U;
  uint32_t nv_v_start = 0xff01U;
  uint32_t nv_v_step = 0x0002U;
  uint64_t vl;
  asm("cntb %0" : "=r"(vl));  // vl = SVE vector length in bytes.
  int width_last_y = width & (vl - 1);
  // Round the Y tail up to even so the UV predicate covers whole U/V pairs.
  int width_last_uv = width_last_y + (width_last_y & 1);
  asm volatile(
      "ptrue    p0.b                                    \n"  //
      YUVTORGB_SVE_SETUP
      "dup      z19.b, #255                             \n"  // A
      "index    z7.h, %w[nv_u_start], %w[nv_u_step]     \n"
      "index    z23.h, %w[nv_v_start], %w[nv_v_step]    \n"
      "subs     %w[width], %w[width], %w[vl]            \n"
      "b.lt     2f                                      \n"

      // Run bulk of computation with an all-true predicate to avoid
      // predicate generation overhead.
      "ptrue    p1.b                                    \n"
      "ptrue    p2.b                                    \n"
      "1:                                               \n"  //
      READNV_SVE_2X NVTORGB_SVE_2X(b, t) RGBTOARGB8_SVE_2X
      "subs     %w[width], %w[width], %w[vl]            \n"
      "st3b     {z16.b, z17.b, z18.b}, p1, [%[dst_rgb24]] \n"
      "incb     %[dst_rgb24], all, mul #3               \n"
      "b.ge     1b                                      \n"

      "2:                                               \n"
      "adds     %w[width], %w[width], %w[vl]            \n"
      "b.eq     99f                                     \n"

      // Calculate a predicate for the final iteration to deal with the tail.
      "whilelt  p1.b, wzr, %w[width_last_y]             \n"
      "whilelt  p2.b, wzr, %w[width_last_uv]            \n"  //
      READNV_SVE_2X NVTORGB_SVE_2X(b, t) RGBTOARGB8_SVE_2X
      "st3b     {z16.b, z17.b, z18.b}, p1, [%[dst_rgb24]] \n"

      "99:                                              \n"
      : [src_y] "+r"(src_y),                               // %[src_y]
        [src_uv] "+r"(src_uv),                             // %[src_uv]
        [dst_rgb24] "+r"(dst_rgb24),                       // %[dst_rgb24]
        [width] "+r"(width)                                // %[width]
      : [vl] "r"(vl),                                      // %[vl]
        [kUVCoeff] "r"(&yuvconstants->kUVCoeff),           // %[kUVCoeff]
        [kRGBCoeffBias] "r"(&yuvconstants->kRGBCoeffBias), // %[kRGBCoeffBias]
        [nv_u_start] "r"(nv_u_start),                      // %[nv_u_start]
        [nv_u_step] "r"(nv_u_step),                        // %[nv_u_step]
        [nv_v_start] "r"(nv_v_start),                      // %[nv_v_start]
        [nv_v_step] "r"(nv_v_step),                        // %[nv_v_step]
        [width_last_y] "r"(width_last_y),                  // %[width_last_y]
        [width_last_uv] "r"(width_last_uv)                 // %[width_last_uv]
      : "cc", "memory", YUVTORGB_SVE_REGS, "p2");
}
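
// For illustration: the tail arithmetic above, worked through for concrete
// vector lengths in a standalone sketch. cntb returns the SVE vector length
// in bytes, and the "& (vl - 1)" mask assumes it is a power of two.
#include <stdio.h>

static void tail_widths(int width, int vl) {
  int width_last_y = width & (vl - 1);
  int width_last_uv = width_last_y + (width_last_y & 1);
  printf("width=%3d vl=%2d -> last_y=%2d last_uv=%2d\n", width, vl,
         width_last_y, width_last_uv);
}
// tail_widths(37, 16)  -> last_y=5, last_uv=6 (three whole U/V pairs)
// tail_widths(64, 16)  -> last_y=0 (the "b.eq 99f" above skips the tail)
// tail_widths(100, 32) -> last_y=4, last_uv=4 (256-bit vectors)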
void NV21ToRGB24Row_SVE2(const uint8_t* src_y,
                         const uint8_t* src_vu,
                         uint8_t* dst_rgb24,
                         const struct YuvConstants* yuvconstants,
                         int width) {
  // Index vectors for de-interleaving the VU plane: in NV21, V occupies the
  // even bytes and U the odd bytes.
  uint32_t nv_u_start = 0xff01U;
  uint32_t nv_u_step = 0x0002U;
  uint32_t nv_v_start = 0xff00U;
  uint32_t nv_v_step = 0x0002U;
  uint64_t vl;
  asm("cntb %0" : "=r"(vl));  // vl = SVE vector length in bytes.
  int width_last_y = width & (vl - 1);
  // Round the Y tail up to even so the UV predicate covers whole U/V pairs.
  int width_last_uv = width_last_y + (width_last_y & 1);
  asm volatile(
      "ptrue    p0.b                                    \n"  //
      YUVTORGB_SVE_SETUP
      "dup      z19.b, #255                             \n"  // A
      "index    z7.h, %w[nv_u_start], %w[nv_u_step]     \n"
      "index    z23.h, %w[nv_v_start], %w[nv_v_step]    \n"
      "subs     %w[width], %w[width], %w[vl]            \n"
      "b.lt     2f                                      \n"

      // Run bulk of computation with an all-true predicate to avoid
      // predicate generation overhead.
      "ptrue    p1.b                                    \n"
      "ptrue    p2.b                                    \n"
      "1:                                               \n"  //
      READNV_SVE_2X NVTORGB_SVE_2X(t, b) RGBTOARGB8_SVE_2X
      "subs     %w[width], %w[width], %w[vl]            \n"
      "st3b     {z16.b, z17.b, z18.b}, p1, [%[dst_rgb24]] \n"
      "incb     %[dst_rgb24], all, mul #3               \n"
      "b.ge     1b                                      \n"

      "2:                                               \n"
      "adds     %w[width], %w[width], %w[vl]            \n"
      "b.eq     99f                                     \n"

      // Calculate a predicate for the final iteration to deal with the tail.
      "whilelt  p1.b, wzr, %w[width_last_y]             \n"
      "whilelt  p2.b, wzr, %w[width_last_uv]            \n"  //
      READNV_SVE_2X NVTORGB_SVE_2X(t, b) RGBTOARGB8_SVE_2X
      "st3b     {z16.b, z17.b, z18.b}, p1, [%[dst_rgb24]] \n"

      "99:                                              \n"
      : [src_y] "+r"(src_y),                               // %[src_y]
        [src_uv] "+r"(src_vu),                             // %[src_uv]
        [dst_rgb24] "+r"(dst_rgb24),                       // %[dst_rgb24]
        [width] "+r"(width)                                // %[width]
      : [vl] "r"(vl),                                      // %[vl]
        [kUVCoeff] "r"(&yuvconstants->kUVCoeff),           // %[kUVCoeff]
        [kRGBCoeffBias] "r"(&yuvconstants->kRGBCoeffBias), // %[kRGBCoeffBias]
        [nv_u_start] "r"(nv_u_start),                      // %[nv_u_start]
        [nv_u_step] "r"(nv_u_step),                        // %[nv_u_step]
        [nv_v_start] "r"(nv_v_start),                      // %[nv_v_start]
        [nv_v_step] "r"(nv_v_step),                        // %[nv_v_step]
        [width_last_y] "r"(width_last_y),                  // %[width_last_y]
        [width_last_uv] "r"(width_last_uv)                 // %[width_last_uv]
      : "cc", "memory", YUVTORGB_SVE_REGS, "p2");
}
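
// The NV21 variant above differs from NV12 only in chroma order: the U and
// V index start values are swapped (0xff01/0xff00 vs 0xff00/0xff01), as are
// the (t, b) arguments to NVTORGB_SVE_2X. Assuming READNV_SVE_2X uses these
// index vectors as byte-gather indices where the 0xff byte selects zero (an
// assumption; the macro body is outside this hunk), the first 16-bit
// elements expand as:
//   NV12 (UVUV...): z7 (U) = 0xff00, 0xff02, ...  z23 (V) = 0xff01, 0xff03, ...
//   NV21 (VUVU...): z7 (U) = 0xff01, 0xff03, ...  z23 (V) = 0xff00, 0xff02, ...
// so each chroma byte is de-interleaved and zero-extended to 16 bits.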
// Dot-product constants are stored as four-tuples with the two innermost
// elements flipped to account for the interleaving nature of the widening
// addition instructions.