[AArch64] Add SVE2 implementations of {P210,P410}ToAR30Row

Observed reductions in runtime compared to the existing Neon code:

            | P210ToAR30Row | P410ToAR30Row
Cortex-A510 |       -16.5%  |        -21.2%
Cortex-A520 | (!)    +2.7%  |         -8.7%
Cortex-A715 |        -6.1%  |         -6.1%
Cortex-A720 |        -6.2%  |         -5.9%
  Cortex-X2 |        -4.1%  |         -4.2%
  Cortex-X3 |        -4.2%  |         -4.2%
  Cortex-X4 |        -1.2%  |         -1.2%
Cortex-X925 |        -3.6%  |         -2.8%

Bug: b/42280942
Change-Id: I40723a370fad1ccb53f8ccd9d32cddb502500dd6
Reviewed-on: https://chromium-review.googlesource.com/c/libyuv/libyuv/+/6023036
Reviewed-by: Frank Barchard <fbarchard@chromium.org>
This commit is contained in:
George Steed 2024-05-16 13:50:02 +01:00 committed by Frank Barchard
parent 0ddf3f7b90
commit 823d960afc
3 changed files with 150 additions and 0 deletions

View File

@@ -563,7 +563,9 @@ extern "C" {
#define HAS_I444TOARGBROW_SVE2
#define HAS_NV12TOARGBROW_SVE2
#define HAS_NV21TOARGBROW_SVE2
#define HAS_P210TOAR30ROW_SVE2
#define HAS_P210TOARGBROW_SVE2
#define HAS_P410TOAR30ROW_SVE2
#define HAS_P410TOARGBROW_SVE2
#define HAS_RAWTOARGBROW_SVE2
#define HAS_RAWTORGB24ROW_SVE2
@@ -5405,11 +5407,21 @@ void P210ToAR30Row_NEON(const uint16_t* y_buf,
uint8_t* dst_ar30,
const struct YuvConstants* yuvconstants,
int width);
// SVE2 variant of the P210 -> AR30 row converter (same contract as the
// *_NEON prototype above).
void P210ToAR30Row_SVE2(const uint16_t* y_buf,
const uint16_t* uv_buf,
uint8_t* dst_ar30,
const struct YuvConstants* yuvconstants,
int width);
void P410ToAR30Row_NEON(const uint16_t* y_buf,
const uint16_t* uv_buf,
uint8_t* dst_ar30,
const struct YuvConstants* yuvconstants,
int width);
// SVE2 variant of the P410 -> AR30 row converter (same contract as the
// *_NEON prototype above).
void P410ToAR30Row_SVE2(const uint16_t* y_buf,
const uint16_t* uv_buf,
uint8_t* dst_ar30,
const struct YuvConstants* yuvconstants,
int width);
void P210ToARGBRow_Any_NEON(const uint16_t* y_buf,
const uint16_t* uv_buf,
uint8_t* dst_argb,

View File

@@ -2050,6 +2050,11 @@ int P010ToAR30Matrix(const uint16_t* src_y,
P210ToAR30Row = P210ToAR30Row_NEON;
}
}
#endif
#if defined(HAS_P210TOAR30ROW_SVE2)
if (TestCpuFlag(kCpuHasSVE2)) {
P210ToAR30Row = P210ToAR30Row_SVE2;
}
#endif
for (y = 0; y < height; ++y) {
P210ToAR30Row(src_y, src_uv, dst_ar30, yuvconstants, width);
@@ -2109,6 +2114,11 @@ int P210ToAR30Matrix(const uint16_t* src_y,
P210ToAR30Row = P210ToAR30Row_NEON;
}
}
#endif
#if defined(HAS_P210TOAR30ROW_SVE2)
if (TestCpuFlag(kCpuHasSVE2)) {
P210ToAR30Row = P210ToAR30Row_SVE2;
}
#endif
for (y = 0; y < height; ++y) {
P210ToAR30Row(src_y, src_uv, dst_ar30, yuvconstants, width);
@@ -8264,6 +8274,11 @@ static int P010ToAR30MatrixBilinear(const uint16_t* src_y,
}
}
#endif
#if defined(HAS_P410TOAR30ROW_SVE2)
if (TestCpuFlag(kCpuHasSVE2)) {
P410ToAR30Row = P410ToAR30Row_SVE2;
}
#endif
#ifdef HAS_SCALEUVROWUP2_BILINEAR_16_SSE41
if (TestCpuFlag(kCpuHasSSE41)) {
@@ -8365,6 +8380,11 @@ static int P210ToAR30MatrixLinear(const uint16_t* src_y,
}
}
#endif
#if defined(HAS_P410TOAR30ROW_SVE2)
if (TestCpuFlag(kCpuHasSVE2)) {
P410ToAR30Row = P410ToAR30Row_SVE2;
}
#endif
#ifdef HAS_SCALEUVROWUP2_LINEAR_16_SSE41
if (TestCpuFlag(kCpuHasSSE41)) {

View File

@@ -241,6 +241,18 @@ extern "C" {
"z20", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", \
"z31", "p0", "p1", "p2", "p3"
// Store AR30 elements.
// Inputs: z16 = blue, z17 = green, z18 = red (one 16-bit lane per pixel,
// 10 significant bits each, per the lane diagrams below); z23 holds the
// red-channel clamp value (callers dup 0x3ff0 into it); p0 all-true; p1
// governs the store.  Packs each pixel into 2:10:10:10 AR30 with the
// 2-bit alpha forced to 3 (the `orr #0xc000`), emits the interleaved
// low/high halfwords with st2h, and advances dst_ar30 by two vectors of
// bytes (incb ... mul #2).
#define STOREAR30_SVE \
"uqshl z16.h, p0/m, z16.h, #2 \n" /* bbbbbbbbbbxxxxxx */ \
"uqshl z17.h, p0/m, z17.h, #2 \n" /* ggggggggggxxxxxx */ \
"umin z18.h, p0/m, z18.h, z23.h \n" /* 00rrrrrrrrrrxxxx */ \
"orr z18.h, z18.h, #0xc000 \n" /* 11rrrrrrrrrrxxxx */ \
"sri z18.h, z17.h, #12 \n" /* 11rrrrrrrrrrgggg */ \
"lsl z17.h, z17.h, #4 \n" /* ggggggxxxxxx0000 */ \
"sri z17.h, z16.h, #6 \n" /* ggggggbbbbbbbbbb */ \
"st2h {z17.h, z18.h}, p1, [%[dst_ar30]] \n" \
"incb %[dst_ar30], all, mul #2 \n"
void I444ToARGBRow_SVE2(const uint8_t* src_y,
const uint8_t* src_u,
const uint8_t* src_v,
@@ -1931,6 +1943,61 @@ void P210ToARGBRow_SVE2(const uint16_t* src_y,
: "cc", "memory", YUVTORGB_SVE_REGS);
}
// Convert one row of P210 pixels (10-bit biplanar, UV subsampled 2x
// horizontally — each UV pair covers two Y samples, hence the separate UV
// tail count below) to packed AR30, using SVE2 with a predicated tail.
void P210ToAR30Row_SVE2(const uint16_t* src_y,
const uint16_t* src_uv,
uint8_t* dst_ar30,
const struct YuvConstants* yuvconstants,
int width) {
uint64_t vl;
// vl = number of 16-bit lanes in one SVE vector.
asm("cnth %0" : "=r"(vl));
// Y-element count for the final (possibly partial) iteration; a full
// vector when width is an exact multiple of vl.
int width_last_y = width & (vl - 1);
width_last_y = width_last_y == 0 ? vl : width_last_y;
// Round up to even: a UV pair is shared by two Y elements.
int width_last_uv = width_last_y + (width_last_y & 1);
// NOTE(review): presumably lane-index seed/step for z22, consumed by
// READP210_SVE to expand the interleaved UV plane — confirm against that
// macro.
uint32_t nv_uv_start = 0x03010301U;
uint32_t nv_uv_step = 0x04040404U;
// Red-channel clamp used by STOREAR30_SVE (dup'd into z23 below).
uint16_t limit = 0x3ff0;
asm volatile(
"ptrue p0.b \n" YUVTORGB_SVE_SETUP
"index z22.s, %w[nv_uv_start], %w[nv_uv_step] \n"
"dup z23.h, %w[limit] \n"
"subs %w[width], %w[width], %w[vl] \n"
"b.lt 2f \n"
// Run bulk of computation with an all-true predicate to avoid predicate
// generation overhead.
"ptrue p1.h \n"
"ptrue p2.h \n"
"1: \n" //
READP210_SVE NVTORGB_SVE
"subs %w[width], %w[width], %w[vl] \n" //
STOREAR30_SVE
"b.ge 1b \n"
"2: \n"
// Restore the remaining element count; zero means no tail to process.
"adds %w[width], %w[width], %w[vl] \n"
"b.eq 99f \n"
// Calculate a predicate for the final iteration to deal with the tail.
"whilelt p1.h, wzr, %w[width_last_y] \n"
"whilelt p2.h, wzr, %w[width_last_uv] \n" //
READP210_SVE NVTORGB_SVE STOREAR30_SVE
"99: \n"
: [src_y] "+r"(src_y), // %[src_y]
[src_uv] "+r"(src_uv), // %[src_uv]
[dst_ar30] "+r"(dst_ar30), // %[dst_ar30]
[width] "+r"(width) // %[width]
: [vl] "r"(vl), // %[vl]
[kUVCoeff] "r"(&yuvconstants->kUVCoeff), // %[kUVCoeff]
[kRGBCoeffBias] "r"(&yuvconstants->kRGBCoeffBias), // %[kRGBCoeffBias]
[nv_uv_start] "r"(nv_uv_start), // %[nv_uv_start]
[nv_uv_step] "r"(nv_uv_step), // %[nv_uv_step]
[width_last_y] "r"(width_last_y), // %[width_last_y]
[width_last_uv] "r"(width_last_uv), // %[width_last_uv]
[limit] "r"(limit) // %[limit]
: "cc", "memory", YUVTORGB_SVE_REGS);
}
void P410ToARGBRow_SVE2(const uint16_t* src_y,
const uint16_t* src_uv,
uint8_t* dst_argb,
@@ -1982,6 +2049,57 @@ void P410ToARGBRow_SVE2(const uint16_t* src_y,
: "cc", "memory", YUVTORGB_SVE_REGS);
}
// Convert one row of P410 pixels (10-bit biplanar, full-resolution UV) to
// packed AR30, using SVE2 with a predicated tail iteration.
void P410ToAR30Row_SVE2(const uint16_t* src_y,
                        const uint16_t* src_uv,
                        uint8_t* dst_ar30,
                        const struct YuvConstants* yuvconstants,
                        int width) {
  uint64_t vl;
  // vl = number of 16-bit lanes in one SVE vector.
  asm("cnth %0" : "=r"(vl));
  // Y-element count for the final (possibly partial) iteration; a full
  // vector when width is an exact multiple of vl.
  int width_last_y = width & (vl - 1);
  width_last_y = width_last_y == 0 ? vl : width_last_y;
  // Red-channel clamp used by STOREAR30_SVE (dup'd into z23 below).
  uint16_t limit = 0x3ff0;
  asm volatile(
      "ptrue p0.b \n" YUVTORGB_SVE_SETUP
      "dup z23.h, %w[limit] \n"
      "subs %w[width], %w[width], %w[vl] \n"
      "b.lt 2f \n"
      // Run bulk of computation with an all-true predicate to avoid predicate
      // generation overhead.
      "ptrue p1.h \n"
      "ptrue p2.s \n"
      "ptrue p3.s \n"
      "1: \n" //
      READP410_SVE NVTORGB_SVE
      "subs %w[width], %w[width], %w[vl] \n" //
      STOREAR30_SVE
      "b.ge 1b \n"
      "2: \n"
      // Restore the remaining element count; zero means no tail to process.
      "adds %w[width], %w[width], %w[vl] \n"
      "b.eq 99f \n"
      // Calculate a predicate for the final iteration to deal with the tail.
      "whilelt p1.h, wzr, %w[width_last_y] \n"
      "whilelt p2.s, wzr, %w[width_last_y] \n"
      // cntw overwrites the register backing %[vl], so vl is listed as a
      // read/write ("+r") operand below; keeping it input-only while writing
      // it here would be undefined behavior under GCC extended asm rules.
      "cntw %x[vl] \n"
      "whilelt p3.s, %w[vl], %w[width_last_y] \n" //
      READP410_SVE NVTORGB_SVE STOREAR30_SVE
      "99: \n"
      : [src_y] "+r"(src_y),                               // %[src_y]
        [src_uv] "+r"(src_uv),                             // %[src_uv]
        [dst_ar30] "+r"(dst_ar30),                         // %[dst_ar30]
        [width] "+r"(width),                               // %[width]
        [vl] "+r"(vl)                                      // %[vl]
      : [kUVCoeff] "r"(&yuvconstants->kUVCoeff),           // %[kUVCoeff]
        [kRGBCoeffBias] "r"(&yuvconstants->kRGBCoeffBias), // %[kRGBCoeffBias]
        [width_last_y] "r"(width_last_y),                  // %[width_last_y]
        [limit] "r"(limit)                                 // %[limit]
      : "cc", "memory", YUVTORGB_SVE_REGS);
}
#endif // !defined(LIBYUV_DISABLE_SVE) && defined(__aarch64__)
#ifdef __cplusplus