diff --git a/include/libyuv/row.h b/include/libyuv/row.h
index c6efa5336..93feb0c6d 100644
--- a/include/libyuv/row.h
+++ b/include/libyuv/row.h
@@ -567,6 +567,7 @@ extern "C" {
 #define HAS_I444TOARGBROW_SVE2
 #define HAS_I422TOARGBROW_SVE2
 #define HAS_I444ALPHATOARGBROW_SVE2
+#define HAS_I422ALPHATOARGBROW_SVE2
 #endif
 
 // The following are available on AArch64 platforms:
@@ -1082,6 +1083,13 @@ void I422AlphaToARGBRow_NEON(const uint8_t* src_y,
                              uint8_t* dst_argb,
                              const struct YuvConstants* yuvconstants,
                              int width);
+void I422AlphaToARGBRow_SVE2(const uint8_t* src_y,
+                             const uint8_t* src_u,
+                             const uint8_t* src_v,
+                             const uint8_t* src_a,
+                             uint8_t* dst_argb,
+                             const struct YuvConstants* yuvconstants,
+                             int width);
 void I422ToRGBARow_NEON(const uint8_t* src_y,
                         const uint8_t* src_u,
                         const uint8_t* src_v,
diff --git a/source/convert_argb.cc b/source/convert_argb.cc
index ebedf3c18..f2be3e451 100644
--- a/source/convert_argb.cc
+++ b/source/convert_argb.cc
@@ -2050,6 +2050,11 @@ int I420AlphaToARGBMatrix(const uint8_t* src_y,
     }
   }
 #endif
+#if defined(HAS_I422ALPHATOARGBROW_SVE2)
+  if (TestCpuFlag(kCpuHasSVE2)) {
+    I422AlphaToARGBRow = I422AlphaToARGBRow_SVE2;
+  }
+#endif
 #if defined(HAS_I422ALPHATOARGBROW_MSA)
   if (TestCpuFlag(kCpuHasMSA)) {
     I422AlphaToARGBRow = I422AlphaToARGBRow_Any_MSA;
@@ -2193,6 +2198,11 @@ int I422AlphaToARGBMatrix(const uint8_t* src_y,
     }
   }
 #endif
+#if defined(HAS_I422ALPHATOARGBROW_SVE2)
+  if (TestCpuFlag(kCpuHasSVE2)) {
+    I422AlphaToARGBRow = I422AlphaToARGBRow_SVE2;
+  }
+#endif
 #if defined(HAS_I422ALPHATOARGBROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    I422AlphaToARGBRow = I422AlphaToARGBRow_Any_MSA;
diff --git a/source/row_sve.cc b/source/row_sve.cc
index 9862c3a4b..4a4383322 100644
--- a/source/row_sve.cc
+++ b/source/row_sve.cc
@@ -209,6 +209,54 @@ void I444AlphaToARGBRow_SVE2(const uint8_t* src_y,
       : "cc", "memory", YUVTORGB_SVE_REGS);
 }
 
+void I422AlphaToARGBRow_SVE2(const uint8_t* src_y,
+                             const uint8_t* src_u,
+                             const uint8_t* src_v,
+                             const uint8_t* src_a,
+                             uint8_t* dst_argb,
+                             const struct YuvConstants* yuvconstants,
+                             int width) {
+  uint64_t vl;
+  asm("cnth %[vl] \n"
+      "ptrue p0.b \n" YUVTORGB_SVE_SETUP
+      "subs %w[width], %w[width], %w[vl] \n"
+      "b.lt 2f \n"
+
+      // Run bulk of computation with an all-true predicate to avoid predicate
+      // generation overhead.
+      "ptrue p1.h \n"
+      "1: \n" READYUV422_SVE
+      "ld1b {z19.h}, p1/z, [%[src_a]] \n"  // A
+      "add %[src_a], %[src_a], %[vl] \n" I4XXTORGB_SVE
+          RGBTORGBA8_SVE
+      "subs %w[width], %w[width], %w[vl] \n"
+      "st2h {z16.h, z17.h}, p1, [%[dst_argb]] \n"
+      "add %[dst_argb], %[dst_argb], %[vl], lsl #2 \n"
+      "b.ge 1b \n"
+
+      "2: \n"
+      "adds %w[width], %w[width], %w[vl] \n"
+      "b.eq 99f \n"
+
+      // Calculate a predicate for the final iteration to deal with the tail.
+      "whilelt p1.h, wzr, %w[width] \n" READYUV422_SVE
+      "ld1b {z19.h}, p1/z, [%[src_a]] \n"  // A
+          I4XXTORGB_SVE RGBTORGBA8_SVE
+      "st2h {z16.h, z17.h}, p1, [%[dst_argb]] \n"
+
+      "99: \n"
+      : [src_y] "+r"(src_y),                               // %[src_y]
+        [src_u] "+r"(src_u),                               // %[src_u]
+        [src_v] "+r"(src_v),                               // %[src_v]
+        [src_a] "+r"(src_a),                               // %[src_a]
+        [dst_argb] "+r"(dst_argb),                         // %[dst_argb]
+        [width] "+r"(width),                               // %[width]
+        [vl] "=&r"(vl)                                     // %[vl]
+      : [kUVCoeff] "r"(&yuvconstants->kUVCoeff),           // %[kUVCoeff]
+        [kRGBCoeffBias] "r"(&yuvconstants->kRGBCoeffBias)  // %[kRGBCoeffBias]
+      : "cc", "memory", YUVTORGB_SVE_REGS);
+}
+
 #endif  // !defined(LIBYUV_DISABLE_SVE) && defined(__aarch64__)
 
 #ifdef __cplusplus