[AArch64] Add SME implementation of Convert8To16Row_SME

Mostly just a straightforward copy of the Neon code ported to
Streaming-SVE. There is no benefit from this kernel when the SVE vector
length is only 128 bits, so skip writing a non-streaming SVE
implementation.

Change-Id: Ide34dbb7125b5f2a1edda6ef7111a1a49aad324f
Reviewed-on: https://chromium-review.googlesource.com/c/libyuv/libyuv/+/6651565
Reviewed-by: Frank Barchard <fbarchard@chromium.org>
This commit is contained in:
George Steed 2025-03-20 12:35:54 +00:00 committed by Frank Barchard
parent 1724c4be72
commit 88798bcd63
3 changed files with 66 additions and 0 deletions

View File

@ -617,6 +617,7 @@ extern "C" {
defined(__aarch64__)
#define HAS_ARGBMULTIPLYROW_SME
#define HAS_CONVERT16TO8ROW_SME
#define HAS_CONVERT8TO16ROW_SME
#define HAS_CONVERT8TO8ROW_SME
#define HAS_COPYROW_SME
#define HAS_I210ALPHATOARGBROW_SME
@ -3791,6 +3792,10 @@ void Convert8To16Row_Any_NEON(const uint8_t* src_y,
                              uint16_t* dst_y,
                              int scale,
                              int width);
// Streaming-SVE (SME) row converter: widens 8-bit samples to 16-bit as
// (src * 0x0101 * scale) >> 16. scale must be a power of two; any width
// is handled (the implementation predicates the tail iteration).
void Convert8To16Row_SME(const uint8_t* src_y,
                         uint16_t* dst_y,
                         int scale,
                         int width);
void Convert16To8Row_C(const uint16_t* src_y,
                       uint8_t* dst_y,

View File

@ -233,6 +233,11 @@ void Convert8To16Plane(const uint8_t* src_y,
    }
  }
#endif
#if defined(HAS_CONVERT8TO16ROW_SME)
  // SME path handles any width directly (no _Any_ wrapper needed), so it
  // unconditionally overrides any earlier function-pointer selection.
  if (TestCpuFlag(kCpuHasSME)) {
    Convert8To16Row = Convert8To16Row_SME;
  }
#endif
  // Convert plane
  for (y = 0; y < height; ++y) {

View File

@ -1064,6 +1064,62 @@ __arm_locally_streaming void Convert8To8Row_SME(const uint8_t* src_y,
  Convert8To8Row_SVE_SC(src_y, dst_y, scale, bias, width);
}
// One loop iteration: load two half-width vectors of bytes (zero-extended
// into .h lanes by ld1b {z.h}), duplicate each byte into both halves of its
// halfword with trn1 (equivalent to multiplying by 0x0101), then shift right
// by the per-lane amount in z2.h and store two vectors of 16-bit results.
// Also decrements [width] by the number of lanes processed (2 * vl), setting
// the condition flags the caller branches on, and advances [src]/[dst].
// The prfm prefetch hint is carried over from the Neon implementation.
#define CONVERT8TO16_SVE \
  "ld1b {z0.h}, p0/z, [%[src]] \n" \
  "ld1b {z1.h}, p1/z, [%[src], #1, mul vl] \n" \
  "incb %[src] \n" \
  "subs %w[width], %w[width], %w[vl], lsl #1 \n" \
  "trn1 z0.b, z0.b, z0.b \n" \
  "trn1 z1.b, z1.b, z1.b \n" \
  "lsr z0.h, p0/m, z0.h, z2.h \n" \
  "lsr z1.h, p1/m, z1.h, z2.h \n" \
  "prfm pldl1keep, [%[src], 448] \n" \
  "st1h {z0.h}, p0, [%[dst]] \n" \
  "st1h {z1.h}, p1, [%[dst], #1, mul vl] \n" \
  "incb %[dst], all, mul #2 \n"
// Widens a row of 8-bit samples to 16-bit: dst[i] = (src[i] * 0x0101 * scale)
// >> 16, computed entirely in 16-bit lanes. Runs in streaming-SVE mode
// (__arm_locally_streaming), processing 2 vectors (2 * cnth lanes) per
// iteration, with a final predicated iteration for any remaining tail.
// src_y: input row of 8-bit samples.
// dst_y: output row of 16-bit samples.
// scale: power-of-two scale factor (e.g. 256 for a plain 8->16 widen).
// width: number of samples; any non-negative value is supported.
__arm_locally_streaming void Convert8To16Row_SME(const uint8_t* src_y,
                                                 uint16_t* dst_y,
                                                 int scale,
                                                 int width) {
  // (src * 0x0101 * scale) >> 16.
  // Since scale is a power of two, compute the shift to use to avoid needing
  // to widen to int32. With clz on a 32-bit int: clz(scale) = 31 - log2(scale),
  // so shift = 16 - log2(scale), i.e. ((x * 0x0101) * scale) >> 16 ==
  // (x * 0x0101) >> shift.
  // NOTE(review): assumes scale > 0 — __builtin_clz(0) is undefined behavior;
  // callers appear to always pass a power of two, but confirm upstream.
  int shift = __builtin_clz(scale) - 15;
  uint64_t vl;
  asm volatile(
      // z2.h holds the per-lane shift amount; vl = halfword lanes per vector.
      "dup z2.h, %w[shift] \n"
      "cnth %[vl] \n"
      // Pre-decrement width by one full iteration (2 * vl lanes); if it goes
      // negative there is less than one full iteration of data.
      "subs %w[width], %w[width], %w[vl], lsl #1 \n"
      "b.lt 2f \n"
      // Run bulk of computation with all-true predicates to avoid predicate
      // generation overhead.
      "ptrue p0.h \n"
      "ptrue p1.h \n"
      "1: \n" //
      CONVERT8TO16_SVE
      "b.ge 1b \n"
      "2: \n"
      // Restore the remaining element count; zero means no tail left.
      "adds %w[width], %w[width], %w[vl], lsl #1 \n"
      "b.eq 99f \n"
      // Calculate predicates for the final iteration to deal with the tail:
      // p0 covers lanes [0, width), p1 covers lanes [vl, width) of vector 2.
      "whilelt p0.h, wzr, %w[width] \n"
      "whilelt p1.h, %w[vl], %w[width] \n" //
      CONVERT8TO16_SVE
      "99: \n"
      : [src] "+r"(src_y),                            // %[src]
        [dst] "+r"(dst_y),                            // %[dst]
        [width] "+r"(width),                          // %[width]
        [vl] "=&r"(vl)                                // %[vl]
      : [shift] "r"(shift)                            // %[shift]
      : "cc", "memory", "z0", "z1", "z2", "p0", "p1");
}
#endif  // !defined(LIBYUV_DISABLE_SME) && defined(CLANG_HAS_SME) &&
        // defined(__aarch64__)