From 51d07554a039004ff278009852f9d33c0f76bf91 Mon Sep 17 00:00:00 2001
From: George Steed
Date: Tue, 27 Aug 2024 14:33:25 +0100
Subject: [PATCH] [AArch64] Add SME implementation of ScaleRowDown2Linear

There is no benefit from an SVE version of this kernel for devices with
an SVE vector length of 128 bits, so skip directly to SME instead. We do
not use the ZA tile here, so this is a purely streaming-SVE (SSVE)
implementation.

Change-Id: Ie6b91bd4407130ba2653838088e81e72e4460f68
Reviewed-on: https://chromium-review.googlesource.com/c/libyuv/libyuv/+/5913884
Reviewed-by: Justin Green
Reviewed-by: Frank Barchard
---
 include/libyuv/scale_row.h |  4 ++++
 source/scale.cc            |  5 +++--
 source/scale_sme.cc        | 40 ++++++++++++++++++++++++++++++++++++++
 3 files changed, 47 insertions(+), 2 deletions(-)

diff --git a/include/libyuv/scale_row.h b/include/libyuv/scale_row.h
index 6e488a4cd..37907f377 100644
--- a/include/libyuv/scale_row.h
+++ b/include/libyuv/scale_row.h
@@ -1416,6 +1416,10 @@ void ScaleRowDown2Linear_NEON(const uint8_t* src_ptr,
                               ptrdiff_t src_stride,
                               uint8_t* dst,
                               int dst_width);
+void ScaleRowDown2Linear_SME(const uint8_t* src_ptr,
+                             ptrdiff_t src_stride,
+                             uint8_t* dst,
+                             int dst_width);
 void ScaleRowDown2Box_NEON(const uint8_t* src_ptr,
                            ptrdiff_t src_stride,
                            uint8_t* dst,
diff --git a/source/scale.cc b/source/scale.cc
index 2f9bb4990..610be07cd 100644
--- a/source/scale.cc
+++ b/source/scale.cc
@@ -76,8 +76,9 @@ static void ScalePlaneDown2(int src_width,
 #endif
 #if defined(HAS_SCALEROWDOWN2_SME)
   if (TestCpuFlag(kCpuHasSME)) {
-    if (filtering == kFilterNone) {
-      ScaleRowDown2 = ScaleRowDown2_SME;
+    if (filtering == kFilterNone || filtering == kFilterLinear) {
+      ScaleRowDown2 = filtering == kFilterNone ? ScaleRowDown2_SME
+                                               : ScaleRowDown2Linear_SME;
     }
   }
 #endif
diff --git a/source/scale_sme.cc b/source/scale_sme.cc
index d4f0a827d..34c6884a5 100644
--- a/source/scale_sme.cc
+++ b/source/scale_sme.cc
@@ -56,6 +56,46 @@ __arm_locally_streaming void ScaleRowDown2_SME(const uint8_t* src_ptr,
       : "memory", "cc", "z0", "z1", "p0");
 }
 
+__arm_locally_streaming void ScaleRowDown2Linear_SME(const uint8_t* src_ptr,
+                                                     ptrdiff_t src_stride,
+                                                     uint8_t* dst,
+                                                     int dst_width) {
+  // Streaming-SVE only, no use of ZA tile.
+  (void)src_stride;
+  int vl;
+  asm volatile(
+      "cntb     %x[vl]                                \n"
+      "subs     %w[dst_width], %w[dst_width], %w[vl]  \n"
+      "b.lt     2f                                    \n"
+
+      "1:                                             \n"
+      "ptrue    p0.b                                  \n"
+      "ld2b     {z0.b, z1.b}, p0/z, [%[src_ptr]]      \n"
+      "incb     %[src_ptr], all, mul #2               \n"
+      "urhadd   z0.b, p0/m, z0.b, z1.b                \n"
+      "subs     %w[dst_width], %w[dst_width], %w[vl]  \n"
+      "st1b     {z0.b}, p0, [%[dst_ptr]]              \n"
+      "incb     %[dst_ptr]                            \n"
+      "b.ge     1b                                    \n"
+
+      "2:                                             \n"
+      "adds     %w[dst_width], %w[dst_width], %w[vl]  \n"
+      "b.eq     99f                                   \n"
+
+      "whilelt  p0.b, wzr, %w[dst_width]              \n"
+      "ld2b     {z0.b, z1.b}, p0/z, [%[src_ptr]]      \n"
+      "urhadd   z0.b, p0/m, z0.b, z1.b                \n"
+      "st1b     {z0.b}, p0, [%[dst_ptr]]              \n"
+
+      "99:                                            \n"
+      : [src_ptr] "+r"(src_ptr),      // %[src_ptr]
+        [dst_ptr] "+r"(dst),          // %[dst_ptr]
+        [dst_width] "+r"(dst_width),  // %[dst_width]
+        [vl] "=r"(vl)                 // %[vl]
+      :
+      : "memory", "cc", "z0", "z1", "p0");
+}
+
 #endif  // !defined(LIBYUV_DISABLE_SME) && defined(CLANG_HAS_SME) &&
         // defined(__aarch64__)
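
Note on the arithmetic (illustrative only, not part of the patch): LD2B
de-interleaves even- and odd-indexed source bytes into z0/z1, and URHADD is
the unsigned rounding halving add, (a + b + 1) >> 1. The kernel's effect can
therefore be modeled in scalar C as below; the function name
ScaleRowDown2Linear_ScalarModel is hypothetical and exists only for this
sketch, it is not a libyuv symbol.

#include <stddef.h>
#include <stdint.h>

// Illustrative scalar model of the new kernel: each output byte is the
// rounding average of a horizontally adjacent pair of input bytes.
static void ScaleRowDown2Linear_ScalarModel(const uint8_t* src_ptr,
                                            ptrdiff_t src_stride,
                                            uint8_t* dst,
                                            int dst_width) {
  (void)src_stride;  // Unused; kept only for signature parity with the kernel.
  for (int i = 0; i < dst_width; ++i) {
    // Matches URHADD on the lanes LD2B de-interleaves: (a + b + 1) >> 1.
    dst[i] = (uint8_t)((src_ptr[2 * i] + src_ptr[2 * i + 1] + 1) >> 1);
  }
}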