From 576218dbce3e90d1b7592277ba173cd1fc8528b6 Mon Sep 17 00:00:00 2001
From: George Steed
Date: Fri, 4 Oct 2024 14:20:17 +0100
Subject: [PATCH] [AArch64] Add SME implementation of ScaleUVRowDown2Linear

There is no benefit from an SVE version of this kernel for devices with
an SVE vector length of 128 bits, so skip directly to SME instead. We do
not use the ZA tile here, so this is a purely streaming-SVE (SSVE)
implementation.

Change-Id: I401eb6ad14b3159917c8e3a79ab20dde318d28b6
Reviewed-on: https://chromium-review.googlesource.com/c/libyuv/libyuv/+/5979726
Reviewed-by: Justin Green
Reviewed-by: Frank Barchard
---
 include/libyuv/scale_row.h |  5 +++++
 source/scale_sme.cc        | 41 ++++++++++++++++++++++++++++++++++++++
 source/scale_uv.cc         |  6 ++++--
 3 files changed, 50 insertions(+), 2 deletions(-)

diff --git a/include/libyuv/scale_row.h b/include/libyuv/scale_row.h
index 9696f4064..c4df7348a 100644
--- a/include/libyuv/scale_row.h
+++ b/include/libyuv/scale_row.h
@@ -121,6 +121,7 @@ extern "C" {
     defined(__aarch64__)
 #define HAS_SCALEROWDOWN2_SME
 #define HAS_SCALEUVROWDOWN2_SME
+#define HAS_SCALEUVROWDOWN2LINEAR_SME
 #endif
 
 #if !defined(LIBYUV_DISABLE_MSA) && defined(__mips_msa)
@@ -1163,6 +1164,10 @@ void ScaleUVRowDown2Linear_NEON(const uint8_t* src_ptr,
                                 ptrdiff_t src_stride,
                                 uint8_t* dst_uv,
                                 int dst_width);
+void ScaleUVRowDown2Linear_SME(const uint8_t* src_ptr,
+                               ptrdiff_t src_stride,
+                               uint8_t* dst_uv,
+                               int dst_width);
 void ScaleUVRowDown2Box_NEON(const uint8_t* src_ptr,
                              ptrdiff_t src_stride,
                              uint8_t* dst,
diff --git a/source/scale_sme.cc b/source/scale_sme.cc
index 032b8005e..ee63c090b 100644
--- a/source/scale_sme.cc
+++ b/source/scale_sme.cc
@@ -187,6 +187,47 @@ __arm_locally_streaming void ScaleUVRowDown2_SME(const uint8_t* src_uv,
       : "memory", "cc", "z0", "z1", "p0");
 }
 
+__arm_locally_streaming void ScaleUVRowDown2Linear_SME(const uint8_t* src_uv,
+                                                       ptrdiff_t src_stride,
+                                                       uint8_t* dst_uv,
+                                                       int dst_width) {
+  // Streaming-SVE only, no use of ZA tile.
+  (void)src_stride;
+  int vl;
+  asm volatile(
+      "cnth     %x[vl]                                  \n"
+      "ptrue    p1.b                                    \n"
+      "subs     %w[dst_width], %w[dst_width], %w[vl]    \n"
+      "b.lt     2f                                      \n"
+
+      "ptrue    p0.h                                    \n"
+      "1:                                               \n"
+      "ld2h     {z0.h, z1.h}, p0/z, [%[src_uv]]         \n"
+      "incb     %[src_uv], all, mul #2                  \n"
+      "urhadd   z0.b, p1/m, z0.b, z1.b                  \n"
+      "st1h     {z0.h}, p0, [%[dst_uv]]                 \n"
+      "incb     %[dst_uv], all, mul #1                  \n"
+      "subs     %w[dst_width], %w[dst_width], %w[vl]    \n"
+      "b.ge     1b                                      \n"

+      "2:                                               \n"
+      "adds     %w[dst_width], %w[dst_width], %w[vl]    \n"
+      "b.eq     99f                                     \n"

+      "whilelt  p0.h, wzr, %w[dst_width]                \n"
+      "ld2h     {z0.h, z1.h}, p0/z, [%[src_uv]]         \n"
+      "urhadd   z0.b, p1/m, z0.b, z1.b                  \n"
+      "st1h     {z0.h}, p0, [%[dst_uv]]                 \n"

+      "99:                                              \n"
+      : [src_uv] "+r"(src_uv),          // %[src_uv]
+        [dst_uv] "+r"(dst_uv),          // %[dst_uv]
+        [dst_width] "+r"(dst_width),    // %[dst_width]
+        [vl] "=r"(vl)                   // %[vl]
+      :
+      : "memory", "cc", "z0", "z1", "p0", "p1");
+}
+
 #endif  // !defined(LIBYUV_DISABLE_SME) && defined(CLANG_HAS_SME) &&
         // defined(__aarch64__)
diff --git a/source/scale_uv.cc b/source/scale_uv.cc
index 3f608b7c3..4498027ea 100644
--- a/source/scale_uv.cc
+++ b/source/scale_uv.cc
@@ -121,8 +121,10 @@ static void ScaleUVDown2(int src_width,
   }
 #endif
 #if defined(HAS_SCALEUVROWDOWN2_SME)
-  if (TestCpuFlag(kCpuHasSME) && filtering == kFilterNone) {
-    ScaleUVRowDown2 = ScaleUVRowDown2_SME;
+  if (TestCpuFlag(kCpuHasSME) &&
+      (filtering == kFilterNone || filtering == kFilterLinear)) {
+    ScaleUVRowDown2 = filtering == kFilterNone ? ScaleUVRowDown2_SME
+                                               : ScaleUVRowDown2Linear_SME;
   }
 #endif
 #if defined(HAS_SCALEUVROWDOWN2_RVV)
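
For context, the kernel halves row width by averaging each pair of adjacent
UV pixels: ld2h de-interleaves even/odd UV pairs into z0/z1 (one 16-bit
element per pair), and urhadd averages them byte-wise with rounding,
(a + b + 1) >> 1. A minimal scalar sketch of that contract follows; it is
not part of the patch, the function name ScaleUVRowDown2Linear_Scalar is
hypothetical, and it assumes the usual libyuv convention that dst_width
counts UV pairs.

    #include <stdint.h>

    // Hypothetical scalar reference for the SME kernel above: each output
    // UV pair is the rounding-halving average (cf. URHADD) of two adjacent
    // input UV pairs. dst_width is in UV pairs, so 4 input bytes map to 2
    // output bytes per iteration.
    static void ScaleUVRowDown2Linear_Scalar(const uint8_t* src_uv,
                                             uint8_t* dst_uv,
                                             int dst_width) {
      for (int x = 0; x < dst_width; ++x) {
        dst_uv[0] = (uint8_t)((src_uv[0] + src_uv[2] + 1) >> 1);  // U
        dst_uv[1] = (uint8_t)((src_uv[1] + src_uv[3] + 1) >> 1);  // V
        src_uv += 4;
        dst_uv += 2;
      }
    }

The vector loop consumes cnth UV pairs (one full vector of output bytes)
per iteration; the whilelt-predicated epilogue after label 2 handles the
remaining tail of fewer than cnth pairs in a single predicated pass.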