libyuv/source/scale_sme.cc

/*
 * Copyright 2024 The LibYuv Project Authors. All rights reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "libyuv/scale_row.h"

#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif

#if !defined(LIBYUV_DISABLE_SME) && defined(CLANG_HAS_SME) && \
    defined(__aarch64__)

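// ScaleRowDown2_SME: 2:1 horizontal subsample of one row. ld2b de-interleaves
// even/odd source bytes into z0/z1 and only the odd bytes (z1, i.e.
// src_ptr[1] of each pair) are stored.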
__arm_locally_streaming void ScaleRowDown2_SME(const uint8_t* src_ptr,
                                               ptrdiff_t src_stride,
                                               uint8_t* dst,
                                               int dst_width) {
  // Streaming-SVE only, no use of ZA tile.
  (void)src_stride;
  int vl;
  asm volatile(
      "cntb %x[vl] \n"
      "subs %w[dst_width], %w[dst_width], %w[vl] \n"
      "b.lt 2f \n"
      "1: \n"
      "ptrue p0.b \n"
      "ld2b {z0.b, z1.b}, p0/z, [%[src_ptr]] \n"
      "incb %[src_ptr], all, mul #2 \n"
      "subs %w[dst_width], %w[dst_width], %w[vl] \n"
      "st1b {z1.b}, p0, [%[dst_ptr]] \n"
      "incb %[dst_ptr] \n"
      "b.ge 1b \n"
      "2: \n"
      "adds %w[dst_width], %w[dst_width], %w[vl] \n"
      "b.eq 99f \n"
      "whilelt p0.b, wzr, %w[dst_width] \n"
      "ld2b {z0.b, z1.b}, p0/z, [%[src_ptr]] \n"
      "st1b {z1.b}, p0, [%[dst_ptr]] \n"
      "99: \n"
      : [src_ptr] "+r"(src_ptr),      // %[src_ptr]
        [dst_ptr] "+r"(dst),          // %[dst_ptr]
        [dst_width] "+r"(dst_width),  // %[dst_width]
        [vl] "=r"(vl)                 // %[vl]
      :
      : "memory", "cc", "z0", "z1", "p0");
}

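// ScaleRowDown2Linear_SME: 2:1 horizontal subsample with (1,1) linear
// filtering; urhadd produces the rounded average of each horizontal source
// pair after ld2b de-interleaves the even/odd bytes.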
__arm_locally_streaming void ScaleRowDown2Linear_SME(const uint8_t* src_ptr,
                                                     ptrdiff_t src_stride,
                                                     uint8_t* dst,
                                                     int dst_width) {
  // Streaming-SVE only, no use of ZA tile.
  (void)src_stride;
  int vl;
  asm volatile(
      "cntb %x[vl] \n"
      "subs %w[dst_width], %w[dst_width], %w[vl] \n"
      "b.lt 2f \n"
      "1: \n"
      "ptrue p0.b \n"
      "ld2b {z0.b, z1.b}, p0/z, [%[src_ptr]] \n"
      "incb %[src_ptr], all, mul #2 \n"
      "urhadd z0.b, p0/m, z0.b, z1.b \n"
      "subs %w[dst_width], %w[dst_width], %w[vl] \n"
      "st1b {z0.b}, p0, [%[dst_ptr]] \n"
      "incb %[dst_ptr] \n"
      "b.ge 1b \n"
      "2: \n"
      "adds %w[dst_width], %w[dst_width], %w[vl] \n"
      "b.eq 99f \n"
      "whilelt p0.b, wzr, %w[dst_width] \n"
      "ld2b {z0.b, z1.b}, p0/z, [%[src_ptr]] \n"
      "urhadd z0.b, p0/m, z0.b, z1.b \n"
      "st1b {z0.b}, p0, [%[dst_ptr]] \n"
      "99: \n"
      : [src_ptr] "+r"(src_ptr),      // %[src_ptr]
        [dst_ptr] "+r"(dst),          // %[dst_ptr]
        [dst_width] "+r"(dst_width),  // %[dst_width]
        [vl] "=r"(vl)                 // %[vl]
      :
      : "memory", "cc", "z0", "z1", "p0");
}

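// SCALEROWDOWN2BOX_SVE / ScaleRowDown2Box_SME: 2x2 box filter. The widening
// adds (uaddlb/uaddlt) sum each horizontal source pair in both rows, the two
// row sums are added, and rshrnb/rshrnt narrow the four-pixel totals back to
// bytes with a rounding shift right by 2.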
#define SCALEROWDOWN2BOX_SVE                        \
  "ld2b {z0.b, z1.b}, p0/z, [%[src_ptr]] \n"        \
  "ld2b {z2.b, z3.b}, p0/z, [%[src2_ptr]] \n"       \
  "incb %[src_ptr], all, mul #2 \n"                 \
  "incb %[src2_ptr], all, mul #2 \n"                \
  "uaddlb z4.h, z0.b, z1.b \n"                      \
  "uaddlt z5.h, z0.b, z1.b \n"                      \
  "uaddlb z6.h, z2.b, z3.b \n"                      \
  "uaddlt z7.h, z2.b, z3.b \n"                      \
  "add z4.h, z4.h, z6.h \n"                         \
  "add z5.h, z5.h, z7.h \n"                         \
  "rshrnb z0.b, z4.h, #2 \n"                        \
  "rshrnt z0.b, z5.h, #2 \n"                        \
  "subs %w[dst_width], %w[dst_width], %w[vl] \n"    \
  "st1b {z0.b}, p0, [%[dst_ptr]] \n"                \
  "incb %[dst_ptr] \n"

__arm_locally_streaming void ScaleRowDown2Box_SME(const uint8_t* src_ptr,
                                                  ptrdiff_t src_stride,
                                                  uint8_t* dst,
                                                  int dst_width) {
  // Streaming-SVE only, no use of ZA tile.
  const uint8_t* src2_ptr = src_ptr + src_stride;
  int vl;
  asm volatile(
      "cntb %x[vl] \n"
      "subs %w[dst_width], %w[dst_width], %w[vl] \n"
      "b.lt 2f \n"
      "ptrue p0.b \n"
      "1: \n"  //
      SCALEROWDOWN2BOX_SVE
      "b.ge 1b \n"
      "2: \n"
      "adds %w[dst_width], %w[dst_width], %w[vl] \n"
      "b.eq 99f \n"
      "whilelt p0.b, wzr, %w[dst_width] \n"  //
      SCALEROWDOWN2BOX_SVE
      "99: \n"
      : [src_ptr] "+r"(src_ptr),      // %[src_ptr]
        [src2_ptr] "+r"(src2_ptr),    // %[src2_ptr]
        [dst_ptr] "+r"(dst),          // %[dst_ptr]
        [dst_width] "+r"(dst_width),  // %[dst_width]
        [vl] "=r"(vl)                 // %[vl]
      :
      : "memory", "cc", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "p0");
}

#undef SCALEROWDOWN2BOX_SVE

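// ScaleUVRowDown2_SME: 2:1 subsample of an interleaved UV row. Each UV pair
// is handled as one 16-bit element (cnth elements per vector), so dst_width
// counts UV pairs; the second pair of each group of two (z1) is kept.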
__arm_locally_streaming void ScaleUVRowDown2_SME(const uint8_t* src_uv,
                                                 ptrdiff_t src_stride,
                                                 uint8_t* dst_uv,
                                                 int dst_width) {
  // Streaming-SVE only, no use of ZA tile.
  (void)src_stride;
  int vl;
  asm volatile(
      "cnth %x[vl] \n"
      "subs %w[dst_width], %w[dst_width], %w[vl] \n"
      "b.lt 2f \n"
      "1: \n"
      "ptrue p0.b \n"
      "ld2h {z0.h, z1.h}, p0/z, [%[src_uv]] \n"
      "incb %[src_uv], all, mul #2 \n"
      "subs %w[dst_width], %w[dst_width], %w[vl] \n"
      "st1h {z1.h}, p0, [%[dst_uv]] \n"
      "incb %[dst_uv] \n"
      "b.ge 1b \n"
      "2: \n"
      "adds %w[dst_width], %w[dst_width], %w[vl] \n"
      "b.eq 99f \n"
      "whilelt p0.h, wzr, %w[dst_width] \n"
      "ld2h {z0.h, z1.h}, p0/z, [%[src_uv]] \n"
      "st1h {z1.h}, p0, [%[dst_uv]] \n"
      "99: \n"
      : [src_uv] "+r"(src_uv),        // %[src_uv]
        [dst_uv] "+r"(dst_uv),        // %[dst_uv]
        [dst_width] "+r"(dst_width),  // %[dst_width]
        [vl] "=r"(vl)                 // %[vl]
      :
      : "memory", "cc", "z0", "z1", "p0");
}

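// ScaleUVRowDown2Linear_SME: averages horizontally adjacent UV pairs. The
// all-true byte predicate p1 lets urhadd average U with U and V with V
// within each de-interleaved 16-bit lane, while p0.h governs the loads and
// stores.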
__arm_locally_streaming void ScaleUVRowDown2Linear_SME(const uint8_t* src_uv,
                                                       ptrdiff_t src_stride,
                                                       uint8_t* dst_uv,
                                                       int dst_width) {
  // Streaming-SVE only, no use of ZA tile.
  (void)src_stride;
  int vl;
  asm volatile(
      "cnth %x[vl] \n"
      "ptrue p1.b \n"
      "subs %w[dst_width], %w[dst_width], %w[vl] \n"
      "b.lt 2f \n"
      "ptrue p0.h \n"
      "1: \n"
      "ld2h {z0.h, z1.h}, p0/z, [%[src_uv]] \n"
      "incb %[src_uv], all, mul #2 \n"
      "urhadd z0.b, p1/m, z0.b, z1.b \n"
      "st1h {z0.h}, p0, [%[dst_uv]] \n"
      "incb %[dst_uv], all, mul #1 \n"
      "subs %w[dst_width], %w[dst_width], %w[vl] \n"
      "b.ge 1b \n"
      "2: \n"
      "adds %w[dst_width], %w[dst_width], %w[vl] \n"
      "b.eq 99f \n"
      "whilelt p0.h, wzr, %w[dst_width] \n"
      "ld2h {z0.h, z1.h}, p0/z, [%[src_uv]] \n"
      "urhadd z0.b, p1/m, z0.b, z1.b \n"
      "st1h {z0.h}, p0, [%[dst_uv]] \n"
      "99: \n"
      : [src_uv] "+r"(src_uv),        // %[src_uv]
        [dst_uv] "+r"(dst_uv),        // %[dst_uv]
        [dst_width] "+r"(dst_width),  // %[dst_width]
        [vl] "=r"(vl)                 // %[vl]
      :
      : "memory", "cc", "z0", "z1", "p0", "p1");
}

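// SCALEUVROWDOWN2BOX_SVE / ScaleUVRowDown2Box_SME: 2x2 box filter for
// interleaved UV. uaddlb sums the U bytes and uaddlt the V bytes of adjacent
// UV pairs, the two row sums are added, and rshrnb/rshrnt narrow back to
// interleaved UV with a rounding shift right by 2.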
#define SCALEUVROWDOWN2BOX_SVE                      \
  "ld2h {z0.h, z1.h}, p0/z, [%[src_uv]] \n"         \
  "ld2h {z2.h, z3.h}, p0/z, [%[src2_uv]] \n"        \
  "incb %[src_uv], all, mul #2 \n"                  \
  "incb %[src2_uv], all, mul #2 \n"                 \
  "uaddlb z4.h, z0.b, z1.b \n"                      \
  "uaddlt z5.h, z0.b, z1.b \n"                      \
  "uaddlb z6.h, z2.b, z3.b \n"                      \
  "uaddlt z7.h, z2.b, z3.b \n"                      \
  "add z4.h, z4.h, z6.h \n"                         \
  "add z5.h, z5.h, z7.h \n"                         \
  "rshrnb z0.b, z4.h, #2 \n"                        \
  "rshrnt z0.b, z5.h, #2 \n"                        \
  "st1h {z0.h}, p0, [%[dst_uv]] \n"                 \
  "incb %[dst_uv], all, mul #1 \n"

__arm_locally_streaming void ScaleUVRowDown2Box_SME(const uint8_t* src_uv,
                                                    ptrdiff_t src_stride,
                                                    uint8_t* dst_uv,
                                                    int dst_width) {
  // Streaming-SVE only, no use of ZA tile.
  const uint8_t* src2_uv = src_uv + src_stride;
  int vl;
  asm volatile(
      "cnth %x[vl] \n"
      "ptrue p1.b \n"
      "subs %w[dst_width], %w[dst_width], %w[vl] \n"
      "b.lt 2f \n"
      "ptrue p0.h \n"
      "1: \n"  //
      SCALEUVROWDOWN2BOX_SVE
      "subs %w[dst_width], %w[dst_width], %w[vl] \n"
      "b.ge 1b \n"
      "2: \n"
      "adds %w[dst_width], %w[dst_width], %w[vl] \n"
      "b.eq 99f \n"
      "whilelt p0.h, wzr, %w[dst_width] \n"  //
      SCALEUVROWDOWN2BOX_SVE
      "99: \n"
      : [src_uv] "+r"(src_uv),        // %[src_uv]
        [src2_uv] "+r"(src2_uv),      // %[src2_uv]
        [dst_uv] "+r"(dst_uv),        // %[dst_uv]
        [dst_width] "+r"(dst_width),  // %[dst_width]
        [vl] "=r"(vl)                 // %[vl]
      :
      : "memory", "cc", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "p0",
        "p1");
}

#undef SCALEUVROWDOWN2BOX_SVE

#endif  // !defined(LIBYUV_DISABLE_SME) && defined(CLANG_HAS_SME) &&
        // defined(__aarch64__)

#ifdef __cplusplus
} // extern "C"
} // namespace libyuv
#endif
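// Usage sketch (illustrative only, not part of libyuv): these row kernels are
// normally selected per output row by the plane scalers in scale.cc (e.g.
// ScalePlaneDown2). A minimal caller for a box-filtered 2x downscale of one
// plane, using the hypothetical helper below and assuming even dimensions,
// could look like:
//
//   void HalvePlaneBox(const uint8_t* src, int src_stride,
//                      uint8_t* dst, int dst_stride,
//                      int dst_width, int dst_height) {
//     for (int y = 0; y < dst_height; ++y) {
//       // The row kernel reads src and src + src_stride (two source rows).
//       ScaleRowDown2Box_SME(src, src_stride, dst, dst_width);
//       src += 2 * src_stride;  // consume two source rows per output row
//       dst += dst_stride;
//     }
//   }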