libyuv/source/rotate_neon64.cc
George Steed 4f7fd808b7 [AArch64] Use full vectors in TransposeWx{8 => 16}_NEON
The existing Neon code only makes use of 64-bit vectors throughout,
which limits performance on larger cores. To avoid this, swap the Neon
code from a Wx8 implementation to a Wx16 implementation and process
blocks of 16 full vectors at a time.

The original code also handled widths that were not exact multiples of
16; however, this is already handled by the "any" kernel, so that
handling is removed.
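
For context, the "any" wrapper splits the width into a multiple-of-16
part for the SIMD kernel and a remainder for the scalar fallback. A
minimal sketch of that pattern (illustrative only; the real wrapper in
rotate_any.cc is generated by a macro, and the wrapper name here is
assumed — prototypes come from rotate_row.h):

  void TransposeWx16_Any_NEON(const uint8_t* src, int src_stride,
                              uint8_t* dst, int dst_stride, int width) {
    int r = width & 15;  // remainder columns the SIMD kernel can't take
    int n = width - r;   // multiple-of-16 part
    if (n > 0) {
      TransposeWx16_NEON(src, src_stride, dst, dst_stride, n);
    }
    // Column n of src becomes row n of dst, hence the dst offset.
    TransposeWx16_C(src + n, src_stride, dst + n * dst_stride,
                    dst_stride, r);
  }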

Finally, rather than duplicating the TransposeWx16_C fallback kernel
definition in every architecture that needs it, define it once in
rotate_common.cc.
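
In scalar terms the fallback just transposes a width x 16 tile byte by
byte; a minimal sketch (the actual definition in rotate_common.cc may
be structured differently):

  void TransposeWx16_C(const uint8_t* src, int src_stride,
                       uint8_t* dst, int dst_stride, int width) {
    for (int i = 0; i < width; ++i) {
      for (int j = 0; j < 16; ++j) {
        dst[i * dst_stride + j] = src[j * src_stride + i];
      }
    }
  }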

Observed speedups for TransposePlane across a range of
micro-architectures:

 Cortex-A53: -40.0%
 Cortex-A55: -20.7%
 Cortex-A57: -43.9%
Cortex-A510: -43.5%
Cortex-A520: -43.9%
Cortex-A720: -31.1%
  Cortex-X2: -38.3%
  Cortex-X4: -43.6%

Change-Id: Ic7c4d5f24eb27091d743ddc00cd95ef178b6984e
Reviewed-on: https://chromium-review.googlesource.com/c/libyuv/libyuv/+/5545459
Reviewed-by: Frank Barchard <fbarchard@chromium.org>
2024-05-21 07:46:42 +00:00

/*
 *  Copyright 2014 The LibYuv Project Authors. All rights reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */
#include "libyuv/rotate_row.h"
#include "libyuv/row.h"
#include "libyuv/basic_types.h"
#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif
// This module is for GCC Neon armv8 64 bit.
#if !defined(LIBYUV_DISABLE_NEON) && defined(__aarch64__)
void TransposeWx16_NEON(const uint8_t* src,
                        int src_stride,
                        uint8_t* dst,
                        int dst_stride,
                        int width) {
const uint8_t* src_temp;
asm("1: \n"
"mov %[src_temp], %[src] \n"
"ld1 {v0.16b}, [%[src_temp]], %[src_stride] \n"
"ld1 {v1.16b}, [%[src_temp]], %[src_stride] \n"
"ld1 {v2.16b}, [%[src_temp]], %[src_stride] \n"
"ld1 {v3.16b}, [%[src_temp]], %[src_stride] \n"
"ld1 {v4.16b}, [%[src_temp]], %[src_stride] \n"
"ld1 {v5.16b}, [%[src_temp]], %[src_stride] \n"
"ld1 {v6.16b}, [%[src_temp]], %[src_stride] \n"
"ld1 {v7.16b}, [%[src_temp]], %[src_stride] \n"
"ld1 {v8.16b}, [%[src_temp]], %[src_stride] \n"
"ld1 {v9.16b}, [%[src_temp]], %[src_stride] \n"
"ld1 {v10.16b}, [%[src_temp]], %[src_stride] \n"
"ld1 {v11.16b}, [%[src_temp]], %[src_stride] \n"
"ld1 {v12.16b}, [%[src_temp]], %[src_stride] \n"
"ld1 {v13.16b}, [%[src_temp]], %[src_stride] \n"
"ld1 {v14.16b}, [%[src_temp]], %[src_stride] \n"
"ld1 {v15.16b}, [%[src_temp]], %[src_stride] \n"
"add %[src], %[src], #16 \n"
// Transpose 8x8-byte blocks.
"trn1 v16.2d, v0.2d, v8.2d \n"
"trn1 v17.2d, v1.2d, v9.2d \n"
"trn1 v18.2d, v2.2d, v10.2d \n"
"trn1 v19.2d, v3.2d, v11.2d \n"
"trn1 v20.2d, v4.2d, v12.2d \n"
"trn1 v21.2d, v5.2d, v13.2d \n"
"trn1 v22.2d, v6.2d, v14.2d \n"
"trn1 v23.2d, v7.2d, v15.2d \n"
"trn2 v24.2d, v0.2d, v8.2d \n"
"trn2 v25.2d, v1.2d, v9.2d \n"
"trn2 v26.2d, v2.2d, v10.2d \n"
"trn2 v27.2d, v3.2d, v11.2d \n"
"trn2 v28.2d, v4.2d, v12.2d \n"
"trn2 v29.2d, v5.2d, v13.2d \n"
"trn2 v30.2d, v6.2d, v14.2d \n"
"trn2 v31.2d, v7.2d, v15.2d \n"
"subs %w[width], %w[width], #16 \n"
// Transpose 4x4-byte blocks within each 8x8 block.
"trn1 v0.4s, v16.4s, v20.4s \n"
"trn1 v1.4s, v17.4s, v21.4s \n"
"trn1 v2.4s, v18.4s, v22.4s \n"
"trn1 v3.4s, v19.4s, v23.4s \n"
"trn2 v4.4s, v16.4s, v20.4s \n"
"trn2 v5.4s, v17.4s, v21.4s \n"
"trn2 v6.4s, v18.4s, v22.4s \n"
"trn2 v7.4s, v19.4s, v23.4s \n"
"trn1 v8.4s, v24.4s, v28.4s \n"
"trn1 v9.4s, v25.4s, v29.4s \n"
"trn1 v10.4s, v26.4s, v30.4s \n"
"trn1 v11.4s, v27.4s, v31.4s \n"
"trn2 v12.4s, v24.4s, v28.4s \n"
"trn2 v13.4s, v25.4s, v29.4s \n"
"trn2 v14.4s, v26.4s, v30.4s \n"
"trn2 v15.4s, v27.4s, v31.4s \n"
// Transpose 2x2-byte blocks within each 4x4 block.
"trn1 v16.8h, v0.8h, v2.8h \n"
"trn1 v17.8h, v1.8h, v3.8h \n"
"trn2 v18.8h, v0.8h, v2.8h \n"
"trn2 v19.8h, v1.8h, v3.8h \n"
"trn1 v20.8h, v4.8h, v6.8h \n"
"trn1 v21.8h, v5.8h, v7.8h \n"
"trn2 v22.8h, v4.8h, v6.8h \n"
"trn2 v23.8h, v5.8h, v7.8h \n"
"trn1 v24.8h, v8.8h, v10.8h \n"
"trn1 v25.8h, v9.8h, v11.8h \n"
"trn2 v26.8h, v8.8h, v10.8h \n"
"trn2 v27.8h, v9.8h, v11.8h \n"
"trn1 v28.8h, v12.8h, v14.8h \n"
"trn1 v29.8h, v13.8h, v15.8h \n"
"trn2 v30.8h, v12.8h, v14.8h \n"
"trn2 v31.8h, v13.8h, v15.8h \n"
// Transpose bytes within each 2x2 block.
"trn1 v0.16b, v16.16b, v17.16b \n"
"trn2 v1.16b, v16.16b, v17.16b \n"
"trn1 v2.16b, v18.16b, v19.16b \n"
"trn2 v3.16b, v18.16b, v19.16b \n"
"trn1 v4.16b, v20.16b, v21.16b \n"
"trn2 v5.16b, v20.16b, v21.16b \n"
"trn1 v6.16b, v22.16b, v23.16b \n"
"trn2 v7.16b, v22.16b, v23.16b \n"
"trn1 v8.16b, v24.16b, v25.16b \n"
"trn2 v9.16b, v24.16b, v25.16b \n"
"trn1 v10.16b, v26.16b, v27.16b \n"
"trn2 v11.16b, v26.16b, v27.16b \n"
"trn1 v12.16b, v28.16b, v29.16b \n"
"trn2 v13.16b, v28.16b, v29.16b \n"
"trn1 v14.16b, v30.16b, v31.16b \n"
"trn2 v15.16b, v30.16b, v31.16b \n"
"st1 {v0.16b}, [%[dst]], %[dst_stride] \n"
"st1 {v1.16b}, [%[dst]], %[dst_stride] \n"
"st1 {v2.16b}, [%[dst]], %[dst_stride] \n"
"st1 {v3.16b}, [%[dst]], %[dst_stride] \n"
"st1 {v4.16b}, [%[dst]], %[dst_stride] \n"
"st1 {v5.16b}, [%[dst]], %[dst_stride] \n"
"st1 {v6.16b}, [%[dst]], %[dst_stride] \n"
"st1 {v7.16b}, [%[dst]], %[dst_stride] \n"
"st1 {v8.16b}, [%[dst]], %[dst_stride] \n"
"st1 {v9.16b}, [%[dst]], %[dst_stride] \n"
"st1 {v10.16b}, [%[dst]], %[dst_stride] \n"
"st1 {v11.16b}, [%[dst]], %[dst_stride] \n"
"st1 {v12.16b}, [%[dst]], %[dst_stride] \n"
"st1 {v13.16b}, [%[dst]], %[dst_stride] \n"
"st1 {v14.16b}, [%[dst]], %[dst_stride] \n"
"st1 {v15.16b}, [%[dst]], %[dst_stride] \n"
"b.gt 1b \n"
: [src] "+r"(src), // %[src]
[src_temp] "=&r"(src_temp), // %[src_temp]
[dst] "+r"(dst), // %[dst]
[width] "+r"(width) // %[width]
: [src_stride] "r"((ptrdiff_t)src_stride), // %[src_stride]
[dst_stride] "r"((ptrdiff_t)dst_stride) // %[dst_stride]
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8",
"v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18",
"v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28",
"v29", "v30", "v31");
}
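// The trn1/trn2 ladder above is a divide-and-conquer transpose: the .2d
// stage swaps the two off-diagonal 8x8-byte blocks of each 16x16 tile,
// the .4s stage repeats that for the 4x4-byte blocks inside each 8x8
// block, then .8h for 2x2 blocks and .16b for single bytes. A scalar
// sketch of the same idea (illustrative only, not part of libyuv; n must
// be a power of two):
//
//   void BlockTranspose(uint8_t* m, int stride, int n) {
//     if (n == 1) return;
//     int h = n / 2;
//     // Swap the top-right and bottom-left h x h blocks wholesale.
//     for (int y = 0; y < h; ++y) {
//       for (int x = 0; x < h; ++x) {
//         uint8_t t = m[y * stride + h + x];
//         m[y * stride + h + x] = m[(h + y) * stride + x];
//         m[(h + y) * stride + x] = t;
//       }
//     }
//     // Recurse into all four quadrants to finish the transpose.
//     BlockTranspose(m, stride, h);
//     BlockTranspose(m + h, stride, h);
//     BlockTranspose(m + h * stride, stride, h);
//     BlockTranspose(m + h * stride + h, stride, h);
//   }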
static const uint8_t kVTbl4x4TransposeDi[32] = {
0, 16, 32, 48, 2, 18, 34, 50, 4, 20, 36, 52, 6, 22, 38, 54,
1, 17, 33, 49, 3, 19, 35, 51, 5, 21, 37, 53, 7, 23, 39, 55};
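// kVTbl4x4TransposeDi above drives the two tbl lookups in the 4x8
// residual path below: the first 16 indices (0, 16, 32, 48, ...) gather
// the even-offset (U) bytes of the four row registers column by column,
// and the second 16 gather the odd-offset (V) bytes, so a single shuffle
// both transposes a block of pairs and deinterleaves U from V.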
void TransposeUVWx8_NEON(const uint8_t* src,
                         int src_stride,
                         uint8_t* dst_a,
                         int dst_stride_a,
                         uint8_t* dst_b,
                         int dst_stride_b,
                         int width) {
const uint8_t* src_temp;
asm volatile(
// loops are on blocks of 8. loop will stop when
// counter gets to or below 0. starting the counter
// at w-8 allows for this
"sub %w4, %w4, #8 \n"
// handle 8x8 blocks. this should be the majority of the plane
"1: \n"
"mov %0, %1 \n"
"ld1 {v0.16b}, [%0], %5 \n"
"ld1 {v1.16b}, [%0], %5 \n"
"ld1 {v2.16b}, [%0], %5 \n"
"ld1 {v3.16b}, [%0], %5 \n"
"ld1 {v4.16b}, [%0], %5 \n"
"ld1 {v5.16b}, [%0], %5 \n"
"ld1 {v6.16b}, [%0], %5 \n"
"ld1 {v7.16b}, [%0] \n"
"mov %0, %1 \n"
"trn1 v16.16b, v0.16b, v1.16b \n"
"trn2 v17.16b, v0.16b, v1.16b \n"
"trn1 v18.16b, v2.16b, v3.16b \n"
"trn2 v19.16b, v2.16b, v3.16b \n"
"trn1 v20.16b, v4.16b, v5.16b \n"
"trn2 v21.16b, v4.16b, v5.16b \n"
"trn1 v22.16b, v6.16b, v7.16b \n"
"trn2 v23.16b, v6.16b, v7.16b \n"
"trn1 v0.8h, v16.8h, v18.8h \n"
"trn2 v1.8h, v16.8h, v18.8h \n"
"trn1 v2.8h, v20.8h, v22.8h \n"
"trn2 v3.8h, v20.8h, v22.8h \n"
"trn1 v4.8h, v17.8h, v19.8h \n"
"trn2 v5.8h, v17.8h, v19.8h \n"
"trn1 v6.8h, v21.8h, v23.8h \n"
"trn2 v7.8h, v21.8h, v23.8h \n"
"trn1 v16.4s, v0.4s, v2.4s \n"
"trn2 v17.4s, v0.4s, v2.4s \n"
"trn1 v18.4s, v1.4s, v3.4s \n"
"trn2 v19.4s, v1.4s, v3.4s \n"
"trn1 v20.4s, v4.4s, v6.4s \n"
"trn2 v21.4s, v4.4s, v6.4s \n"
"trn1 v22.4s, v5.4s, v7.4s \n"
"trn2 v23.4s, v5.4s, v7.4s \n"
"mov %0, %2 \n"
"st1 {v16.d}[0], [%0], %6 \n"
"st1 {v18.d}[0], [%0], %6 \n"
"st1 {v17.d}[0], [%0], %6 \n"
"st1 {v19.d}[0], [%0], %6 \n"
"st1 {v16.d}[1], [%0], %6 \n"
"st1 {v18.d}[1], [%0], %6 \n"
"st1 {v17.d}[1], [%0], %6 \n"
"st1 {v19.d}[1], [%0] \n"
"mov %0, %3 \n"
"st1 {v20.d}[0], [%0], %7 \n"
"st1 {v22.d}[0], [%0], %7 \n"
"st1 {v21.d}[0], [%0], %7 \n"
"st1 {v23.d}[0], [%0], %7 \n"
"st1 {v20.d}[1], [%0], %7 \n"
"st1 {v22.d}[1], [%0], %7 \n"
"st1 {v21.d}[1], [%0], %7 \n"
"st1 {v23.d}[1], [%0] \n"
"add %1, %1, #16 \n" // src += 8*2
"add %2, %2, %6, lsl #3 \n" // dst_a += 8 *
// dst_stride_a
"add %3, %3, %7, lsl #3 \n" // dst_b += 8 *
// dst_stride_b
"subs %w4, %w4, #8 \n" // w -= 8
"b.ge 1b \n"
// add 8 back to counter. if the result is 0 there are
// no residuals.
"adds %w4, %w4, #8 \n"
"b.eq 4f \n"
// some residual, so between 1 and 7 lines left to transpose
"cmp %w4, #2 \n"
"b.lt 3f \n"
"cmp %w4, #4 \n"
"b.lt 2f \n"
// TODO(frkoenig): Clean this up
// 4x8 block
"mov %0, %1 \n"
"ld1 {v0.8b}, [%0], %5 \n"
"ld1 {v1.8b}, [%0], %5 \n"
"ld1 {v2.8b}, [%0], %5 \n"
"ld1 {v3.8b}, [%0], %5 \n"
"ld1 {v4.8b}, [%0], %5 \n"
"ld1 {v5.8b}, [%0], %5 \n"
"ld1 {v6.8b}, [%0], %5 \n"
"ld1 {v7.8b}, [%0] \n"
"ld1 {v30.16b}, [%8], #16 \n"
"ld1 {v31.16b}, [%8] \n"
"tbl v16.16b, {v0.16b, v1.16b, v2.16b, v3.16b}, v30.16b \n"
"tbl v17.16b, {v0.16b, v1.16b, v2.16b, v3.16b}, v31.16b \n"
"tbl v18.16b, {v4.16b, v5.16b, v6.16b, v7.16b}, v30.16b \n"
"tbl v19.16b, {v4.16b, v5.16b, v6.16b, v7.16b}, v31.16b \n"
"mov %0, %2 \n"
"st1 {v16.s}[0], [%0], %6 \n"
"st1 {v16.s}[1], [%0], %6 \n"
"st1 {v16.s}[2], [%0], %6 \n"
"st1 {v16.s}[3], [%0], %6 \n"
"add %0, %2, #4 \n"
"st1 {v18.s}[0], [%0], %6 \n"
"st1 {v18.s}[1], [%0], %6 \n"
"st1 {v18.s}[2], [%0], %6 \n"
"st1 {v18.s}[3], [%0] \n"
"mov %0, %3 \n"
"st1 {v17.s}[0], [%0], %7 \n"
"st1 {v17.s}[1], [%0], %7 \n"
"st1 {v17.s}[2], [%0], %7 \n"
"st1 {v17.s}[3], [%0], %7 \n"
"add %0, %3, #4 \n"
"st1 {v19.s}[0], [%0], %7 \n"
"st1 {v19.s}[1], [%0], %7 \n"
"st1 {v19.s}[2], [%0], %7 \n"
"st1 {v19.s}[3], [%0] \n"
"add %1, %1, #8 \n" // src += 4 * 2
"add %2, %2, %6, lsl #2 \n" // dst_a += 4 *
// dst_stride_a
"add %3, %3, %7, lsl #2 \n" // dst_b += 4 *
// dst_stride_b
"subs %w4, %w4, #4 \n" // w -= 4
"b.eq 4f \n"
// some residual, check to see if it includes a 2x8 block,
// or less
"cmp %w4, #2 \n"
"b.lt 3f \n"
// 2x8 block
"2: \n"
"mov %0, %1 \n"
"ld2 {v0.h, v1.h}[0], [%0], %5 \n"
"ld2 {v2.h, v3.h}[0], [%0], %5 \n"
"ld2 {v0.h, v1.h}[1], [%0], %5 \n"
"ld2 {v2.h, v3.h}[1], [%0], %5 \n"
"ld2 {v0.h, v1.h}[2], [%0], %5 \n"
"ld2 {v2.h, v3.h}[2], [%0], %5 \n"
"ld2 {v0.h, v1.h}[3], [%0], %5 \n"
"ld2 {v2.h, v3.h}[3], [%0] \n"
"trn1 v4.8b, v0.8b, v2.8b \n"
"trn2 v5.8b, v0.8b, v2.8b \n"
"trn1 v6.8b, v1.8b, v3.8b \n"
"trn2 v7.8b, v1.8b, v3.8b \n"
"mov %0, %2 \n"
"st1 {v4.d}[0], [%0], %6 \n"
"st1 {v6.d}[0], [%0] \n"
"mov %0, %3 \n"
"st1 {v5.d}[0], [%0], %7 \n"
"st1 {v7.d}[0], [%0] \n"
"add %1, %1, #4 \n" // src += 2 * 2
"add %2, %2, %6, lsl #1 \n" // dst_a += 2 *
// dst_stride_a
"add %3, %3, %7, lsl #1 \n" // dst_b += 2 *
// dst_stride_b
"subs %w4, %w4, #2 \n" // w -= 2
"b.eq 4f \n"
// 1x8 block
"3: \n"
"ld2 {v0.b, v1.b}[0], [%1], %5 \n"
"ld2 {v0.b, v1.b}[1], [%1], %5 \n"
"ld2 {v0.b, v1.b}[2], [%1], %5 \n"
"ld2 {v0.b, v1.b}[3], [%1], %5 \n"
"ld2 {v0.b, v1.b}[4], [%1], %5 \n"
"ld2 {v0.b, v1.b}[5], [%1], %5 \n"
"ld2 {v0.b, v1.b}[6], [%1], %5 \n"
"ld2 {v0.b, v1.b}[7], [%1] \n"
"st1 {v0.d}[0], [%2] \n"
"st1 {v1.d}[0], [%3] \n"
"4: \n"
: "=&r"(src_temp), // %0
"+r"(src), // %1
"+r"(dst_a), // %2
"+r"(dst_b), // %3
"+r"(width) // %4
: "r"((ptrdiff_t)src_stride), // %5
"r"((ptrdiff_t)dst_stride_a), // %6
"r"((ptrdiff_t)dst_stride_b), // %7
"r"(&kVTbl4x4TransposeDi) // %8
: "memory", "cc", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16",
"v17", "v18", "v19", "v20", "v21", "v22", "v23", "v30", "v31");
}
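// In scalar terms, the kernel above performs (sketch for reference only;
// compare the C fallback in rotate_common.cc -- width counts UV pairs):
//
//   for (int x = 0; x < width; ++x) {
//     for (int y = 0; y < 8; ++y) {
//       dst_a[x * dst_stride_a + y] = src[y * src_stride + 2 * x + 0];  // U
//       dst_b[x * dst_stride_b + y] = src[y * src_stride + 2 * x + 1];  // V
//     }
//   }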
// Transpose 32-bit values (ARGB)
void Transpose4x4_32_NEON(const uint8_t* src,
                          int src_stride,
                          uint8_t* dst,
                          int dst_stride,
                          int width) {
const uint8_t* src1 = src + src_stride;
const uint8_t* src2 = src1 + src_stride;
const uint8_t* src3 = src2 + src_stride;
uint8_t* dst1 = dst + dst_stride;
uint8_t* dst2 = dst1 + dst_stride;
uint8_t* dst3 = dst2 + dst_stride;
asm volatile(
// Main loop transpose 4x4. Read a column, write a row.
"1: \n"
"ld4 {v0.s, v1.s, v2.s, v3.s}[0], [%0], %9 \n"
"ld4 {v0.s, v1.s, v2.s, v3.s}[1], [%1], %9 \n"
"ld4 {v0.s, v1.s, v2.s, v3.s}[2], [%2], %9 \n"
"ld4 {v0.s, v1.s, v2.s, v3.s}[3], [%3], %9 \n"
"subs %w8, %w8, #4 \n" // w -= 4
"st1 {v0.4s}, [%4], 16 \n"
"st1 {v1.4s}, [%5], 16 \n"
"st1 {v2.4s}, [%6], 16 \n"
"st1 {v3.4s}, [%7], 16 \n"
"b.gt 1b \n"
: "+r"(src), // %0
"+r"(src1), // %1
"+r"(src2), // %2
"+r"(src3), // %3
"+r"(dst), // %4
"+r"(dst1), // %5
"+r"(dst2), // %6
"+r"(dst3), // %7
"+r"(width) // %8
: "r"((ptrdiff_t)(src_stride * 4)) // %9
: "memory", "cc", "v0", "v1", "v2", "v3");
}
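// Example of driving the kernel above (a sketch, not part of libyuv):
// strides are in bytes, width counts 32-bit pixels and is assumed to be
// a multiple of four here, and each call transposes four rows.
//
//   uint8_t src[4 * 16];  // 4 rows of 4 ARGB pixels, 16-byte stride
//   uint8_t dst[4 * 16];
//   Transpose4x4_32_NEON(src, 16, dst, 16, 4);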
#endif // !defined(LIBYUV_DISABLE_NEON) && defined(__aarch64__)
#ifdef __cplusplus
} // extern "C"
} // namespace libyuv
#endif