Mirror of https://chromium.googlesource.com/libyuv/libyuv (synced 2025-12-07 01:06:46 +08:00)
Fix a bug in the alpha blend 4 pixel loop, and make the blender C code match the SSE version, both for better testability and as reference code for future optimized code.
BUG=none
TEST=none
Review URL: https://webrtc-codereview.appspot.com/645008
git-svn-id: http://libyuv.googlecode.com/svn/trunk@287 16f28f9a-4ce2-e073-06de-1de4eb20be90
parent ee2208885f
commit 794fe1236a
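The BLEND macro at the heart of this change computes an "over" composite in 8-bit fixed point: dst = f + b * (256 - a) / 256, assuming the foreground f is already premultiplied by its alpha. A minimal standalone sketch of that arithmetic (the helper name is hypothetical, not part of the patch):

#include <stdint.h>
#include <stdio.h>

// Same math as the patch's BLEND(f, b, a): background scaled by
// (256 - alpha) / 256, then added to the premultiplied foreground.
static uint32_t BlendChannel(uint32_t f, uint32_t b, uint32_t a) {
  return (((256 - a) * b) >> 8) + f;
}

int main(void) {
  // Premultiplied foreground 0x40 at alpha 0x80 over background 0xFF.
  uint32_t f = 0x40, b = 0xFF, a = 0x80;
  printf("approx=%u exact=%u\n", BlendChannel(f, b, a),
         f + (b * (255 - a) + 127) / 255);  // both print 191
  return 0;
}

For 8-bit channels the (256 - a) / 256 form stays within one step of the exact (255 - a) / 255 scaling, which is why it is acceptable as reference behavior for the SIMD paths.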
@@ -1,6 +1,6 @@
 Name: libyuv
 URL: http://code.google.com/p/libyuv/
-Version: 286
+Version: 287
 License: BSD
 License File: LICENSE
@@ -11,7 +11,7 @@
 #ifndef INCLUDE_LIBYUV_VERSION_H_
 #define INCLUDE_LIBYUV_VERSION_H_
 
-#define LIBYUV_VERSION 286
+#define LIBYUV_VERSION 287
 
 #endif  // INCLUDE_LIBYUV_VERSION_H_
@@ -588,77 +588,167 @@ void UYVYToYRow_C(const uint8* src_yuy2, uint8* dst_y, int width) {
   }
 }
 
-#define BLENDER(f, b, a) (((256 - a) * b) >> 8) + f
+#define BLEND(f, b, a) (((256 - a) * b) >> 8) + f
 
 // Blend src_argb0 over src_argb1 and store to dst_argb.
 // dst_argb may be src_argb0 or src_argb1.
+// This code mimics the SSSE3 version for better testability.
 void ARGBBlendRow_C(const uint8* src_argb0, const uint8* src_argb1,
                     uint8* dst_argb, int width) {
   for (int x = 0; x < width - 1; x += 2) {
+    uint32 fb = src_argb0[0];
+    uint32 fg = src_argb0[1];
+    uint32 fr = src_argb0[2];
     uint32 a = src_argb0[3];
-    if (a == 0) {
-      *reinterpret_cast<uint32*>(dst_argb) =
-          *reinterpret_cast<const uint32*>(src_argb1);
-    } else if (a == 255) {
-      *reinterpret_cast<uint32*>(dst_argb) =
-          *reinterpret_cast<const uint32*>(src_argb0);
-    } else {
-      const uint32 fb = src_argb0[0];
-      const uint32 fg = src_argb0[1];
-      const uint32 fr = src_argb0[2];
-      const uint32 bb = src_argb1[0];
-      const uint32 bg = src_argb1[1];
-      const uint32 br = src_argb1[2];
-      dst_argb[0] = BLENDER(fb, bb, a);
-      dst_argb[1] = BLENDER(fg, bg, a);
-      dst_argb[2] = BLENDER(fr, br, a);
-      dst_argb[3] = 255u;
-    }
+    uint32 bb = src_argb1[0];
+    uint32 bg = src_argb1[1];
+    uint32 br = src_argb1[2];
+    dst_argb[0] = BLEND(fb, bb, a);
+    dst_argb[1] = BLEND(fg, bg, a);
+    dst_argb[2] = BLEND(fr, br, a);
+    dst_argb[3] = 255u;
+    fb = src_argb0[4 + 0];
+    fg = src_argb0[4 + 1];
+    fr = src_argb0[4 + 2];
     a = src_argb0[4 + 3];
-    if (a == 0) {
-      *reinterpret_cast<uint32*>(dst_argb + 4) =
-          *reinterpret_cast<const uint32*>(src_argb1 + 4);
-    } else if (a == 255) {
-      *reinterpret_cast<uint32*>(dst_argb + 4) =
-          *reinterpret_cast<const uint32*>(src_argb0 + 4);
-    } else {
-      const uint32 fb = src_argb0[4 + 0];
-      const uint32 fg = src_argb0[4 + 1];
-      const uint32 fr = src_argb0[4 + 2];
-      const uint32 bb = src_argb1[4 + 0];
-      const uint32 bg = src_argb1[4 + 1];
-      const uint32 br = src_argb1[4 + 2];
-      dst_argb[4 + 0] = BLENDER(fb, bb, a);
-      dst_argb[4 + 1] = BLENDER(fg, bg, a);
-      dst_argb[4 + 2] = BLENDER(fr, br, a);
-      dst_argb[4 + 3] = 255u;
-    }
+    bb = src_argb1[4 + 0];
+    bg = src_argb1[4 + 1];
+    br = src_argb1[4 + 2];
+    dst_argb[4 + 0] = BLEND(fb, bb, a);
+    dst_argb[4 + 1] = BLEND(fg, bg, a);
+    dst_argb[4 + 2] = BLEND(fr, br, a);
+    dst_argb[4 + 3] = 255u;
    src_argb0 += 8;
     src_argb1 += 8;
     dst_argb += 8;
   }
 
   if (width & 1) {
+    uint32 fb = src_argb0[0];
+    uint32 fg = src_argb0[1];
+    uint32 fr = src_argb0[2];
     uint32 a = src_argb0[3];
-    if (a == 0) {
-      *reinterpret_cast<uint32*>(dst_argb) =
-          *reinterpret_cast<const uint32*>(src_argb1);
-    } else if (a == 255) {
-      *reinterpret_cast<uint32*>(dst_argb) =
-          *reinterpret_cast<const uint32*>(src_argb0);
-    } else {
-      const uint32 fb = src_argb0[0];
-      const uint32 fg = src_argb0[1];
-      const uint32 fr = src_argb0[2];
-      const uint32 bb = src_argb1[0];
-      const uint32 bg = src_argb1[1];
-      const uint32 br = src_argb1[2];
-      dst_argb[0] = BLENDER(fb, bb, a);
-      dst_argb[1] = BLENDER(fg, bg, a);
-      dst_argb[2] = BLENDER(fr, br, a);
-      dst_argb[3] = 255u;
-    }
+    uint32 bb = src_argb1[0];
+    uint32 bg = src_argb1[1];
+    uint32 br = src_argb1[2];
+    dst_argb[0] = BLEND(fb, bb, a);
+    dst_argb[1] = BLEND(fg, bg, a);
+    dst_argb[2] = BLEND(fr, br, a);
+    dst_argb[3] = 255u;
   }
 }
+#undef BLEND
+
+#define ATTENUATE(f, a) (a | (a << 8)) * (f | (f << 8)) >> 24
+
+// Multiply source RGB by alpha and store to destination.
+// This code mimics the SSSE3 version for better testability.
+void ARGBAttenuateRow_C(const uint8* src_argb, uint8* dst_argb, int width) {
+  for (int i = 0; i < width - 1; i += 2) {
+    uint32 b = src_argb[0];
+    uint32 g = src_argb[1];
+    uint32 r = src_argb[2];
+    uint32 a = src_argb[3];
+    dst_argb[0] = ATTENUATE(b, a);
+    dst_argb[1] = ATTENUATE(g, a);
+    dst_argb[2] = ATTENUATE(r, a);
+    dst_argb[3] = a;
+    b = src_argb[4];
+    g = src_argb[5];
+    r = src_argb[6];
+    a = src_argb[7];
+    dst_argb[4] = ATTENUATE(b, a);
+    dst_argb[5] = ATTENUATE(g, a);
+    dst_argb[6] = ATTENUATE(r, a);
+    dst_argb[7] = a;
+    src_argb += 8;
+    dst_argb += 8;
+  }
+
+  if (width & 1) {
+    const uint32 b = src_argb[0];
+    const uint32 g = src_argb[1];
+    const uint32 r = src_argb[2];
+    const uint32 a = src_argb[3];
+    dst_argb[0] = ATTENUATE(b, a);
+    dst_argb[1] = ATTENUATE(g, a);
+    dst_argb[2] = ATTENUATE(r, a);
+    dst_argb[3] = a;
+  }
+}
+#undef ATTENUATE
+
+// Divide source RGB by alpha and store to destination.
+// b = (b * 255 + (a / 2)) / a;
+// g = (g * 255 + (a / 2)) / a;
+// r = (r * 255 + (a / 2)) / a;
+// Reciprocal method is off by 1 on some values. ie 125
+// 8.16 fixed point inverse table
+#define T(a) 0x10000 / a
+uint32 fixed_invtbl8[256] = {
+  0x0100, T(0x01), T(0x02), T(0x03), T(0x04), T(0x05), T(0x06), T(0x07),
+  T(0x08), T(0x09), T(0x0a), T(0x0b), T(0x0c), T(0x0d), T(0x0e), T(0x0f),
+  T(0x10), T(0x11), T(0x12), T(0x13), T(0x14), T(0x15), T(0x16), T(0x17),
+  T(0x18), T(0x19), T(0x1a), T(0x1b), T(0x1c), T(0x1d), T(0x1e), T(0x1f),
+  T(0x20), T(0x21), T(0x22), T(0x23), T(0x24), T(0x25), T(0x26), T(0x27),
+  T(0x28), T(0x29), T(0x2a), T(0x2b), T(0x2c), T(0x2d), T(0x2e), T(0x2f),
+  T(0x30), T(0x31), T(0x32), T(0x33), T(0x34), T(0x35), T(0x36), T(0x37),
+  T(0x38), T(0x39), T(0x3a), T(0x3b), T(0x3c), T(0x3d), T(0x3e), T(0x3f),
+  T(0x40), T(0x41), T(0x42), T(0x43), T(0x44), T(0x45), T(0x46), T(0x47),
+  T(0x48), T(0x49), T(0x4a), T(0x4b), T(0x4c), T(0x4d), T(0x4e), T(0x4f),
+  T(0x50), T(0x51), T(0x52), T(0x53), T(0x54), T(0x55), T(0x56), T(0x57),
+  T(0x58), T(0x59), T(0x5a), T(0x5b), T(0x5c), T(0x5d), T(0x5e), T(0x5f),
+  T(0x60), T(0x61), T(0x62), T(0x63), T(0x64), T(0x65), T(0x66), T(0x67),
+  T(0x68), T(0x69), T(0x6a), T(0x6b), T(0x6c), T(0x6d), T(0x6e), T(0x6f),
+  T(0x70), T(0x71), T(0x72), T(0x73), T(0x74), T(0x75), T(0x76), T(0x77),
+  T(0x78), T(0x79), T(0x7a), T(0x7b), T(0x7c), T(0x7d), T(0x7e), T(0x7f),
+  T(0x80), T(0x81), T(0x82), T(0x83), T(0x84), T(0x85), T(0x86), T(0x87),
+  T(0x88), T(0x89), T(0x8a), T(0x8b), T(0x8c), T(0x8d), T(0x8e), T(0x8f),
+  T(0x90), T(0x91), T(0x92), T(0x93), T(0x94), T(0x95), T(0x96), T(0x97),
+  T(0x98), T(0x99), T(0x9a), T(0x9b), T(0x9c), T(0x9d), T(0x9e), T(0x9f),
+  T(0xa0), T(0xa1), T(0xa2), T(0xa3), T(0xa4), T(0xa5), T(0xa6), T(0xa7),
+  T(0xa8), T(0xa9), T(0xaa), T(0xab), T(0xac), T(0xad), T(0xae), T(0xaf),
+  T(0xb0), T(0xb1), T(0xb2), T(0xb3), T(0xb4), T(0xb5), T(0xb6), T(0xb7),
+  T(0xb8), T(0xb9), T(0xba), T(0xbb), T(0xbc), T(0xbd), T(0xbe), T(0xbf),
+  T(0xc0), T(0xc1), T(0xc2), T(0xc3), T(0xc4), T(0xc5), T(0xc6), T(0xc7),
+  T(0xc8), T(0xc9), T(0xca), T(0xcb), T(0xcc), T(0xcd), T(0xce), T(0xcf),
+  T(0xd0), T(0xd1), T(0xd2), T(0xd3), T(0xd4), T(0xd5), T(0xd6), T(0xd7),
+  T(0xd8), T(0xd9), T(0xda), T(0xdb), T(0xdc), T(0xdd), T(0xde), T(0xdf),
+  T(0xe0), T(0xe1), T(0xe2), T(0xe3), T(0xe4), T(0xe5), T(0xe6), T(0xe7),
+  T(0xe8), T(0xe9), T(0xea), T(0xeb), T(0xec), T(0xed), T(0xee), T(0xef),
+  T(0xf0), T(0xf1), T(0xf2), T(0xf3), T(0xf4), T(0xf5), T(0xf6), T(0xf7),
+  T(0xf8), T(0xf9), T(0xfa), T(0xfb), T(0xfc), T(0xfd), T(0xfe), 0x0100 };
+#undef T
+
+void ARGBUnattenuateRow_C(const uint8* src_argb, uint8* dst_argb, int width) {
+  for (int i = 0; i < width; ++i) {
+    uint32 b = src_argb[0];
+    uint32 g = src_argb[1];
+    uint32 r = src_argb[2];
+    const uint32 a = src_argb[3];
+    if (a) {
+      const uint32 ia = fixed_invtbl8[a];  // 8.16 fixed point
+      b = (b * ia) >> 8;
+      g = (g * ia) >> 8;
+      r = (r * ia) >> 8;
+      // Clamping should not be necessary but is free in assembly.
+      if (b > 255) {
+        b = 255;
+      }
+      if (g > 255) {
+        g = 255;
+      }
+      if (r > 255) {
+        r = 255;
+      }
+    }
+    dst_argb[0] = b;
+    dst_argb[1] = g;
+    dst_argb[2] = r;
+    dst_argb[3] = a;
+    src_argb += 4;
+    dst_argb += 4;
+  }
+}
 
 // Wrappers to handle odd width
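The new ATTENUATE macro above reproduces the SSSE3 attenuate math rather than the exact (f * a + 127) / 255: replicating a byte into both halves of a 16-bit word multiplies it by 257, so (a | (a << 8)) * (f | (f << 8)) >> 24 tracks f * a / 255 to within one, the same way the SIMD path's byte-unpack followed by a high-word multiply does. A self-contained sketch of just that arithmetic (helper name hypothetical):

#include <stdint.h>
#include <stdio.h>

// Same math as the patch's ATTENUATE(f, a).
static uint32_t Attenuate(uint32_t f, uint32_t a) {
  return ((a | (a << 8)) * (f | (f << 8))) >> 24;
}

int main(void) {
  const uint32_t kAlphas[] = {0, 1, 127, 128, 255};
  for (int i = 0; i < 5; ++i) {
    uint32_t a = kAlphas[i], f = 200;
    printf("f=%u a=%3u attenuate=%3u exact=%3u\n",
           f, a, Attenuate(f, a), (f * a + 127) / 255);
  }
  return 0;
}

Running it shows the two columns agree except at a few alphas (a=1 and a=127 with f=200 come out one low), which is the kind of small divergence the "mimics the SSSE3 version" comments are trading for exact byte-for-byte testability against the SIMD code.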
@@ -757,115 +847,6 @@ UVANY(UYVYToUVRow_Any_SSE2, UYVYToUVRow_Unaligned_SSE2, UYVYToUVRow_C, 2)
 #undef UVANY
 #endif
 
-// Multiply source RGB by alpha and store to destination.
-// b = (b * a + 127) / 255;
-void ARGBAttenuateRow_C(const uint8* src_argb, uint8* dst_argb, int width) {
-  for (int i = 0; i < width - 1; i += 2) {
-    uint32 b = src_argb[0];
-    uint32 g = src_argb[1];
-    uint32 r = src_argb[2];
-    uint32 a = src_argb[3];
-    dst_argb[0] = (b * a + 255) >> 8;
-    dst_argb[1] = (g * a + 255) >> 8;
-    dst_argb[2] = (r * a + 255) >> 8;
-    dst_argb[3] = a;
-    b = src_argb[4];
-    g = src_argb[5];
-    r = src_argb[6];
-    a = src_argb[7];
-    dst_argb[4] = (b * a + 255) >> 8;
-    dst_argb[5] = (g * a + 255) >> 8;
-    dst_argb[6] = (r * a + 255) >> 8;
-    dst_argb[7] = a;
-    src_argb += 8;
-    dst_argb += 8;
-  }
-
-  if (width & 1) {
-    const uint32 b = src_argb[0];
-    const uint32 g = src_argb[1];
-    const uint32 r = src_argb[2];
-    const uint32 a = src_argb[3];
-    dst_argb[0] = (b * a + 255) >> 8;
-    dst_argb[1] = (g * a + 255) >> 8;
-    dst_argb[2] = (r * a + 255) >> 8;
-    dst_argb[3] = a;
-  }
-}
-
-// Divide source RGB by alpha and store to destination.
-// b = (b * 255 + (a / 2)) / a;
-// g = (g * 255 + (a / 2)) / a;
-// r = (r * 255 + (a / 2)) / a;
-// Reciprocal method is off by 1 on some values. ie 125
-// 8.16 fixed point inverse table
-#define T(a) 0x10000 / a
-uint32 fixed_invtbl8[256] = {
-  0x0100, T(0x01), T(0x02), T(0x03), T(0x04), T(0x05), T(0x06), T(0x07),
-  T(0x08), T(0x09), T(0x0a), T(0x0b), T(0x0c), T(0x0d), T(0x0e), T(0x0f),
-  T(0x10), T(0x11), T(0x12), T(0x13), T(0x14), T(0x15), T(0x16), T(0x17),
-  T(0x18), T(0x19), T(0x1a), T(0x1b), T(0x1c), T(0x1d), T(0x1e), T(0x1f),
-  T(0x20), T(0x21), T(0x22), T(0x23), T(0x24), T(0x25), T(0x26), T(0x27),
-  T(0x28), T(0x29), T(0x2a), T(0x2b), T(0x2c), T(0x2d), T(0x2e), T(0x2f),
-  T(0x30), T(0x31), T(0x32), T(0x33), T(0x34), T(0x35), T(0x36), T(0x37),
-  T(0x38), T(0x39), T(0x3a), T(0x3b), T(0x3c), T(0x3d), T(0x3e), T(0x3f),
-  T(0x40), T(0x41), T(0x42), T(0x43), T(0x44), T(0x45), T(0x46), T(0x47),
-  T(0x48), T(0x49), T(0x4a), T(0x4b), T(0x4c), T(0x4d), T(0x4e), T(0x4f),
-  T(0x50), T(0x51), T(0x52), T(0x53), T(0x54), T(0x55), T(0x56), T(0x57),
-  T(0x58), T(0x59), T(0x5a), T(0x5b), T(0x5c), T(0x5d), T(0x5e), T(0x5f),
-  T(0x60), T(0x61), T(0x62), T(0x63), T(0x64), T(0x65), T(0x66), T(0x67),
-  T(0x68), T(0x69), T(0x6a), T(0x6b), T(0x6c), T(0x6d), T(0x6e), T(0x6f),
-  T(0x70), T(0x71), T(0x72), T(0x73), T(0x74), T(0x75), T(0x76), T(0x77),
-  T(0x78), T(0x79), T(0x7a), T(0x7b), T(0x7c), T(0x7d), T(0x7e), T(0x7f),
-  T(0x80), T(0x81), T(0x82), T(0x83), T(0x84), T(0x85), T(0x86), T(0x87),
-  T(0x88), T(0x89), T(0x8a), T(0x8b), T(0x8c), T(0x8d), T(0x8e), T(0x8f),
-  T(0x90), T(0x91), T(0x92), T(0x93), T(0x94), T(0x95), T(0x96), T(0x97),
-  T(0x98), T(0x99), T(0x9a), T(0x9b), T(0x9c), T(0x9d), T(0x9e), T(0x9f),
-  T(0xa0), T(0xa1), T(0xa2), T(0xa3), T(0xa4), T(0xa5), T(0xa6), T(0xa7),
-  T(0xa8), T(0xa9), T(0xaa), T(0xab), T(0xac), T(0xad), T(0xae), T(0xaf),
-  T(0xb0), T(0xb1), T(0xb2), T(0xb3), T(0xb4), T(0xb5), T(0xb6), T(0xb7),
-  T(0xb8), T(0xb9), T(0xba), T(0xbb), T(0xbc), T(0xbd), T(0xbe), T(0xbf),
-  T(0xc0), T(0xc1), T(0xc2), T(0xc3), T(0xc4), T(0xc5), T(0xc6), T(0xc7),
-  T(0xc8), T(0xc9), T(0xca), T(0xcb), T(0xcc), T(0xcd), T(0xce), T(0xcf),
-  T(0xd0), T(0xd1), T(0xd2), T(0xd3), T(0xd4), T(0xd5), T(0xd6), T(0xd7),
-  T(0xd8), T(0xd9), T(0xda), T(0xdb), T(0xdc), T(0xdd), T(0xde), T(0xdf),
-  T(0xe0), T(0xe1), T(0xe2), T(0xe3), T(0xe4), T(0xe5), T(0xe6), T(0xe7),
-  T(0xe8), T(0xe9), T(0xea), T(0xeb), T(0xec), T(0xed), T(0xee), T(0xef),
-  T(0xf0), T(0xf1), T(0xf2), T(0xf3), T(0xf4), T(0xf5), T(0xf6), T(0xf7),
-  T(0xf8), T(0xf9), T(0xfa), T(0xfb), T(0xfc), T(0xfd), T(0xfe), 0x0100 };
-#undef T
-
-void ARGBUnattenuateRow_C(const uint8* src_argb, uint8* dst_argb, int width) {
-  for (int i = 0; i < width; ++i) {
-    uint32 b = src_argb[0];
-    uint32 g = src_argb[1];
-    uint32 r = src_argb[2];
-    const uint32 a = src_argb[3];
-    if (a) {
-      const uint32 ia = fixed_invtbl8[a];  // 8.16 fixed point
-      b = (b * ia) >> 8;
-      g = (g * ia) >> 8;
-      r = (r * ia) >> 8;
-      // Clamping should not be necessary but is free in assembly.
-      if (b > 255) {
-        b = 255;
-      }
-      if (g > 255) {
-        g = 255;
-      }
-      if (r > 255) {
-        r = 255;
-      }
-    }
-    dst_argb[0] = b;
-    dst_argb[1] = g;
-    dst_argb[2] = r;
-    dst_argb[3] = a;
-    src_argb += 4;
-    dst_argb += 4;
-  }
-}
-
 void ComputeCumulativeSumRow_C(const uint8* row, int32* cumsum,
                                const int32* previous_cumsum, int width) {
   int32 row_sum[4] = {0, 0, 0, 0};
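The 8.16 fixed point table that the hunk above removes (it now lives earlier in the file, next to the other C reference rows) trades a per-channel divide for a table lookup, multiply, and shift: ia = 0x10000 / a, and (b * ia) >> 8 is roughly b * 256 / a. That can land one off the exact (b * 255 + a / 2) / a, as the "off by 1 on some values" comment warns, and reaches 256 when b == a, which is why the C code carries the clamp. A self-contained sketch:

#include <stdint.h>
#include <stdio.h>

int main(void) {
  const uint32_t a = 128;           // alpha
  const uint32_t ia = 0x10000 / a;  // 8.16 fixed point reciprocal: 512
  for (uint32_t b = 0; b <= a; b += 32) {  // premultiplied channel <= alpha
    uint32_t approx = (b * ia) >> 8;       // ~= b * 256 / a; hits 256 at b == a
    uint32_t clamped = approx > 255 ? 255 : approx;
    uint32_t exact = (b * 255 + a / 2) / a;
    printf("b=%3u approx=%3u clamped=%3u exact=%3u\n",
           b, approx, clamped, exact);
  }
  return 0;
}

With a = 128 this prints an off-by-one at b = 96 (192 vs 191) and an overshoot to 256 at b = 128 that the clamp folds back to 255, matching the exact result.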
@@ -2375,53 +2375,32 @@ void ARGBBlendRow_SSE2(const uint8* src_argb0, const uint8* src_argb1,
     "add       $1-4,%3                 \n"
     "jl        49f                     \n"
 
-    // 8 pixel loop.
+    // 4 pixel loop.
     ".p2align  2                       \n"
   "41:                                 \n"
     "movdqu    (%0),%%xmm3             \n"
+    "lea       0x10(%0),%0             \n"
     "movdqa    %%xmm3,%%xmm0           \n"
     "pxor      %%xmm4,%%xmm3           \n"
+    "movdqu    (%1),%%xmm2             \n"
     "psrlw     $0x8,%%xmm3             \n"
     "pshufhw   $0xf5,%%xmm3,%%xmm3     \n"
     "pshuflw   $0xf5,%%xmm3,%%xmm3     \n"
-    "movdqu    (%1),%%xmm2             \n"
     "pand      %%xmm6,%%xmm2           \n"
     "paddw     %%xmm7,%%xmm3           \n"
     "pmullw    %%xmm3,%%xmm2           \n"
     "movdqu    (%1),%%xmm1             \n"
+    "lea       0x10(%1),%1             \n"
     "psrlw     $0x8,%%xmm1             \n"
     "por       %%xmm4,%%xmm0           \n"
     "pmullw    %%xmm3,%%xmm1           \n"
-    "movdqu    0x10(%0),%%xmm3         \n"
-    "lea       0x20(%0),%0             \n"
     "psrlw     $0x8,%%xmm2             \n"
     "paddusb   %%xmm2,%%xmm0           \n"
     "pand      %%xmm5,%%xmm1           \n"
     "paddusb   %%xmm1,%%xmm0           \n"
     "sub       $0x4,%3                 \n"
     "movdqa    %%xmm0,(%2)             \n"
-    "jl        49f                     \n"
-    "movdqa    %%xmm3,%%xmm0           \n"
-    "pxor      %%xmm4,%%xmm3           \n"
-    "movdqu    0x10(%1),%%xmm2         \n"
-    "psrlw     $0x8,%%xmm3             \n"
-    "pshufhw   $0xf5,%%xmm3,%%xmm3     \n"
-    "pshuflw   $0xf5,%%xmm3,%%xmm3     \n"
-    "pand      %%xmm6,%%xmm2           \n"
-    "paddw     %%xmm7,%%xmm3           \n"
-    "pmullw    %%xmm3,%%xmm2           \n"
-    "movdqu    0x10(%1),%%xmm1         \n"
-    "lea       0x20(%1),%1             \n"
-    "psrlw     $0x8,%%xmm1             \n"
-    "por       %%xmm4,%%xmm0           \n"
-    "pmullw    %%xmm3,%%xmm1           \n"
-    "psrlw     $0x8,%%xmm2             \n"
-    "paddusb   %%xmm2,%%xmm0           \n"
-    "pand      %%xmm5,%%xmm1           \n"
-    "paddusb   %%xmm1,%%xmm0           \n"
-    "sub       $0x4,%3                 \n"
-    "movdqa    %%xmm0,0x10(%2)         \n"
-    "lea       0x20(%2),%2             \n"
+    "lea       0x10(%2),%2             \n"
     "jge       41b                     \n"
 
   "49:                                 \n"
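Two things are visible in this rewrite of ARGBBlendRow_SSE2. First, the bug fix: the old 8 pixel loop could take its mid-loop "jl 49f" after storing only the first 4 pixels, at a point where the source pointer had already been advanced past them but the background and destination pointers had not, so the remainder loop reran stale addresses; the single 4 pixel loop advances each pointer right after its load or store. Second, the per-channel math is the word-lane form of BLEND. A scalar model of one pixel of that lane math (a sketch, not part of the patch; names hypothetical):

#include <stdint.h>
#include <stdio.h>

static uint32_t BlendPixelSse2Model(uint32_t src, uint32_t dst) {
  const uint32_t na = 256 - (src >> 24);        // paddw xmm7: 256 - alpha
  const uint32_t rb = dst & 0x00FF00FF;         // pand xmm6: _r_b lanes
  const uint32_t ag = (dst >> 8) & 0x00FF00FF;  // psrlw 8: _a_g lanes
  const uint32_t scaled =
      (((rb * na) >> 8) & 0x00FF00FF) |         // pmullw, psrlw 8
      ((ag * na) & 0xFF00FF00);                 // pmullw, pand xmm5
  uint32_t out = src | 0xFF000000;              // por xmm4: alpha = 255
  uint32_t result = 0;
  for (int shift = 0; shift < 32; shift += 8) { // paddusb, byte by byte
    uint32_t sum = ((out >> shift) & 0xFF) + ((scaled >> shift) & 0xFF);
    result |= (sum > 255 ? 255u : sum) << shift;
  }
  return result;
}

int main(void) {
  // Premultiplied 50% grey at 50% alpha over opaque white: prints ffbfbfbf.
  printf("%08x\n", BlendPixelSse2Model(0x80404040, 0xFFFFFFFF));
  return 0;
}

Because each channel times (256 - alpha) fits in 16 bits, two channels can ride in one register exactly as the xmm word lanes do, which is what lets the C reference row and the SIMD row agree bit for bit.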
@@ -2531,49 +2510,30 @@ void ARGBBlendRow_SSSE3(const uint8* src_argb0, const uint8* src_argb1,
     "add       $1-4,%3                 \n"
     "jl        49f                     \n"
 
-    // 8 pixel loop.
+    // 4 pixel loop.
     ".p2align  2                       \n"
   "41:                                 \n"
     "movdqu    (%0),%%xmm3             \n"
+    "lea       0x10(%0),%0             \n"
     "movdqa    %%xmm3,%%xmm0           \n"
     "pxor      %%xmm4,%%xmm3           \n"
-    "pshufb    %4,%%xmm3               \n"
     "movdqu    (%1),%%xmm2             \n"
+    "pshufb    %4,%%xmm3               \n"
     "pand      %%xmm6,%%xmm2           \n"
     "paddw     %%xmm7,%%xmm3           \n"
     "pmullw    %%xmm3,%%xmm2           \n"
     "movdqu    (%1),%%xmm1             \n"
+    "lea       0x10(%1),%1             \n"
     "psrlw     $0x8,%%xmm1             \n"
     "por       %%xmm4,%%xmm0           \n"
     "pmullw    %%xmm3,%%xmm1           \n"
-    "movdqu    0x10(%0),%%xmm3         \n"
-    "lea       0x20(%0),%0             \n"
     "psrlw     $0x8,%%xmm2             \n"
     "paddusb   %%xmm2,%%xmm0           \n"
     "pand      %%xmm5,%%xmm1           \n"
     "paddusb   %%xmm1,%%xmm0           \n"
     "sub       $0x4,%3                 \n"
     "movdqa    %%xmm0,(%2)             \n"
-    "jl        49f                     \n"
-    "movdqa    %%xmm3,%%xmm0           \n"
-    "pxor      %%xmm4,%%xmm3           \n"
-    "movdqu    0x10(%1),%%xmm2         \n"
-    "pshufb    %4,%%xmm3               \n"
-    "pand      %%xmm6,%%xmm2           \n"
-    "paddw     %%xmm7,%%xmm3           \n"
-    "pmullw    %%xmm3,%%xmm2           \n"
-    "movdqu    0x10(%1),%%xmm1         \n"
-    "lea       0x20(%1),%1             \n"
-    "psrlw     $0x8,%%xmm1             \n"
-    "por       %%xmm4,%%xmm0           \n"
-    "pmullw    %%xmm3,%%xmm1           \n"
-    "psrlw     $0x8,%%xmm2             \n"
-    "paddusb   %%xmm2,%%xmm0           \n"
-    "pand      %%xmm5,%%xmm1           \n"
-    "paddusb   %%xmm1,%%xmm0           \n"
-    "sub       $0x4,%3                 \n"
-    "movdqa    %%xmm0,0x10(%2)         \n"
-    "lea       0x20(%2),%2             \n"
+    "lea       0x10(%2),%2             \n"
     "jge       41b                     \n"
 
   "49:                                 \n"
@@ -2474,54 +2474,31 @@ void ARGBBlendRow_SSE2(const uint8* src_argb0, const uint8* src_argb1,
     add        ecx, 1 - 4
     jl         convertloop4b
 
-    // 8 pixel loop.
-    align      4
+    // 4 pixel loop.
   convertloop4:
-    movdqu     xmm3, [eax]
+    movdqu     xmm3, [eax]      // src argb
+    lea        eax, [eax + 16]
     movdqa     xmm0, xmm3       // src argb
     pxor       xmm3, xmm4       // ~alpha
+    movdqu     xmm2, [esi]      // _r_b
     psrlw      xmm3, 8          // alpha
     pshufhw    xmm3, xmm3,0F5h  // 8 alpha words
     pshuflw    xmm3, xmm3,0F5h
-    movdqu     xmm2, [esi]      // _r_b
     pand       xmm2, xmm6       // _r_b
     paddw      xmm3, xmm7       // 256 - alpha
     pmullw     xmm2, xmm3       // _r_b * alpha
     movdqu     xmm1, [esi]      // _a_g
+    lea        esi, [esi + 16]
     psrlw      xmm1, 8          // _a_g
     por        xmm0, xmm4       // set alpha to 255
     pmullw     xmm1, xmm3       // _a_g * alpha
-    movdqu     xmm3, [eax + 16]
-    lea        eax, [eax + 32]
     psrlw      xmm2, 8          // _r_b convert to 8 bits again
     paddusb    xmm0, xmm2       // + src argb
     pand       xmm1, xmm5       // a_g_ convert to 8 bits again
    paddusb    xmm0, xmm1       // + src argb
     sub        ecx, 4
     movdqa     [edx], xmm0
-    jl         convertloop4b
-    movdqa     xmm0, xmm3       // src argb
-    pxor       xmm3, xmm4       // ~alpha
-    movdqu     xmm2, [esi + 16] // _r_b
-    psrlw      xmm3, 8          // alpha
-    pshufhw    xmm3, xmm3,0F5h  // 8 alpha words
-    pshuflw    xmm3, xmm3,0F5h
-    pand       xmm2, xmm6       // _r_b
-    paddw      xmm3, xmm7       // 256 - alpha
-    pmullw     xmm2, xmm3       // _r_b * alpha
-    movdqu     xmm1, [esi + 16] // _a_g
-    lea        esi, [esi + 32]
-    psrlw      xmm1, 8          // _a_g
-    por        xmm0, xmm4       // set alpha to 255
-    pmullw     xmm1, xmm3       // _a_g * alpha
-    psrlw      xmm2, 8          // _r_b convert to 8 bits again
-    paddusb    xmm0, xmm2       // + src argb
-    pand       xmm1, xmm5       // a_g_ convert to 8 bits again
-    paddusb    xmm0, xmm1       // + src argb
-    sub        ecx, 4
-    movdqa     [edx + 16], xmm0
-    lea        edx, [edx + 32]
+    lea        edx, [edx + 16]
    jge        convertloop4
 
   convertloop4b:
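Since the point of matching the C and SIMD rows is testability, a usage sketch: drive the patched ARGBBlendRow_C over a short row and compare the bytes against the SSE2/SSSE3 rows on the same inputs. Only the row-function signature shown in the diff is assumed; the typedef stands in for libyuv's basic types and the harness links against the file this patch modifies.

#include <stdint.h>
#include <stdio.h>

typedef uint8_t uint8;

// Declared in the patched source; link this harness against it.
void ARGBBlendRow_C(const uint8* src_argb0, const uint8* src_argb1,
                    uint8* dst_argb, int width);

int main(void) {
  // Three BGRA pixels: premultiplied foreground over opaque mid-grey,
  // exercising the 2 pixel main loop plus the odd-width tail.
  uint8 fg[12] = {0x40, 0x40, 0x40, 0x80,   // half-covered grey
                  0x00, 0x00, 0x00, 0x00,   // fully transparent
                  0x10, 0x20, 0x30, 0xFF};  // fully opaque
  uint8 bg[12], dst[12];
  for (int i = 0; i < 12; ++i) bg[i] = (i % 4 == 3) ? 0xFF : 0x80;
  ARGBBlendRow_C(fg, bg, dst, 3);
  for (int i = 0; i < 12; ++i) printf("%02x ", dst[i]);
  printf("\n");  // per BLEND: 80 80 80 ff  80 80 80 ff  10 20 30 ff
  return 0;
}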
@@ -2530,7 +2507,7 @@ void ARGBBlendRow_SSE2(const uint8* src_argb0, const uint8* src_argb1,
 
     // 1 pixel loop.
   convertloop1:
-    movd       xmm3, [eax]
+    movd       xmm3, [eax]      // src argb
     lea        eax, [eax + 4]
     movdqa     xmm0, xmm3       // src argb
     pxor       xmm3, xmm4       // ~alpha
@@ -2629,50 +2606,29 @@ void ARGBBlendRow_SSSE3(const uint8* src_argb0, const uint8* src_argb1,
     add        ecx, 1 - 4
     jl         convertloop4b
 
-    // 8 pixel loop.
-    align      4
+    // 4 pixel loop.
   convertloop4:
-    movdqu     xmm3, [eax]
+    movdqu     xmm3, [eax]      // src argb
+    lea        eax, [eax + 16]
     movdqa     xmm0, xmm3       // src argb
     pxor       xmm3, xmm4       // ~alpha
-    pshufb     xmm3, kShuffleAlpha // alpha
     movdqu     xmm2, [esi]      // _r_b
+    pshufb     xmm3, kShuffleAlpha // alpha
     pand       xmm2, xmm6       // _r_b
     paddw      xmm3, xmm7       // 256 - alpha
     pmullw     xmm2, xmm3       // _r_b * alpha
     movdqu     xmm1, [esi]      // _a_g
+    lea        esi, [esi + 16]
     psrlw      xmm1, 8          // _a_g
     por        xmm0, xmm4       // set alpha to 255
     pmullw     xmm1, xmm3       // _a_g * alpha
-    movdqu     xmm3, [eax + 16]
-    lea        eax, [eax + 32]
     psrlw      xmm2, 8          // _r_b convert to 8 bits again
     paddusb    xmm0, xmm2       // + src argb
     pand       xmm1, xmm5       // a_g_ convert to 8 bits again
     paddusb    xmm0, xmm1       // + src argb
     sub        ecx, 4
     movdqa     [edx], xmm0
-    jl         convertloop4b
-    movdqa     xmm0, xmm3       // src argb
-    pxor       xmm3, xmm4       // ~alpha
-    movdqu     xmm2, [esi + 16] // _r_b
-    pshufb     xmm3, kShuffleAlpha // alpha
-    pand       xmm2, xmm6       // _r_b
-    paddw      xmm3, xmm7       // 256 - alpha
-    pmullw     xmm2, xmm3       // _r_b * alpha
-    movdqu     xmm1, [esi + 16] // _a_g
-    lea        esi, [esi + 32]
-    psrlw      xmm1, 8          // _a_g
-    por        xmm0, xmm4       // set alpha to 255
-    pmullw     xmm1, xmm3       // _a_g * alpha
-    psrlw      xmm2, 8          // _r_b convert to 8 bits again
-    paddusb    xmm0, xmm2       // + src argb
-    pand       xmm1, xmm5       // a_g_ convert to 8 bits again
-    paddusb    xmm0, xmm1       // + src argb
-    sub        ecx, 4
-    movdqa     [edx + 16], xmm0
-    lea        edx, [edx + 32]
+    lea        edx, [edx + 16]
     jge        convertloop4
 
   convertloop4b:
@@ -2681,7 +2637,7 @@ void ARGBBlendRow_SSSE3(const uint8* src_argb0, const uint8* src_argb1,
 
     // 1 pixel loop.
   convertloop1:
-    movd       xmm3, [eax]
+    movd       xmm3, [eax]      // src argb
     lea        eax, [eax + 4]
     movdqa     xmm0, xmm3       // src argb
     pxor       xmm3, xmm4       // ~alpha