Mirror of https://chromium.googlesource.com/libyuv/libyuv
Switch to C-style casts for all source and includes.
BUG=303
TESTED=try
R=tpsiaki@google.com

Review URL: https://webrtc-codereview.appspot.com/6629004

git-svn-id: http://libyuv.googlecode.com/svn/trunk@952 16f28f9a-4ce2-e073-06de-1de4eb20be90
parent 0ba7b2394b
commit a1f5254a95
@@ -1,6 +1,6 @@
 Name: libyuv
 URL: http://code.google.com/p/libyuv/
-Version: 951
+Version: 952
 License: BSD
 License File: LICENSE

@@ -75,9 +75,13 @@ typedef signed char int8;
 #endif

 #ifndef ALIGNP
+#ifdef __cplusplus
 #define ALIGNP(p, t) \
     (reinterpret_cast<uint8*>(((reinterpret_cast<uintptr_t>(p) + \
     ((t) - 1)) & ~((t) - 1))))
+#else
+#define ALIGNP(p, t) ((uint8*)((((uintptr_t)(p) + ((t) - 1)) & ~((t) - 1))))
+#endif
 #endif

 #if !defined(LIBYUV_API)

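For context only (not part of the patch), a minimal standalone sketch of what the new C-style ALIGNP branch computes: it rounds a pointer up to the next multiple of a power-of-two boundary t. uint8_t stands in for libyuv's uint8 typedef so the snippet compiles on its own.

#include <stdint.h>
#include <stdio.h>

#define ALIGNP(p, t) ((uint8_t*)((((uintptr_t)(p) + ((t) - 1)) & ~((t) - 1))))

int main(void) {
  uint8_t buffer[64];
  /* Round the (possibly unaligned) start of buffer up to a 16-byte boundary. */
  uint8_t* aligned = ALIGNP(buffer, 16);
  printf("raw=%p aligned=%p\n", (void*)buffer, (void*)aligned);
  return 0;
}

The add-then-mask trick only holds when t is a power of two, which is how the macro is used in the headers above.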
@@ -22,18 +22,26 @@ extern "C" {

 #define IS_ALIGNED(p, a) (!((uintptr_t)(p) & ((a) - 1)))

 // TODO(fbarchard): Port to C.
+#ifdef __cplusplus
 #define align_buffer_64(var, size) \
   uint8* var; \
   uint8* var##_mem; \
   var##_mem = reinterpret_cast<uint8*>(malloc((size) + 63)); \
   var = reinterpret_cast<uint8*> \
       ((reinterpret_cast<intptr_t>(var##_mem) + 63) & ~63)
+#else
+#define align_buffer_64(var, size) \
+  uint8* var; \
+  uint8* var##_mem; \
+  var##_mem = (uint8*)(malloc((size) + 63)); \
+  var = (uint8*) (((intptr_t)(var##_mem) + 63) & ~63)
+#endif

 #define free_aligned_buffer_64(var) \
   free(var##_mem); \
   var = 0


 #if defined(__CLR_VER) || defined(COVERAGE_ENABLED) || \
     defined(TARGET_IPHONE_SIMULATOR)
 #define LIBYUV_DISABLE_X86

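As an illustration (again not part of the diff), this is how the C branch of align_buffer_64 / free_aligned_buffer_64 is typically used to get a scratch row whose start is 64-byte aligned. The typedef and the macro definitions are repeated from the header above so the sketch stands alone.

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

typedef uint8_t uint8;  /* stand-in for libyuv's uint8 typedef */

#define align_buffer_64(var, size) \
  uint8* var; \
  uint8* var##_mem; \
  var##_mem = (uint8*)(malloc((size) + 63)); \
  var = (uint8*) (((intptr_t)(var##_mem) + 63) & ~63)

#define free_aligned_buffer_64(var) \
  free(var##_mem); \
  var = 0

int main(void) {
  /* Declares both row and row_mem; row points at the first 64-byte
     aligned address inside the malloc'd block. */
  align_buffer_64(row, 1024);
  memset(row, 0, 1024);
  free_aligned_buffer_64(row);
  return 0;
}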
@@ -11,6 +11,6 @@
 #ifndef INCLUDE_LIBYUV_VERSION_H_  // NOLINT
 #define INCLUDE_LIBYUV_VERSION_H_

-#define LIBYUV_VERSION 951
+#define LIBYUV_VERSION 952

 #endif  // INCLUDE_LIBYUV_VERSION_H_ NOLINT

@@ -59,18 +59,18 @@ uint32 HashDjb2(const uint8* src, uint64 count, uint32 seed) {
 #endif

   const int kBlockSize = 1 << 15;  // 32768;
-  while (count >= static_cast<uint64>(kBlockSize)) {
+  while (count >= (uint64)(kBlockSize)) {
     seed = HashDjb2_SSE(src, kBlockSize, seed);
     src += kBlockSize;
     count -= kBlockSize;
   }
-  int remainder = static_cast<int>(count) & ~15;
+  int remainder = (int)(count) & ~15;
   if (remainder) {
     seed = HashDjb2_SSE(src, remainder, seed);
     src += remainder;
     count -= remainder;
   }
-  remainder = static_cast<int>(count) & 15;
+  remainder = (int)(count) & 15;
   if (remainder) {
     seed = HashDjb2_C(src, remainder, seed);
   }
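The loop above feeds the data to the SSE kernel in 32 KB blocks and lets the scalar kernel finish the last few bytes; that works because the hash is a running value threaded through as the seed. A standalone sketch of the scalar step, assuming the classic djb2 recurrence (hash = hash * 33 + byte) that the _C kernel is expected to compute; the function name here is a stand-in, not the library symbol.

#include <stdint.h>
#include <stddef.h>

/* Hypothetical stand-in for the scalar kernel: seeded djb2 over count bytes. */
static uint32_t HashDjb2Scalar(const uint8_t* src, size_t count, uint32_t seed) {
  uint32_t hash = seed;
  for (size_t i = 0; i < count; ++i) {
    hash = hash * 33u + src[i];  /* equivalently (hash << 5) + hash + src[i] */
  }
  return hash;
}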
@@ -168,7 +168,7 @@ LIBYUV_API
 double SumSquareErrorToPsnr(uint64 sse, uint64 count) {
   double psnr;
   if (sse > 0) {
-    double mse = static_cast<double>(count) / static_cast<double>(sse);
+    double mse = (double)(count) / (double)(sse);
     psnr = 10.0 * log10(255.0 * 255.0 * mse);
   } else {
     psnr = kMaxPsnr;  // Limit to prevent divide by 0

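A small worked example (illustrative only) of the formula used above: PSNR = 10 * log10(255^2 * count / sse). Note the local named mse actually holds count/sse, so the log argument is 255^2 divided by the mean squared error.

#include <math.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
  /* Suppose 1920x1080 pixels with total squared error 2,073,600,
     i.e. a mean squared error of exactly 1.0 per pixel. */
  uint64_t count = 1920ull * 1080ull;
  uint64_t sse = count;  /* MSE == 1.0 */
  double psnr = 10.0 * log10(255.0 * 255.0 * ((double)count / (double)sse));
  printf("PSNR = %.2f dB\n", psnr);  /* 10*log10(65025) ~= 48.13 dB */
  return 0;
}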
@@ -19,7 +19,7 @@ uint32 SumSquareError_C(const uint8* src_a, const uint8* src_b, int count) {
   uint32 sse = 0u;
   for (int i = 0; i < count; ++i) {
     int diff = src_a[i] - src_b[i];
-    sse += static_cast<uint32>(diff * diff);
+    sse += (uint32)(diff * diff);
   }
   return sse;
 }

@@ -368,7 +368,7 @@ int BGRAToARGB(const uint8* src_bgra, int src_stride_bgra,
                int width, int height) {
   return ARGBShuffle(src_bgra, src_stride_bgra,
                      dst_argb, dst_stride_argb,
-                     reinterpret_cast<const uint8*>(&kShuffleMaskBGRAToARGB),
+                     (const uint8*)(&kShuffleMaskBGRAToARGB),
                      width, height);
 }

@@ -379,7 +379,7 @@ int ABGRToARGB(const uint8* src_abgr, int src_stride_abgr,
                int width, int height) {
   return ARGBShuffle(src_abgr, src_stride_abgr,
                      dst_argb, dst_stride_argb,
-                     reinterpret_cast<const uint8*>(&kShuffleMaskABGRToARGB),
+                     (const uint8*)(&kShuffleMaskABGRToARGB),
                      width, height);
 }

@@ -390,7 +390,7 @@ int RGBAToARGB(const uint8* src_rgba, int src_stride_rgba,
                int width, int height) {
   return ARGBShuffle(src_rgba, src_stride_rgba,
                      dst_argb, dst_stride_argb,
-                     reinterpret_cast<const uint8*>(&kShuffleMaskRGBAToARGB),
+                     (const uint8*)(&kShuffleMaskRGBAToARGB),
                      width, height);
 }

@@ -717,7 +717,7 @@ int ARGBToRGBA(const uint8* src_argb, int src_stride_argb,
                int width, int height) {
   return ARGBShuffle(src_argb, src_stride_argb,
                      dst_rgba, dst_stride_rgba,
-                     reinterpret_cast<const uint8*>(&kShuffleMaskARGBToRGBA),
+                     (const uint8*)(&kShuffleMaskARGBToRGBA),
                      width, height);
 }

@@ -35,7 +35,7 @@ static void JpegCopyI420(void* opaque,
                          const uint8* const* data,
                          const int* strides,
                          int rows) {
-  I420Buffers* dest = static_cast<I420Buffers*>(opaque);
+  I420Buffers* dest = (I420Buffers*)(opaque);
   I420Copy(data[0], strides[0],
            data[1], strides[1],
            data[2], strides[2],
@@ -53,7 +53,7 @@ static void JpegI422ToI420(void* opaque,
                            const uint8* const* data,
                            const int* strides,
                            int rows) {
-  I420Buffers* dest = static_cast<I420Buffers*>(opaque);
+  I420Buffers* dest = (I420Buffers*)(opaque);
   I422ToI420(data[0], strides[0],
              data[1], strides[1],
              data[2], strides[2],
@@ -71,7 +71,7 @@ static void JpegI444ToI420(void* opaque,
                            const uint8* const* data,
                            const int* strides,
                            int rows) {
-  I420Buffers* dest = static_cast<I420Buffers*>(opaque);
+  I420Buffers* dest = (I420Buffers*)(opaque);
   I444ToI420(data[0], strides[0],
              data[1], strides[1],
              data[2], strides[2],
@@ -89,7 +89,7 @@ static void JpegI411ToI420(void* opaque,
                            const uint8* const* data,
                            const int* strides,
                            int rows) {
-  I420Buffers* dest = static_cast<I420Buffers*>(opaque);
+  I420Buffers* dest = (I420Buffers*)(opaque);
   I411ToI420(data[0], strides[0],
              data[1], strides[1],
              data[2], strides[2],
@@ -107,7 +107,7 @@ static void JpegI400ToI420(void* opaque,
                            const uint8* const* data,
                            const int* strides,
                            int rows) {
-  I420Buffers* dest = static_cast<I420Buffers*>(opaque);
+  I420Buffers* dest = (I420Buffers*)(opaque);
   I400ToI420(data[0], strides[0],
              dest->y, dest->y_stride,
              dest->u, dest->u_stride,
@@ -233,7 +233,7 @@ static void JpegI420ToARGB(void* opaque,
                            const uint8* const* data,
                            const int* strides,
                            int rows) {
-  ARGBBuffers* dest = static_cast<ARGBBuffers*>(opaque);
+  ARGBBuffers* dest = (ARGBBuffers*)(opaque);
   I420ToARGB(data[0], strides[0],
              data[1], strides[1],
              data[2], strides[2],
@@ -247,7 +247,7 @@ static void JpegI422ToARGB(void* opaque,
                            const uint8* const* data,
                            const int* strides,
                            int rows) {
-  ARGBBuffers* dest = static_cast<ARGBBuffers*>(opaque);
+  ARGBBuffers* dest = (ARGBBuffers*)(opaque);
   I422ToARGB(data[0], strides[0],
              data[1], strides[1],
              data[2], strides[2],
@@ -261,7 +261,7 @@ static void JpegI444ToARGB(void* opaque,
                            const uint8* const* data,
                            const int* strides,
                            int rows) {
-  ARGBBuffers* dest = static_cast<ARGBBuffers*>(opaque);
+  ARGBBuffers* dest = (ARGBBuffers*)(opaque);
   I444ToARGB(data[0], strides[0],
              data[1], strides[1],
              data[2], strides[2],
@@ -275,7 +275,7 @@ static void JpegI411ToARGB(void* opaque,
                            const uint8* const* data,
                            const int* strides,
                            int rows) {
-  ARGBBuffers* dest = static_cast<ARGBBuffers*>(opaque);
+  ARGBBuffers* dest = (ARGBBuffers*)(opaque);
   I411ToARGB(data[0], strides[0],
              data[1], strides[1],
              data[2], strides[2],
@@ -289,7 +289,7 @@ static void JpegI400ToARGB(void* opaque,
                            const uint8* const* data,
                            const int* strides,
                            int rows) {
-  ARGBBuffers* dest = static_cast<ARGBBuffers*>(opaque);
+  ARGBBuffers* dest = (ARGBBuffers*)(opaque);
   I400ToARGB(data[0], strides[0],
              dest->argb, dest->argb_stride,
              dest->w, rows);

@@ -22,10 +22,10 @@ extern "C" {

 // generate a selector mask useful for pshufb
 static uint32 GenerateSelector(int select0, int select1) {
-  return static_cast<uint32>(select0) |
-         static_cast<uint32>((select1 + 4) << 8) |
-         static_cast<uint32>((select0 + 8) << 16) |
-         static_cast<uint32>((select1 + 12) << 24);
+  return (uint32)(select0) |
+         (uint32)((select1 + 4) << 8) |
+         (uint32)((select0 + 8) << 16) |
+         (uint32)((select1 + 12) << 24);
 }

 static int MakeSelectors(const int blue_index,

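Not part of the patch: a standalone sketch of the packing done by GenerateSelector. The four packed bytes index channel select0 of pixel 0, channel select1 of pixel 1, channel select0 of pixel 2 and channel select1 of pixel 3 (4 bytes per ARGB pixel), which is then used to build a pshufb shuffle mask per the comment above. The function name here is a stand-in written against uint32_t so the snippet is self-contained.

#include <stdint.h>
#include <stdio.h>

static uint32_t MakeSelector(int select0, int select1) {
  return (uint32_t)(select0) |
         (uint32_t)((select1 + 4) << 8) |
         (uint32_t)((select0 + 8) << 16) |
         (uint32_t)((select1 + 12) << 24);
}

int main(void) {
  /* Select channel 0 and channel 2 of successive 4-byte pixels:
     packed byte indices 0, 6, 8, 14 -> 0x0e080600. */
  printf("0x%08x\n", MakeSelector(0, 2));
  return 0;
}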
@@ -81,7 +81,7 @@ bool MJpegDecoder::LoadFrame(const uint8* src, size_t src_len) {
   }

   buf_.data = src;
-  buf_.len = static_cast<int>(src_len);
+  buf_.len = (int)(src_len);
   buf_vec_.pos = 0;
   decompress_struct_->client_data = &buf_vec_;
 #ifdef HAVE_SETJMP
@@ -391,7 +391,7 @@ void MJpegDecoder::init_source(j_decompress_ptr cinfo) {
 }

 boolean MJpegDecoder::fill_input_buffer(j_decompress_ptr cinfo) {
-  BufferVector* buf_vec = static_cast<BufferVector*>(cinfo->client_data);
+  BufferVector* buf_vec = (BufferVector*)(cinfo->client_data);
   if (buf_vec->pos >= buf_vec->len) {
     assert(0 && "No more data");
     // ERROR: No more data
@@ -427,7 +427,7 @@ void MJpegDecoder::ErrorHandler(j_common_ptr cinfo) {
   // ERROR: Error in jpeglib: buf
 #endif

-  SetJmpErrorMgr* mgr = reinterpret_cast<SetJmpErrorMgr*>(cinfo->err);
+  SetJmpErrorMgr* mgr = (SetJmpErrorMgr*)(cinfo->err);
   // This rewinds the call stack to the point of the corresponding setjmp()
   // and causes it to return (for a second time) with value 1.
   longjmp(mgr->setjmp_buffer, 1);
@@ -507,7 +507,7 @@ void MJpegDecoder::SetScanlinePointers(uint8** data) {
 }

 inline bool MJpegDecoder::DecodeImcuRow() {
-  return static_cast<unsigned int>(GetImageScanlinesPerImcuRow()) ==
+  return (unsigned int)(GetImageScanlinesPerImcuRow()) ==
       jpeg_read_raw_data(decompress_struct_,
                          scanlines_,
                          GetImageScanlinesPerImcuRow());

@@ -1399,7 +1399,7 @@ int RGBColorMatrix(uint8* dst_argb, int dst_stride_argb,
   matrix_argb[15] = 64;  // 1.0

   uint8* dst = dst_argb + dst_y * dst_stride_argb + dst_x * 4;
-  return ARGBColorMatrix(const_cast<const uint8*>(dst), dst_stride_argb,
+  return ARGBColorMatrix((const uint8*)(dst), dst_stride_argb,
                          dst, dst_stride_argb,
                          &matrix_argb[0], width, height);
 }

@@ -377,8 +377,8 @@ static void TransposeWx8_SSSE3(const uint8* src, int src_stride,
   : "+r"(src),    // %0
     "+r"(dst),    // %1
     "+r"(width)   // %2
-  : "r"(static_cast<intptr_t>(src_stride)),  // %3
-    "r"(static_cast<intptr_t>(dst_stride))   // %4
+  : "r"((intptr_t)(src_stride)),  // %3
+    "r"((intptr_t)(dst_stride))   // %4
   : "memory", "cc"
 #if defined(__SSE2__)
     , "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7"
@@ -648,8 +648,8 @@ static void TransposeWx8_FAST_SSSE3(const uint8* src, int src_stride,
   : "+r"(src),    // %0
     "+r"(dst),    // %1
     "+r"(width)   // %2
-  : "r"(static_cast<intptr_t>(src_stride)),  // %3
-    "r"(static_cast<intptr_t>(dst_stride))   // %4
+  : "r"((intptr_t)(src_stride)),  // %3
+    "r"((intptr_t)(dst_stride))   // %4
   : "memory", "cc",
     "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7",
     "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15"
@@ -758,9 +758,9 @@ static void TransposeUVWx8_SSE2(const uint8* src, int src_stride,
     "+r"(dst_a),  // %1
     "+r"(dst_b),  // %2
     "+r"(w)       // %3
-  : "r"(static_cast<intptr_t>(src_stride)),    // %4
-    "r"(static_cast<intptr_t>(dst_stride_a)),  // %5
-    "r"(static_cast<intptr_t>(dst_stride_b))   // %6
+  : "r"((intptr_t)(src_stride)),    // %4
+    "r"((intptr_t)(dst_stride_a)),  // %5
+    "r"((intptr_t)(dst_stride_b))   // %6
   : "memory", "cc",
     "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7",
     "xmm8", "xmm9"

@@ -33,7 +33,7 @@ static __inline int32 clamp255(int32 v) {

 static __inline uint32 Clamp(int32 val) {
   int v = clamp0(val);
-  return static_cast<uint32>(clamp255(v));
+  return (uint32)(clamp255(v));
 }

 static __inline uint32 Abs(int32 v) {
@@ -51,7 +51,7 @@ static __inline int32 clamp255(int32 v) {

 static __inline uint32 Clamp(int32 val) {
   int v = clamp0(val);
-  return static_cast<uint32>(clamp255(v));
+  return (uint32)(clamp255(v));
 }

 static __inline uint32 Abs(int32 v) {
@@ -60,7 +60,7 @@ static __inline uint32 Abs(int32 v) {
 #endif  // USE_BRANCHLESS

 #ifdef LIBYUV_LITTLE_ENDIAN
-#define WRITEWORD(p, v) *reinterpret_cast<uint32*>(p) = v
+#define WRITEWORD(p, v) *(uint32*)(p) = v
 #else
 static inline void WRITEWORD(uint8* p, uint32 v) {
   p[0] = (uint8)(v & 255);
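Illustrative only: the Clamp helper above saturates a signed intermediate into the 0-255 byte range via clamp0/clamp255. A simplified stand-in (plain comparisons rather than libyuv's branchless forms) behaves like this:

#include <stdint.h>

/* Simplified stand-in for clamp0/clamp255/Clamp; the branchless variants in
   the file compute the same result without conditionals. */
static inline uint32_t ClampToByte(int32_t val) {
  if (val < 0) return 0u;
  if (val > 255) return 255u;
  return (uint32_t)val;
}

/* Example: a YUV-to-RGB intermediate of 300 stores as 255, and -12 stores as 0. */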
@ -187,7 +187,7 @@ void ARGBToRGB565Row_C(const uint8* src_argb, uint8* dst_rgb, int width) {
|
||||
uint8 b0 = src_argb[0] >> 3;
|
||||
uint8 g0 = src_argb[1] >> 2;
|
||||
uint8 r0 = src_argb[2] >> 3;
|
||||
*reinterpret_cast<uint16*>(dst_rgb) = b0 | (g0 << 5) | (r0 << 11);
|
||||
*(uint16*)(dst_rgb) = b0 | (g0 << 5) | (r0 << 11);
|
||||
}
|
||||
}
|
||||
|
||||
@ -201,7 +201,7 @@ void ARGBToARGB1555Row_C(const uint8* src_argb, uint8* dst_rgb, int width) {
|
||||
uint8 g1 = src_argb[5] >> 3;
|
||||
uint8 r1 = src_argb[6] >> 3;
|
||||
uint8 a1 = src_argb[7] >> 7;
|
||||
*reinterpret_cast<uint32*>(dst_rgb) =
|
||||
*(uint32*)(dst_rgb) =
|
||||
b0 | (g0 << 5) | (r0 << 10) | (a0 << 15) |
|
||||
(b1 << 16) | (g1 << 21) | (r1 << 26) | (a1 << 31);
|
||||
dst_rgb += 4;
|
||||
@ -212,7 +212,7 @@ void ARGBToARGB1555Row_C(const uint8* src_argb, uint8* dst_rgb, int width) {
|
||||
uint8 g0 = src_argb[1] >> 3;
|
||||
uint8 r0 = src_argb[2] >> 3;
|
||||
uint8 a0 = src_argb[3] >> 7;
|
||||
*reinterpret_cast<uint16*>(dst_rgb) =
|
||||
*(uint16*)(dst_rgb) =
|
||||
b0 | (g0 << 5) | (r0 << 10) | (a0 << 15);
|
||||
}
|
||||
}
|
||||
@ -227,7 +227,7 @@ void ARGBToARGB4444Row_C(const uint8* src_argb, uint8* dst_rgb, int width) {
|
||||
uint8 g1 = src_argb[5] >> 4;
|
||||
uint8 r1 = src_argb[6] >> 4;
|
||||
uint8 a1 = src_argb[7] >> 4;
|
||||
*reinterpret_cast<uint32*>(dst_rgb) =
|
||||
*(uint32*)(dst_rgb) =
|
||||
b0 | (g0 << 4) | (r0 << 8) | (a0 << 12) |
|
||||
(b1 << 16) | (g1 << 20) | (r1 << 24) | (a1 << 28);
|
||||
dst_rgb += 4;
|
||||
@ -238,7 +238,7 @@ void ARGBToARGB4444Row_C(const uint8* src_argb, uint8* dst_rgb, int width) {
|
||||
uint8 g0 = src_argb[1] >> 4;
|
||||
uint8 r0 = src_argb[2] >> 4;
|
||||
uint8 a0 = src_argb[3] >> 4;
|
||||
*reinterpret_cast<uint16*>(dst_rgb) =
|
||||
*(uint16*)(dst_rgb) =
|
||||
b0 | (g0 << 4) | (r0 << 8) | (a0 << 12);
|
||||
}
|
||||
}
|
||||
@ -829,7 +829,7 @@ void SobelXRow_C(const uint8* src_y0, const uint8* src_y1, const uint8* src_y2,
|
||||
int b_diff = b - b_sub;
|
||||
int c_diff = c - c_sub;
|
||||
int sobel = Abs(a_diff + b_diff * 2 + c_diff);
|
||||
dst_sobelx[i] = static_cast<uint8>(clamp255(sobel));
|
||||
dst_sobelx[i] = (uint8)(clamp255(sobel));
|
||||
}
|
||||
}
|
||||
|
||||
@ -846,7 +846,7 @@ void SobelYRow_C(const uint8* src_y0, const uint8* src_y1,
|
||||
int b_diff = b - b_sub;
|
||||
int c_diff = c - c_sub;
|
||||
int sobel = Abs(a_diff + b_diff * 2 + c_diff);
|
||||
dst_sobely[i] = static_cast<uint8>(clamp255(sobel));
|
||||
dst_sobely[i] = (uint8)(clamp255(sobel));
|
||||
}
|
||||
}
|
||||
|
||||
@ -856,10 +856,10 @@ void SobelRow_C(const uint8* src_sobelx, const uint8* src_sobely,
|
||||
int r = src_sobelx[i];
|
||||
int b = src_sobely[i];
|
||||
int s = clamp255(r + b);
|
||||
dst_argb[0] = static_cast<uint8>(s);
|
||||
dst_argb[1] = static_cast<uint8>(s);
|
||||
dst_argb[2] = static_cast<uint8>(s);
|
||||
dst_argb[3] = static_cast<uint8>(255u);
|
||||
dst_argb[0] = (uint8)(s);
|
||||
dst_argb[1] = (uint8)(s);
|
||||
dst_argb[2] = (uint8)(s);
|
||||
dst_argb[3] = (uint8)(255u);
|
||||
dst_argb += 4;
|
||||
}
|
||||
}
|
||||
@ -870,7 +870,7 @@ void SobelToPlaneRow_C(const uint8* src_sobelx, const uint8* src_sobely,
|
||||
int r = src_sobelx[i];
|
||||
int b = src_sobely[i];
|
||||
int s = clamp255(r + b);
|
||||
dst_y[i] = static_cast<uint8>(s);
|
||||
dst_y[i] = (uint8)(s);
|
||||
}
|
||||
}
|
||||
|
||||
@ -880,10 +880,10 @@ void SobelXYRow_C(const uint8* src_sobelx, const uint8* src_sobely,
|
||||
int r = src_sobelx[i];
|
||||
int b = src_sobely[i];
|
||||
int g = clamp255(r + b);
|
||||
dst_argb[0] = static_cast<uint8>(b);
|
||||
dst_argb[1] = static_cast<uint8>(g);
|
||||
dst_argb[2] = static_cast<uint8>(r);
|
||||
dst_argb[3] = static_cast<uint8>(255u);
|
||||
dst_argb[0] = (uint8)(b);
|
||||
dst_argb[1] = (uint8)(g);
|
||||
dst_argb[2] = (uint8)(r);
|
||||
dst_argb[3] = (uint8)(255u);
|
||||
dst_argb += 4;
|
||||
}
|
||||
}
|
||||
@ -901,15 +901,15 @@ void I400ToARGBRow_C(const uint8* src_y, uint8* dst_argb, int width) {
|
||||
|
||||
// C reference code that mimics the YUV assembly.
|
||||
|
||||
#define YG 74 /* static_cast<int8>(1.164 * 64 + 0.5) */
|
||||
#define YG 74 /* (int8)(1.164 * 64 + 0.5) */
|
||||
|
||||
#define UB 127 /* min(63,static_cast<int8>(2.018 * 64)) */
|
||||
#define UG -25 /* static_cast<int8>(-0.391 * 64 - 0.5) */
|
||||
#define UB 127 /* min(63,(int8)(2.018 * 64)) */
|
||||
#define UG -25 /* (int8)(-0.391 * 64 - 0.5) */
|
||||
#define UR 0
|
||||
|
||||
#define VB 0
|
||||
#define VG -52 /* static_cast<int8>(-0.813 * 64 - 0.5) */
|
||||
#define VR 102 /* static_cast<int8>(1.596 * 64 + 0.5) */
|
||||
#define VG -52 /* (int8)(-0.813 * 64 - 0.5) */
|
||||
#define VR 102 /* (int8)(1.596 * 64 + 0.5) */
|
||||
|
||||
// Bias
|
||||
#define BB UB * 128 + VB * 128
|
||||
@ -918,10 +918,10 @@ void I400ToARGBRow_C(const uint8* src_y, uint8* dst_argb, int width) {
|
||||
|
||||
static __inline void YuvPixel(uint8 y, uint8 u, uint8 v,
|
||||
uint8* b, uint8* g, uint8* r) {
|
||||
int32 y1 = (static_cast<int32>(y) - 16) * YG;
|
||||
*b = Clamp(static_cast<int32>((u * UB + v * VB) - (BB) + y1) >> 6);
|
||||
*g = Clamp(static_cast<int32>((u * UG + v * VG) - (BG) + y1) >> 6);
|
||||
*r = Clamp(static_cast<int32>((u * UR + v * VR) - (BR) + y1) >> 6);
|
||||
int32 y1 = ((int32)(y) - 16) * YG;
|
||||
*b = Clamp((int32)((u * UB + v * VB) - (BB) + y1) >> 6);
|
||||
*g = Clamp((int32)((u * UG + v * VG) - (BG) + y1) >> 6);
|
||||
*r = Clamp((int32)((u * UR + v * VR) - (BR) + y1) >> 6);
|
||||
}
|
||||
|
||||
#if !defined(LIBYUV_DISABLE_NEON) && \
|
||||
@ -1054,7 +1054,7 @@ void I422ToARGB4444Row_C(const uint8* src_y,
|
||||
b1 = b1 >> 4;
|
||||
g1 = g1 >> 4;
|
||||
r1 = r1 >> 4;
|
||||
*reinterpret_cast<uint32*>(dst_argb4444) = b0 | (g0 << 4) | (r0 << 8) |
|
||||
*(uint32*)(dst_argb4444) = b0 | (g0 << 4) | (r0 << 8) |
|
||||
(b1 << 16) | (g1 << 20) | (r1 << 24) | 0xf000f000;
|
||||
src_y += 2;
|
||||
src_u += 1;
|
||||
@ -1066,7 +1066,7 @@ void I422ToARGB4444Row_C(const uint8* src_y,
|
||||
b0 = b0 >> 4;
|
||||
g0 = g0 >> 4;
|
||||
r0 = r0 >> 4;
|
||||
*reinterpret_cast<uint16*>(dst_argb4444) = b0 | (g0 << 4) | (r0 << 8) |
|
||||
*(uint16*)(dst_argb4444) = b0 | (g0 << 4) | (r0 << 8) |
|
||||
0xf000;
|
||||
}
|
||||
}
|
||||
@ -1091,7 +1091,7 @@ void I422ToARGB1555Row_C(const uint8* src_y,
|
||||
b1 = b1 >> 3;
|
||||
g1 = g1 >> 3;
|
||||
r1 = r1 >> 3;
|
||||
*reinterpret_cast<uint32*>(dst_argb1555) = b0 | (g0 << 5) | (r0 << 10) |
|
||||
*(uint32*)(dst_argb1555) = b0 | (g0 << 5) | (r0 << 10) |
|
||||
(b1 << 16) | (g1 << 21) | (r1 << 26) | 0x80008000;
|
||||
src_y += 2;
|
||||
src_u += 1;
|
||||
@ -1103,7 +1103,7 @@ void I422ToARGB1555Row_C(const uint8* src_y,
|
||||
b0 = b0 >> 3;
|
||||
g0 = g0 >> 3;
|
||||
r0 = r0 >> 3;
|
||||
*reinterpret_cast<uint16*>(dst_argb1555) = b0 | (g0 << 5) | (r0 << 10) |
|
||||
*(uint16*)(dst_argb1555) = b0 | (g0 << 5) | (r0 << 10) |
|
||||
0x8000;
|
||||
}
|
||||
}
|
||||
@ -1128,7 +1128,7 @@ void I422ToRGB565Row_C(const uint8* src_y,
|
||||
b1 = b1 >> 3;
|
||||
g1 = g1 >> 2;
|
||||
r1 = r1 >> 3;
|
||||
*reinterpret_cast<uint32*>(dst_rgb565) = b0 | (g0 << 5) | (r0 << 11) |
|
||||
*(uint32*)(dst_rgb565) = b0 | (g0 << 5) | (r0 << 11) |
|
||||
(b1 << 16) | (g1 << 21) | (r1 << 27);
|
||||
src_y += 2;
|
||||
src_u += 1;
|
||||
@ -1140,7 +1140,7 @@ void I422ToRGB565Row_C(const uint8* src_y,
|
||||
b0 = b0 >> 3;
|
||||
g0 = g0 >> 2;
|
||||
r0 = r0 >> 3;
|
||||
*reinterpret_cast<uint16*>(dst_rgb565) = b0 | (g0 << 5) | (r0 << 11);
|
||||
*(uint16*)(dst_rgb565) = b0 | (g0 << 5) | (r0 << 11);
|
||||
}
|
||||
}
|
||||
|
||||
@ -1249,7 +1249,7 @@ void NV12ToRGB565Row_C(const uint8* src_y,
|
||||
b1 = b1 >> 3;
|
||||
g1 = g1 >> 2;
|
||||
r1 = r1 >> 3;
|
||||
*reinterpret_cast<uint32*>(dst_rgb565) = b0 | (g0 << 5) | (r0 << 11) |
|
||||
*(uint32*)(dst_rgb565) = b0 | (g0 << 5) | (r0 << 11) |
|
||||
(b1 << 16) | (g1 << 21) | (r1 << 27);
|
||||
src_y += 2;
|
||||
usrc_v += 2;
|
||||
@ -1260,7 +1260,7 @@ void NV12ToRGB565Row_C(const uint8* src_y,
|
||||
b0 = b0 >> 3;
|
||||
g0 = g0 >> 2;
|
||||
r0 = r0 >> 3;
|
||||
*reinterpret_cast<uint16*>(dst_rgb565) = b0 | (g0 << 5) | (r0 << 11);
|
||||
*(uint16*)(dst_rgb565) = b0 | (g0 << 5) | (r0 << 11);
|
||||
}
|
||||
}
|
||||
|
||||
@ -1283,7 +1283,7 @@ void NV21ToRGB565Row_C(const uint8* src_y,
|
||||
b1 = b1 >> 3;
|
||||
g1 = g1 >> 2;
|
||||
r1 = r1 >> 3;
|
||||
*reinterpret_cast<uint32*>(dst_rgb565) = b0 | (g0 << 5) | (r0 << 11) |
|
||||
*(uint32*)(dst_rgb565) = b0 | (g0 << 5) | (r0 << 11) |
|
||||
(b1 << 16) | (g1 << 21) | (r1 << 27);
|
||||
src_y += 2;
|
||||
vsrc_u += 2;
|
||||
@ -1294,7 +1294,7 @@ void NV21ToRGB565Row_C(const uint8* src_y,
|
||||
b0 = b0 >> 3;
|
||||
g0 = g0 >> 2;
|
||||
r0 = r0 >> 3;
|
||||
*reinterpret_cast<uint16*>(dst_rgb565) = b0 | (g0 << 5) | (r0 << 11);
|
||||
*(uint16*)(dst_rgb565) = b0 | (g0 << 5) | (r0 << 11);
|
||||
}
|
||||
}
|
||||
|
||||
@ -1456,8 +1456,8 @@ void MirrorUVRow_C(const uint8* src_uv, uint8* dst_u, uint8* dst_v, int width) {
|
||||
}
|
||||
|
||||
void ARGBMirrorRow_C(const uint8* src, uint8* dst, int width) {
|
||||
const uint32* src32 = reinterpret_cast<const uint32*>(src);
|
||||
uint32* dst32 = reinterpret_cast<uint32*>(dst);
|
||||
const uint32* src32 = (const uint32*)(src);
|
||||
uint32* dst32 = (uint32*)(dst);
|
||||
src32 += width - 1;
|
||||
for (int x = 0; x < width - 1; x += 2) {
|
||||
dst32[x] = src32[0];
|
||||
@ -1516,7 +1516,7 @@ void SetRow_C(uint8* dst, uint32 v8, int count) {
|
||||
void ARGBSetRows_C(uint8* dst, uint32 v32, int width,
|
||||
int dst_stride, int height) {
|
||||
for (int y = 0; y < height; ++y) {
|
||||
uint32* d = reinterpret_cast<uint32*>(dst);
|
||||
uint32* d = (uint32*)(dst);
|
||||
for (int x = 0; x < width; ++x) {
|
||||
d[x] = v32;
|
||||
}
|
||||
@ -1773,10 +1773,10 @@ void CumulativeSumToAverageRow_C(const int32* tl, const int32* bl,
|
||||
int w, int area, uint8* dst, int count) {
|
||||
float ooa = 1.0f / area;
|
||||
for (int i = 0; i < count; ++i) {
|
||||
dst[0] = static_cast<uint8>((bl[w + 0] + tl[0] - bl[0] - tl[w + 0]) * ooa);
|
||||
dst[1] = static_cast<uint8>((bl[w + 1] + tl[1] - bl[1] - tl[w + 1]) * ooa);
|
||||
dst[2] = static_cast<uint8>((bl[w + 2] + tl[2] - bl[2] - tl[w + 2]) * ooa);
|
||||
dst[3] = static_cast<uint8>((bl[w + 3] + tl[3] - bl[3] - tl[w + 3]) * ooa);
|
||||
dst[0] = (uint8)((bl[w + 0] + tl[0] - bl[0] - tl[w + 0]) * ooa);
|
||||
dst[1] = (uint8)((bl[w + 1] + tl[1] - bl[1] - tl[w + 1]) * ooa);
|
||||
dst[2] = (uint8)((bl[w + 2] + tl[2] - bl[2] - tl[w + 2]) * ooa);
|
||||
dst[3] = (uint8)((bl[w + 3] + tl[3] - bl[3] - tl[w + 3]) * ooa);
|
||||
dst += 4;
|
||||
tl += 4;
|
||||
bl += 4;
|
||||
@ -1792,10 +1792,10 @@ void ARGBAffineRow_C(const uint8* src_argb, int src_argb_stride,
|
||||
uv[0] = uv_dudv[0];
|
||||
uv[1] = uv_dudv[1];
|
||||
for (int i = 0; i < width; ++i) {
|
||||
int x = static_cast<int>(uv[0]);
|
||||
int y = static_cast<int>(uv[1]);
|
||||
*reinterpret_cast<uint32*>(dst_argb) =
|
||||
*reinterpret_cast<const uint32*>(src_argb + y * src_argb_stride +
|
||||
int x = (int)(uv[0]);
|
||||
int y = (int)(uv[1]);
|
||||
*(uint32*)(dst_argb) =
|
||||
*(const uint32*)(src_argb + y * src_argb_stride +
|
||||
x * 4);
|
||||
dst_argb += 4;
|
||||
uv[0] += uv_dudv[2];
|
||||
@ -1820,7 +1820,7 @@ void InterpolateRow_C(uint8* dst_ptr, const uint8* src_ptr,
|
||||
return;
|
||||
}
|
||||
if (source_y_fraction == 128) {
|
||||
HalfRow_C(src_ptr, static_cast<int>(src_stride), dst_ptr, width);
|
||||
HalfRow_C(src_ptr, (int)(src_stride), dst_ptr, width);
|
||||
return;
|
||||
}
|
||||
int y1_fraction = source_y_fraction;
|
||||
@ -2060,10 +2060,10 @@ void ARGBPolynomialRow_C(const uint8* src_argb,
|
||||
uint8* dst_argb, const float* poly,
|
||||
int width) {
|
||||
for (int i = 0; i < width; ++i) {
|
||||
float b = static_cast<float>(src_argb[0]);
|
||||
float g = static_cast<float>(src_argb[1]);
|
||||
float r = static_cast<float>(src_argb[2]);
|
||||
float a = static_cast<float>(src_argb[3]);
|
||||
float b = (float)(src_argb[0]);
|
||||
float g = (float)(src_argb[1]);
|
||||
float r = (float)(src_argb[2]);
|
||||
float a = (float)(src_argb[3]);
|
||||
float b2 = b * b;
|
||||
float g2 = g * g;
|
||||
float r2 = r * r;
|
||||
@ -2085,10 +2085,10 @@ void ARGBPolynomialRow_C(const uint8* src_argb,
|
||||
dr += poly[14] * r3;
|
||||
da += poly[15] * a3;
|
||||
|
||||
dst_argb[0] = Clamp(static_cast<int32>(db));
|
||||
dst_argb[1] = Clamp(static_cast<int32>(dg));
|
||||
dst_argb[2] = Clamp(static_cast<int32>(dr));
|
||||
dst_argb[3] = Clamp(static_cast<int32>(da));
|
||||
dst_argb[0] = Clamp((int32)(db));
|
||||
dst_argb[1] = Clamp((int32)(dg));
|
||||
dst_argb[2] = Clamp((int32)(dr));
|
||||
dst_argb[3] = Clamp((int32)(da));
|
||||
src_argb += 4;
|
||||
dst_argb += 4;
|
||||
}
|
||||
|
||||
@ -952,7 +952,7 @@ void ARGBToUVRow_SSSE3(const uint8* src_argb0, int src_stride_argb,
|
||||
"+r"(dst_u), // %1
|
||||
"+r"(dst_v), // %2
|
||||
"+rm"(width) // %3
|
||||
: "r"(static_cast<intptr_t>(src_stride_argb)) // %4
|
||||
: "r"((intptr_t)(src_stride_argb)) // %4
|
||||
: "memory", "cc"
|
||||
#if defined(__native_client__) && defined(__x86_64__)
|
||||
, "r14"
|
||||
@ -1020,7 +1020,7 @@ void ARGBToUVJRow_SSSE3(const uint8* src_argb0, int src_stride_argb,
|
||||
"+r"(dst_u), // %1
|
||||
"+r"(dst_v), // %2
|
||||
"+rm"(width) // %3
|
||||
: "r"(static_cast<intptr_t>(src_stride_argb)) // %4
|
||||
: "r"((intptr_t)(src_stride_argb)) // %4
|
||||
: "memory", "cc"
|
||||
#if defined(__native_client__) && defined(__x86_64__)
|
||||
, "r14"
|
||||
@ -1090,7 +1090,7 @@ void ARGBToUVRow_Unaligned_SSSE3(const uint8* src_argb0, int src_stride_argb,
|
||||
"+r"(dst_u), // %1
|
||||
"+r"(dst_v), // %2
|
||||
"+rm"(width) // %3
|
||||
: "r"(static_cast<intptr_t>(src_stride_argb)) // %4
|
||||
: "r"((intptr_t)(src_stride_argb)) // %4
|
||||
: "memory", "cc"
|
||||
#if defined(__native_client__) && defined(__x86_64__)
|
||||
, "r14"
|
||||
@ -1161,7 +1161,7 @@ void ARGBToUVJRow_Unaligned_SSSE3(const uint8* src_argb0, int src_stride_argb,
|
||||
"+r"(dst_u), // %1
|
||||
"+r"(dst_v), // %2
|
||||
"+rm"(width) // %3
|
||||
: "r"(static_cast<intptr_t>(src_stride_argb))
|
||||
: "r"((intptr_t)(src_stride_argb))
|
||||
: "memory", "cc"
|
||||
#if defined(__native_client__) && defined(__x86_64__)
|
||||
, "r14"
|
||||
@ -1553,7 +1553,7 @@ void BGRAToUVRow_SSSE3(const uint8* src_bgra0, int src_stride_bgra,
|
||||
"+r"(dst_u), // %1
|
||||
"+r"(dst_v), // %2
|
||||
"+rm"(width) // %3
|
||||
: "r"(static_cast<intptr_t>(src_stride_bgra)) // %4
|
||||
: "r"((intptr_t)(src_stride_bgra)) // %4
|
||||
: "memory", "cc"
|
||||
#if defined(__native_client__) && defined(__x86_64__)
|
||||
, "r14"
|
||||
@ -1623,7 +1623,7 @@ void BGRAToUVRow_Unaligned_SSSE3(const uint8* src_bgra0, int src_stride_bgra,
|
||||
"+r"(dst_u), // %1
|
||||
"+r"(dst_v), // %2
|
||||
"+rm"(width) // %3
|
||||
: "r"(static_cast<intptr_t>(src_stride_bgra)) // %4
|
||||
: "r"((intptr_t)(src_stride_bgra)) // %4
|
||||
: "memory", "cc"
|
||||
#if defined(__native_client__) && defined(__x86_64__)
|
||||
, "r14"
|
||||
@ -1837,7 +1837,7 @@ void ABGRToUVRow_SSSE3(const uint8* src_abgr0, int src_stride_abgr,
|
||||
"+r"(dst_u), // %1
|
||||
"+r"(dst_v), // %2
|
||||
"+rm"(width) // %3
|
||||
: "r"(static_cast<intptr_t>(src_stride_abgr)) // %4
|
||||
: "r"((intptr_t)(src_stride_abgr)) // %4
|
||||
: "memory", "cc"
|
||||
#if defined(__native_client__) && defined(__x86_64__)
|
||||
, "r14"
|
||||
@ -1907,7 +1907,7 @@ void ABGRToUVRow_Unaligned_SSSE3(const uint8* src_abgr0, int src_stride_abgr,
|
||||
"+r"(dst_u), // %1
|
||||
"+r"(dst_v), // %2
|
||||
"+rm"(width) // %3
|
||||
: "r"(static_cast<intptr_t>(src_stride_abgr)) // %4
|
||||
: "r"((intptr_t)(src_stride_abgr)) // %4
|
||||
: "memory", "cc"
|
||||
#if defined(__native_client__) && defined(__x86_64__)
|
||||
, "r14"
|
||||
@ -1973,7 +1973,7 @@ void RGBAToUVRow_SSSE3(const uint8* src_rgba0, int src_stride_rgba,
|
||||
"+r"(dst_u), // %1
|
||||
"+r"(dst_v), // %2
|
||||
"+rm"(width) // %3
|
||||
: "r"(static_cast<intptr_t>(src_stride_rgba))
|
||||
: "r"((intptr_t)(src_stride_rgba))
|
||||
: "memory", "cc"
|
||||
#if defined(__native_client__) && defined(__x86_64__)
|
||||
, "r14"
|
||||
@ -2043,7 +2043,7 @@ void RGBAToUVRow_Unaligned_SSSE3(const uint8* src_rgba0, int src_stride_rgba,
|
||||
"+r"(dst_u), // %1
|
||||
"+r"(dst_v), // %2
|
||||
"+rm"(width) // %3
|
||||
: "r"(static_cast<intptr_t>(src_stride_rgba)) // %4
|
||||
: "r"((intptr_t)(src_stride_rgba)) // %4
|
||||
: "memory", "cc"
|
||||
#if defined(__native_client__) && defined(__x86_64__)
|
||||
, "r14"
|
||||
@ -2056,20 +2056,20 @@ void RGBAToUVRow_Unaligned_SSSE3(const uint8* src_rgba0, int src_stride_rgba,
|
||||
#endif // HAS_ARGBTOUVROW_SSSE3
|
||||
|
||||
#ifdef HAS_I422TOARGBROW_SSSE3
|
||||
#define UB 127 /* min(63,static_cast<int8>(2.018 * 64)) */
|
||||
#define UG -25 /* static_cast<int8>(-0.391 * 64 - 0.5) */
|
||||
#define UB 127 /* min(63,(int8)(2.018 * 64)) */
|
||||
#define UG -25 /* (int8)(-0.391 * 64 - 0.5) */
|
||||
#define UR 0
|
||||
|
||||
#define VB 0
|
||||
#define VG -52 /* static_cast<int8>(-0.813 * 64 - 0.5) */
|
||||
#define VR 102 /* static_cast<int8>(1.596 * 64 + 0.5) */
|
||||
#define VG -52 /* (int8)(-0.813 * 64 - 0.5) */
|
||||
#define VR 102 /* (int8)(1.596 * 64 + 0.5) */
|
||||
|
||||
// Bias
|
||||
#define BB UB * 128 + VB * 128
|
||||
#define BG UG * 128 + VG * 128
|
||||
#define BR UR * 128 + VR * 128
|
||||
|
||||
#define YG 74 /* static_cast<int8>(1.164 * 64 + 0.5) */
|
||||
#define YG 74 /* (int8)(1.164 * 64 + 0.5) */
|
||||
|
||||
struct {
|
||||
vec8 kUVToB; // 0
|
||||
@ -2964,7 +2964,7 @@ static uvec8 kShuffleMirror = {
|
||||
};
|
||||
|
||||
void MirrorRow_SSSE3(const uint8* src, uint8* dst, int width) {
|
||||
intptr_t temp_width = static_cast<intptr_t>(width);
|
||||
intptr_t temp_width = (intptr_t)(width);
|
||||
asm volatile (
|
||||
"movdqa %3,%%xmm5 \n"
|
||||
"lea " MEMLEA(-0x10,0) ",%0 \n"
|
||||
@ -2993,7 +2993,7 @@ void MirrorRow_SSSE3(const uint8* src, uint8* dst, int width) {
|
||||
|
||||
#ifdef HAS_MIRRORROW_SSE2
|
||||
void MirrorRow_SSE2(const uint8* src, uint8* dst, int width) {
|
||||
intptr_t temp_width = static_cast<intptr_t>(width);
|
||||
intptr_t temp_width = (intptr_t)(width);
|
||||
asm volatile (
|
||||
"lea " MEMLEA(-0x10,0) ",%0 \n"
|
||||
LABELALIGN
|
||||
@ -3032,7 +3032,7 @@ static uvec8 kShuffleMirrorUV = {
|
||||
};
|
||||
void MirrorUVRow_SSSE3(const uint8* src, uint8* dst_u, uint8* dst_v,
|
||||
int width) {
|
||||
intptr_t temp_width = static_cast<intptr_t>(width);
|
||||
intptr_t temp_width = (intptr_t)(width);
|
||||
asm volatile (
|
||||
"movdqa %4,%%xmm1 \n"
|
||||
"lea " MEMLEA4(-0x10,0,3,2) ",%0 \n"
|
||||
@ -3071,7 +3071,7 @@ static uvec8 kARGBShuffleMirror = {
|
||||
};
|
||||
|
||||
void ARGBMirrorRow_SSSE3(const uint8* src, uint8* dst, int width) {
|
||||
intptr_t temp_width = static_cast<intptr_t>(width);
|
||||
intptr_t temp_width = (intptr_t)(width);
|
||||
asm volatile (
|
||||
"lea " MEMLEA4(-0x10,0,2,4) ",%0 \n"
|
||||
"movdqa %3,%%xmm5 \n"
|
||||
@ -3268,7 +3268,7 @@ void CopyRow_SSE2(const uint8* src, uint8* dst, int count) {
|
||||
|
||||
#ifdef HAS_COPYROW_X86
|
||||
void CopyRow_X86(const uint8* src, uint8* dst, int width) {
|
||||
size_t width_tmp = static_cast<size_t>(width);
|
||||
size_t width_tmp = (size_t)(width);
|
||||
asm volatile (
|
||||
"shr $0x2,%2 \n"
|
||||
"rep movsl " MEMMOVESTRING(0,1) " \n"
|
||||
@ -3284,7 +3284,7 @@ void CopyRow_X86(const uint8* src, uint8* dst, int width) {
|
||||
#ifdef HAS_COPYROW_ERMS
|
||||
// Unaligned Multiple of 1.
|
||||
void CopyRow_ERMS(const uint8* src, uint8* dst, int width) {
|
||||
size_t width_tmp = static_cast<size_t>(width);
|
||||
size_t width_tmp = (size_t)(width);
|
||||
asm volatile (
|
||||
"rep movsb " MEMMOVESTRING(0,1) " \n"
|
||||
: "+S"(src), // %0
|
||||
@ -3440,7 +3440,7 @@ void ARGBCopyYToAlphaRow_AVX2(const uint8* src, uint8* dst, int width) {
|
||||
|
||||
#ifdef HAS_SETROW_X86
|
||||
void SetRow_X86(uint8* dst, uint32 v32, int width) {
|
||||
size_t width_tmp = static_cast<size_t>(width);
|
||||
size_t width_tmp = (size_t)(width);
|
||||
asm volatile (
|
||||
"shr $0x2,%1 \n"
|
||||
"rep stosl " MEMSTORESTRING(eax,0) " \n"
|
||||
@ -3453,8 +3453,8 @@ void SetRow_X86(uint8* dst, uint32 v32, int width) {
|
||||
void ARGBSetRows_X86(uint8* dst, uint32 v32, int width,
|
||||
int dst_stride, int height) {
|
||||
for (int y = 0; y < height; ++y) {
|
||||
size_t width_tmp = static_cast<size_t>(width);
|
||||
uint32* d = reinterpret_cast<uint32*>(dst);
|
||||
size_t width_tmp = (size_t)(width);
|
||||
uint32* d = (uint32*)(dst);
|
||||
asm volatile (
|
||||
"rep stosl " MEMSTORESTRING(eax,0) " \n"
|
||||
: "+D"(d), // %0
|
||||
@ -3528,7 +3528,7 @@ void YUY2ToUVRow_SSE2(const uint8* src_yuy2, int stride_yuy2,
|
||||
"+r"(dst_u), // %1
|
||||
"+r"(dst_v), // %2
|
||||
"+r"(pix) // %3
|
||||
: "r"(static_cast<intptr_t>(stride_yuy2)) // %4
|
||||
: "r"((intptr_t)(stride_yuy2)) // %4
|
||||
: "memory", "cc"
|
||||
#if defined(__native_client__) && defined(__x86_64__)
|
||||
, "r14"
|
||||
@ -3642,7 +3642,7 @@ void YUY2ToUVRow_Unaligned_SSE2(const uint8* src_yuy2,
|
||||
"+r"(dst_u), // %1
|
||||
"+r"(dst_v), // %2
|
||||
"+r"(pix) // %3
|
||||
: "r"(static_cast<intptr_t>(stride_yuy2)) // %4
|
||||
: "r"((intptr_t)(stride_yuy2)) // %4
|
||||
: "memory", "cc"
|
||||
#if defined(__native_client__) && defined(__x86_64__)
|
||||
, "r14"
|
||||
@ -3752,7 +3752,7 @@ void UYVYToUVRow_SSE2(const uint8* src_uyvy, int stride_uyvy,
|
||||
"+r"(dst_u), // %1
|
||||
"+r"(dst_v), // %2
|
||||
"+r"(pix) // %3
|
||||
: "r"(static_cast<intptr_t>(stride_uyvy)) // %4
|
||||
: "r"((intptr_t)(stride_uyvy)) // %4
|
||||
: "memory", "cc"
|
||||
#if defined(__native_client__) && defined(__x86_64__)
|
||||
, "r14"
|
||||
@ -3863,7 +3863,7 @@ void UYVYToUVRow_Unaligned_SSE2(const uint8* src_uyvy, int stride_uyvy,
|
||||
"+r"(dst_u), // %1
|
||||
"+r"(dst_v), // %2
|
||||
"+r"(pix) // %3
|
||||
: "r"(static_cast<intptr_t>(stride_uyvy)) // %4
|
||||
: "r"((intptr_t)(stride_uyvy)) // %4
|
||||
: "memory", "cc"
|
||||
#if defined(__native_client__) && defined(__x86_64__)
|
||||
, "r14"
|
||||
@ -5217,7 +5217,7 @@ void CumulativeSumToAverageRow_SSE2(const int32* topleft, const int32* botleft,
|
||||
"+r"(botleft), // %1
|
||||
"+r"(dst), // %2
|
||||
"+rm"(count) // %3
|
||||
: "r"(static_cast<intptr_t>(width)), // %4
|
||||
: "r"((intptr_t)(width)), // %4
|
||||
"rm"(area) // %5
|
||||
: "memory", "cc"
|
||||
#if defined(__native_client__) && defined(__x86_64__)
|
||||
@ -5424,7 +5424,7 @@ void InterpolateRow_SSSE3(uint8* dst_ptr, const uint8* src_ptr,
|
||||
"+r"(src_ptr), // %1
|
||||
"+r"(dst_width), // %2
|
||||
"+r"(source_y_fraction) // %3
|
||||
: "r"(static_cast<intptr_t>(src_stride)) // %4
|
||||
: "r"((intptr_t)(src_stride)) // %4
|
||||
: "memory", "cc"
|
||||
#if defined(__native_client__) && defined(__x86_64__)
|
||||
, "r14"
|
||||
@ -5544,7 +5544,7 @@ void InterpolateRow_SSE2(uint8* dst_ptr, const uint8* src_ptr,
|
||||
"+r"(src_ptr), // %1
|
||||
"+r"(dst_width), // %2
|
||||
"+r"(source_y_fraction) // %3
|
||||
: "r"(static_cast<intptr_t>(src_stride)) // %4
|
||||
: "r"((intptr_t)(src_stride)) // %4
|
||||
: "memory", "cc"
|
||||
#if defined(__native_client__) && defined(__x86_64__)
|
||||
, "r14"
|
||||
@ -5656,7 +5656,7 @@ void InterpolateRow_Unaligned_SSSE3(uint8* dst_ptr, const uint8* src_ptr,
|
||||
"+r"(src_ptr), // %1
|
||||
"+r"(dst_width), // %2
|
||||
"+r"(source_y_fraction) // %3
|
||||
: "r"(static_cast<intptr_t>(src_stride)) // %4
|
||||
: "r"((intptr_t)(src_stride)) // %4
|
||||
: "memory", "cc"
|
||||
#if defined(__native_client__) && defined(__x86_64__)
|
||||
, "r14"
|
||||
@ -5776,7 +5776,7 @@ void InterpolateRow_Unaligned_SSE2(uint8* dst_ptr, const uint8* src_ptr,
|
||||
"+r"(src_ptr), // %1
|
||||
"+r"(dst_width), // %2
|
||||
"+r"(source_y_fraction) // %3
|
||||
: "r"(static_cast<intptr_t>(src_stride)) // %4
|
||||
: "r"((intptr_t)(src_stride)) // %4
|
||||
: "memory", "cc"
|
||||
#if defined(__native_client__) && defined(__x86_64__)
|
||||
, "r14"
|
||||
@ -5804,7 +5804,7 @@ void HalfRow_SSE2(const uint8* src_uv, int src_uv_stride,
|
||||
: "+r"(src_uv), // %0
|
||||
"+r"(dst_uv), // %1
|
||||
"+r"(pix) // %2
|
||||
: "r"(static_cast<intptr_t>(src_uv_stride)) // %3
|
||||
: "r"((intptr_t)(src_uv_stride)) // %3
|
||||
: "memory", "cc"
|
||||
#if defined(__SSE2__)
|
||||
, "xmm0"
|
||||
|
||||
@ -2030,15 +2030,15 @@ void RGBAToUVRow_Unaligned_SSSE3(const uint8* src_argb0, int src_stride_argb,
|
||||
}
|
||||
#endif // HAS_ARGBTOYROW_SSSE3
|
||||
|
||||
#define YG 74 /* static_cast<int8>(1.164 * 64 + 0.5) */
|
||||
#define YG 74 /* (int8)(1.164 * 64 + 0.5) */
|
||||
|
||||
#define UB 127 /* min(63,static_cast<int8>(2.018 * 64)) */
|
||||
#define UG -25 /* static_cast<int8>(-0.391 * 64 - 0.5) */
|
||||
#define UB 127 /* min(63,(int8)(2.018 * 64)) */
|
||||
#define UG -25 /* (int8)(-0.391 * 64 - 0.5) */
|
||||
#define UR 0
|
||||
|
||||
#define VB 0
|
||||
#define VG -52 /* static_cast<int8>(-0.813 * 64 - 0.5) */
|
||||
#define VR 102 /* static_cast<int8>(1.596 * 64 + 0.5) */
|
||||
#define VG -52 /* (int8)(-0.813 * 64 - 0.5) */
|
||||
#define VR 102 /* (int8)(1.596 * 64 + 0.5) */
|
||||
|
||||
// Bias
|
||||
#define BB UB * 128 + VB * 128
|
||||
|
||||
@ -434,9 +434,9 @@ static void ScalePlaneBox(int src_width, int src_height,
|
||||
y = (src_height << 16);
|
||||
}
|
||||
int boxheight = (y >> 16) - iy;
|
||||
ScaleAddRows(src, src_stride, reinterpret_cast<uint16*>(row16),
|
||||
ScaleAddRows(src, src_stride, (uint16*)(row16),
|
||||
src_width, boxheight);
|
||||
ScaleAddCols(dst_width, boxheight, x, dx, reinterpret_cast<uint16*>(row16),
|
||||
ScaleAddCols(dst_width, boxheight, x, dx, (uint16*)(row16),
|
||||
dst_ptr);
|
||||
dst_ptr += dst_stride;
|
||||
}
|
||||
|
||||
@ -171,14 +171,14 @@ static void ScaleARGBBilinearDown(int src_width, int src_height,
|
||||
assert(src_height > 0);
|
||||
assert(dst_width > 0);
|
||||
assert(dst_height > 0);
|
||||
int64 xlast = x + static_cast<int64>(dst_width - 1) * dx;
|
||||
int64 xlast = x + (int64)(dst_width - 1) * dx;
|
||||
int64 xl = (dx >= 0) ? x : xlast;
|
||||
int64 xr = (dx >= 0) ? xlast : x;
|
||||
xl = (xl >> 16) & ~3; // Left edge aligned.
|
||||
xr = (xr >> 16) + 1; // Right most pixel used.
|
||||
int clip_src_width = (((xr - xl) + 1 + 3) & ~3) * 4; // Width aligned to 4.
|
||||
src_argb += xl * 4;
|
||||
x -= static_cast<int>(xl << 16);
|
||||
x -= (int)(xl << 16);
|
||||
void (*InterpolateRow)(uint8* dst_argb, const uint8* src_argb,
|
||||
ptrdiff_t src_stride, int dst_width, int source_y_fraction) =
|
||||
InterpolateRow_C;
|
||||
@ -679,13 +679,13 @@ static void ScaleARGB(const uint8* src, int src_stride,
|
||||
ScaleSlope(src_width, src_height, dst_width, dst_height, filtering,
|
||||
&x, &y, &dx, &dy);
|
||||
if (clip_x) {
|
||||
int64 clipf = static_cast<int64>(clip_x) * dx;
|
||||
int64 clipf = (int64)(clip_x) * dx;
|
||||
x += (clipf & 0xffff);
|
||||
src += (clipf >> 16) * 4;
|
||||
dst += clip_x * 4;
|
||||
}
|
||||
if (clip_y) {
|
||||
int64 clipf = static_cast<int64>(clip_y) * dy;
|
||||
int64 clipf = (int64)(clip_y) * dy;
|
||||
y += (clipf & 0xffff);
|
||||
src += (clipf >> 16) * src_stride;
|
||||
dst += clip_y * dst_stride;
|
||||
|
||||
@ -204,8 +204,8 @@ void ScaleColsUp2_C(uint8* dst_ptr, const uint8* src_ptr,
|
||||
}
|
||||
|
||||
// (1-f)a + fb can be replaced with a + f(b-a)
|
||||
#define BLENDER(a, b, f) static_cast<uint8>(static_cast<int>(a) + \
|
||||
(static_cast<int>(f) * (static_cast<int>(b) - static_cast<int>(a)) >> 16))
|
||||
#define BLENDER(a, b, f) (uint8)((int)(a) + \
|
||||
((int)(f) * ((int)(b) - (int)(a)) >> 16))
|
||||
|
||||
void ScaleFilterCols_C(uint8* dst_ptr, const uint8* src_ptr,
|
||||
int dst_width, int x, int dx) {
|
||||
@ -232,7 +232,7 @@ void ScaleFilterCols_C(uint8* dst_ptr, const uint8* src_ptr,
|
||||
|
||||
void ScaleFilterCols64_C(uint8* dst_ptr, const uint8* src_ptr,
|
||||
int dst_width, int x32, int dx) {
|
||||
int64 x = static_cast<int64>(x32);
|
||||
int64 x = (int64)(x32);
|
||||
for (int j = 0; j < dst_width - 1; j += 2) {
|
||||
int64 xi = x >> 16;
|
||||
int a = src_ptr[xi];
|
||||
@ -332,8 +332,8 @@ void ScaleAddRows_C(const uint8* src_ptr, ptrdiff_t src_stride,
|
||||
void ScaleARGBRowDown2_C(const uint8* src_argb,
|
||||
ptrdiff_t /* src_stride */,
|
||||
uint8* dst_argb, int dst_width) {
|
||||
const uint32* src = reinterpret_cast<const uint32*>(src_argb);
|
||||
uint32* dst = reinterpret_cast<uint32*>(dst_argb);
|
||||
const uint32* src = (const uint32*)(src_argb);
|
||||
uint32* dst = (uint32*)(dst_argb);
|
||||
|
||||
for (int x = 0; x < dst_width - 1; x += 2) {
|
||||
dst[0] = src[1];
|
||||
@ -378,8 +378,8 @@ void ScaleARGBRowDown2Box_C(const uint8* src_argb, ptrdiff_t src_stride,
|
||||
void ScaleARGBRowDownEven_C(const uint8* src_argb, ptrdiff_t /* src_stride */,
|
||||
int src_stepx,
|
||||
uint8* dst_argb, int dst_width) {
|
||||
const uint32* src = reinterpret_cast<const uint32*>(src_argb);
|
||||
uint32* dst = reinterpret_cast<uint32*>(dst_argb);
|
||||
const uint32* src = (const uint32*)(src_argb);
|
||||
uint32* dst = (uint32*)(dst_argb);
|
||||
|
||||
for (int x = 0; x < dst_width - 1; x += 2) {
|
||||
dst[0] = src[0];
|
||||
@ -413,8 +413,8 @@ void ScaleARGBRowDownEvenBox_C(const uint8* src_argb,
|
||||
// Scales a single row of pixels using point sampling.
|
||||
void ScaleARGBCols_C(uint8* dst_argb, const uint8* src_argb,
|
||||
int dst_width, int x, int dx) {
|
||||
const uint32* src = reinterpret_cast<const uint32*>(src_argb);
|
||||
uint32* dst = reinterpret_cast<uint32*>(dst_argb);
|
||||
const uint32* src = (const uint32*)(src_argb);
|
||||
uint32* dst = (uint32*)(dst_argb);
|
||||
for (int j = 0; j < dst_width - 1; j += 2) {
|
||||
dst[0] = src[x >> 16];
|
||||
x += dx;
|
||||
@ -429,9 +429,9 @@ void ScaleARGBCols_C(uint8* dst_argb, const uint8* src_argb,
|
||||
|
||||
void ScaleARGBCols64_C(uint8* dst_argb, const uint8* src_argb,
|
||||
int dst_width, int x32, int dx) {
|
||||
int64 x = static_cast<int64>(x32);
|
||||
const uint32* src = reinterpret_cast<const uint32*>(src_argb);
|
||||
uint32* dst = reinterpret_cast<uint32*>(dst_argb);
|
||||
int64 x = (int64)(x32);
|
||||
const uint32* src = (const uint32*)(src_argb);
|
||||
uint32* dst = (uint32*)(dst_argb);
|
||||
for (int j = 0; j < dst_width - 1; j += 2) {
|
||||
dst[0] = src[x >> 16];
|
||||
x += dx;
|
||||
@ -447,8 +447,8 @@ void ScaleARGBCols64_C(uint8* dst_argb, const uint8* src_argb,
|
||||
// Scales a single row of pixels up by 2x using point sampling.
|
||||
void ScaleARGBColsUp2_C(uint8* dst_argb, const uint8* src_argb,
|
||||
int dst_width, int, int) {
|
||||
const uint32* src = reinterpret_cast<const uint32*>(src_argb);
|
||||
uint32* dst = reinterpret_cast<uint32*>(dst_argb);
|
||||
const uint32* src = (const uint32*)(src_argb);
|
||||
uint32* dst = (uint32*)(dst_argb);
|
||||
for (int j = 0; j < dst_width - 1; j += 2) {
|
||||
dst[1] = dst[0] = src[0];
|
||||
src += 1;
|
||||
@ -461,7 +461,7 @@ void ScaleARGBColsUp2_C(uint8* dst_argb, const uint8* src_argb,
|
||||
|
||||
// Mimics SSSE3 blender
|
||||
#define BLENDER1(a, b, f) ((a) * (0x7f ^ f) + (b) * f) >> 7
|
||||
#define BLENDERC(a, b, f, s) static_cast<uint32>( \
|
||||
#define BLENDERC(a, b, f, s) (uint32)( \
|
||||
BLENDER1(((a) >> s) & 255, ((b) >> s) & 255, f) << s)
|
||||
#define BLENDER(a, b, f) \
|
||||
BLENDERC(a, b, f, 24) | BLENDERC(a, b, f, 16) | \
|
||||
@ -469,8 +469,8 @@ void ScaleARGBColsUp2_C(uint8* dst_argb, const uint8* src_argb,
|
||||
|
||||
void ScaleARGBFilterCols_C(uint8* dst_argb, const uint8* src_argb,
|
||||
int dst_width, int x, int dx) {
|
||||
const uint32* src = reinterpret_cast<const uint32*>(src_argb);
|
||||
uint32* dst = reinterpret_cast<uint32*>(dst_argb);
|
||||
const uint32* src = (const uint32*)(src_argb);
|
||||
uint32* dst = (uint32*)(dst_argb);
|
||||
for (int j = 0; j < dst_width - 1; j += 2) {
|
||||
int xi = x >> 16;
|
||||
int xf = (x >> 9) & 0x7f;
|
||||
@ -497,9 +497,9 @@ void ScaleARGBFilterCols_C(uint8* dst_argb, const uint8* src_argb,
|
||||
|
||||
void ScaleARGBFilterCols64_C(uint8* dst_argb, const uint8* src_argb,
|
||||
int dst_width, int x32, int dx) {
|
||||
int64 x = static_cast<int64>(x32);
|
||||
const uint32* src = reinterpret_cast<const uint32*>(src_argb);
|
||||
uint32* dst = reinterpret_cast<uint32*>(dst_argb);
|
||||
int64 x = (int64)(x32);
|
||||
const uint32* src = (const uint32*)(src_argb);
|
||||
uint32* dst = (uint32*)(dst_argb);
|
||||
for (int j = 0; j < dst_width - 1; j += 2) {
|
||||
int64 xi = x >> 16;
|
||||
int xf = (x >> 9) & 0x7f;
|
||||
@ -656,12 +656,12 @@ FilterMode ScaleFilterReduce(int src_width, int src_height,
|
||||
|
||||
// Divide num by div and return as 16.16 fixed point result.
|
||||
int FixedDiv_C(int num, int div) {
|
||||
return static_cast<int>((static_cast<int64>(num) << 16) / div);
|
||||
return (int)(((int64)(num) << 16) / div);
|
||||
}
|
||||
|
||||
// Divide num by div and return as 16.16 fixed point result.
|
||||
int FixedDiv1_C(int num, int div) {
|
||||
return static_cast<int>(((static_cast<int64>(num) << 16) - 0x00010001) /
|
||||
return (int)((((int64)(num) << 16) - 0x00010001) /
|
||||
(div - 1));
|
||||
}
|
||||
|
||||
|
||||
@ -189,7 +189,7 @@ void ScaleRowDown2Box_SSE2(const uint8* src_ptr, ptrdiff_t src_stride,
|
||||
: "+r"(src_ptr), // %0
|
||||
"+r"(dst_ptr), // %1
|
||||
"+r"(dst_width) // %2
|
||||
: "r"(static_cast<intptr_t>(src_stride)) // %3
|
||||
: "r"((intptr_t)(src_stride)) // %3
|
||||
: "memory", "cc"
|
||||
#if defined(__native_client__) && defined(__x86_64__)
|
||||
, "r14"
|
||||
@ -295,7 +295,7 @@ void ScaleRowDown2Box_Unaligned_SSE2(const uint8* src_ptr,
|
||||
: "+r"(src_ptr), // %0
|
||||
"+r"(dst_ptr), // %1
|
||||
"+r"(dst_width) // %2
|
||||
: "r"(static_cast<intptr_t>(src_stride)) // %3
|
||||
: "r"((intptr_t)(src_stride)) // %3
|
||||
: "memory", "cc"
|
||||
#if defined(__native_client__) && defined(__x86_64__)
|
||||
, "r14"
|
||||
@ -387,7 +387,7 @@ void ScaleRowDown4Box_SSE2(const uint8* src_ptr, ptrdiff_t src_stride,
|
||||
"+r"(dst_ptr), // %1
|
||||
"+r"(dst_width), // %2
|
||||
"+r"(stridex3) // %3
|
||||
: "r"(static_cast<intptr_t>(src_stride)) // %4
|
||||
: "r"((intptr_t)(src_stride)) // %4
|
||||
: "memory", "cc"
|
||||
#if defined(__native_client__) && defined(__x86_64__)
|
||||
, "r14"
|
||||
@ -496,7 +496,7 @@ void ScaleRowDown34_1_Box_SSSE3(const uint8* src_ptr,
|
||||
: "+r"(src_ptr), // %0
|
||||
"+r"(dst_ptr), // %1
|
||||
"+r"(dst_width) // %2
|
||||
: "r"(static_cast<intptr_t>(src_stride)), // %3
|
||||
: "r"((intptr_t)(src_stride)), // %3
|
||||
"m"(kMadd21) // %4
|
||||
: "memory", "cc"
|
||||
#if defined(__native_client__) && defined(__x86_64__)
|
||||
@ -570,7 +570,7 @@ void ScaleRowDown34_0_Box_SSSE3(const uint8* src_ptr,
|
||||
: "+r"(src_ptr), // %0
|
||||
"+r"(dst_ptr), // %1
|
||||
"+r"(dst_width) // %2
|
||||
: "r"(static_cast<intptr_t>(src_stride)), // %3
|
||||
: "r"((intptr_t)(src_stride)), // %3
|
||||
"m"(kMadd21) // %4
|
||||
: "memory", "cc"
|
||||
#if defined(__native_client__) && defined(__x86_64__)
|
||||
@ -652,7 +652,7 @@ void ScaleRowDown38_2_Box_SSSE3(const uint8* src_ptr,
|
||||
: "+r"(src_ptr), // %0
|
||||
"+r"(dst_ptr), // %1
|
||||
"+r"(dst_width) // %2
|
||||
: "r"(static_cast<intptr_t>(src_stride)) // %3
|
||||
: "r"((intptr_t)(src_stride)) // %3
|
||||
: "memory", "cc"
|
||||
#if defined(__native_client__) && defined(__x86_64__)
|
||||
, "r14"
|
||||
@ -720,7 +720,7 @@ void ScaleRowDown38_3_Box_SSSE3(const uint8* src_ptr,
|
||||
: "+r"(src_ptr), // %0
|
||||
"+r"(dst_ptr), // %1
|
||||
"+r"(dst_width) // %2
|
||||
: "r"(static_cast<intptr_t>(src_stride)) // %3
|
||||
: "r"((intptr_t)(src_stride)) // %3
|
||||
: "memory", "cc"
|
||||
#if defined(__native_client__) && defined(__x86_64__)
|
||||
, "r14"
|
||||
@ -777,7 +777,7 @@ void ScaleAddRows_SSE2(const uint8* src_ptr, ptrdiff_t src_stride,
|
||||
"+r"(tmp_src), // %3
|
||||
"+r"(src_width), // %4
|
||||
"+rm"(src_height) // %5
|
||||
: "rm"(static_cast<intptr_t>(src_stride)) // %6
|
||||
: "rm"((intptr_t)(src_stride)) // %6
|
||||
: "memory", "cc"
|
||||
#if defined(__SSE2__)
|
||||
, "xmm0", "xmm1", "xmm2", "xmm3", "xmm4"
|
||||
@ -970,7 +970,7 @@ void ScaleARGBRowDown2Box_SSE2(const uint8* src_argb,
|
||||
: "+r"(src_argb), // %0
|
||||
"+r"(dst_argb), // %1
|
||||
"+r"(dst_width) // %2
|
||||
: "r"(static_cast<intptr_t>(src_stride)) // %3
|
||||
: "r"((intptr_t)(src_stride)) // %3
|
||||
: "memory", "cc"
|
||||
#if defined(__native_client__) && defined(__x86_64__)
|
||||
, "r14"
|
||||
@ -986,7 +986,7 @@ void ScaleARGBRowDown2Box_SSE2(const uint8* src_argb,
|
||||
void ScaleARGBRowDownEven_SSE2(const uint8* src_argb, ptrdiff_t src_stride,
|
||||
int src_stepx,
|
||||
uint8* dst_argb, int dst_width) {
|
||||
intptr_t src_stepx_x4 = static_cast<intptr_t>(src_stepx);
|
||||
intptr_t src_stepx_x4 = (intptr_t)(src_stepx);
|
||||
intptr_t src_stepx_x12 = 0;
|
||||
asm volatile (
|
||||
"lea " MEMLEA3(0x00,1,4) ",%1 \n"
|
||||
@ -1027,9 +1027,9 @@ void ScaleARGBRowDownEven_SSE2(const uint8* src_argb, ptrdiff_t src_stride,
|
||||
void ScaleARGBRowDownEvenBox_SSE2(const uint8* src_argb,
|
||||
ptrdiff_t src_stride, int src_stepx,
|
||||
uint8* dst_argb, int dst_width) {
|
||||
intptr_t src_stepx_x4 = static_cast<intptr_t>(src_stepx);
|
||||
intptr_t src_stepx_x4 = (intptr_t)(src_stepx);
|
||||
intptr_t src_stepx_x12 = 0;
|
||||
intptr_t row1 = static_cast<intptr_t>(src_stride);
|
||||
intptr_t row1 = (intptr_t)(src_stride);
|
||||
asm volatile (
|
||||
"lea " MEMLEA3(0x00,1,4) ",%1 \n"
|
||||
"lea " MEMLEA4(0x00,1,1,2) ",%4 \n"
|
||||
|
||||