author     Qt Continuous Integration System <qt-info@nokia.com>  2010-02-19 22:48:25 (GMT)
committer  Qt Continuous Integration System <qt-info@nokia.com>  2010-02-19 22:48:25 (GMT)
commit     1f10c6be975a2d36c05d926b332b869ad6339877 (patch)
tree       c235ffdc5195351d24a30ef5f6b8e67f2627c2d0 /src
parent     1a8fffa8cae5b4094adcd87bfc4cbc24599d7305 (diff)
parent     e9dedf5b10fcc25454d01a588d5000437cb46e12 (diff)
Merge branch '4.6' of scm.dev.nokia.troll.no:qt/oslo-staging-2 into 4.6-integration
* '4.6' of scm.dev.nokia.troll.no:qt/oslo-staging-2:
  Fixed off-by-one blending errors in the NEON drawhelper code.
  Cetest extensions for Windows Mobile device power operations.
  Remote lib extensions for Windows Mobile device power operations.
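The off-by-one comes from doing the rounded divide-by-255 in signed 16-bit lanes: a product such as 255 * 255 wraps to a negative value, and the arithmetic shift then produces the wrong correction term. A minimal scalar sketch of the two behaviours (my own illustration, not part of the patch; right-shifting a negative value is implementation-defined in C++ but matches vshrq_n_s16 on common compilers):

    #include <cstdint>
    #include <cstdio>

    // Rounded divide-by-255 with unsigned 16-bit arithmetic (the patched behaviour):
    //   result = (x + (x >> 8) + 0x80) >> 8
    static uint16_t div255_unsigned(uint16_t x)
    {
        return static_cast<uint16_t>((x + (x >> 8) + 0x80) >> 8);
    }

    // Same formula, but with the correction term computed by a signed 16-bit
    // arithmetic shift, as the old int16x8_t code effectively did.
    static uint16_t div255_signed(int16_t x)
    {
        int16_t temp = static_cast<int16_t>(x >> 8);   // arithmetic shift of a negative value
        uint16_t sum = static_cast<uint16_t>(x + 0x80 + temp);
        return static_cast<uint16_t>(sum >> 8);        // the final shift was already unsigned
    }

    int main()
    {
        uint16_t product = 255 * 255;                  // e.g. a 0xff channel scaled by full alpha
        std::printf("unsigned: %d\n", div255_unsigned(product));                      // prints 255
        std::printf("signed:   %d\n", div255_signed(static_cast<int16_t>(product)));  // prints 254
    }

With signed lanes, 255 * 255 = 65025 is reinterpreted as -511, so a fully opaque result comes out as 254 instead of 255; the patch moves all of these helpers to uint16x8_t so the shift is logical and the rounding is exact.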
Diffstat (limited to 'src')
-rw-r--r--   src/gui/painting/qdrawhelper_neon.cpp  | 130
1 file changed, 65 insertions(+), 65 deletions(-)
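For reference, a scalar sketch (my own, not part of the patch) of the per-channel math that qvbyte_mul_u16 and qvsource_over_u16 perform eight 16-bit lanes at a time, assuming premultiplied ARGB as qt_blend_argb32_on_argb32_neon expects:

    #include <cstdint>

    // Rounded x / 255, same trick as qvdiv_255_u16 but scalar.
    static inline uint32_t div255(uint32_t x)
    {
        return (x + (x >> 8) + 0x80) >> 8;
    }

    // qvbyte_mul_u16: scale one 8-bit channel by an 8-bit alpha.
    static inline uint32_t byte_mul(uint32_t x, uint32_t alpha)
    {
        return div255(x * alpha);
    }

    // qvsource_over_u16: source-over for one premultiplied channel,
    //   out = src + dst * (255 - src_alpha) / 255
    static inline uint32_t source_over(uint32_t src, uint32_t dst, uint32_t src_alpha)
    {
        return src + byte_mul(dst, 255 - src_alpha);
    }

qt_blend_rgb32_on_rgb32_neon uses the qvinterpolate_pixel_255 form instead, out = (src * const_alpha + dst * (255 - const_alpha)) / 255 with the same rounded divide, since opaque RGB32 needs no per-pixel alpha.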
diff --git a/src/gui/painting/qdrawhelper_neon.cpp b/src/gui/painting/qdrawhelper_neon.cpp
index 25860a0..77c5202 100644
--- a/src/gui/painting/qdrawhelper_neon.cpp
+++ b/src/gui/painting/qdrawhelper_neon.cpp
@@ -48,43 +48,43 @@
QT_BEGIN_NAMESPACE
-static inline int16x8_t qvdiv_255_s16(int16x8_t x, int16x8_t half)
+static inline uint16x8_t qvdiv_255_u16(uint16x8_t x, uint16x8_t half)
{
// result = (x + (x >> 8) + 0x80) >> 8
- const int16x8_t temp = vshrq_n_s16(x, 8); // x >> 8
- const int16x8_t sum_part = vaddq_s16(x, half); // x + 0x80
- const int16x8_t sum = vaddq_s16(temp, sum_part);
+ const uint16x8_t temp = vshrq_n_u16(x, 8); // x >> 8
+ const uint16x8_t sum_part = vaddq_u16(x, half); // x + 0x80
+ const uint16x8_t sum = vaddq_u16(temp, sum_part);
- return vreinterpretq_s16_u16(vshrq_n_u16(vreinterpretq_u16_s16(sum), 8));
+ return vshrq_n_u16(sum, 8);
}
-static inline int16x8_t qvbyte_mul_s16(int16x8_t x, int16x8_t alpha, int16x8_t half)
+static inline uint16x8_t qvbyte_mul_u16(uint16x8_t x, uint16x8_t alpha, uint16x8_t half)
{
// t = qRound(x * alpha / 255.0)
- const int16x8_t t = vmulq_s16(x, alpha); // t
- return qvdiv_255_s16(t, half);
+ const uint16x8_t t = vmulq_u16(x, alpha); // t
+ return qvdiv_255_u16(t, half);
}
-static inline int16x8_t qvinterpolate_pixel_255(int16x8_t x, int16x8_t a, int16x8_t y, int16x8_t b, int16x8_t half)
+static inline uint16x8_t qvinterpolate_pixel_255(uint16x8_t x, uint16x8_t a, uint16x8_t y, uint16x8_t b, uint16x8_t half)
{
// t = x * a + y * b
- const int16x8_t ta = vmulq_s16(x, a);
- const int16x8_t tb = vmulq_s16(y, b);
+ const uint16x8_t ta = vmulq_u16(x, a);
+ const uint16x8_t tb = vmulq_u16(y, b);
- return qvdiv_255_s16(vaddq_s16(ta, tb), half);
+ return qvdiv_255_u16(vaddq_u16(ta, tb), half);
}
-static inline int16x8_t qvsource_over_s16(int16x8_t src16, int16x8_t dst16, int16x8_t half, int16x8_t full)
+static inline uint16x8_t qvsource_over_u16(uint16x8_t src16, uint16x8_t dst16, uint16x8_t half, uint16x8_t full)
{
- const int16x4_t alpha16_high = vdup_lane_s16(vget_high_s16(src16), 3);
- const int16x4_t alpha16_low = vdup_lane_s16(vget_low_s16(src16), 3);
+ const uint16x4_t alpha16_high = vdup_lane_u16(vget_high_u16(src16), 3);
+ const uint16x4_t alpha16_low = vdup_lane_u16(vget_low_u16(src16), 3);
- const int16x8_t alpha16 = vsubq_s16(full, vcombine_s16(alpha16_low, alpha16_high));
+ const uint16x8_t alpha16 = vsubq_u16(full, vcombine_u16(alpha16_low, alpha16_high));
- return vaddq_s16(src16, qvbyte_mul_s16(dst16, alpha16, half));
+ return vaddq_u16(src16, qvbyte_mul_u16(dst16, alpha16, half));
}
void qt_blend_argb32_on_argb32_neon(uchar *destPixels, int dbpl,
@@ -94,21 +94,21 @@ void qt_blend_argb32_on_argb32_neon(uchar *destPixels, int dbpl,
{
const uint *src = (const uint *) srcPixels;
uint *dst = (uint *) destPixels;
- int16x8_t half = vdupq_n_s16(0x80);
- int16x8_t full = vdupq_n_s16(0xff);
+ uint16x8_t half = vdupq_n_u16(0x80);
+ uint16x8_t full = vdupq_n_u16(0xff);
if (const_alpha == 256) {
for (int y = 0; y < h; ++y) {
int x = 0;
for (; x < w-3; x += 4) {
- int32x4_t src32 = vld1q_s32((int32_t *)&src[x]);
+ uint32x4_t src32 = vld1q_u32((uint32_t *)&src[x]);
if ((src[x] & src[x+1] & src[x+2] & src[x+3]) >= 0xff000000) {
// all opaque
- vst1q_s32((int32_t *)&dst[x], src32);
+ vst1q_u32((uint32_t *)&dst[x], src32);
} else if (src[x] | src[x+1] | src[x+2] | src[x+3]) {
- int32x4_t dst32 = vld1q_s32((int32_t *)&dst[x]);
+ uint32x4_t dst32 = vld1q_u32((uint32_t *)&dst[x]);
- const uint8x16_t src8 = vreinterpretq_u8_s32(src32);
- const uint8x16_t dst8 = vreinterpretq_u8_s32(dst32);
+ const uint8x16_t src8 = vreinterpretq_u8_u32(src32);
+ const uint8x16_t dst8 = vreinterpretq_u8_u32(dst32);
const uint8x8_t src8_low = vget_low_u8(src8);
const uint8x8_t dst8_low = vget_low_u8(dst8);
@@ -116,19 +116,19 @@ void qt_blend_argb32_on_argb32_neon(uchar *destPixels, int dbpl,
const uint8x8_t src8_high = vget_high_u8(src8);
const uint8x8_t dst8_high = vget_high_u8(dst8);
- const int16x8_t src16_low = vreinterpretq_s16_u16(vmovl_u8(src8_low));
- const int16x8_t dst16_low = vreinterpretq_s16_u16(vmovl_u8(dst8_low));
+ const uint16x8_t src16_low = vmovl_u8(src8_low);
+ const uint16x8_t dst16_low = vmovl_u8(dst8_low);
- const int16x8_t src16_high = vreinterpretq_s16_u16(vmovl_u8(src8_high));
- const int16x8_t dst16_high = vreinterpretq_s16_u16(vmovl_u8(dst8_high));
+ const uint16x8_t src16_high = vmovl_u8(src8_high);
+ const uint16x8_t dst16_high = vmovl_u8(dst8_high);
- const int16x8_t result16_low = qvsource_over_s16(src16_low, dst16_low, half, full);
- const int16x8_t result16_high = qvsource_over_s16(src16_high, dst16_high, half, full);
+ const uint16x8_t result16_low = qvsource_over_u16(src16_low, dst16_low, half, full);
+ const uint16x8_t result16_high = qvsource_over_u16(src16_high, dst16_high, half, full);
- const int32x2_t result32_low = vreinterpret_s32_s8(vmovn_s16(result16_low));
- const int32x2_t result32_high = vreinterpret_s32_s8(vmovn_s16(result16_high));
+ const uint32x2_t result32_low = vreinterpret_u32_u8(vmovn_u16(result16_low));
+ const uint32x2_t result32_high = vreinterpret_u32_u8(vmovn_u16(result16_high));
- vst1q_s32((int32_t *)&dst[x], vcombine_s32(result32_low, result32_high));
+ vst1q_u32((uint32_t *)&dst[x], vcombine_u32(result32_low, result32_high));
}
}
for (; x<w; ++x) {
@@ -143,16 +143,16 @@ void qt_blend_argb32_on_argb32_neon(uchar *destPixels, int dbpl,
}
} else if (const_alpha != 0) {
const_alpha = (const_alpha * 255) >> 8;
- int16x8_t const_alpha16 = vdupq_n_s16(const_alpha);
+ uint16x8_t const_alpha16 = vdupq_n_u16(const_alpha);
for (int y = 0; y < h; ++y) {
int x = 0;
for (; x < w-3; x += 4) {
if (src[x] | src[x+1] | src[x+2] | src[x+3]) {
- int32x4_t src32 = vld1q_s32((int32_t *)&src[x]);
- int32x4_t dst32 = vld1q_s32((int32_t *)&dst[x]);
+ uint32x4_t src32 = vld1q_u32((uint32_t *)&src[x]);
+ uint32x4_t dst32 = vld1q_u32((uint32_t *)&dst[x]);
- const uint8x16_t src8 = vreinterpretq_u8_s32(src32);
- const uint8x16_t dst8 = vreinterpretq_u8_s32(dst32);
+ const uint8x16_t src8 = vreinterpretq_u8_u32(src32);
+ const uint8x16_t dst8 = vreinterpretq_u8_u32(dst32);
const uint8x8_t src8_low = vget_low_u8(src8);
const uint8x8_t dst8_low = vget_low_u8(dst8);
@@ -160,22 +160,22 @@ void qt_blend_argb32_on_argb32_neon(uchar *destPixels, int dbpl,
const uint8x8_t src8_high = vget_high_u8(src8);
const uint8x8_t dst8_high = vget_high_u8(dst8);
- const int16x8_t src16_low = vreinterpretq_s16_u16(vmovl_u8(src8_low));
- const int16x8_t dst16_low = vreinterpretq_s16_u16(vmovl_u8(dst8_low));
+ const uint16x8_t src16_low = vmovl_u8(src8_low);
+ const uint16x8_t dst16_low = vmovl_u8(dst8_low);
- const int16x8_t src16_high = vreinterpretq_s16_u16(vmovl_u8(src8_high));
- const int16x8_t dst16_high = vreinterpretq_s16_u16(vmovl_u8(dst8_high));
+ const uint16x8_t src16_high = vmovl_u8(src8_high);
+ const uint16x8_t dst16_high = vmovl_u8(dst8_high);
- const int16x8_t srcalpha16_low = qvbyte_mul_s16(src16_low, const_alpha16, half);
- const int16x8_t srcalpha16_high = qvbyte_mul_s16(src16_high, const_alpha16, half);
+ const uint16x8_t srcalpha16_low = qvbyte_mul_u16(src16_low, const_alpha16, half);
+ const uint16x8_t srcalpha16_high = qvbyte_mul_u16(src16_high, const_alpha16, half);
- const int16x8_t result16_low = qvsource_over_s16(srcalpha16_low, dst16_low, half, full);
- const int16x8_t result16_high = qvsource_over_s16(srcalpha16_high, dst16_high, half, full);
+ const uint16x8_t result16_low = qvsource_over_u16(srcalpha16_low, dst16_low, half, full);
+ const uint16x8_t result16_high = qvsource_over_u16(srcalpha16_high, dst16_high, half, full);
- const int32x2_t result32_low = vreinterpret_s32_s8(vmovn_s16(result16_low));
- const int32x2_t result32_high = vreinterpret_s32_s8(vmovn_s16(result16_high));
+ const uint32x2_t result32_low = vreinterpret_u32_u8(vmovn_u16(result16_low));
+ const uint32x2_t result32_high = vreinterpret_u32_u8(vmovn_u16(result16_high));
- vst1q_s32((int32_t *)&dst[x], vcombine_s32(result32_low, result32_high));
+ vst1q_u32((uint32_t *)&dst[x], vcombine_u32(result32_low, result32_high));
}
}
for (; x<w; ++x) {
@@ -206,19 +206,19 @@ void qt_blend_rgb32_on_rgb32_neon(uchar *destPixels, int dbpl,
if (const_alpha != 0) {
const uint *src = (const uint *) srcPixels;
uint *dst = (uint *) destPixels;
- int16x8_t half = vdupq_n_s16(0x80);
+ uint16x8_t half = vdupq_n_u16(0x80);
const_alpha = (const_alpha * 255) >> 8;
int one_minus_const_alpha = 255 - const_alpha;
- int16x8_t const_alpha16 = vdupq_n_s16(const_alpha);
- int16x8_t one_minus_const_alpha16 = vdupq_n_s16(255 - const_alpha);
+ uint16x8_t const_alpha16 = vdupq_n_u16(const_alpha);
+ uint16x8_t one_minus_const_alpha16 = vdupq_n_u16(255 - const_alpha);
for (int y = 0; y < h; ++y) {
int x = 0;
for (; x < w-3; x += 4) {
- int32x4_t src32 = vld1q_s32((int32_t *)&src[x]);
- int32x4_t dst32 = vld1q_s32((int32_t *)&dst[x]);
+ uint32x4_t src32 = vld1q_u32((uint32_t *)&src[x]);
+ uint32x4_t dst32 = vld1q_u32((uint32_t *)&dst[x]);
- const uint8x16_t src8 = vreinterpretq_u8_s32(src32);
- const uint8x16_t dst8 = vreinterpretq_u8_s32(dst32);
+ const uint8x16_t src8 = vreinterpretq_u8_u32(src32);
+ const uint8x16_t dst8 = vreinterpretq_u8_u32(dst32);
const uint8x8_t src8_low = vget_low_u8(src8);
const uint8x8_t dst8_low = vget_low_u8(dst8);
@@ -226,19 +226,19 @@ void qt_blend_rgb32_on_rgb32_neon(uchar *destPixels, int dbpl,
const uint8x8_t src8_high = vget_high_u8(src8);
const uint8x8_t dst8_high = vget_high_u8(dst8);
- const int16x8_t src16_low = vreinterpretq_s16_u16(vmovl_u8(src8_low));
- const int16x8_t dst16_low = vreinterpretq_s16_u16(vmovl_u8(dst8_low));
+ const uint16x8_t src16_low = vmovl_u8(src8_low);
+ const uint16x8_t dst16_low = vmovl_u8(dst8_low);
- const int16x8_t src16_high = vreinterpretq_s16_u16(vmovl_u8(src8_high));
- const int16x8_t dst16_high = vreinterpretq_s16_u16(vmovl_u8(dst8_high));
+ const uint16x8_t src16_high = vmovl_u8(src8_high);
+ const uint16x8_t dst16_high = vmovl_u8(dst8_high);
- const int16x8_t result16_low = qvinterpolate_pixel_255(src16_low, const_alpha16, dst16_low, one_minus_const_alpha16, half);
- const int16x8_t result16_high = qvinterpolate_pixel_255(src16_high, const_alpha16, dst16_high, one_minus_const_alpha16, half);
+ const uint16x8_t result16_low = qvinterpolate_pixel_255(src16_low, const_alpha16, dst16_low, one_minus_const_alpha16, half);
+ const uint16x8_t result16_high = qvinterpolate_pixel_255(src16_high, const_alpha16, dst16_high, one_minus_const_alpha16, half);
- const int32x2_t result32_low = vreinterpret_s32_s8(vmovn_s16(result16_low));
- const int32x2_t result32_high = vreinterpret_s32_s8(vmovn_s16(result16_high));
+ const uint32x2_t result32_low = vreinterpret_u32_u8(vmovn_u16(result16_low));
+ const uint32x2_t result32_high = vreinterpret_u32_u8(vmovn_u16(result16_high));
- vst1q_s32((int32_t *)&dst[x], vcombine_s32(result32_low, result32_high));
+ vst1q_u32((uint32_t *)&dst[x], vcombine_u32(result32_low, result32_high));
}
for (; x<w; ++x) {
uint s = src[x];