author     Benjamin Poulain <benjamin.poulain@nokia.com>   2010-02-15 12:55:34 (GMT)
committer  Benjamin Poulain <benjamin.poulain@nokia.com>   2010-02-15 12:58:32 (GMT)
commit     c1fe9ae25aebc8d1b9c4a7f3e67fa25ecdcbadc8 (patch)
tree       5496852f8afe59d82f7320c8ef6829f95dd14559
parent     81dae1c0f37ed0b9e4ec6bc1febad273391f518e (diff)
Replace the inline blend functions with #define macros
Some compilers do not inline these functions, which is a problem because the number of arguments exceeds the limit for passing SSE types by value, and because an out-of-line call is much slower for such low-level functions.
-rw-r--r--  src/gui/painting/qdrawhelper_sse2.cpp | 127
1 file changed, 60 insertions(+), 67 deletions(-)
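For illustration, here is a minimal standalone sketch (hypothetical names, assuming SSE2 and a C++ compiler; not part of the patch) of the shape change described above: the old helper returned its __m128i result and took several __m128i parameters, all of which must cross a real call boundary when the compiler declines to inline, while the statement macro always expands in place and writes into a caller-supplied variable.

// Minimal sketch with hypothetical names (scaleByAlphaFn, SCALE_BY_ALPHA); not Qt code.
#include <emmintrin.h>   // SSE2 intrinsics
#include <cstdio>

// Old shape: an inline function that returns its vector result. If the compiler
// declines to inline it, every __m128i argument and the return value must travel
// through a real function call.
static inline __m128i scaleByAlphaFn(__m128i px, __m128i alpha,
                                     __m128i colorMask, __m128i half)
{
    __m128i ag = _mm_mullo_epi16(_mm_srli_epi16(px, 8), alpha);
    __m128i rb = _mm_mullo_epi16(_mm_and_si128(px, colorMask), alpha);
    ag = _mm_add_epi16(_mm_add_epi16(ag, _mm_srli_epi16(ag, 8)), half);
    rb = _mm_add_epi16(_mm_add_epi16(rb, _mm_srli_epi16(rb, 8)), half);
    return _mm_or_si128(_mm_andnot_si128(colorMask, ag), _mm_srli_epi16(rb, 8));
}

// New shape: a statement macro that expands in place and writes into a
// caller-supplied variable, so no call boundary can ever appear.
#define SCALE_BY_ALPHA(result, px, alpha, colorMask, half) \
{ \
    __m128i ag = _mm_mullo_epi16(_mm_srli_epi16(px, 8), alpha); \
    __m128i rb = _mm_mullo_epi16(_mm_and_si128(px, colorMask), alpha); \
    ag = _mm_add_epi16(_mm_add_epi16(ag, _mm_srli_epi16(ag, 8)), half); \
    rb = _mm_add_epi16(_mm_add_epi16(rb, _mm_srli_epi16(rb, 8)), half); \
    result = _mm_or_si128(_mm_andnot_si128(colorMask, ag), _mm_srli_epi16(rb, 8)); \
}

int main()
{
    const __m128i colorMask = _mm_set1_epi32(0x00ff00ff);
    const __m128i half = _mm_set1_epi16(0x80);
    const __m128i alpha = _mm_set1_epi16(128);                          // ~50% opacity in every 16-bit lane
    const __m128i px = _mm_set1_epi32(static_cast<int>(0xffc08040u));   // four identical ARGB pixels

    __m128i viaFunction = scaleByAlphaFn(px, alpha, colorMask, half);
    __m128i viaMacro;
    SCALE_BY_ALPHA(viaMacro, px, alpha, colorMask, half);

    unsigned a[4], b[4];
    _mm_storeu_si128(reinterpret_cast<__m128i *>(a), viaFunction);
    _mm_storeu_si128(reinterpret_cast<__m128i *>(b), viaMacro);
    std::printf("function 0x%08x  macro 0x%08x\n", a[0], b[0]);  // both print 0x80604020
    return 0;
}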
diff --git a/src/gui/painting/qdrawhelper_sse2.cpp b/src/gui/painting/qdrawhelper_sse2.cpp
index 1dba914..6ac64d3 100644
--- a/src/gui/painting/qdrawhelper_sse2.cpp
+++ b/src/gui/painting/qdrawhelper_sse2.cpp
@@ -63,36 +63,36 @@ QT_BEGIN_NAMESPACE
* colorMask must have 0x00ff00ff on each 32 bits component
* half must have the value 128 (0x80) for each 32 bits component
*/
-Q_STATIC_INLINE_FUNCTION __m128i BYTE_MUL_SSE2(const __m128i pixelVector, const __m128i alphaChannel, const __m128i colorMask, const __m128i half)
-{
- // 1. separate the colors in 2 vectors so each color is on 16 bits
- // (in order to be multiplied by the alpha
- // each 32 bit of dstVectorAG are in the form 0x00AA00GG
- // each 32 bit of dstVectorRB are in the form 0x00RR00BB
- __m128i pixelVectorAG = _mm_srli_epi16(pixelVector, 8);
- __m128i pixelVectorRB = _mm_and_si128(pixelVector, colorMask);
-
- // 2. multiply the vectors by the alpha channel
- pixelVectorAG = _mm_mullo_epi16(pixelVectorAG, alphaChannel);
- pixelVectorRB = _mm_mullo_epi16(pixelVectorRB, alphaChannel);
-
- // 3. devide by 255, that's the tricky part.
- // we do it like for BYTE_MUL(), with bit shift: X/255 ~= (X + X/256 + rounding)/256
- /// so first (X + X/256 + rounding)
- pixelVectorRB = _mm_add_epi16(pixelVectorRB, _mm_srli_epi16(pixelVectorRB, 8));
- pixelVectorRB = _mm_add_epi16(pixelVectorRB, half);
- pixelVectorAG = _mm_add_epi16(pixelVectorAG, _mm_srli_epi16(pixelVectorAG, 8));
- pixelVectorAG = _mm_add_epi16(pixelVectorAG, half);
-
- /// second devide by 256
- pixelVectorRB = _mm_srli_epi16(pixelVectorRB, 8);
- /// for AG, we could >> 8 to divide followed by << 8 to put the
- /// bytes in the correct position. By masking instead, we execute
- /// only one instruction
- pixelVectorAG = _mm_andnot_si128(colorMask, pixelVectorAG);
-
- // 4. combine the 2 pairs of colors
- return _mm_or_si128(pixelVectorAG, pixelVectorRB);
+#define BYTE_MUL_SSE2(result, pixelVector, alphaChannel, colorMask, half) \
+{ \
+ /* 1. separate the colors in 2 vectors so each color is on 16 bits \
+ (in order to be multiplied by the alpha) \
+ each 32-bit lane of pixelVectorAG has the form 0x00AA00GG \
+ each 32-bit lane of pixelVectorRB has the form 0x00RR00BB */\
+ __m128i pixelVectorAG = _mm_srli_epi16(pixelVector, 8); \
+ __m128i pixelVectorRB = _mm_and_si128(pixelVector, colorMask); \
+ \
+ /* 2. multiply the vectors by the alpha channel */\
+ pixelVectorAG = _mm_mullo_epi16(pixelVectorAG, alphaChannel); \
+ pixelVectorRB = _mm_mullo_epi16(pixelVectorRB, alphaChannel); \
+ \
+ /* 3. divide by 255, that's the tricky part. \
+ we do it like for BYTE_MUL(), with bit shift: X/255 ~= (X + X/256 + rounding)/256 */ \
+ /** so first (X + X/256 + rounding) */\
+ pixelVectorRB = _mm_add_epi16(pixelVectorRB, _mm_srli_epi16(pixelVectorRB, 8)); \
+ pixelVectorRB = _mm_add_epi16(pixelVectorRB, half); \
+ pixelVectorAG = _mm_add_epi16(pixelVectorAG, _mm_srli_epi16(pixelVectorAG, 8)); \
+ pixelVectorAG = _mm_add_epi16(pixelVectorAG, half); \
+ \
+ /** second, divide by 256 */\
+ pixelVectorRB = _mm_srli_epi16(pixelVectorRB, 8); \
+ /** for AG, we could >> 8 to divide followed by << 8 to put the \
+ bytes in the correct position. By masking instead, we execute \
+ only one instruction */\
+ pixelVectorAG = _mm_andnot_si128(colorMask, pixelVectorAG); \
+ \
+ /* 4. combine the 2 pairs of colors */ \
+ result = _mm_or_si128(pixelVectorAG, pixelVectorRB); \
}
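/* Aside (not part of this commit): step 3 above relies on the scalar identity
 * X/255 ~= (X + X/256 + 0x80)/256, applied to every 16-bit lane. A standalone
 * sketch, to be compiled on its own rather than inside this file, that compares
 * the shift form against the exactly rounded quotient for every byte*alpha product:
 */
#include <cstdio>
int main()
{
    int mismatches = 0, largestError = 0;
    for (int c = 0; c <= 255; ++c) {
        for (int a = 0; a <= 255; ++a) {
            int x = c * a;                            // at most 255*255, fits in 16 bits
            int approx = (x + (x >> 8) + 0x80) >> 8;  // the shift-based division used above
            int exact = (x + 127) / 255;              // x/255 rounded to nearest
            int error = approx > exact ? approx - exact : exact - approx;
            if (error) {
                ++mismatches;
                if (error > largestError)
                    largestError = error;
            }
        }
    }
    std::printf("%d of 65536 products differ from exact rounding, largest error %d\n",
                mismatches, largestError);
    return 0;
}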
/*
@@ -101,34 +101,29 @@ Q_STATIC_INLINE_FUNCTION __m128i BYTE_MUL_SSE2(const __m128i pixelVector, const
* colorMask must have 0x00ff00ff on each 32 bits component
* half must have the value 128 (0x80) for each 32 bits component
*/
-Q_STATIC_INLINE_FUNCTION __m128i INTERPOLATE_PIXEL_255_SSE2(const __m128i srcVector,
- const __m128i dstVector,
- const __m128i alphaChannel,
- const __m128i oneMinusAlphaChannel ,
- const __m128i colorMask,
- const __m128i half) {
- // interpolate AG
- __m128i srcVectorAG = _mm_srli_epi16(srcVector, 8);
- __m128i dstVectorAG = _mm_srli_epi16(dstVector, 8);
- __m128i srcVectorAGalpha = _mm_mullo_epi16(srcVectorAG, alphaChannel);
- __m128i dstVectorAGoneMinusAlphalpha = _mm_mullo_epi16(dstVectorAG, oneMinusAlphaChannel);
- __m128i finalAG = _mm_add_epi16(srcVectorAGalpha, dstVectorAGoneMinusAlphalpha);
- finalAG = _mm_add_epi16(finalAG, _mm_srli_epi16(finalAG, 8));
- finalAG = _mm_add_epi16(finalAG, half);
- finalAG = _mm_andnot_si128(colorMask, finalAG);
-
- // interpolate RB
- __m128i srcVectorRB = _mm_and_si128(srcVector, colorMask);
- __m128i dstVectorRB = _mm_and_si128(dstVector, colorMask);
- __m128i srcVectorRBalpha = _mm_mullo_epi16(srcVectorRB, alphaChannel);
- __m128i dstVectorRBoneMinusAlphalpha = _mm_mullo_epi16(dstVectorRB, oneMinusAlphaChannel);
- __m128i finalRB = _mm_add_epi16(srcVectorRBalpha, dstVectorRBoneMinusAlphalpha);
- finalRB = _mm_add_epi16(finalRB, _mm_srli_epi16(finalRB, 8));
- finalRB = _mm_add_epi16(finalRB, half);
- finalRB = _mm_srli_epi16(finalRB, 8);
-
- // combine
- return _mm_or_si128(finalAG, finalRB);
+#define INTERPOLATE_PIXEL_255_SSE2(result, srcVector, dstVector, alphaChannel, oneMinusAlphaChannel, colorMask, half) { \
+ /* interpolate AG */\
+ __m128i srcVectorAG = _mm_srli_epi16(srcVector, 8); \
+ __m128i dstVectorAG = _mm_srli_epi16(dstVector, 8); \
+ __m128i srcVectorAGalpha = _mm_mullo_epi16(srcVectorAG, alphaChannel); \
+ __m128i dstVectorAGoneMinusAlphalpha = _mm_mullo_epi16(dstVectorAG, oneMinusAlphaChannel); \
+ __m128i finalAG = _mm_add_epi16(srcVectorAGalpha, dstVectorAGoneMinusAlphalpha); \
+ finalAG = _mm_add_epi16(finalAG, _mm_srli_epi16(finalAG, 8)); \
+ finalAG = _mm_add_epi16(finalAG, half); \
+ finalAG = _mm_andnot_si128(colorMask, finalAG); \
+ \
+ /* interpolate RB */\
+ __m128i srcVectorRB = _mm_and_si128(srcVector, colorMask); \
+ __m128i dstVectorRB = _mm_and_si128(dstVector, colorMask); \
+ __m128i srcVectorRBalpha = _mm_mullo_epi16(srcVectorRB, alphaChannel); \
+ __m128i dstVectorRBoneMinusAlphalpha = _mm_mullo_epi16(dstVectorRB, oneMinusAlphaChannel); \
+ __m128i finalRB = _mm_add_epi16(srcVectorRBalpha, dstVectorRBoneMinusAlphalpha); \
+ finalRB = _mm_add_epi16(finalRB, _mm_srli_epi16(finalRB, 8)); \
+ finalRB = _mm_add_epi16(finalRB, half); \
+ finalRB = _mm_srli_epi16(finalRB, 8); \
+ \
+ /* combine */\
+ result = _mm_or_si128(finalAG, finalRB); \
}
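/* Aside (not part of this commit): INTERPOLATE_PIXEL_255_SSE2 is the vector form
 * of blending a channel as (src*alpha + dst*(255 - alpha)) / 255, using the same
 * shift-based division as BYTE_MUL_SSE2. A standalone scalar sketch for a single
 * 8-bit channel, with a hypothetical helper name, to be compiled on its own:
 */
#include <cstdio>
static unsigned interpolateChannel255(unsigned src, unsigned dst, unsigned alpha)
{
    unsigned x = src * alpha + dst * (255 - alpha); // at most 255*255, fits in 16 bits
    x += x >> 8;                                    // X + X/256
    x += 0x80;                                      // rounding term
    return x >> 8;                                  // divide by 256, completing the ~X/255
}
int main()
{
    // Blend a 0xC0 source channel over a 0x40 destination channel at alpha 128.
    std::printf("0x%02x\n", interpolateChannel255(0xC0, 0x40, 128)); // prints 0x80
    return 0;
}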
void qt_blend_argb32_on_argb32_sse2(uchar *destPixels, int dbpl,
@@ -165,7 +160,8 @@ void qt_blend_argb32_on_argb32_sse2(uchar *destPixels, int dbpl,
alphaChannel = _mm_sub_epi16(one, alphaChannel);
const __m128i dstVector = _mm_loadu_si128((__m128i *)&dst[x]);
- const __m128i destMultipliedByOneMinusAlpha = BYTE_MUL_SSE2(dstVector, alphaChannel, colorMask, half);
+ __m128i destMultipliedByOneMinusAlpha;
+ BYTE_MUL_SSE2(destMultipliedByOneMinusAlpha, dstVector, alphaChannel, colorMask, half);
// result = s + d * (1-alpha)
const __m128i result = _mm_add_epi8(srcVector, destMultipliedByOneMinusAlpha);
@@ -197,14 +193,15 @@ void qt_blend_argb32_on_argb32_sse2(uchar *destPixels, int dbpl,
for (; x < w-3; x += 4) {
__m128i srcVector = _mm_loadu_si128((__m128i *)&src[x]);
if (_mm_movemask_epi8(_mm_cmpeq_epi32(srcVector, nullVector)) != 0xffff) {
- srcVector = BYTE_MUL_SSE2(srcVector, constAlphaVector, colorMask, half);
+ BYTE_MUL_SSE2(srcVector, srcVector, constAlphaVector, colorMask, half);
__m128i alphaChannel = _mm_srli_epi32(srcVector, 24);
alphaChannel = _mm_or_si128(alphaChannel, _mm_slli_epi32(alphaChannel, 16));
alphaChannel = _mm_sub_epi16(one, alphaChannel);
const __m128i dstVector = _mm_loadu_si128((__m128i *)&dst[x]);
- const __m128i destMultipliedByOneMinusAlpha = BYTE_MUL_SSE2(dstVector, alphaChannel, colorMask, half);
+ __m128i destMultipliedByOneMinusAlpha;
+ BYTE_MUL_SSE2(destMultipliedByOneMinusAlpha, dstVector, alphaChannel, colorMask, half);
const __m128i result = _mm_add_epi8(srcVector, destMultipliedByOneMinusAlpha);
_mm_storeu_si128((__m128i *)&dst[x], result);
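/* Aside (not part of this commit): in the hunk above, the first BYTE_MUL_SSE2 call
 * passes srcVector both as the macro's result and as its pixel input. That is safe
 * because the macro reads all of its inputs before its final statement assigns the
 * result. A tiny standalone illustration of the same pattern, with a hypothetical macro:
 */
#include <cstdio>
#define SCALE_IN_PLACE(result, input, factor) \
{ \
    int scaled = (input) * (factor); /* the input is read first */ \
    result = scaled;                 /* the result is written last, so it may alias the input */ \
}
int main()
{
    int value = 21;
    SCALE_IN_PLACE(value, value, 2); // result and input are the same variable
    std::printf("%d\n", value);      // prints 42
    return 0;
}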
@@ -252,12 +249,8 @@ void qt_blend_rgb32_on_rgb32_sse2(uchar *destPixels, int dbpl,
__m128i srcVector = _mm_loadu_si128((__m128i *)&src[x]);
if (_mm_movemask_epi8(_mm_cmpeq_epi32(srcVector, nullVector)) != 0xffff) {
const __m128i dstVector = _mm_loadu_si128((__m128i *)&dst[x]);
- const __m128i result = INTERPOLATE_PIXEL_255_SSE2(srcVector,
- dstVector,
- constAlphaVector,
- oneMinusConstAlpha,
- colorMask,
- half);
+ __m128i result;
+ INTERPOLATE_PIXEL_255_SSE2(result, srcVector, dstVector, constAlphaVector, oneMinusConstAlpha, colorMask, half);
_mm_storeu_si128((__m128i *)&dst[x], result);
}
}
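One detail the patch leaves untouched in both loops is the fast path on the source pixels: a group of four pixels is only blended when at least one of them differs from nullVector, detected with _mm_cmpeq_epi32 followed by _mm_movemask_epi8. A minimal standalone sketch of that test (hypothetical function name, assuming SSE2 and that nullVector is the all-zero vector, as its name suggests):

#include <emmintrin.h>
#include <cstdio>

// Hypothetical helper, not part of the patch: true when all four 32-bit
// pixels in the vector are zero.
static bool allPixelsNull(__m128i srcVector)
{
    const __m128i nullVector = _mm_setzero_si128();
    // cmpeq_epi32 sets a 32-bit lane to all ones when it equals zero;
    // movemask_epi8 gathers the top bit of each of the 16 bytes, so the
    // mask is 0xffff exactly when every lane compared equal.
    return _mm_movemask_epi8(_mm_cmpeq_epi32(srcVector, nullVector)) == 0xffff;
}

int main()
{
    const unsigned zeros[4] = { 0, 0, 0, 0 };
    const unsigned mixed[4] = { 0, 0xff336699u, 0, 0 };
    std::printf("%d %d\n",
                allPixelsNull(_mm_loadu_si128(reinterpret_cast<const __m128i *>(zeros))),
                allPixelsNull(_mm_loadu_si128(reinterpret_cast<const __m128i *>(mixed))));
    // prints "1 0"
    return 0;
}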