author     John Brooks <special@dereferenced.net>          2010-07-05 19:17:49 (GMT)
committer  Benjamin Poulain <benjamin.poulain@nokia.com>    2010-07-07 09:57:10 (GMT)
commit     93bcbe213e947843184a75f4b237c8dff45ca866 (patch)
tree       01d44325be9f3364d00503751c2462094d412423 /src/gui/painting
parent     2769d4b72675e62c441fa181609adca25922715a (diff)
Moved primitive SSE2 painting utilities to qdrawingprimitive_sse2_p.h
Merge-request: 725
Reviewed-by: Benjamin Poulain <benjamin.poulain@nokia.com>
Diffstat (limited to 'src/gui/painting')
-rw-r--r--   src/gui/painting/painting.pri                  3
-rw-r--r--   src/gui/painting/qdrawhelper_sse2.cpp        156
-rw-r--r--   src/gui/painting/qdrawingprimitive_sse2_p.h  216
3 files changed, 218 insertions(+), 157 deletions(-)
diff --git a/src/gui/painting/painting.pri b/src/gui/painting/painting.pri
index c207c9d..4023f65 100644
--- a/src/gui/painting/painting.pri
+++ b/src/gui/painting/painting.pri
@@ -205,7 +205,8 @@ x11|embedded {
if(mmx|3dnow|sse|sse2|iwmmxt) {
HEADERS += painting/qdrawhelper_x86_p.h \
painting/qdrawhelper_mmx_p.h \
- painting/qdrawhelper_sse_p.h
+ painting/qdrawhelper_sse_p.h \
+ painting/qdrawingprimitive_sse2_p.h
MMX_SOURCES += painting/qdrawhelper_mmx.cpp
MMX3DNOW_SOURCES += painting/qdrawhelper_mmx3dnow.cpp
SSE3DNOW_SOURCES += painting/qdrawhelper_sse3dnow.cpp
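
With qdrawingprimitive_sse2_p.h registered in painting.pri, other translation units in src/gui/painting that are compiled with SSE2 support can share these macros instead of keeping private copies. A minimal sketch of such a hypothetical user is shown below; the include of <emmintrin.h> for the SSE2 intrinsics is an assumption, since the header added by this commit does not include it itself. A fuller usage sketch follows the macro definitions in the new header further down.

    // Hypothetical consumer of the moved header -- not added by this commit.
    #include <emmintrin.h>                   // SSE2 intrinsics (__m128i, _mm_* functions)
    #include "qdrawingprimitive_sse2_p.h"    // the private header introduced below
    // The source using this must be built with SSE2 compiler flags, as the
    // existing qdrawhelper_sse2.cpp is.
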
diff --git a/src/gui/painting/qdrawhelper_sse2.cpp b/src/gui/painting/qdrawhelper_sse2.cpp
index 6cd8688..ae16fed 100644
--- a/src/gui/painting/qdrawhelper_sse2.cpp
+++ b/src/gui/painting/qdrawhelper_sse2.cpp
@@ -57,162 +57,6 @@
QT_BEGIN_NAMESPACE
-/*
- * Multiply the components of pixelVector by alphaChannel
- * Each 32-bit component of alphaChannel must be in the form 0x00AA00AA
- * colorMask must have 0x00ff00ff in each 32-bit component
- * half must have the value 128 (0x80) in each 16-bit component
- */
-#define BYTE_MUL_SSE2(result, pixelVector, alphaChannel, colorMask, half) \
-{ \
-    /* 1. separate the colors into 2 vectors so each color is on 16 bits \
-       (in order to be multiplied by the alpha); \
-       each 32 bits of pixelVectorAG is in the form 0x00AA00GG, \
-       each 32 bits of pixelVectorRB is in the form 0x00RR00BB */\
- __m128i pixelVectorAG = _mm_srli_epi16(pixelVector, 8); \
- __m128i pixelVectorRB = _mm_and_si128(pixelVector, colorMask); \
- \
- /* 2. multiply the vectors by the alpha channel */\
- pixelVectorAG = _mm_mullo_epi16(pixelVectorAG, alphaChannel); \
- pixelVectorRB = _mm_mullo_epi16(pixelVectorRB, alphaChannel); \
- \
-    /* 3. divide by 255; that's the tricky part. \
-       We do it as in BYTE_MUL(), with bit shifts: X/255 ~= (X + X/256 + rounding)/256 */ \
-    /* so first compute (X + X/256 + rounding) */\
- pixelVectorRB = _mm_add_epi16(pixelVectorRB, _mm_srli_epi16(pixelVectorRB, 8)); \
- pixelVectorRB = _mm_add_epi16(pixelVectorRB, half); \
- pixelVectorAG = _mm_add_epi16(pixelVectorAG, _mm_srli_epi16(pixelVectorAG, 8)); \
- pixelVectorAG = _mm_add_epi16(pixelVectorAG, half); \
- \
-    /* then divide by 256 */\
- pixelVectorRB = _mm_srli_epi16(pixelVectorRB, 8); \
- /** for AG, we could >> 8 to divide followed by << 8 to put the \
- bytes in the correct position. By masking instead, we execute \
- only one instruction */\
- pixelVectorAG = _mm_andnot_si128(colorMask, pixelVectorAG); \
- \
- /* 4. combine the 2 pairs of colors */ \
- result = _mm_or_si128(pixelVectorAG, pixelVectorRB); \
-}
-
-/*
- * Each 32-bit component of alphaChannel must be in the form 0x00AA00AA
- * oneMinusAlphaChannel must be 255 - alpha in each 32-bit component
- * colorMask must have 0x00ff00ff in each 32-bit component
- * half must have the value 128 (0x80) in each 16-bit component
- */
-#define INTERPOLATE_PIXEL_255_SSE2(result, srcVector, dstVector, alphaChannel, oneMinusAlphaChannel, colorMask, half) { \
- /* interpolate AG */\
- __m128i srcVectorAG = _mm_srli_epi16(srcVector, 8); \
- __m128i dstVectorAG = _mm_srli_epi16(dstVector, 8); \
- __m128i srcVectorAGalpha = _mm_mullo_epi16(srcVectorAG, alphaChannel); \
- __m128i dstVectorAGoneMinusAlphalpha = _mm_mullo_epi16(dstVectorAG, oneMinusAlphaChannel); \
- __m128i finalAG = _mm_add_epi16(srcVectorAGalpha, dstVectorAGoneMinusAlphalpha); \
- finalAG = _mm_add_epi16(finalAG, _mm_srli_epi16(finalAG, 8)); \
- finalAG = _mm_add_epi16(finalAG, half); \
- finalAG = _mm_andnot_si128(colorMask, finalAG); \
- \
- /* interpolate RB */\
- __m128i srcVectorRB = _mm_and_si128(srcVector, colorMask); \
- __m128i dstVectorRB = _mm_and_si128(dstVector, colorMask); \
- __m128i srcVectorRBalpha = _mm_mullo_epi16(srcVectorRB, alphaChannel); \
- __m128i dstVectorRBoneMinusAlphalpha = _mm_mullo_epi16(dstVectorRB, oneMinusAlphaChannel); \
- __m128i finalRB = _mm_add_epi16(srcVectorRBalpha, dstVectorRBoneMinusAlphalpha); \
- finalRB = _mm_add_epi16(finalRB, _mm_srli_epi16(finalRB, 8)); \
- finalRB = _mm_add_epi16(finalRB, half); \
- finalRB = _mm_srli_epi16(finalRB, 8); \
- \
- /* combine */\
- result = _mm_or_si128(finalAG, finalRB); \
-}
-
-// Blend src over dst (premultiplied ARGB32).
-// nullVector, half, one, colorMask and alphaMask are constant across the whole image/texture, and should be defined as:
-//const __m128i nullVector = _mm_set1_epi32(0);
-//const __m128i half = _mm_set1_epi16(0x80);
-//const __m128i one = _mm_set1_epi16(0xff);
-//const __m128i colorMask = _mm_set1_epi32(0x00ff00ff);
-//const __m128i alphaMask = _mm_set1_epi32(0xff000000);
-//
-// The computation being done is:
-// result = s + d * (1-alpha)
-// with shortcuts if fully opaque or fully transparent.
-#define BLEND_SOURCE_OVER_ARGB32_SSE2(dst, src, length, nullVector, half, one, colorMask, alphaMask) { \
- int x = 0; \
- for (; x < length-3; x += 4) { \
- const __m128i srcVector = _mm_loadu_si128((__m128i *)&src[x]); \
- const __m128i srcVectorAlpha = _mm_and_si128(srcVector, alphaMask); \
- if (_mm_movemask_epi8(_mm_cmpeq_epi32(srcVectorAlpha, alphaMask)) == 0xffff) { \
- /* all opaque */ \
- _mm_storeu_si128((__m128i *)&dst[x], srcVector); \
- } else if (_mm_movemask_epi8(_mm_cmpeq_epi32(srcVectorAlpha, nullVector)) != 0xffff) { \
- /* not fully transparent */ \
- /* extract the alpha channel on 2 x 16 bits */ \
- /* so we have room for the multiplication */ \
- /* each 32 bits will be in the form 0x00AA00AA */ \
- /* with A being the 1 - alpha */ \
- __m128i alphaChannel = _mm_srli_epi32(srcVector, 24); \
- alphaChannel = _mm_or_si128(alphaChannel, _mm_slli_epi32(alphaChannel, 16)); \
- alphaChannel = _mm_sub_epi16(one, alphaChannel); \
- \
- const __m128i dstVector = _mm_loadu_si128((__m128i *)&dst[x]); \
- __m128i destMultipliedByOneMinusAlpha; \
- BYTE_MUL_SSE2(destMultipliedByOneMinusAlpha, dstVector, alphaChannel, colorMask, half); \
- \
- /* result = s + d * (1-alpha) */\
- const __m128i result = _mm_add_epi8(srcVector, destMultipliedByOneMinusAlpha); \
- _mm_storeu_si128((__m128i *)&dst[x], result); \
- } \
- } \
- for (; x < length; ++x) { \
- uint s = src[x]; \
- if (s >= 0xff000000) \
- dst[x] = s; \
- else if (s != 0) \
- dst[x] = s + BYTE_MUL(dst[x], qAlpha(~s)); \
- } \
-}
-
-// Blend src over dst with the constant alpha defined as constAlphaVector.
-// nullVector, half, one, colorMask are constant across the whole image/texture, and should be defined as:
-//const __m128i nullVector = _mm_set1_epi32(0);
-//const __m128i half = _mm_set1_epi16(0x80);
-//const __m128i one = _mm_set1_epi16(0xff);
-//const __m128i colorMask = _mm_set1_epi32(0x00ff00ff);
-//
-// The computation being done is:
-// dest = (s + d * sia) * ca + d * cia
-// = s * ca + d * (sia * ca + cia)
-// = s * ca + d * (1 - sa*ca)
-#define BLEND_SOURCE_OVER_ARGB32_WITH_CONST_ALPHA_SSE2(dst, src, length, nullVector, half, one, colorMask, constAlphaVector) \
-{ \
- int x = 0; \
- for (; x < length-3; x += 4) { \
- __m128i srcVector = _mm_loadu_si128((__m128i *)&src[x]); \
- if (_mm_movemask_epi8(_mm_cmpeq_epi32(srcVector, nullVector)) != 0xffff) { \
- BYTE_MUL_SSE2(srcVector, srcVector, constAlphaVector, colorMask, half); \
-\
- __m128i alphaChannel = _mm_srli_epi32(srcVector, 24); \
- alphaChannel = _mm_or_si128(alphaChannel, _mm_slli_epi32(alphaChannel, 16)); \
- alphaChannel = _mm_sub_epi16(one, alphaChannel); \
- \
- const __m128i dstVector = _mm_loadu_si128((__m128i *)&dst[x]); \
- __m128i destMultipliedByOneMinusAlpha; \
- BYTE_MUL_SSE2(destMultipliedByOneMinusAlpha, dstVector, alphaChannel, colorMask, half); \
- \
- const __m128i result = _mm_add_epi8(srcVector, destMultipliedByOneMinusAlpha); \
- _mm_storeu_si128((__m128i *)&dst[x], result); \
- } \
- } \
- for (; x < length; ++x) { \
- quint32 s = src[x]; \
- if (s != 0) { \
- s = BYTE_MUL(s, const_alpha); \
- dst[x] = s + BYTE_MUL(dst[x], qAlpha(~s)); \
- } \
- } \
-}
-
void qt_blend_argb32_on_argb32_sse2(uchar *destPixels, int dbpl,
const uchar *srcPixels, int sbpl,
int w, int h,
diff --git a/src/gui/painting/qdrawingprimitive_sse2_p.h b/src/gui/painting/qdrawingprimitive_sse2_p.h
new file mode 100644
index 0000000..2b595c5
--- /dev/null
+++ b/src/gui/painting/qdrawingprimitive_sse2_p.h
@@ -0,0 +1,216 @@
+/****************************************************************************
+**
+** Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies).
+** All rights reserved.
+** Contact: Nokia Corporation (qt-info@nokia.com)
+**
+** This file is part of the QtGui module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** No Commercial Usage
+** This file contains pre-release code and may not be distributed.
+** You may use this file in accordance with the terms and conditions
+** contained in the Technology Preview License Agreement accompanying
+** this package.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Nokia gives you certain additional
+** rights. These rights are described in the Nokia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** If you have questions regarding the use of this file, please contact
+** Nokia at qt-info@nokia.com.
+**
+**
+**
+**
+**
+**
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+#ifndef QDRAWINGPRIMITIVE_SSE2_P_H
+#define QDRAWINGPRIMITIVE_SSE2_P_H
+
+//
+// W A R N I N G
+// -------------
+//
+// This file is not part of the Qt API. It exists purely as an
+// implementation detail. This header file may change from version to
+// version without notice, or even be removed.
+//
+// We mean it.
+//
+
+QT_BEGIN_NAMESPACE
+
+/*
+ * Multiply the components of pixelVector by alphaChannel
+ * Each 32-bit component of alphaChannel must be in the form 0x00AA00AA
+ * colorMask must have 0x00ff00ff in each 32-bit component
+ * half must have the value 128 (0x80) in each 16-bit component
+ */
+#define BYTE_MUL_SSE2(result, pixelVector, alphaChannel, colorMask, half) \
+{ \
+    /* 1. separate the colors into 2 vectors so each color is on 16 bits \
+       (in order to be multiplied by the alpha); \
+       each 32 bits of pixelVectorAG is in the form 0x00AA00GG, \
+       each 32 bits of pixelVectorRB is in the form 0x00RR00BB */\
+ __m128i pixelVectorAG = _mm_srli_epi16(pixelVector, 8); \
+ __m128i pixelVectorRB = _mm_and_si128(pixelVector, colorMask); \
+ \
+ /* 2. multiply the vectors by the alpha channel */\
+ pixelVectorAG = _mm_mullo_epi16(pixelVectorAG, alphaChannel); \
+ pixelVectorRB = _mm_mullo_epi16(pixelVectorRB, alphaChannel); \
+ \
+    /* 3. divide by 255; that's the tricky part. \
+       We do it as in BYTE_MUL(), with bit shifts: X/255 ~= (X + X/256 + rounding)/256 */ \
+    /* so first compute (X + X/256 + rounding) */\
+ pixelVectorRB = _mm_add_epi16(pixelVectorRB, _mm_srli_epi16(pixelVectorRB, 8)); \
+ pixelVectorRB = _mm_add_epi16(pixelVectorRB, half); \
+ pixelVectorAG = _mm_add_epi16(pixelVectorAG, _mm_srli_epi16(pixelVectorAG, 8)); \
+ pixelVectorAG = _mm_add_epi16(pixelVectorAG, half); \
+ \
+    /* then divide by 256 */\
+ pixelVectorRB = _mm_srli_epi16(pixelVectorRB, 8); \
+ /** for AG, we could >> 8 to divide followed by << 8 to put the \
+ bytes in the correct position. By masking instead, we execute \
+ only one instruction */\
+ pixelVectorAG = _mm_andnot_si128(colorMask, pixelVectorAG); \
+ \
+ /* 4. combine the 2 pairs of colors */ \
+ result = _mm_or_si128(pixelVectorAG, pixelVectorRB); \
+}
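
As a plain-C++ reference for the rounding trick the macro documents (X/255 ~= (X + X/256 + rounding)/256), here is an illustrative scalar sketch of the same computation for one 8-bit channel. The helper name is invented for illustration and does not appear in the commit.

    // Scalar sketch of BYTE_MUL_SSE2's per-channel math (illustrative only).
    static inline quint32 byteMulScalarSketch(quint32 channel, quint32 alpha)
    {
        quint32 t = channel * alpha;        // at most 255 * 255 = 65025, fits in 16 bits,
                                            // just like each 16-bit lane in the vector code
        t = (t + (t >> 8) + 0x80) >> 8;     // X/255 ~= (X + X/256 + 0x80) / 256
        return t;                           // result is back in the 0..255 range
    }
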
+
+/*
+ * Each 32-bit component of alphaChannel must be in the form 0x00AA00AA
+ * oneMinusAlphaChannel must be 255 - alpha in each 32-bit component
+ * colorMask must have 0x00ff00ff in each 32-bit component
+ * half must have the value 128 (0x80) in each 16-bit component
+ */
+#define INTERPOLATE_PIXEL_255_SSE2(result, srcVector, dstVector, alphaChannel, oneMinusAlphaChannel, colorMask, half) { \
+ /* interpolate AG */\
+ __m128i srcVectorAG = _mm_srli_epi16(srcVector, 8); \
+ __m128i dstVectorAG = _mm_srli_epi16(dstVector, 8); \
+ __m128i srcVectorAGalpha = _mm_mullo_epi16(srcVectorAG, alphaChannel); \
+ __m128i dstVectorAGoneMinusAlphalpha = _mm_mullo_epi16(dstVectorAG, oneMinusAlphaChannel); \
+ __m128i finalAG = _mm_add_epi16(srcVectorAGalpha, dstVectorAGoneMinusAlphalpha); \
+ finalAG = _mm_add_epi16(finalAG, _mm_srli_epi16(finalAG, 8)); \
+ finalAG = _mm_add_epi16(finalAG, half); \
+ finalAG = _mm_andnot_si128(colorMask, finalAG); \
+ \
+ /* interpolate RB */\
+ __m128i srcVectorRB = _mm_and_si128(srcVector, colorMask); \
+ __m128i dstVectorRB = _mm_and_si128(dstVector, colorMask); \
+ __m128i srcVectorRBalpha = _mm_mullo_epi16(srcVectorRB, alphaChannel); \
+ __m128i dstVectorRBoneMinusAlphalpha = _mm_mullo_epi16(dstVectorRB, oneMinusAlphaChannel); \
+ __m128i finalRB = _mm_add_epi16(srcVectorRBalpha, dstVectorRBoneMinusAlphalpha); \
+ finalRB = _mm_add_epi16(finalRB, _mm_srli_epi16(finalRB, 8)); \
+ finalRB = _mm_add_epi16(finalRB, half); \
+ finalRB = _mm_srli_epi16(finalRB, 8); \
+ \
+ /* combine */\
+ result = _mm_or_si128(finalAG, finalRB); \
+}
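
For reference, the per-channel computation this macro vectorises is src*alpha + dst*(255 - alpha), divided by 255 with the same approximation as above. An illustrative scalar sketch follows; the helper name is invented and not part of the commit.

    // Scalar sketch of INTERPOLATE_PIXEL_255_SSE2's per-channel math (illustrative only).
    static inline quint32 interpolate255ScalarSketch(quint32 src, quint32 alpha,
                                                     quint32 dst, quint32 oneMinusAlpha)
    {
        // alpha + oneMinusAlpha == 255, so the sum is at most 255 * 255 = 65025
        // and fits in 16 bits, matching the 16-bit lanes used by the macro.
        quint32 t = src * alpha + dst * oneMinusAlpha;
        return (t + (t >> 8) + 0x80) >> 8;  // approximate division by 255
    }
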
+
+// Blend src over dst (premultiplied ARGB32).
+// nullVector, half, one, colorMask and alphaMask are constant across the whole image/texture, and should be defined as:
+//const __m128i nullVector = _mm_set1_epi32(0);
+//const __m128i half = _mm_set1_epi16(0x80);
+//const __m128i one = _mm_set1_epi16(0xff);
+//const __m128i colorMask = _mm_set1_epi32(0x00ff00ff);
+//const __m128i alphaMask = _mm_set1_epi32(0xff000000);
+//
+// The computation being done is:
+// result = s + d * (1-alpha)
+// with shortcuts if fully opaque or fully transparent.
+#define BLEND_SOURCE_OVER_ARGB32_SSE2(dst, src, length, nullVector, half, one, colorMask, alphaMask) { \
+ int x = 0; \
+ for (; x < length-3; x += 4) { \
+ const __m128i srcVector = _mm_loadu_si128((__m128i *)&src[x]); \
+ const __m128i srcVectorAlpha = _mm_and_si128(srcVector, alphaMask); \
+ if (_mm_movemask_epi8(_mm_cmpeq_epi32(srcVectorAlpha, alphaMask)) == 0xffff) { \
+ /* all opaque */ \
+ _mm_storeu_si128((__m128i *)&dst[x], srcVector); \
+ } else if (_mm_movemask_epi8(_mm_cmpeq_epi32(srcVectorAlpha, nullVector)) != 0xffff) { \
+ /* not fully transparent */ \
+ /* extract the alpha channel on 2 x 16 bits */ \
+ /* so we have room for the multiplication */ \
+ /* each 32 bits will be in the form 0x00AA00AA */ \
+ /* with A being the 1 - alpha */ \
+ __m128i alphaChannel = _mm_srli_epi32(srcVector, 24); \
+ alphaChannel = _mm_or_si128(alphaChannel, _mm_slli_epi32(alphaChannel, 16)); \
+ alphaChannel = _mm_sub_epi16(one, alphaChannel); \
+ \
+ const __m128i dstVector = _mm_loadu_si128((__m128i *)&dst[x]); \
+ __m128i destMultipliedByOneMinusAlpha; \
+ BYTE_MUL_SSE2(destMultipliedByOneMinusAlpha, dstVector, alphaChannel, colorMask, half); \
+ \
+ /* result = s + d * (1-alpha) */\
+ const __m128i result = _mm_add_epi8(srcVector, destMultipliedByOneMinusAlpha); \
+ _mm_storeu_si128((__m128i *)&dst[x], result); \
+ } \
+ } \
+ for (; x < length; ++x) { \
+ uint s = src[x]; \
+ if (s >= 0xff000000) \
+ dst[x] = s; \
+ else if (s != 0) \
+ dst[x] = s + BYTE_MUL(dst[x], qAlpha(~s)); \
+ } \
+}
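
A usage sketch for this macro: the constants are set up exactly as listed in the comment above and one scanline of premultiplied ARGB32 pixels is blended. The wrapper function is hypothetical; in this commit the real caller stays in qdrawhelper_sse2.cpp, and the scalar tail additionally relies on BYTE_MUL() and qAlpha() from the usual drawhelper headers being in scope.

    // Illustrative caller (not part of the patch); assumes <emmintrin.h>,
    // BYTE_MUL() and qAlpha() are available at the expansion site.
    inline void exampleBlendScanlineSse2(quint32 *dst, const quint32 *src, int length)
    {
        const __m128i nullVector = _mm_set1_epi32(0);
        const __m128i half       = _mm_set1_epi16(0x80);
        const __m128i one        = _mm_set1_epi16(0xff);
        const __m128i colorMask  = _mm_set1_epi32(0x00ff00ff);
        const __m128i alphaMask  = _mm_set1_epi32(0xff000000);
        BLEND_SOURCE_OVER_ARGB32_SSE2(dst, src, length, nullVector, half,
                                      one, colorMask, alphaMask);
    }
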
+
+// Blend src over dst with the constant alpha defined as constAlphaVector.
+// nullVector, half, one, colorMask are constant across the whole image/texture, and should be defined as:
+//const __m128i nullVector = _mm_set1_epi32(0);
+//const __m128i half = _mm_set1_epi16(0x80);
+//const __m128i one = _mm_set1_epi16(0xff);
+//const __m128i colorMask = _mm_set1_epi32(0x00ff00ff);
+//
+// The computation being done is:
+// dest = (s + d * sia) * ca + d * cia
+// = s * ca + d * (sia * ca + cia)
+// = s * ca + d * (1 - sa*ca)
+#define BLEND_SOURCE_OVER_ARGB32_WITH_CONST_ALPHA_SSE2(dst, src, length, nullVector, half, one, colorMask, constAlphaVector) \
+{ \
+ int x = 0; \
+ for (; x < length-3; x += 4) { \
+ __m128i srcVector = _mm_loadu_si128((__m128i *)&src[x]); \
+ if (_mm_movemask_epi8(_mm_cmpeq_epi32(srcVector, nullVector)) != 0xffff) { \
+ BYTE_MUL_SSE2(srcVector, srcVector, constAlphaVector, colorMask, half); \
+\
+ __m128i alphaChannel = _mm_srli_epi32(srcVector, 24); \
+ alphaChannel = _mm_or_si128(alphaChannel, _mm_slli_epi32(alphaChannel, 16)); \
+ alphaChannel = _mm_sub_epi16(one, alphaChannel); \
+ \
+ const __m128i dstVector = _mm_loadu_si128((__m128i *)&dst[x]); \
+ __m128i destMultipliedByOneMinusAlpha; \
+ BYTE_MUL_SSE2(destMultipliedByOneMinusAlpha, dstVector, alphaChannel, colorMask, half); \
+ \
+ const __m128i result = _mm_add_epi8(srcVector, destMultipliedByOneMinusAlpha); \
+ _mm_storeu_si128((__m128i *)&dst[x], result); \
+ } \
+ } \
+ for (; x < length; ++x) { \
+ quint32 s = src[x]; \
+ if (s != 0) { \
+ s = BYTE_MUL(s, const_alpha); \
+ dst[x] = s + BYTE_MUL(dst[x], qAlpha(~s)); \
+ } \
+ } \
+}
+
+QT_END_NAMESPACE
+
+#endif // QDRAWINGPRIMITIVE_SSE2_P_H
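
Finally, a usage sketch for BLEND_SOURCE_OVER_ARGB32_WITH_CONST_ALPHA_SSE2. Two details are easy to miss: the scalar tail refers to a variable literally named const_alpha, which is not a macro parameter and must therefore exist at the expansion site, and constAlphaVector has to follow the 0x00AA00AA layout that BYTE_MUL_SSE2 expects, presumably built with _mm_set1_epi16(const_alpha). The wrapper below is hypothetical and not part of the commit.

    // Illustrative caller (not part of the patch); assumes <emmintrin.h>,
    // BYTE_MUL() and qAlpha() are available at the expansion site.
    inline void exampleBlendScanlineConstAlphaSse2(quint32 *dst, const quint32 *src,
                                                   int length, uint const_alpha)
    {
        const __m128i nullVector       = _mm_set1_epi32(0);
        const __m128i half             = _mm_set1_epi16(0x80);
        const __m128i one              = _mm_set1_epi16(0xff);
        const __m128i colorMask        = _mm_set1_epi32(0x00ff00ff);
        // 0x00AA00AA per 32-bit lane, where AA == const_alpha (assumption)
        const __m128i constAlphaVector = _mm_set1_epi16(const_alpha);
        BLEND_SOURCE_OVER_ARGB32_WITH_CONST_ALPHA_SSE2(dst, src, length, nullVector, half,
                                                       one, colorMask, constAlphaVector);
    }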