author     Murray Read <ext-murray.2.read@nokia.com>   2012-05-04 15:06:30 (GMT)
committer  Qt by Nokia <qt-info@nokia.com>              2012-05-07 07:44:53 (GMT)
commit     b889b3c8e83a56526eae04289ab9b1608d268b2a (patch)
tree       7d1b9fd14029e7af9704d354bbe0b4b501cd9135
parent     d281f94c30aa15765c9c1ec0a613311514033076 (diff)
On Symbian, use 16bit OpenGL if not enough GPU memory for 32bit
Use a 32-bit config on Symbian only when the low-memory GPU is not in use. Otherwise, apps that ran with this GPU under Qt 4.7 will not run with Qt 4.8. This is a follow-up to 54613aec3bdac668d198923814873a9e622ad675.

Task-number: ou1cimx1#997217
Change-Id: I945f10c68b40baa10e60b412b03c650d129b1dae
Reviewed-by: Juha Kukkonen <ext-juha.kukkonen@nokia.com>
Reviewed-by: Pasi Pentikäinen <ext-pasi.a.pentikainen@nokia.com>
-rw-r--r--   src/opengl/qgl_egl.cpp | 119
1 file changed, 64 insertions(+), 55 deletions(-)
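For context, here is a minimal application-side sketch, not part of this commit, of the kind of request that eventually reaches qt_eglproperties_set_glformat(). It assumes a plain Qt 4 QGLWidget application; the main() scaffolding is illustrative, while QGLFormat, QGLWidget and QApplication are the real Qt 4 APIs. Buffer sizes left at -1 ("don't care") are the ones this patch resolves differently depending on the GPU.

    // Hypothetical caller; only QGLFormat/QGLWidget/QApplication are real Qt 4 API.
    #include <QApplication>
    #include <QGLWidget>
    #include <QGLFormat>

    int main(int argc, char *argv[])
    {
        QApplication app(argc, argv);

        QGLFormat fmt;               // all buffer sizes default to -1 ("don't care")
        fmt.setAlpha(true);          // ask for an alpha channel, size still -1
        fmt.setDepth(true);          // ask for a depth buffer, size still -1
        fmt.setSampleBuffers(false); // no multisampling requested

        // With this change, the unset (-1) red/green/blue sizes resolve to 8 bits
        // each (a 32-bit config) on Symbian hardware with enough GPU memory, but
        // to 0 on the low-memory BCM2727 GPU, letting EGL sort 16-bit configs first.
        QGLWidget glWidget(fmt);
        glWidget.show();

        return app.exec();
    }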
diff --git a/src/opengl/qgl_egl.cpp b/src/opengl/qgl_egl.cpp
index b373346..c521953 100644
--- a/src/opengl/qgl_egl.cpp
+++ b/src/opengl/qgl_egl.cpp
@@ -50,6 +50,10 @@
#include <QtGui/private/qpixmap_x11_p.h>
#endif
+#if defined(Q_OS_SYMBIAN)
+#include <QtGui/private/qgraphicssystemex_symbian_p.h>
+#endif
+
QT_BEGIN_NAMESPACE
QEglProperties *QGLContextPrivate::extraWindowSurfaceCreationProps = NULL;
@@ -64,64 +68,69 @@ void qt_eglproperties_set_glformat(QEglProperties& eglProperties, const QGLForma
int stencilSize = glFormat.stencilBufferSize();
int sampleCount = glFormat.samples();
+ bool prefer32Bit = false;
#ifdef Q_OS_SYMBIAN
- // on Symbian we prefer 32-bit configs
- if (glFormat.alpha() && alphaSize <= 0)
- alphaSize = 8;
- if (glFormat.depth() && depthSize <= 0)
- depthSize = 24;
- if (glFormat.stencil() && stencilSize <= 0)
- stencilSize = 8;
- if (glFormat.sampleBuffers() && sampleCount <= 0)
- sampleCount = 1;
-
- redSize = redSize > 0 ? redSize : 8;
- greenSize = greenSize > 0 ? greenSize : 8;
- blueSize = blueSize > 0 ? blueSize : 8;
- alphaSize = alphaSize > 0 ? alphaSize : 8;
- depthSize = depthSize > 0 ? depthSize : 24;
- stencilSize = stencilSize > 0 ? stencilSize : 8;
- sampleCount = sampleCount >= 0 ? sampleCount : 4;
-#else
- // QGLFormat uses a magic value of -1 to indicate "don't care", even when a buffer of that
- // type has been requested. So we must check QGLFormat's booleans too if size is -1:
- if (glFormat.alpha() && alphaSize <= 0)
- alphaSize = 1;
- if (glFormat.depth() && depthSize <= 0)
- depthSize = 1;
- if (glFormat.stencil() && stencilSize <= 0)
- stencilSize = 1;
- if (glFormat.sampleBuffers() && sampleCount <= 0)
- sampleCount = 1;
-
- // We want to make sure 16-bit configs are chosen over 32-bit configs as they will provide
- // the best performance. The EGL config selection algorithm is a bit strange in this regard:
- // The selection criteria for EGL_BUFFER_SIZE is "AtLeast", so we can't use it to discard
- // 32-bit configs completely from the selection. So it then comes to the sorting algorithm.
- // The red/green/blue sizes have a sort priority of 3, so they are sorted by first. The sort
- // order is special and described as "by larger _total_ number of color bits.". So EGL will
- // put 32-bit configs in the list before the 16-bit configs. However, the spec also goes on
- // to say "If the requested number of bits in attrib_list for a particular component is 0,
- // then the number of bits for that component is not considered". This part of the spec also
- // seems to imply that setting the red/green/blue bits to zero means none of the components
- // are considered and EGL disregards the entire sorting rule. It then looks to the next
- // highest priority rule, which is EGL_BUFFER_SIZE. Despite the selection criteria being
- // "AtLeast" for EGL_BUFFER_SIZE, it's sort order is "smaller" meaning 16-bit configs are
- // put in the list before 32-bit configs. So, to make sure 16-bit is preffered over 32-bit,
- // we must set the red/green/blue sizes to zero. This has an unfortunate consequence that
- // if the application sets the red/green/blue size to 5/6/5 on the QGLFormat, they will
- // probably get a 32-bit config, even when there's an RGB565 config available. Oh well.
-
- // Now normalize the values so -1 becomes 0
- redSize = redSize > 0 ? redSize : 0;
- greenSize = greenSize > 0 ? greenSize : 0;
- blueSize = blueSize > 0 ? blueSize : 0;
- alphaSize = alphaSize > 0 ? alphaSize : 0;
- depthSize = depthSize > 0 ? depthSize : 0;
- stencilSize = stencilSize > 0 ? stencilSize : 0;
- sampleCount = sampleCount > 0 ? sampleCount : 0;
+ // on Symbian we prefer 32-bit configs, unless we're using the low-memory GPU
+ prefer32Bit = !QSymbianGraphicsSystemEx::hasBCM2727();
#endif
+ if (prefer32Bit) {
+ if (glFormat.alpha() && alphaSize <= 0)
+ alphaSize = 8;
+ if (glFormat.depth() && depthSize <= 0)
+ depthSize = 24;
+ if (glFormat.stencil() && stencilSize <= 0)
+ stencilSize = 8;
+ if (glFormat.sampleBuffers() && sampleCount <= 0)
+ sampleCount = 1;
+
+ redSize = redSize > 0 ? redSize : 8;
+ greenSize = greenSize > 0 ? greenSize : 8;
+ blueSize = blueSize > 0 ? blueSize : 8;
+ alphaSize = alphaSize > 0 ? alphaSize : 8;
+ depthSize = depthSize > 0 ? depthSize : 24;
+ stencilSize = stencilSize > 0 ? stencilSize : 8;
+ sampleCount = sampleCount >= 0 ? sampleCount : 4;
+ } else {
+ // QGLFormat uses a magic value of -1 to indicate "don't care", even when a buffer of that
+ // type has been requested. So we must check QGLFormat's booleans too if size is -1:
+ if (glFormat.alpha() && alphaSize <= 0)
+ alphaSize = 1;
+ if (glFormat.depth() && depthSize <= 0)
+ depthSize = 1;
+ if (glFormat.stencil() && stencilSize <= 0)
+ stencilSize = 1;
+ if (glFormat.sampleBuffers() && sampleCount <= 0)
+ sampleCount = 1;
+
+ // We want to make sure 16-bit configs are chosen over 32-bit configs as they will provide
+ // the best performance. The EGL config selection algorithm is a bit strange in this regard:
+ // The selection criteria for EGL_BUFFER_SIZE is "AtLeast", so we can't use it to discard
+ // 32-bit configs completely from the selection. So it then comes to the sorting algorithm.
+ // The red/green/blue sizes have a sort priority of 3, so they are sorted by first. The sort
+ // order is special and described as "by larger _total_ number of color bits.". So EGL will
+ // put 32-bit configs in the list before the 16-bit configs. However, the spec also goes on
+ // to say "If the requested number of bits in attrib_list for a particular component is 0,
+ // then the number of bits for that component is not considered". This part of the spec also
+ // seems to imply that setting the red/green/blue bits to zero means none of the components
+ // are considered and EGL disregards the entire sorting rule. It then looks to the next
+ // highest priority rule, which is EGL_BUFFER_SIZE. Despite the selection criteria being
+ // "AtLeast" for EGL_BUFFER_SIZE, it's sort order is "smaller" meaning 16-bit configs are
+ // put in the list before 32-bit configs. So, to make sure 16-bit is preffered over 32-bit,
+ // we must set the red/green/blue sizes to zero. This has an unfortunate consequence that
+ // if the application sets the red/green/blue size to 5/6/5 on the QGLFormat, they will
+ // probably get a 32-bit config, even when there's an RGB565 config available. Oh well.
+
+ // Now normalize the values so -1 becomes 0
+ redSize = redSize > 0 ? redSize : 0;
+ greenSize = greenSize > 0 ? greenSize : 0;
+ blueSize = blueSize > 0 ? blueSize : 0;
+ alphaSize = alphaSize > 0 ? alphaSize : 0;
+ depthSize = depthSize > 0 ? depthSize : 0;
+ stencilSize = stencilSize > 0 ? stencilSize : 0;
+ sampleCount = sampleCount > 0 ? sampleCount : 0;
+ }
+
eglProperties.setValue(EGL_RED_SIZE, redSize);
eglProperties.setValue(EGL_GREEN_SIZE, greenSize);
eglProperties.setValue(EGL_BLUE_SIZE, blueSize);
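The long comment block moved into the else branch above is easier to follow with raw EGL calls. The following standalone sketch is illustrative only and not part of the patch; it assumes an EGL 1.4 implementation where EGL_DEFAULT_DISPLAY is usable and both RGB565 and RGBA8888 window configs exist. It demonstrates the trick the comment describes: requesting 0 red/green/blue bits so that the "total color bits" sort rule is skipped and EGL_BUFFER_SIZE, sorted "smaller first", puts 16-bit configs ahead of 32-bit ones.

    // Illustrative only: prefer a 16-bit config via the EGL sort rules.
    #include <EGL/egl.h>
    #include <cstdio>

    int main()
    {
        EGLDisplay dpy = eglGetDisplay(EGL_DEFAULT_DISPLAY);
        eglInitialize(dpy, 0, 0);

        // Components requested as 0 are ignored by the "total color bits" sort
        // rule, so the next rule, EGL_BUFFER_SIZE (sorted "smaller first"),
        // decides the order and an RGB565 config comes back before RGBA8888.
        const EGLint attribs[] = {
            EGL_RED_SIZE,        0,
            EGL_GREEN_SIZE,      0,
            EGL_BLUE_SIZE,       0,
            EGL_SURFACE_TYPE,    EGL_WINDOW_BIT,
            EGL_RENDERABLE_TYPE, EGL_OPENGL_ES2_BIT,
            EGL_NONE
        };

        EGLConfig cfg;
        EGLint numConfigs = 0;
        eglChooseConfig(dpy, attribs, &cfg, 1, &numConfigs);

        EGLint bufferSize = 0;
        if (numConfigs > 0)
            eglGetConfigAttrib(dpy, cfg, EGL_BUFFER_SIZE, &bufferSize);
        std::printf("chosen EGL_BUFFER_SIZE: %d\n", bufferSize);

        eglTerminate(dpy);
        return 0;
    }

This also illustrates the downside noted in the comment: explicitly requesting 5/6/5 would re-enable the color-bits sort rule, and the "AtLeast" match criteria would then likely return a 32-bit config even when an RGB565 one is available.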