author     Tom Cooksey <thomas.cooksey@nokia.com>    2010-03-10 09:16:23 (GMT)
committer  Tom Cooksey <thomas.cooksey@nokia.com>    2010-03-10 09:53:07 (GMT)
commit     620a584c20c1aeb059803dfe246597daaf4f4e04 (patch)
tree       fddcd80e795db9c99a56a2e2f0061f79dfd101df
parent     b0eeab376fc9f698a899be6dbc4c1fe6cef02982 (diff)
Make sure 16-bit EGL configs are chosen over 32-bit configs
We want to make sure 16-bit configs are chosen over 32-bit configs as they will provide the best performance. The EGL config selection algorithm is a bit strange in this regard: the selection criteria for EGL_BUFFER_SIZE is "AtLeast", so we can't use it to discard 32-bit configs completely from the selection. So it comes down to the sorting algorithm. The red/green/blue sizes have a sort priority of 3, so they are sorted first. The sort order is special and described as "by larger _total_ number of color bits". So EGL will put 32-bit configs in the list before the 16-bit configs.

However, the spec also goes on to say "If the requested number of bits in attrib_list for a particular component is 0, then the number of bits for that component is not considered". This part of the spec seems to imply that setting the red/green/blue bits to zero means none of the components are considered and EGL disregards the entire sorting rule. It then looks to the next highest priority rule, which is EGL_BUFFER_SIZE. Despite the selection criteria being "AtLeast" for EGL_BUFFER_SIZE, its sort order is "smaller", meaning 16-bit configs are put in the list before 32-bit configs.

So, to make sure 16-bit is preferred over 32-bit, we must set the red/green/blue sizes to zero. This has the unfortunate consequence that if the application sets the red/green/blue size to 5/6/5 on the QGLFormat, it will probably get a 32-bit config, even when an RGB565 config is available. Oh well.

Reviewed-By: TrustMe
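For reference, a minimal standalone sketch of the selection behaviour described above (plain EGL 1.x, independent of Qt; the config array size and the EGL_OPENGL_ES2_BIT renderable type are illustrative assumptions, not something this commit uses):

#include <EGL/egl.h>
#include <stdio.h>

int main(void)
{
    EGLDisplay dpy = eglGetDisplay(EGL_DEFAULT_DISPLAY);
    eglInitialize(dpy, NULL, NULL);

    // Requesting 0 bits for each color component disables the high-priority
    // "larger total number of color bits" sort rule, so the lower-priority
    // EGL_BUFFER_SIZE rule (sort order "smaller") decides, and a 16-bit
    // config is returned ahead of a 32-bit one.
    static const EGLint attribs[] = {
        EGL_RED_SIZE,        0,
        EGL_GREEN_SIZE,      0,
        EGL_BLUE_SIZE,       0,
        EGL_ALPHA_SIZE,      0,
        EGL_RENDERABLE_TYPE, EGL_OPENGL_ES2_BIT,
        EGL_NONE
    };

    EGLConfig configs[16];
    EGLint count = 0;
    eglChooseConfig(dpy, attribs, configs, 16, &count);

    if (count > 0) {
        EGLint bufferSize = 0;
        eglGetConfigAttrib(dpy, configs[0], EGL_BUFFER_SIZE, &bufferSize);
        printf("first (best) config: %d bits per pixel\n", (int)bufferSize);
    }

    eglTerminate(dpy);
    return 0;
}

Note that the sort rules only reorder what the implementation actually offers: on an EGL implementation that exposes no 16-bit configs, this still reports 32.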
-rw-r--r--   src/opengl/qgl_egl.cpp      83
-rw-r--r--   src/opengl/qgl_x11egl.cpp    4
2 files changed, 55 insertions, 32 deletions
diff --git a/src/opengl/qgl_egl.cpp b/src/opengl/qgl_egl.cpp
index 91b271b..3d146b7 100644
--- a/src/opengl/qgl_egl.cpp
+++ b/src/opengl/qgl_egl.cpp
@@ -53,35 +53,62 @@ QT_BEGIN_NAMESPACE
 void qt_eglproperties_set_glformat(QEglProperties& eglProperties, const QGLFormat& glFormat)
 {
-    // NOTE: QGLFormat uses a magic value of -1 to indicate "don't care", even when a buffer of that
-    // type has been requested.
-    if (glFormat.depth()) {
-        int depthSize = glFormat.depthBufferSize();
-        eglProperties.setValue(EGL_DEPTH_SIZE, depthSize == -1 ? 1 : depthSize);
+    int redSize = glFormat.redBufferSize();
+    int greenSize = glFormat.greenBufferSize();
+    int blueSize = glFormat.blueBufferSize();
+    int alphaSize = glFormat.alphaBufferSize();
+    int depthSize = glFormat.depthBufferSize();
+    int stencilSize = glFormat.stencilBufferSize();
+    int sampleCount = glFormat.samples();
+
+    // QGLFormat uses a magic value of -1 to indicate "don't care", even when a buffer of that
+    // type has been requested. So we must check QGLFormat's booleans too if size is -1:
+    if (glFormat.alpha() && alphaSize <= 0) {
+        qDebug("QGLFormat::alpha() returned true");
+        alphaSize = 1;
     }
-    if (glFormat.stencil()) {
-        int stencilSize = glFormat.stencilBufferSize();
-        eglProperties.setValue(EGL_STENCIL_SIZE, stencilSize == -1 ? 1 : stencilSize);
-    }
-    if (glFormat.sampleBuffers()) {
-        int sampleCount = glFormat.samples();
-        eglProperties.setValue(EGL_SAMPLES, sampleCount == -1 ? 1 : sampleCount);
-        eglProperties.setValue(EGL_SAMPLE_BUFFERS, 1);
-    }
-    if (glFormat.alpha()) {
-        int alphaSize = glFormat.alphaBufferSize();
-        eglProperties.setValue(EGL_ALPHA_SIZE, alphaSize == -1 ? 1 : alphaSize);
-    }
-
-    int redSize = glFormat.redBufferSize();
-    int greenSize = glFormat.greenBufferSize();
-    int blueSize = glFormat.blueBufferSize();
-    int alphaSize = glFormat.alphaBufferSize();
-
-    eglProperties.setValue(EGL_RED_SIZE, redSize > 0 ? redSize : 1);
-    eglProperties.setValue(EGL_GREEN_SIZE, greenSize > 0 ? greenSize : 1);
-    eglProperties.setValue(EGL_BLUE_SIZE, blueSize > 0 ? blueSize : 1);
-    eglProperties.setValue(EGL_ALPHA_SIZE, alphaSize > 0 ? alphaSize : 0);
+    if (glFormat.depth() && depthSize <= 0)
+        depthSize = 1;
+    if (glFormat.stencil() && stencilSize <= 0)
+        stencilSize = 1;
+    if (glFormat.sampleBuffers() && sampleCount <= 0)
+        sampleCount = 1;
+
+    // We want to make sure 16-bit configs are chosen over 32-bit configs as they will provide
+    // the best performance. The EGL config selection algorithm is a bit strange in this regard:
+    // the selection criteria for EGL_BUFFER_SIZE is "AtLeast", so we can't use it to discard
+    // 32-bit configs completely from the selection. So it comes down to the sorting algorithm.
+    // The red/green/blue sizes have a sort priority of 3, so they are sorted first. The sort
+    // order is special and described as "by larger _total_ number of color bits". So EGL will
+    // put 32-bit configs in the list before the 16-bit configs. However, the spec also goes on
+    // to say "If the requested number of bits in attrib_list for a particular component is 0,
+    // then the number of bits for that component is not considered". This part of the spec
+    // seems to imply that setting the red/green/blue bits to zero means none of the components
+    // are considered and EGL disregards the entire sorting rule. It then looks to the next
+    // highest priority rule, which is EGL_BUFFER_SIZE. Despite the selection criteria being
+    // "AtLeast" for EGL_BUFFER_SIZE, its sort order is "smaller", meaning 16-bit configs are
+    // put in the list before 32-bit configs. So, to make sure 16-bit is preferred over 32-bit,
+    // we must set the red/green/blue sizes to zero. This has the unfortunate consequence that
+    // if the application sets the red/green/blue size to 5/6/5 on the QGLFormat, it will
+    // probably get a 32-bit config, even when an RGB565 config is available. Oh well.
+
+    // Now normalize the values so -1 becomes 0:
+    redSize = redSize > 0 ? redSize : 0;
+    greenSize = greenSize > 0 ? greenSize : 0;
+    blueSize = blueSize > 0 ? blueSize : 0;
+    alphaSize = alphaSize > 0 ? alphaSize : 0;
+    depthSize = depthSize > 0 ? depthSize : 0;
+    stencilSize = stencilSize > 0 ? stencilSize : 0;
+    sampleCount = sampleCount > 0 ? sampleCount : 0;
+
+    eglProperties.setValue(EGL_RED_SIZE, redSize);
+    eglProperties.setValue(EGL_GREEN_SIZE, greenSize);
+    eglProperties.setValue(EGL_BLUE_SIZE, blueSize);
+    eglProperties.setValue(EGL_ALPHA_SIZE, alphaSize);
+    eglProperties.setValue(EGL_DEPTH_SIZE, depthSize);
+    eglProperties.setValue(EGL_STENCIL_SIZE, stencilSize);
+    eglProperties.setValue(EGL_SAMPLES, sampleCount);
+    eglProperties.setValue(EGL_SAMPLE_BUFFERS, sampleCount ? 1 : 0);
 }
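As an illustration of how the rewritten function behaves (a sketch only: QEglProperties and qt_eglproperties_set_glformat() are private Qt internals, so this compiles only inside Qt's own source tree):

// Default QGLFormat: depth() and stencil() are true, all buffer sizes are -1.
QGLFormat fmt;
QEglProperties props;
qt_eglproperties_set_glformat(props, fmt);
// Resulting request: EGL_RED/GREEN/BLUE/ALPHA_SIZE = 0 (so the "total color
// bits" sort rule is skipped and 16-bit configs sort first), plus
// EGL_DEPTH_SIZE = 1 and EGL_STENCIL_SIZE = 1 from the boolean flags.

// The caveat from the comment above: explicitly requesting 5/6/5 re-enables
// the "larger total color bits" sort, so a 32-bit config will likely win
// even when an RGB565 config exists.
QGLFormat fmt565;
fmt565.setRedBufferSize(5);
fmt565.setGreenBufferSize(6);
fmt565.setBlueBufferSize(5);
QEglProperties props565;
qt_eglproperties_set_glformat(props565, fmt565);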
diff --git a/src/opengl/qgl_x11egl.cpp b/src/opengl/qgl_x11egl.cpp
index 81eb35c..a7c92cf 100644
--- a/src/opengl/qgl_x11egl.cpp
+++ b/src/opengl/qgl_x11egl.cpp
@@ -198,10 +198,6 @@ bool QGLContext::chooseContext(const QGLContext* shareContext)
     configProps.setRenderableType(QEgl::OpenGL);
     qt_eglproperties_set_glformat(configProps, d->glFormat);
-    // Use EGL_BUFFER_SIZE to make sure we prefer a 16-bit config over a 32-bit config
-    if (device()->depth() == 16 && !d->glFormat.alpha())
-        configProps.setValue(EGL_BUFFER_SIZE, 16);
-
     if (!d->eglContext->chooseConfig(configProps, QEgl::BestPixelFormat)) {
         delete d->eglContext;
         d->eglContext = 0;