• Advertisement
Sign in to follow this  

OpenGL glTexSubImage2D() performance

This topic is 2615 days old which is more than the 365 day threshold we allow for new replies. Please post a new topic.

If you intended to correct an error in the post then please contact us.

Recommended Posts

I'm trying to implement a 2D GUI system for my OpenGL test program. I have the following definitions:

/* 32-bit pixel layout: alpha in the top byte, then R, G, B
 * (0xAARRGGBB when read as a uint32_t).  On a little-endian machine the
 * bytes land in memory as B,G,R,A — which is why E_CFMT is GL_BGRA for
 * texture uploads. */
#define E_RMASK 0x00ff0000 /* red channel mask */
#define E_RSHIFT 16 /* red channel shift */
#define E_GMASK 0x0000ff00 /* green channel mask */
#define E_GSHIFT 8 /* green channel shift */
#define E_BMASK 0x000000ff /* blue channel mask */
#define E_BSHIFT 0 /* blue channel shift */
#define E_AMASK 0xff000000 /* alpha channel mask */
#define E_ASHIFT 24 /* alpha channel shift */
#define E_CFMT GL_BGRA /* GL `format` argument used when uploading pixels */

/* Axis-aligned rectangle: origin (x,y) plus size (w,h), in pixels. */
typedef struct {
int32_t x;
int32_t y;
int32_t w;
int32_t h;
} Erect_t;

/* CPU-side 32-bpp image that backs a GL texture. */
typedef struct {
int32_t w; /* width in pixels */
int32_t h; /* height in pixels */
uint8_t bpp; /* bytes per pixel (engineImageCreate always sets 4) */
uint32_t fmt; /* GL upload format, taken from engineState()->Cfmt */
void *d; /* pixel buffer of w*h uint32_t pixels, owned by the image */

/* private members */
bool dirty; /* true when `d` changed since the last GPU upload */
Erect_t clip; /* clip rectangle honoured by engineImageFillRect */
} Eimage_t;

/* A GL texture object paired with the CPU image it mirrors. */
typedef struct {
Eimage_t *img; /* CPU copy; re-uploaded at bind time when dirty */
GLuint id; /* GL texture object name */
} Etexture_t;

/*
 * pixel_blend() - composite `src` over `dst`.
 *
 * Both pixels are packed per the E_* masks above (A in bits 24-31, R in
 * 16-23, G in 8-15, B in 0-7).  The result alpha is the saturated sum of
 * the two alphas; each colour channel is the lerp
 * (src*as + dst*(255-as)) >> 8.
 *
 * NOTE(review): the >> 8 divides by 256 rather than 255, so a fully opaque
 * source comes out one LSB dark (0xff blended at alpha 0xff gives 0xfe).
 * Preserved from the original; divide by 255 if exactness matters.
 */
static inline uint32_t pixel_blend( uint32_t dst, uint32_t src )
{
    uint32_t as = ( src & E_AMASK ) >> E_ASHIFT;
    uint32_t ad = ( dst & E_AMASK ) >> E_ASHIFT;

    /* A fully transparent source contributes nothing. */
    if ( !as ) return dst;

    /* Saturating alpha add (the original relied on a non-standard min()
     * macro; spelled out here so the function is self-contained). */
    uint32_t a = as + ad;
    if ( a > 0x000000ff ) a = 0x000000ff;

#define RGB_MASK ( ~E_AMASK )
#define RB_MASK ( E_RMASK | E_BMASK )
#define AG_MASK ( E_AMASK | E_GMASK )

    /* R and B are lerped in one multiply: each 8-bit product spills into
     * the byte above its channel, so AG_MASK keeps only the high bytes of
     * the two products and the final >> 8 moves them back into place.
     * G is handled the same way on its own (its product spills into the
     * R byte, hence the E_RMASK). */
    uint32_t rb = ( ( ( src & RB_MASK ) * as ) + ( ( dst & RB_MASK ) * ( 0xff - as ) ) ) & AG_MASK;
    uint32_t g = ( ( ( src & E_GMASK ) * as ) + ( ( dst & E_GMASK ) * ( 0xff - as ) ) ) & E_RMASK;

    uint32_t out = ( ( a << E_ASHIFT ) & E_AMASK ) | ( ( ( rb | g ) >> 8 ) & RGB_MASK );

#undef RB_MASK
#undef AG_MASK
#undef RGB_MASK /* BUG FIX: the original leaked RGB_MASK past this function */

    return out;
}

uint32_t engineMapColor( uint8_t r, uint8_t g, uint8_t b, uint8_t a )
return ( ( ( uint32_t )r & 0x000000ff ) << E_RSHIFT ) +
( ( ( uint32_t )g & 0x000000ff ) << E_GSHIFT ) +
( ( ( uint32_t )b & 0x000000ff ) << E_BSHIFT ) +
( ( ( uint32_t )a & 0x000000ff ) << E_ASHIFT );

Eimage_t *engineImageCreate( uint32_t w, uint32_t h )
Eimage_t *ret = ( Eimage_t * )calloc( 1, sizeof( Eimage_t ) );

ret->w = w;
ret->h = h;
ret->bpp = 4;
ret->fmt = engineState()->Cfmt;
ret->d = calloc( 4, w * h );
img->clip.x = 0;
img->clip.y = 0;
img->clip.w = img->w;
img->clip.h = img->h;

return ret;
/*
 * engineImageFillRect() - alpha-blend `color` over every pixel of `r`
 * (clipped against img->clip), or over the whole clip region when r is
 * NULL.  Marks the image dirty so the next bind re-uploads it.
 *
 * NOTE(review): clip.w/clip.h are compared against absolute coordinates,
 * i.e. treated as the right/bottom EDGES of the clip rect.  That holds for
 * images from engineImageCreate (clip = 0,0,w,h) but breaks for a clip
 * rect with a non-zero origin — confirm the intended semantics.
 */
void engineImageFillRect( Eimage_t *img, const Erect_t *r, uint32_t color )
{
    int32_t start_x = min( ( int32_t )img->w, ( r ? max( r->x, img->clip.x ) : 0 ) );
    int32_t start_y = min( ( int32_t )img->h, ( r ? max( r->y, img->clip.y ) : 0 ) );
    int32_t end_x = ( r ? min( ( int32_t )img->clip.w, r->x + r->w ) : ( int32_t )img->clip.w );
    int32_t end_y = ( r ? min( ( int32_t )img->clip.h, r->y + r->h ) : ( int32_t )img->clip.h );
    uint32_t *row = ( uint32_t * )img->d + start_y * img->w;

    /* BUG FIX: the original Duff's-device unrolling entered with
     * i == end_x - start_x and wrote data[start_x + i] BEFORE decrementing,
     * i.e. one pixel past the right edge of the span — and when the clipped
     * width was zero it still executed a full 8-case pass, writing before
     * the start of the row.  A plain loop is correct, handles empty spans,
     * and modern compilers unroll/vectorize it themselves. */
    for ( int32_t j = start_y; j < end_y; j++ ) {
        for ( int32_t i = start_x; i < end_x; i++ )
            row[i] = pixel_blend( row[i], color );
        row += img->w;
    }

    img->dirty = true;
}

Etexture_t *engineTextureCreate( uint32_t w, uint32_t h )
Etexture_t *ret = ( Etexture_t * )calloc( 1, sizeof( Etexture_t ) );
ret->img = engineImageCreate( w, h );

glGenTextures( 1, &ret->id );
glBindTexture( GL_TEXTURE_2D, ret->id );
glTexImage2D( GL_TEXTURE_2D, 0, ret->img->bpp, ret->img->w, ret->img->h, 0, ret->img->fmt, GL_UNSIGNED_BYTE, ret->img->d );

return ret;

/*
 * engineTextureBind() - bind `tex` and, if its CPU image was modified
 * since the last upload, push the whole image to the GPU with
 * glTexSubImage2D.  A NULL tex is a no-op.
 */
void engineTextureBind( Etexture_t *tex )
{
    if ( !tex ) return;

    glBindTexture( GL_TEXTURE_2D, tex->id );
    if ( tex->img->dirty ) {
        /* Full-surface upload.  Consider tracking a dirty sub-rectangle
         * so only the changed region is transferred each frame. */
        glTexSubImage2D( GL_TEXTURE_2D, 0, 0, 0, tex->img->w, tex->img->h, tex->img->fmt, GL_UNSIGNED_BYTE, tex->img->d );
        tex->img->dirty = false;
    }
}
Then I have (Estate_t is a structure holding various flags, the width/height of the window, etc. I'm not pasting it here since it's pretty big):
/*
 * begin_2d_overlay() - switch the fixed-function pipeline into a 2D,
 * pixel-aligned state (origin at the top-left).  Pair with
 * end_2d_overlay().
 */
static void begin_2d_overlay( Estate_t *st )
{
    glDisable( GL_DEPTH_TEST );
    glDisable( GL_LIGHTING );
    glEnable( GL_BLEND );
    /* BUG FIX: GL_BLEND was enabled without a blend function; the default
     * (GL_ONE, GL_ZERO) simply overwrites the framebuffer, so the
     * overlay's alpha channel was ignored. */
    glBlendFunc( GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA );
    if ( st->wireframe )
        glPolygonMode( GL_FRONT_AND_BACK, GL_FILL );

    glMatrixMode( GL_PROJECTION );
    /* BUG FIX: glOrtho MULTIPLIES the current projection matrix; without
     * a reset the ortho transform compounds every frame. */
    glLoadIdentity();
    glOrtho( 0.0, st->wm_width, st->wm_height, 0.0, -1, 1 );

    glMatrixMode( GL_MODELVIEW );
    glLoadIdentity();

    glColor3f( 1.0, 1.0, 1.0 );
}

/*
 * end_2d_overlay() - undo the enable/disable state changes made by
 * begin_2d_overlay().
 *
 * NOTE(review): the projection/modelview matrices are NOT restored here —
 * the 3D pass must re-establish its own matrices each frame, or this pair
 * should be converted to glPushMatrix/glPopMatrix.  Confirm which the
 * surrounding code relies on.
 */
static void end_2d_overlay( Estate_t *st )
{
    glEnable( GL_DEPTH_TEST );
    glEnable( GL_LIGHTING );
    glDisable( GL_BLEND );

    glMatrixMode( GL_PROJECTION );

    glMatrixMode( GL_MODELVIEW );

    if ( st->wireframe )
        glPolygonMode( GL_FRONT_AND_BACK, GL_LINE );
}

and my rendering function:

/* Full-window GUI overlay texture; created lazily on first render. */
static Etexture_t *overlay = NULL;

/*
 * render_function() - per-frame render: the 3D scene, then a random
 * translucent rectangle blended into the overlay image, then the overlay
 * drawn as a single full-screen textured quad.
 */
static void render_function( Estate_t *st )
{
    /* draw the 3D scene here */

    /* BUG FIX: the original initialized the file-scope `overlay` with a
     * function call, which is not valid C at file scope (and would run
     * before a GL context exists anyway). */
    if ( !overlay )
        overlay = engineTextureCreate( st->wm_width, st->wm_height );

    begin_2d_overlay( st );

    Erect_t r;
    r.x = rand() % st->wm_width;
    r.y = rand() % st->wm_height;
    r.w = rand() % 300 + 1;
    r.h = rand() % 300 + 1;
    /* NOTE(review): rand() % 255 yields 0..254; use % 256 for the full
     * channel range. */
    engineImageFillRect( overlay->img, &r, engineMapColor( rand() % 255, rand() % 255, rand() % 255, 128 ) );

    overlay->img->dirty = false; /* this shouldn't be here, see below code block for explanation */

    /* NOTE(review): GL_TEXTURE_2D must be enabled somewhere for this quad
     * to sample the overlay texture — confirm it is set up elsewhere. */
    engineTextureBind( overlay );
    glBegin( GL_QUADS );
    glTexCoord2f( 0.0, 1.0 ); glVertex2f( 0.0, st->wm_height );
    glTexCoord2f( 1.0, 1.0 ); glVertex2f( st->wm_width, st->wm_height );
    glTexCoord2f( 1.0, 0.0 ); glVertex2f( st->wm_width, 0.0 );
    glTexCoord2f( 0.0, 0.0 ); glVertex2f( 0.0, 0.0 );
    glEnd(); /* BUG FIX: the glBegin above was never closed */

    end_2d_overlay( st );

    /* more stuff like swapping OpenGL buffers, etc. */
}

Notice the "overlay->img->dirty = false;" line. When uncommented it causes engineTextureBind() to not call glTexSubImage2D() which in turn causes my application to jump from rendering ~360 frames per second to rendering ~3600 frames per second (yes, 10 times more). I only added that line to see if the slowdown is caused by my rectangle drawing code or something else. After multiple tries I managed to isolate glTexSubImage() as the cause of the slow-down.

My questions are:
(1) is this anywhere near the proper way to do a 2D GUI with OpenGL ?
(2) am I doing something wrong that causes glTexSubImage() to run that slow ?
(3) should I be using something else than glTexSubImage() to update the GUI ?

[Edited by - ileonte on November 21, 2010 4:29:57 AM]

Share this post

Link to post
Share on other sites
The difference between 360fps and 3600fps is only about 2.5ms of render time (1/360s ≈ 2.8ms vs 1/3600s ≈ 0.3ms). That is not much. Don't use fps to measure performance, use seconds per frame instead.
1) No, you should do all the rendering in OpenGL. Currently you are doing a lot of computation on the CPU and then sending a huge amount of pixel data to the GPU. I don't know at what resolution you are working but the situation will become worse at higher resolutions. At some point even the CPU might become the bottleneck.
2) I have not checked the code in details but using the format the GPU uses internally can improve performance. PBOs are also a good choice.
Fast Texture Transfers
3) Yes, render everything using OpenGL or use a library that does this, like Qt.

Share this post

Link to post
Share on other sites
The optimal glTex(Sub)Image2D parameters are:

internalformal: GL_RGBA
format: GL_BGRA
type: GL_UNSIGNED_INT_8_8_8_8_REV

Use these (and set up your source data to be happy with them) and you'll get good performance on all hardware with no stalls, no copying back to system memory to do the update, and likely no need to use a PBO (which may give you a lot of work for zero performance gain otherwise).

The reason why is that OpenGL stores your texture using 4 components irrespective of what you specify in your glTex(Sub)Image2D call - check the spec if you doubt me. Using any combination of parameters that doesn't match the real internal storage will mean that your driver will need to copy, expand, swizzle, fill in unspecified components and all the rest itself, and - in especially evil cases (Intel) - copy the texture data back to system memory.

People often choose suboptimal formats and internalformats such as GL_RGB (or GL_BGR) because they think it "saves memory" - it doesn't. OpenGL will always expand textures to 4 components, so not only does it not "save memory" but it also slows down your program.

A format of GL_BGRA (instead of GL_RGBA) can give up to a 6x increase in transfer speed on even recent NVIDIA hardware.

GL_UNSIGNED_INT_8_8_8_8_REV instead of GL_UNSIGNED_BYTE is optional but I recommend it as it can give a hint to some drivers that you're transferring 4-component texels and therefore put them into a more optimal special case path (Intel again).

Mandatory reading:
http://www.opengl.org/wiki/Common_Mistakes#Texture_upload_and_pixel_reads (all of it)
http://www.opengl.org/wiki/Common_Mistakes#Paletted_textures (the last sentence)

The best code structure for efficient use of glTexSubImage2D looks like this:
Identify what needs to be updated.
Update textures.
Draw the scene.
The worst looks like this:
Update subrect.
Draw stuff.
Update another subrect.
Draw stuff.
Update another subrect.
Draw stuff.
The latter will result in multiple pipeline stalls per frame as OpenGL will need to wait until you're finished drawing with the texture before it can update each next subrect.

Using these techniques I've had reports of an over 90x overall performance improvement from one user - not to be sniffed at.

[Edited by - mhagain on November 21, 2010 9:07:54 AM]

Share this post

Link to post
Share on other sites
Sign in to follow this  

  • Advertisement
  • Advertisement
  • Popular Now

  • Advertisement
  • Similar Content

    • By khawk
      We've just released all of the source code for the NeHe OpenGL lessons on our Github page at https://github.com/gamedev-net/nehe-opengl. code - 43 total platforms, configurations, and languages are included.
      Now operated by GameDev.net, NeHe is located at http://nehe.gamedev.net where it has been a valuable resource for developers wanting to learn OpenGL and graphics programming.

      View full story
    • By TheChubu
      The Khronos™ Group, an open consortium of leading hardware and software companies, announces from the SIGGRAPH 2017 Conference the immediate public availability of the OpenGL® 4.6 specification. OpenGL 4.6 integrates the functionality of numerous ARB and EXT extensions created by Khronos members AMD, Intel, and NVIDIA into core, including the capability to ingest SPIR-V™ shaders.
      SPIR-V is a Khronos-defined standard intermediate language for parallel compute and graphics, which enables content creators to simplify their shader authoring and management pipelines while providing significant source shading language flexibility. OpenGL 4.6 adds support for ingesting SPIR-V shaders to the core specification, guaranteeing that SPIR-V shaders will be widely supported by OpenGL implementations.
      OpenGL 4.6 adds the functionality of these ARB extensions to OpenGL’s core specification:
      GL_ARB_gl_spirv and GL_ARB_spirv_extensions to standardize SPIR-V support for OpenGL GL_ARB_indirect_parameters and GL_ARB_shader_draw_parameters for reducing the CPU overhead associated with rendering batches of geometry GL_ARB_pipeline_statistics_query and GL_ARB_transform_feedback_overflow_querystandardize OpenGL support for features available in Direct3D GL_ARB_texture_filter_anisotropic (based on GL_EXT_texture_filter_anisotropic) brings previously IP encumbered functionality into OpenGL to improve the visual quality of textured scenes GL_ARB_polygon_offset_clamp (based on GL_EXT_polygon_offset_clamp) suppresses a common visual artifact known as a “light leak” associated with rendering shadows GL_ARB_shader_atomic_counter_ops and GL_ARB_shader_group_vote add shader intrinsics supported by all desktop vendors to improve functionality and performance GL_KHR_no_error reduces driver overhead by allowing the application to indicate that it expects error-free operation so errors need not be generated In addition to the above features being added to OpenGL 4.6, the following are being released as extensions:
      GL_KHR_parallel_shader_compile allows applications to launch multiple shader compile threads to improve shader compile throughput WGL_ARB_create_context_no_error and GXL_ARB_create_context_no_error allow no error contexts to be created with WGL or GLX that support the GL_KHR_no_error extension “I’m proud to announce OpenGL 4.6 as the most feature-rich version of OpenGL yet. We've brought together the most popular, widely-supported extensions into a new core specification to give OpenGL developers and end users an improved baseline feature set. This includes resolving previous intellectual property roadblocks to bringing anisotropic texture filtering and polygon offset clamping into the core specification to enable widespread implementation and usage,” said Piers Daniell, chair of the OpenGL Working Group at Khronos. “The OpenGL working group will continue to respond to market needs and work with GPU vendors to ensure OpenGL remains a viable and evolving graphics API for all its customers and users across many vital industries.“
      The OpenGL 4.6 specification can be found at https://khronos.org/registry/OpenGL/index_gl.php. The GLSL to SPIR-V compiler glslang has been updated with GLSL 4.60 support, and can be found at https://github.com/KhronosGroup/glslang.
      Sophisticated graphics applications will also benefit from a set of newly released extensions for both OpenGL and OpenGL ES to enable interoperability with Vulkan and Direct3D. These extensions are named:
      GL_EXT_memory_object GL_EXT_memory_object_fd GL_EXT_memory_object_win32 GL_EXT_semaphore GL_EXT_semaphore_fd GL_EXT_semaphore_win32 GL_EXT_win32_keyed_mutex They can be found at: https://khronos.org/registry/OpenGL/index_gl.php
      Industry Support for OpenGL 4.6
      “With OpenGL 4.6 our customers have an improved set of core features available on our full range of OpenGL 4.x capable GPUs. These features provide improved rendering quality, performance and functionality. As the graphics industry’s most popular API, we fully support OpenGL and will continue to work closely with the Khronos Group on the development of new OpenGL specifications and extensions for our customers. NVIDIA has released beta OpenGL 4.6 drivers today at https://developer.nvidia.com/opengl-driver so developers can use these new features right away,” said Bob Pette, vice president, Professional Graphics at NVIDIA.
      "OpenGL 4.6 will be the first OpenGL release where conformant open source implementations based on the Mesa project will be deliverable in a reasonable timeframe after release. The open sourcing of the OpenGL conformance test suite and ongoing work between Khronos and X.org will also allow for non-vendor led open source implementations to achieve conformance in the near future," said David Airlie, senior principal engineer at Red Hat, and developer on Mesa/X.org projects.

      View full story
    • By _OskaR
      I have an OpenGL application but without possibility to wite own shaders.
      I need to perform small VS modification - is possible to do it in an alternative way? Do we have apps or driver modifictions which will catch the shader sent to GPU and override it?
    • By xhcao
      Does sync be needed to read texture content after access texture image in compute shader?
      My simple code is as below,
      glBindImageTexture(0, texture[0], 0, GL_FALSE, 3, GL_READ_ONLY, GL_R32UI);
      glBindImageTexture(1, texture[1], 0, GL_FALSE, 4, GL_WRITE_ONLY, GL_R32UI);
      glDispatchCompute(1, 1, 1);
      // Does sync be needed here?
      glBindFramebuffer(GL_READ_FRAMEBUFFER, framebuffer);
                                     GL_TEXTURE_CUBE_MAP_POSITIVE_X + face, texture[1], 0);
      glReadPixels(0, 0, kWidth, kHeight, GL_RED_INTEGER, GL_UNSIGNED_INT, outputValues);
      Compute shader is very simple, imageLoad content from texture[0], and imageStore content to texture[1]. Does need to sync after dispatchCompute?
    • By Jonathan2006
      My question: is it possible to transform multiple angular velocities so that they can be reinserted as one? My research is below:
      // This works quat quaternion1 = GEQuaternionFromAngleRadians(angleRadiansVector1); quat quaternion2 = GEMultiplyQuaternions(quaternion1, GEQuaternionFromAngleRadians(angleRadiansVector2)); quat quaternion3 = GEMultiplyQuaternions(quaternion2, GEQuaternionFromAngleRadians(angleRadiansVector3)); glMultMatrixf(GEMat4FromQuaternion(quaternion3).array); // The first two work fine but not the third. Why? quat quaternion1 = GEQuaternionFromAngleRadians(angleRadiansVector1); vec3 vector1 = GETransformQuaternionAndVector(quaternion1, angularVelocity1); quat quaternion2 = GEQuaternionFromAngleRadians(angleRadiansVector2); vec3 vector2 = GETransformQuaternionAndVector(quaternion2, angularVelocity2); // This doesn't work //quat quaternion3 = GEQuaternionFromAngleRadians(angleRadiansVector3); //vec3 vector3 = GETransformQuaternionAndVector(quaternion3, angularVelocity3); vec3 angleVelocity = GEAddVectors(vector1, vector2); // Does not work: vec3 angleVelocity = GEAddVectors(vector1, GEAddVectors(vector2, vector3)); static vec3 angleRadiansVector; vec3 angularAcceleration = GESetVector(0.0, 0.0, 0.0); // Sending it through one angular velocity later in my motion engine angleVelocity = GEAddVectors(angleVelocity, GEMultiplyVectorAndScalar(angularAcceleration, timeStep)); angleRadiansVector = GEAddVectors(angleRadiansVector, GEMultiplyVectorAndScalar(angleVelocity, timeStep)); glMultMatrixf(GEMat4FromEulerAngle(angleRadiansVector).array); Also how do I combine multiple angularAcceleration variables? Is there an easier way to transform the angular values?
  • Advertisement