# OpenGL X11 + OpenGL causes segfault

## Recommended Posts

I'm having issues getting X11 to work with OpenGL. I've tried everything! It used to work with GLFW but I'm trying to get it to work with GLX instead, and Win32 currently works, so this is very system-dependent. Why does the program segfault when it reaches either glClear, glClearColor, or glGetString? Thanks in advance!

This all lies inside a function, Initialize(), of a class X11Window. There is barely any code that happens before. I have NVidia drivers installed.

display = XOpenDisplay(NULL);
if (display == NULL) {
std::cout << "Could not open display\n";
return 1;
}
screen = DefaultScreenOfDisplay(display);
screenId = DefaultScreen(display);

// Check GLX version
GLint majorGLX, minorGLX = 0;
glXQueryVersion(display, &majorGLX, &minorGLX);
if (majorGLX <= 1 && minorGLX < 2) {
std::cout << "GLX 1.2 or greater is required.\n";
XCloseDisplay(display);
return 1;
}
else {
std::cout << "GLX client version: " << glXGetClientString(display, GLX_VERSION) << '\n';
std::cout << "GLX client vendor: " << glXGetClientString(display, GLX_VENDOR) << "\n";
std::cout << "GLX client extensions:\n\t" << glXGetClientString(display, GLX_EXTENSIONS) << "\n";

std::cout << "GLX server version: " << glXQueryServerString(display, screenId, GLX_VERSION) << "\n";
std::cout << "GLX server vendoe: " << glXQueryServerString(display, screenId, GLX_VENDOR) << "\n";
std::cout << "GLX server extensions:\n\t " << glXQueryServerString(display, screenId, GLX_EXTENSIONS) << "\n";
}

GLint glxAttribs[] = {
GLX_X_RENDERABLE , True,
GLX_DRAWABLE_TYPE , GLX_WINDOW_BIT,
GLX_RENDER_TYPE , GLX_RGBA_BIT,
GLX_X_VISUAL_TYPE , GLX_TRUE_COLOR,
GLX_RED_SIZE , 8,
GLX_GREEN_SIZE , 8,
GLX_BLUE_SIZE , 8,
GLX_ALPHA_SIZE , 8,
GLX_DEPTH_SIZE , 24,
GLX_STENCIL_SIZE , 8,
GLX_DOUBLEBUFFER , True,
None
};

int fbcount;
GLXFBConfig* fbc = glXChooseFBConfig(display, screenId, glxAttribs, &fbcount);
if (fbc == 0) {
std::cout << "Failed to retrieve framebuffer.\n";
XCloseDisplay(display);
return 1;
}
std::cout << "Found " << fbcount << " matching framebuffers.\n";

// Pick the FB config/visual with the most samples per pixel
std::cout << "Getting best XVisualInfo\n";
int best_fbc = -1, worst_fbc = -1, best_num_samp = -1, worst_num_samp = 999;
for (int i = 0; i < fbcount; ++i) {
XVisualInfo *vi = glXGetVisualFromFBConfig( display, fbc[i] );
if ( vi != 0) {
int samp_buf, samples;
glXGetFBConfigAttrib( display, fbc[i], GLX_SAMPLE_BUFFERS, &samp_buf );
glXGetFBConfigAttrib( display, fbc[i], GLX_SAMPLES , &samples );
//std::cout << " Matching fbconfig " << i << ", SAMPLE_BUFFERS = " << samp_buf << ", SAMPLES = " << samples << ".\n";

if ( best_fbc < 0 || (samp_buf && samples > best_num_samp) ) {
best_fbc = i;
best_num_samp = samples;
}
if ( worst_fbc < 0 || !samp_buf || samples < worst_num_samp )
worst_fbc = i;
worst_num_samp = samples;
}
XFree( vi );
}
std::cout << "Best visual info index: " << best_fbc << "\n";
GLXFBConfig bestFbc = fbc[ best_fbc ];
XFree( fbc ); // Make sure to free this!

XVisualInfo* visual = glXGetVisualFromFBConfig( display, bestFbc );

if (visual == 0) {
std::cout << "Could not create correct visual window.\n";
XCloseDisplay(display);
return 1;
}

if (screenId != visual->screen) {
std::cout << "screenId(" << screenId << ") does not match visual->screen(" << visual->screen << ").\n";
XCloseDisplay(display);
return 1;

}

// Open the window
XSetWindowAttributes windowAttribs;
windowAttribs.border_pixel = BlackPixel(display, screenId);
windowAttribs.background_pixel = WhitePixel(display, screenId);
windowAttribs.override_redirect = True;
windowAttribs.colormap = XCreateColormap(display, RootWindow(display, screenId), visual->visual, AllocNone);
window = XCreateWindow(display, RootWindow(display, screenId), 0, 0, game.settings.resolution.x, game.settings.resolution.y, 0, visual->depth, InputOutput, visual->visual, CWBackPixel | CWColormap | CWBorderPixel | CWEventMask, &windowAttribs);

// Create GLX OpenGL context
glXCreateContextAttribsARBProc glXCreateContextAttribsARB = 0;
glXCreateContextAttribsARB = (glXCreateContextAttribsARBProc) glXGetProcAddressARB( (const GLubyte *) "glXCreateContextAttribsARB" );

const char *glxExts = glXQueryExtensionsString( display, screenId );
std::cout << "Late extensions:\n\t" << glxExts << "\n\n";
if (glXCreateContextAttribsARB == 0) {
}

int context_attribs[] = {
GLX_CONTEXT_MAJOR_VERSION_ARB, 3,
GLX_CONTEXT_MINOR_VERSION_ARB, 3,
GLX_CONTEXT_FLAGS_ARB, 0,
None
};

GLXContext context = 0;
if (!isExtensionSupported( glxExts, "GLX_ARB_create_context")) {
context = glXCreateNewContext( display, bestFbc, GLX_RGBA_TYPE, 0, True );
}
else {
context = glXCreateContextAttribsARB( display, bestFbc, 0, true, context_attribs );
}
XSync( display, False );

// Verifying that context is a direct context
if (!glXIsDirect (display, context)) {
std::cout << "Indirect GLX rendering context obtained\n";
}
else {
std::cout << "Direct GLX rendering context obtained\n";
}
glXMakeCurrent(display, window, context);

std::cout << "GL Vendor: " << glGetString(GL_VENDOR) << "\n";
std::cout << "GL Renderer: " << glGetString(GL_RENDERER) << "\n";
std::cout << "GL Version: " << glGetString(GL_VERSION) << "\n";

// Show the window
XClearWindow(display, window);
XMapRaised(display, window);

// Set GL Sample stuff
glClearColor(0.5f, 0.6f, 0.7f, 1.0f);
game.HandleMessage(ENGINE_MESSAGE_CONSOLE, ENGINE_MESSAGE_OUTPUT, "TEST:ClearColor...\n");

// Resize window
/*unsigned int change_values = CWWidth | CWHeight;
XWindowChanges values;
values.width = game.settings.resolution.x;
values.height = game.settings.resolution.y;
XConfigureWindow(display, window, change_values, &values);*/

// Enter message loop
while (true) {
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
}

// Cleanup
XDestroyWindow(display, window);
XCloseDisplay(display);

game.HandleMessage(ENGINE_MESSAGE_CONSOLE, ENGINE_MESSAGE_OUTPUT, "TEST: X11 Window Created...\n");
return true;

##### Share on other sites

glXMakeCurrent also has a return-code, like the single one you don't check.. :)

Probably the context isn't properly made current there.

If it is, try getting the function pointers for the normal GL functions instead of relying on them by linking.

##### Share on other sites

glXMakeCurrent also has a return-code, like the single one you don't check.. :)

Probably the context isn't properly made current there.

If it is, try getting the function pointers for the normal GL functions instead of relying on them by linking.

Thanks for the input Erik! It returned true, but at least now I know of a place to test for any future bugs :P So it's still not working. Any other ideas?

##### Share on other sites

Did you try glXGetProcAddress to get the glGetString function pointer instead of calling it by its prototype?

(If I understand correctly and the first glGetString call to get GL_VENDOR crashes)

##### Share on other sites

Did you try glXGetProcAddress to get the glGetString function pointer instead of calling it by its prototype?

(If I understand correctly and the first glGetString call to get GL_VENDOR crashes)

That wouldn't explain the issue with glClearColor or glClear, though, right? :S

##### Share on other sites

Should be very easy to test, and it might, though I'm not sure I understand exactly what happens.

Does it always crash every time on the first glGetString if that call is there, and crashes on glClearColor instead if you comment out the glGetString?

Or do you  mean that it randomly crashes on some GL call somewhere, but sometimes executes glGetString and properly prints it before crashing on glClearColor?

If it always crashes on the first GL call whatever that may be when MakeCurrent returns true it seems likely it's something with the function not calling the proper driver.. though just a guess.

If it is random it seems more likely that some memory corruption leads to a crash at some random point in the future or something.

You might also want to check your std::cout as I believe glGetString returns unsigned byte pointers.. if that is the case then they won't print a string (though it shouldn't crash as they should just print an address instead.. but you could cast them to char* and see if something changes).

##### Share on other sites

Erik, I mean it crashes on the first OpenGL call. I'm going to look into it the mis-return of MakeCurrent, though I have no idea why that would be a problem :S

##### Share on other sites

Again, I suggest then to do glXGetProcAddress("glGetString") and checking so that the function pointer returned is not null, and if it isn't then calling through that instead of using the regular glGetString. (Provided glXGetProcAddress doesn't crash instead, if it does, reinstall the driver).

And first maybe try glXMakeContextCurrent instead of glXMakeCurrent.

Edited by Erik Rufelt

##### Share on other sites

Also I see now that you don't seem to actually check that 'context' returned isn't NULL when you create it. MakeCurrent could return True on a NULL context by just disabling the thread context.

Edited by Erik Rufelt

##### Share on other sites
Tried this, didn't call. It seems literally everything is running fine until the gl calls -_-
if (context == NULL) {
std::cout << "Null Context!" << std::endl;
exit (0);
}

##### Share on other sites

And if you meant replacing

if (!glXMakeCurrent(display, window, context))

with this:

if (!glXMakeContextCurrent(display, window, window, context))

Then it didn't work :/

##### Share on other sites

Try printing it like std::cout << "context = " << std::hex << (void*)context..

Also the function pointers are not supposed to be true, if they just return that then you probably have a null context..

Print them the same.. << std::hex << (void*)glXGetProcAddress("glGetString").. it should print a proper pointer and not '1' or anything like that..

##### Share on other sites

Where did you call that glXGetProcAddress? Make sure it works in the same place as where you do the call to glClear.

##### Share on other sites

Where did you call that glXGetProcAddress? Make sure it works in the same place as where you do the call to glClear.

Sponji I put it right before either the glGetString or the glClear, depending on the one I was testing.

##### Share on other sites

Yeah, but those functions work? Try it where it crashes, because then it would probably give you null pointers, which would mean that the context is not made current.

Edit: Or what, do those work or not? Not sure if you've made that clear yet. Do the gl functions work right after creating the context?

Edited by Sponji

##### Share on other sites

Yeah, but those functions work? Try it where it crashes, because then it would probably give you null pointers, which would mean that the context is not made current.

Edit: Or what, do those work or not? Not sure if you've made that clear yet. Do the gl functions work right after creating the context?

Try printing it like std::cout << "context = " << std::hex << (void*)context..

Also the function pointers are not supposed to be true, if they just return that then you probably have a null context..

Print them the same.. << std::hex << (void*)glXGetProcAddress("glGetString").. it should print a proper pointer and not '1' or anything like that..

Sponji and Erik, context, and the glXGetProcAdress of both glGetString and glClear all return proper hex values. When I said they return true, I meant I put them in an if condition and they returned a value, but I did as Erik said, to no avail. Thanks again guys! Do you have any other suggestions? This really makes no sense and I'm starting to miss Win32 of all things! XD

EDIT: Sponji, they still cause segfaults. I'm saying when I write this:

...

std::cout << "Result = " << std::hex << (void*)glXGetProcAddress((const GLubyte *)"glClear") << std::endl;

glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

...

...it still crashes :/

Edit 2: I tried an iteration of this earlier today without any of the other code and it worked, but NO code is being called before this :/

Edited by KarimIO

##### Share on other sites

Try something like this, as first GL call instead, see if it prints something or if that also crashes:

typedef const GLubyte *(APIENTRYP PFNGLGETSTRINGPROC) (GLenum name);

...

std::cout << (const char*)pfnglGetString(GL_VENDOR) << std::endl;


(Btw did you reboot after you installed the driver?)

Try running some OpenGL demo app or something.

Edited by Erik Rufelt

##### Share on other sites

Strangely enough, (const char*)pfnglGetString(GL_VENDOR) doesn't return anything.

And by the way, I didn't install the driver today, I installed it a while ago. It's updated though.

##### Share on other sites

It doesn't crash?
Try using the pfnglgetString function pointer to get the other strings as well, and check the return values for null as well to make sure any crash is actually in the function call and not in using the return value. And then get glClearColor and glClear through glXGetProcAddress as well.

##### Share on other sites

It doesn't crash?
Try using the pfnglgetString function pointer to get the other strings as well, and check the return values for null as well to make sure any crash is actually in the function call and not in using the return value. And then get glClearColor and glClear through glXGetProcAddress as well.

Okay, it works,apparently, but I can't exactly just use

pfnglClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

instead of glClear O_O Any ideas?

##### Share on other sites

Okay, it works,apparently, but I can't exactly just use

Sure you can..

If you're going to use GL3 you will have to load a whole bunch of functions anyway. GLEW loads all of them for you, so if you use that perhaps it will fix everything for you.

Otherwise, try downing the context version to 2.0 or set the compatibility profile or something and see if that works, or just use the "old method" to create the context from before the whole context with attribs thing in GL3...

If you want some of the reasonably new GL functionality get glcorearb.h from the OpenGL registry and it has the function pointer definitions already so you don't have to typedef them yourself for the older functions.

Anyway.. strongly recommend getting GLEW to avoid the hassle.

Edited by Erik Rufelt

##### Share on other sites

Yeah I actually JUST connected the InitializeWindow to the rest of my game engine. It works now, though I'm getting some crazy junk for the mouse pointer so the game is spinning like crazy XD And yeah, I moved from GLEW to GL3W long ago because GLEW is outdated and requires Experimental :/

Thanks again to both of you. I can't thank you enough, really!!

## Create an account

Register a new account

• ### Forum Statistics

• Total Topics
628285
• Total Posts
2981837
• ### Similar Content

• By mellinoe
Hi all,
First time poster here, although I've been reading posts here for quite a while. This place has been invaluable for learning graphics programming -- thanks for a great resource!
Right now, I'm working on a graphics abstraction layer for .NET which supports D3D11, Vulkan, and OpenGL at the moment. I have implemented most of my planned features already, and things are working well. Some remaining features that I am planning are Compute Shaders, and some flavor of read-write shader resources. At the moment, my shaders can just get simple read-only access to a uniform (or constant) buffer, a texture, or a sampler. Unfortunately, I'm having a tough time grasping the distinctions between all of the different kinds of read-write resources that are available. In D3D alone, there seem to be 5 or 6 different kinds of resources with similar but different characteristics. On top of that, I get the impression that some of them are more or less "obsoleted" by the newer kinds, and don't have much of a place in modern code. There seem to be a few pivots:
The data source/destination (buffer or texture) Read-write or read-only Structured or unstructured (?) Ordered vs unordered (?) These are just my observations based on a lot of MSDN and OpenGL doc reading. For my library, I'm not interested in exposing every possibility to the user -- just trying to find a good "middle-ground" that can be represented cleanly across API's which is good enough for common scenarios.
Can anyone give a sort of "overview" of the different options, and perhaps compare/contrast the concepts between Direct3D, OpenGL, and Vulkan? I'd also be very interested in hearing how other folks have abstracted these concepts in their libraries.
• By aejt
I recently started getting into graphics programming (2nd try, first try was many years ago) and I'm working on a 3d rendering engine which I hope to be able to make a 3D game with sooner or later. I have plenty of C++ experience, but not a lot when it comes to graphics, and while it's definitely going much better this time, I'm having trouble figuring out how assets are usually handled by engines.
I'm not having trouble with handling the GPU resources, but more so with how the resources should be defined and used in the system (materials, models, etc).
This is my plan now, I've implemented most of it except for the XML parts and factories and those are the ones I'm not sure of at all:
I have these classes:
For GPU resources:
Geometry: holds and manages everything needed to render a geometry: VAO, VBO, EBO. Texture: holds and manages a texture which is loaded into the GPU. Shader: holds and manages a shader which is loaded into the GPU. For assets relying on GPU resources:
Material: holds a shader resource, multiple texture resources, as well as uniform settings. Mesh: holds a geometry and a material. Model: holds multiple meshes, possibly in a tree structure to more easily support skinning later on? For handling GPU resources:
ResourceCache<T>: T can be any resource loaded into the GPU. It owns these resources and only hands out handles to them on request (currently string identifiers are used when requesting handles, but all resources are stored in a vector and each handle only contains resource's index in that vector) Resource<T>: The handles given out from ResourceCache. The handles are reference counted and to get the underlying resource you simply deference like with pointers (*handle).
And my plan is to define everything into these XML documents to abstract away files:
Resources.xml for ref-counted GPU resources (geometry, shaders, textures) Resources are assigned names/ids and resource files, and possibly some attributes (what vertex attributes does this geometry have? what vertex attributes does this shader expect? what uniforms does this shader use? and so on) Are reference counted using ResourceCache<T> Assets.xml for assets using the GPU resources (materials, meshes, models) Assets are not reference counted, but they hold handles to ref-counted resources. References the resources defined in Resources.xml by names/ids. The XMLs are loaded into some structure in memory which is then used for loading the resources/assets using factory classes:
Factory classes for resources:
For example, a texture factory could contain the texture definitions from the XML containing data about textures in the game, as well as a cache containing all loaded textures. This means it has mappings from each name/id to a file and when asked to load a texture with a name/id, it can look up its path and use a "BinaryLoader" to either load the file and create the resource directly, or asynchronously load the file's data into a queue which then can be read from later to create the resources synchronously in the GL context. These factories only return handles.
Factory classes for assets:
Much like for resources, these classes contain the definitions for the assets they can load. For example, with the definition the MaterialFactory will know which shader, textures and possibly uniform a certain material has, and with the help of TextureFactory and ShaderFactory, it can retrieve handles to the resources it needs (Shader + Textures), setup itself from XML data (uniform values), and return a created instance of requested material. These factories return actual instances, not handles (but the instances contain handles).

Is this a good or commonly used approach? Is this going to bite me in the ass later on? Are there other more preferable approaches? Is this outside of the scope of a 3d renderer and should be on the engine side? I'd love to receive and kind of advice or suggestions!
Thanks!
• By nedondev
I 'm learning how to create game by using opengl with c/c++ coding, so here is my fist game. In video description also have game contain in Dropbox. May be I will make it better in future.
Thanks.

• So I've recently started learning some GLSL and now I'm toying with a POM shader. I'm trying to optimize it and notice that it starts having issues at high texture sizes, especially with self-shadowing.
Now I know POM is expensive either way, but would pulling the heightmap out of the normalmap alpha channel and in it's own 8bit texture make doing all those dozens of texture fetches more cheap? Or is everything in the cache aligned to 32bit anyway? I haven't implemented texture compression yet, I think that would help? But regardless, should there be a performance boost from decoupling the heightmap? I could also keep it in a lower resolution than the normalmap if that would improve performance.
Any help is much appreciated, please keep in mind I'm somewhat of a newbie. Thanks!

• Hi,
I'm trying to learn OpenGL through a website and have proceeded until this page of it. The output is a simple triangle. The problem is the complexity.
I have read that page several times and tried to analyse the code but I haven't understood the code properly and completely yet. This is the code: