KarimIO

X11 + OpenGL causes segfault


I'm having issues getting X11 to work with OpenGL. I've tried everything! The code used to work with GLFW, but I'm trying to get it to work with raw GLX instead, and the Win32 path currently works, so this is very system-dependent. Why does the program segfault as soon as it reaches glClear, glClearColor, or glGetString? Thanks in advance!

This all lives inside a function, Initialize(), of a class X11Window. Barely any code runs before it. I have the NVIDIA drivers installed.

display = XOpenDisplay(NULL);
if (display == NULL) {
    std::cout << "Could not open display\n";
    return 1;
}
screen = DefaultScreenOfDisplay(display);
screenId = DefaultScreen(display);

// Check GLX version
GLint majorGLX = 0, minorGLX = 0;
glXQueryVersion(display, &majorGLX, &minorGLX);
if (majorGLX <= 1 && minorGLX < 2) {
    std::cout << "GLX 1.2 or greater is required.\n";
    XCloseDisplay(display);
    return 1;
}
else {
    std::cout << "GLX client version: " << glXGetClientString(display, GLX_VERSION) << '\n';
    std::cout << "GLX client vendor: " << glXGetClientString(display, GLX_VENDOR) << "\n";
    std::cout << "GLX client extensions:\n\t" << glXGetClientString(display, GLX_EXTENSIONS) << "\n";

    std::cout << "GLX server version: " << glXQueryServerString(display, screenId, GLX_VERSION) << "\n";
    std::cout << "GLX server vendor: " << glXQueryServerString(display, screenId, GLX_VENDOR) << "\n";
    std::cout << "GLX server extensions:\n\t " << glXQueryServerString(display, screenId, GLX_EXTENSIONS) << "\n";
}

GLint glxAttribs[] = {
    GLX_X_RENDERABLE  , True,
    GLX_DRAWABLE_TYPE , GLX_WINDOW_BIT,
    GLX_RENDER_TYPE   , GLX_RGBA_BIT,
    GLX_X_VISUAL_TYPE , GLX_TRUE_COLOR,
    GLX_RED_SIZE      , 8,
    GLX_GREEN_SIZE    , 8,
    GLX_BLUE_SIZE     , 8,
    GLX_ALPHA_SIZE    , 8,
    GLX_DEPTH_SIZE    , 24,
    GLX_STENCIL_SIZE  , 8,
    GLX_DOUBLEBUFFER  , True,
    None
};

int fbcount;
GLXFBConfig* fbc = glXChooseFBConfig(display, screenId, glxAttribs, &fbcount);
if (fbc == 0) {
    std::cout << "Failed to retrieve framebuffer.\n";
    XCloseDisplay(display);
    return 1;
}
std::cout << "Found " << fbcount << " matching framebuffers.\n";

// Pick the FB config/visual with the most samples per pixel
std::cout << "Getting best XVisualInfo\n";
int best_fbc = -1, worst_fbc = -1, best_num_samp = -1, worst_num_samp = 999;
for (int i = 0; i < fbcount; ++i) {
    XVisualInfo *vi = glXGetVisualFromFBConfig(display, fbc[i]);
    if (vi != 0) {
        int samp_buf, samples;
        glXGetFBConfigAttrib(display, fbc[i], GLX_SAMPLE_BUFFERS, &samp_buf);
        glXGetFBConfigAttrib(display, fbc[i], GLX_SAMPLES, &samples);
        //std::cout << " Matching fbconfig " << i << ", SAMPLE_BUFFERS = " << samp_buf << ", SAMPLES = " << samples << ".\n";

        if (best_fbc < 0 || (samp_buf && samples > best_num_samp)) {
            best_fbc = i;
            best_num_samp = samples;
        }
        if (worst_fbc < 0 || !samp_buf || samples < worst_num_samp) {
            worst_fbc = i;
            worst_num_samp = samples;
        }
    }
    XFree(vi);
}
std::cout << "Best visual info index: " << best_fbc << "\n";
GLXFBConfig bestFbc = fbc[best_fbc];
XFree(fbc); // Make sure to free this!

XVisualInfo* visual = glXGetVisualFromFBConfig(display, bestFbc);
if (visual == 0) {
    std::cout << "Could not create correct visual window.\n";
    XCloseDisplay(display);
    return 1;
}

if (screenId != visual->screen) {
    std::cout << "screenId(" << screenId << ") does not match visual->screen(" << visual->screen << ").\n";
    XCloseDisplay(display);
    return 1;
}

// Open the window
XSetWindowAttributes windowAttribs;
windowAttribs.border_pixel = BlackPixel(display, screenId);
windowAttribs.background_pixel = WhitePixel(display, screenId);
windowAttribs.override_redirect = True;
windowAttribs.colormap = XCreateColormap(display, RootWindow(display, screenId), visual->visual, AllocNone);
windowAttribs.event_mask = ExposureMask | KeyPressMask | KeyReleaseMask | KeymapStateMask | PointerMotionMask | ButtonPressMask | ButtonReleaseMask | EnterWindowMask | LeaveWindowMask;
window = XCreateWindow(display, RootWindow(display, screenId), 0, 0, game.settings.resolution.x, game.settings.resolution.y, 0, visual->depth, InputOutput, visual->visual, CWBackPixel | CWColormap | CWBorderPixel | CWEventMask, &windowAttribs);

// Create GLX OpenGL context
glXCreateContextAttribsARBProc glXCreateContextAttribsARB = 0;
glXCreateContextAttribsARB = (glXCreateContextAttribsARBProc)glXGetProcAddressARB((const GLubyte *)"glXCreateContextAttribsARB");

const char *glxExts = glXQueryExtensionsString(display, screenId);
std::cout << "Late extensions:\n\t" << glxExts << "\n\n";
if (glXCreateContextAttribsARB == 0) {
    std::cout << "glXCreateContextAttribsARB() not found.\n";
}

int context_attribs[] = {
    GLX_CONTEXT_MAJOR_VERSION_ARB, 3,
    GLX_CONTEXT_MINOR_VERSION_ARB, 3,
    GLX_CONTEXT_FLAGS_ARB, 0,
    GLX_CONTEXT_PROFILE_MASK_ARB, GLX_CONTEXT_CORE_PROFILE_BIT_ARB,
    None
};

GLXContext context = 0;
if (!isExtensionSupported(glxExts, "GLX_ARB_create_context")) {
    context = glXCreateNewContext(display, bestFbc, GLX_RGBA_TYPE, 0, True);
}
else {
    context = glXCreateContextAttribsARB(display, bestFbc, 0, true, context_attribs);
}
XSync(display, False);

// Verify that the context is a direct context
if (!glXIsDirect(display, context)) {
    std::cout << "Indirect GLX rendering context obtained\n";
}
else {
    std::cout << "Direct GLX rendering context obtained\n";
}
glXMakeCurrent(display, window, context);

game.HandleMessage(ENGINE_MESSAGE_CONSOLE, ENGINE_MESSAGE_OUTPUT, "TEST: Made Current...\n");

std::cout << "GL Vendor: " << glGetString(GL_VENDOR) << "\n";
std::cout << "GL Renderer: " << glGetString(GL_RENDERER) << "\n";
std::cout << "GL Version: " << glGetString(GL_VERSION) << "\n";
std::cout << "GL Shading Language: " << glGetString(GL_SHADING_LANGUAGE_VERSION) << "\n";

// Show the window
XClearWindow(display, window);
XMapRaised(display, window);
game.HandleMessage(ENGINE_MESSAGE_CONSOLE, ENGINE_MESSAGE_OUTPUT, "TEST: Made ClearWindow...\n");

// Set GL sample stuff
glClearColor(0.5f, 0.6f, 0.7f, 1.0f);
game.HandleMessage(ENGINE_MESSAGE_CONSOLE, ENGINE_MESSAGE_OUTPUT, "TEST: ClearColor...\n");

// Resize window
/*unsigned int change_values = CWWidth | CWHeight;
XWindowChanges values;
values.width = game.settings.resolution.x;
values.height = game.settings.resolution.y;
XConfigureWindow(display, window, change_values, &values);*/

// Enter message loop
while (true) {
    ReadEvents();
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
}

// Cleanup
XDestroyWindow(display, window);
XCloseDisplay(display);

game.HandleMessage(ENGINE_MESSAGE_CONSOLE, ENGINE_MESSAGE_OUTPUT, "TEST: X11 Window Created...\n");
return true;


glXMakeCurrent also has a return code, like the single one you don't check.. :)

Probably the context isn't properly made current there.

If it is, try getting the function pointers for the normal GL functions instead of relying on the ones resolved at link time.
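For example, something like this, using the variable names from your listing:

if (!glXMakeCurrent(display, window, context)) {
    std::cout << "glXMakeCurrent failed\n";
    XCloseDisplay(display);
    return 1;
}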


Thanks for the input, Erik! It returned true, but at least now I know of a place to check for any future bugs :P So it's still not working. Any other ideas?


Did you try glXGetProcAddress to get the glGetString function pointer instead of calling it by its prototype?

(If I understand correctly and the first glGetString call to get GL_VENDOR crashes)


That wouldn't explain the issue with glClearColor or glClear, though, right? :S


Should be very easy to test, and it might, though I'm not sure I understand exactly what happens.

Does it always crash on the first glGetString if that call is there, and crash on glClearColor instead if you comment out the glGetString?

Or do you mean that it randomly crashes on some GL call somewhere, but sometimes executes glGetString and properly prints it before crashing on glClearColor?

If it always crashes on the first GL call, whatever that may be, when MakeCurrent returns true, then it seems likely it's something with the function not calling into the proper driver.. though that's just a guess.

If it is random, it seems more likely that some memory corruption leads to a crash at some random point in the future or something.

You might also want to check your std::cout, as I believe glGetString returns unsigned byte pointers.. if that is the case then they won't print a string (though it shouldn't crash, as they should just print an address instead.. but you could cast them to char* and see if something changes).
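For example, something like this (a sketch, using plain glGetString as in your listing, plus a null check):

const GLubyte* vendor = glGetString(GL_VENDOR);
if (vendor != 0) {
    // cast so the bytes print as a string rather than as a pointer
    std::cout << "GL Vendor: " << (const char*)vendor << "\n";
}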


Again, I suggest doing glXGetProcAddress("glGetString") and checking that the returned function pointer is not null, and if it isn't, calling through that instead of using the regular glGetString. (Provided glXGetProcAddress doesn't crash instead; if it does, reinstall the driver.)

And first, maybe try glXMakeContextCurrent instead of glXMakeCurrent.
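Something like this (just a sketch; glXMakeContextCurrent is the GLX 1.3 entry point and takes separate draw/read drawables, so a GLXWindow created from the same FBConfig should be safest):

GLXWindow glxWin = glXCreateWindow(display, bestFbc, window, 0);
if (!glXMakeContextCurrent(display, glxWin, glxWin, context)) {
    std::cout << "glXMakeContextCurrent failed\n";
}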

Edited by Erik Rufelt


Also, I see now that you don't seem to actually check that the 'context' returned isn't NULL when you create it. MakeCurrent could return True on a NULL context by just disabling the thread's current context.
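Something like this, right where you create it:

context = glXCreateContextAttribsARB(display, bestFbc, 0, True, context_attribs);
if (context == NULL) {
    std::cout << "Failed to create a GLX context\n";
    XCloseDisplay(display);
    return 1;
}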

Edited by Erik Rufelt


Try printing it, like std::cout << "context = " << std::hex << (void*)context..

Also, the function pointers are not supposed to be 'true'; if they just return that then you probably have a null context..

Print them the same way.. << std::hex << (void*)glXGetProcAddress("glGetString").. it should print a proper pointer and not '1' or anything like that..
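I.e., something like:

std::cout << "context = " << std::hex << (void*)context << "\n";
std::cout << "glGetString = " << std::hex << (void*)glXGetProcAddress((const GLubyte*)"glGetString") << "\n";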


Where did you call that glXGetProcAddress? Make sure it works in the same place as where you do the call to glClear.

Sponji, I put it right before either the glGetString or the glClear, depending on which one I was testing.


Yeah, but do those functions work? Try it where it crashes, because then it would probably give you null pointers, which would mean that the context is not made current.

Edit: Or what, do those work or not? I'm not sure you've made that clear yet. Do the GL functions work right after creating the context?

Edited by Sponji


Sponji and Erik: context, and the glXGetProcAddress of both glGetString and glClear, all return proper hex values. When I said they returned true, I meant I had put them in an if condition and they returned a value, but I did as Erik said, to no avail. Thanks again guys! Do you have any other suggestions? This really makes no sense and I'm starting to miss Win32 of all things! XD

 

EDIT: Sponji, they still cause segfaults. I'm saying when I write this:

 

    ...

    std::cout << "Result = " << std::hex << (void*)glXGetProcAddress((const GLubyte *)"glClear") << std::endl;

    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

    ...

 

...it still crashes :/

 

Edit 2: I tried an iteration of this earlier today without any of the other code and it worked, but NO code is being called before this :/ 

Edited by KarimIO


Try something like this as the first GL call instead, and see if it prints something or if that also crashes:

// PFNGLGETSTRINGPROC is already typedef'd in glext.h / glcorearb.h; if you don't
// include those, APIENTRYP has to be defined (on Linux it usually expands to just '*')
typedef const GLubyte *(APIENTRYP PFNGLGETSTRINGPROC) (GLenum name);

...

PFNGLGETSTRINGPROC pfnglGetString = (PFNGLGETSTRINGPROC)glXGetProcAddress((const GLubyte*)"glGetString");
std::cout << (const char*)pfnglGetString(GL_VENDOR) << std::endl;

(Btw, did you reboot after you installed the driver?)

Try running some OpenGL demo app or something.

Edited by Erik Rufelt


It doesn't crash?
Try using the pfnglGetString function pointer to get the other strings as well, and check the return values for null too, to make sure any crash is actually in the function call and not in the use of the return value. Then get glClearColor and glClear through glXGetProcAddress as well.


Okay, it works, apparently, but I can't exactly just use

PFNGLCLEARPROC pfnglClear = (PFNGLCLEARPROC)glXGetProcAddress((const GLubyte*)"glClear");
pfnglClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

instead of glClear O_O Any ideas?


Sure you can..

If you're going to use GL3 you will have to load a whole bunch of functions anyway. GLEW loads all of them for you, so if you use that, perhaps it will fix everything for you.

Otherwise, try lowering the context version to 2.0, or set the compatibility profile or something and see if that works, or just use the "old method" of creating the context, from before the whole context-with-attribs thing in GL3...
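For example, a sketch of the lowered version request (the rest of the creation code unchanged):

int context_attribs[] = {
    GLX_CONTEXT_MAJOR_VERSION_ARB, 2,
    GLX_CONTEXT_MINOR_VERSION_ARB, 1,
    None
};
context = glXCreateContextAttribsARB(display, bestFbc, 0, True, context_attribs);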

If you want some of the reasonably new GL functionality, get glcorearb.h from the OpenGL registry; it already has the function pointer typedefs, so you don't have to typedef them yourself for the older functions.

Anyway.. I strongly recommend getting GLEW to avoid the hassle.
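A minimal sketch of that, assuming GLEW is installed and linked; glewInit has to run after the context has been made current:

#include <GL/glew.h>

// ... after glXMakeCurrent(display, window, context) succeeds:
glewExperimental = GL_TRUE; // needed for core-profile contexts with older GLEW versions
GLenum glewErr = glewInit();
if (glewErr != GLEW_OK) {
    std::cout << "glewInit failed: " << glewGetErrorString(glewErr) << "\n";
}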

Edited by Erik Rufelt


Yeah, I actually JUST connected the InitializeWindow to the rest of my game engine. It works now, though I'm getting some crazy junk for the mouse pointer, so the game is spinning like crazy XD And yeah, I moved from GLEW to GL3W long ago because GLEW is outdated and requires glewExperimental :/

 

Thanks again to both of you. I can't thank you enough, really!!

