Sign in to follow this  
VansFannel

OpenGL Drawing code not working

Recommended Posts

Hello.

I'm developing an Android application with OpenGL ES 2.0 native code.

The native code compiles ok, and runs without any problem. But it doesn't draw anything.

Here is my code:

##################################################################################

#include <jni.h>
#include <android/log.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <GLES2/gl2.h>
#include <GLES2/gl2ext.h>

#ifdef __cplusplus
extern "C"
{
#endif

unsigned int shaderProgramID;
GLint vertexHandle;
GLint normalHandle;
GLint textureCoordHandle;

// Constants:
static const float kObjectScale = 3.f;

static const char* cubeMeshVertexShader = "
attribute vec4 vertexPosition;
attribute vec4 vertexNormal;
attribute vec2 vertexTexCoord;
varying vec2 texCoord;
varying vec4 normal;
uniform mat4 modelViewProjectionMatrix;

void main() {
gl_Position = modelViewProjectionMatrix * vertexPosition;
normal = vertexNormal;
texCoord = vertexTexCoord;
} ";


static const char* cubeMeshFragmentShader = "
precision mediump float;
varying vec2 texCoord;
varying vec4 normal;
uniform sampler2D texSampler2D;
void main() {
gl_FragColor = vec4(1.0, 1.0, 1.0, 1.0);
} ";

static const char* meshVertexShaderNoNormalTexCoor = "
attribute vec4 vertexPosition;
void main() {
gl_Position = vertexPosition;
} ";

static const char* fragmentShaderNoNormalTexCoor = "
precision mediump float;
void main() {
gl_FragColor = vec4(1.0, 1.0, 1.0, 1.0);
} ";

unsigned int
initShader(unsigned int shaderType, const char* source)
{
GLuint shader = glCreateShader((GLenum)shaderType);
if (shader)
{
glShaderSource(shader, 1, &source, NULL);
glCompileShader(shader);
GLint compiled = 0;
glGetShaderiv(shader, GL_COMPILE_STATUS, &compiled);

if (!compiled)
{
GLint infoLen = 0;
glGetShaderiv(shader, GL_INFO_LOG_LENGTH, &infoLen);
if (infoLen)
{
char* buf = (char*) malloc(infoLen);
if (buf)
{
glGetShaderInfoLog(shader, infoLen, NULL, buf);
free(buf);
}
glDeleteShader(shader);
shader = 0;
}
}
}
return shader;
}


unsigned int
createProgramFromBuffer(const char* vertexShaderBuffer,
const char* fragmentShaderBuffer)
{

GLuint vertexShader = initShader(GL_VERTEX_SHADER, vertexShaderBuffer);
if (!vertexShader)
{
__android_log_write(ANDROID_LOG_ERROR, "createProgramFromBuffer", "init-vertexShader");
return 0;
}

GLuint fragmentShader = initShader(GL_FRAGMENT_SHADER,
fragmentShaderBuffer);
if (!fragmentShader)
{
__android_log_write(ANDROID_LOG_ERROR, "createProgramFromBuffer", "init-fragmentShader");
return 0;
}

GLuint program = glCreateProgram();
if (program)
{
glAttachShader(program, vertexShader);

glAttachShader(program, fragmentShader);

glLinkProgram(program);
GLint linkStatus = GL_FALSE;
glGetProgramiv(program, GL_LINK_STATUS, &linkStatus);

if (linkStatus != GL_TRUE)
{
GLint bufLength = 0;
glGetProgramiv(program, GL_INFO_LOG_LENGTH, &bufLength);
if (bufLength)
{
char* buf = (char*) malloc(bufLength);
if (buf)
{
glGetProgramInfoLog(program, bufLength, NULL, buf);
free(buf);
}
}
glDeleteProgram(program);
program = 0;
__android_log_write(ANDROID_LOG_ERROR, "glGetProgramiv", "linkStatus");
}
}
__android_log_write(ANDROID_LOG_VERBOSE, "LoaderRenderer", "createProgramFromBuffer-OK");
return program;
}


JNIEXPORT void JNICALL Java_com_company_tests_LoaderRenderer_initRendering
(JNIEnv *env, jobject obj, jint numVNormal, jint numVTexCoord, jint width, jint heigth)
{
glViewport(0, 0, width, heigth);
// Define clear color
glClearColor(0.0f, 0.0f, 0.0f, 0.0f);

if ((numVNormal > 0) && (numVTexCoord > 0))
{
__android_log_write(ANDROID_LOG_VERBOSE, "initRendering", "(numVNormal > 0) && (numVTexCoord > 0)");
shaderProgramID = createProgramFromBuffer(cubeMeshVertexShader,
cubeMeshFragmentShader);
normalHandle = glGetAttribLocation(shaderProgramID,
"vertexNormal");
textureCoordHandle = glGetAttribLocation(shaderProgramID,
"vertexTexCoord");
}
else if (numVNormal == 0)
{
if (numVTexCoord == 0)
{
__android_log_write(ANDROID_LOG_VERBOSE, "initRendering", "(numVNormal == 0) && (numVTexCoord == 0)");
shaderProgramID =createProgramFromBuffer(meshVertexShaderNoNormalTexCoor,
fragmentShaderNoNormalTexCoor);
}
else
{
__android_log_write(ANDROID_LOG_VERBOSE, "initRendering", "(numVNormal == 0) && (numVTexCoord > 0)");
shaderProgramID = createProgramFromBuffer(meshVertexShaderNoNormalTexCoor,
fragmentShaderNoNormalTexCoor);
textureCoordHandle = glGetAttribLocation(shaderProgramID,
"vertexTexCoord");
}
}
else if ((numVNormal > 0) && (numVTexCoord == 0))
{
__android_log_write(ANDROID_LOG_VERBOSE, "initRendering", "(numVNormal > 0) && (numVTexCoord == 0)");
shaderProgramID =createProgramFromBuffer(meshVertexShaderNoNormalTexCoor,
fragmentShaderNoNormalTexCoor);
normalHandle = glGetAttribLocation(shaderProgramID,
"vertexNormal");
}

vertexHandle = glGetAttribLocation(shaderProgramID,
"vertexPosition");
__android_log_write(ANDROID_LOG_VERBOSE, "LoaderRenderer", "initRendering-OK");
}

JNIEXPORT void JNICALL Java_com_company_tests_LoaderRenderer_updateRendering(
JNIEnv* env, jobject obj, jint width, jint heigth)
{
// Set the viewport
glViewport(0, 0, width, heigth);
// Clear the color buffer
glClear(GL_COLOR_BUFFER_BIT);
}

JNIEXPORT void JNICALL Java_com_company_tests_LoaderRenderer_renderFrame
(JNIEnv *env, jobject obj,
jfloatArray vertices, jfloatArray normals,
jfloatArray texCoord, jintArray indices)
{
jfloat *vertPos, *vertNorm, *vertTexC;
jint *indicesArray;

jsize numNormals = env->GetArrayLength(normals);
jsize numTexCoords = env->GetArrayLength(texCoord);

GLsizei numIndices = (GLsizei) env->GetArrayLength(indices);

vertPos = env->GetFloatArrayElements(vertices, 0);
indicesArray = env->GetIntArrayElements(indices, 0);

if (numNormals > 0)
vertNorm = env->GetFloatArrayElements(normals, 0);
if (numTexCoords > 0)
vertTexC = env->GetFloatArrayElements(texCoord, 0);

// Clear color and depth buffer
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

glEnable(GL_DEPTH_TEST);
glEnable(GL_CULL_FACE);

glUseProgram(shaderProgramID);

glVertexAttribPointer(vertexHandle, 3, GL_FLOAT, GL_FALSE, 0,
(const GLvoid*) vertPos);
if (numNormals > 0)
glVertexAttribPointer(normalHandle, 3, GL_FLOAT, GL_FALSE, 0,
(const GLvoid*) vertNorm);
if (numTexCoords > 0)
glVertexAttribPointer(textureCoordHandle, 2, GL_FLOAT, GL_FALSE, 0,
(const GLvoid*) vertTexC);

glEnableVertexAttribArray(vertexHandle);
if (numNormals > 0)
glEnableVertexAttribArray(normalHandle);
if (numTexCoords > 0)
{
glEnableVertexAttribArray(textureCoordHandle);
}
glDrawElements(GL_TRIANGLES, numIndices, GL_UNSIGNED_SHORT,
(const GLvoid*) indicesArray);

glDisable(GL_DEPTH_TEST);

glDisableVertexAttribArray(vertexHandle);
if (numNormals > 0)
glDisableVertexAttribArray(normalHandle);
if (numTexCoords > 0)
glDisableVertexAttribArray(textureCoordHandle);

if (numNormals > 0)
env->ReleaseFloatArrayElements(normals, vertNorm, 0);
if (numTexCoords > 0)
env->ReleaseFloatArrayElements(texCoord, vertTexC, 0);

env->ReleaseFloatArrayElements(vertices, vertPos, 0);
env->ReleaseIntArrayElements(indices, indicesArray, 0);
}
#ifdef __cplusplus
}
#endif




Please, do you see where is my error?

Thanks.

[Edited by - VansFannel on December 8, 2010 7:52:35 AM]

Share this post


Link to post
Share on other sites
Quote:
Original post by VansFannel
Please, do you see where is my error?


No. But have you seen the FAQ and grasp what they write in "How to ask questions the smart way?" and further, did you read the famous "What have you tried?" website?

Share this post


Link to post
Share on other sites
Is there an article about debugging Android native code on gamedev.net?

I think it would be more useful if you recommend me articles about OpenGL ES 2.0 programming, or something about my question instead of http://www.catb.org/~esr/faqs/smart-questions.html.

I have put my code here because I thought someone with experience in OpenGL programming could spot my error easily.

I'm so sorry if you don't like the way I have done my question but I prefer to waste my time learning OpenGL and not talking with you about the way I ask.

Share this post


Link to post
Share on other sites
Quote:
Original post by VansFannel
Is there an article about debugging Android native code on gamedev.net?

I think it would be more useful if you recommend me articles about OpenGL ES 2.0 programming, or something about my question instead of http://www.catb.org/~esr/faqs/smart-questions.html.

I have put my code here because I though someone with experience on OpenGL programming could see my error easily.

I'm so sorry if you don't like the way I have done my question but I prefer to waste my time learning OpenGL and not talking with you about the way I ask.


You *should* spend some time on how to find errors in your code.

A question like
Quote:
Please, do you see where is my error?

upon a big fat chunk of badly formatted code only indicates that your real problem is not an OpenGL-problem, but a "can't help myself or have not tried hard enough"-problem.


A question like
Quote:
Please, do you see where is my error?

upon a big fat chunk of badly formatted code clearly shows that you don't even know where the error is at least roughly at. If that's not true, than it shows that you are lazy.


A statement like
Quote:
but I prefer to waste my time learning OpenGL and not talking with you about the way I ask.

shows that you might only be interested in ready solutions and not in learning how to help yourself. You don't want to waste time learning how to fish, you just want the fish. But you expect us to waste our unpaid and voluntary time with very, very non-specific questions and big code-dumps, not even remotely going in detail about your exact problem.


Before you state something like "phresnel, the funny thing is, you are wasting time with this post, why not with my code", then my answer is, no. I spent my time not only in assembling ready solutions, but also in other things, like touch-typing, so it took me <5 minutes, an average cigarette pause*, to write this shit.


* smoking hurts your body and your environment

Share this post


Link to post
Share on other sites
While you are answering my question, I'm checking my code to find my error.

If you believe that I haven't tested my code enough before posting it here, you are wrong. I have posted my code here because I can't find the error by myself.

And I see you have time to tell me how must I make my question, but you don't have time to check my code.

Sorry, but I don't have time to continue answering you.

Share this post


Link to post
Share on other sites
Quote:
Original post by VansFannel
While you are answering my question, I'm checking my code to find my error.

If you believe that I haven't test my code enough before post it here, you are wrong. If I have posted my code here is because I can't find the error by myself.

And I see you have time to tell me how must I make my question, but you don't have time to check my code.


Then, why not state what you have tried and what you've found out, so we don't waste time hunting for solutions that you were already unlucky at?

Quote:
Sorry, but I don't have time to continue answering you.

You know. We all have stuff to do, not only you. That's the reason why we often post seemingly harsh comments like this. It even happens that the most experienced here use to be older than many newbies, and therefore have even less time to waste.


Simply put: As long as you are on unpaid forums where ppl do voluntary work, if you want ppl to help you, try to minimize the time they must spend at your problem.



Quote:
And I see you have time to tell me how must I make my question, but you don't have time to check my code.

If you look back, you'll see I knew this cockiness comes.

Share this post


Link to post
Share on other sites
But if you would tell us what you have tried already, that would be a great help to us, that means we would have a better chance to help you.

So tell us what you have tried.

And yes, Phresnel took the time and effort to lecture you, but it is for the others: you seem to be a bit lazy based on your posting history, so we would benefit from your "improvement". And he's right: why do WE have to do ALL the debugging for you?

Again: even if you had done your work with it, we don't have a clue because you don't show us what you did.

Share this post


Link to post
Share on other sites
I think the question is simple: it doesn't draw anything on screen. I see a completely black screen. And glGetError doesn't return anything.

vertices, normals, texture coordinates has data, and attribute bindings don't throw any error.

There aren't errors but it doesn't draw anything.

May I have to do some initialization?

Share this post


Link to post
Share on other sites
What are you actually trying to do?
What do you expect from the code?
What do you want to be drawn?
Can you draw stuff at all? (Don't expect us to be "up to date" with your knowledge and project, and don't expect us to dig through your posting history just to understand your project).

Share this post


Link to post
Share on other sites
I'm trying to draw a CUBE.

I have exported a model from blender to Wavefront .OBJ., loaded using JAVA to arrays and pass to native code. The problem isn't what I'm trying to draw because I use a model parsed correctly to vertices, normals and texture coordinates and I see the same: a black screen.

Methods Order:

1. Java_com_company_tests_LoaderRenderer_initRendering

Uses to:
a ) Create program.
b ) Select correct vertex and fragment shader.
c ) Get attribute locations for attributes needed (I always need vertices coordinates and indices). Here I see if I will need normal and texture coordinate attributes.

2. Java_com_company_tests_LoaderRenderer_renderFrame

Here I draw the cube.
a ) I retrieve vertices, normals, texture coordinates and indices parameters.
b ) Enable vertices, normals and texture coordinate attributes.
c ) Draw model with glDrawElements.
d ) Disable attributes.
e ) JNI cleaning code.

Other functions are called from this functions.
Java_com_company_tests_LoaderRenderer_updateRendering is called when screen size change.

I use jfloatArray vertices, jfloatArray normals, jfloatArray texCoord, jintArray indices are arrays to draw the model. Cube's vertices are in these arrays. I have these arrays to draw many models.

I take a working example that read vertices, normals, texture coordinates and indices from a header file. I've modified the example to work with many models, not with only a model defined inside a header file.

If I use another model, it continues drawing nothing.

I haven't draw anything yet.

Do you need more details?

My project is an Android application with native code. All OpenGL ES 2.0 stuff is here.

Share this post


Link to post
Share on other sites
what's this?

env->ReleaseFloatArrayElements(vertices, vertPos, 0);

Are you deleting the vertices? If you are, then nothing will draw according to your code. Try commenting this and the one below it out.

Share this post


Link to post
Share on other sites
oops I don't know anything about the coding for the android yet so I'll stick to opengl since that's most likely the problem.

try a simple triangle with an orthographic projection. I would think if that works then it may be a problem with the vertices. Or even more so draw with points instead.

another thing about your vertex shader:


static const char* meshVertexShaderNoNormalTexCoor = "
attribute vec4 vertexPosition;
void main() {
gl_Position = vertexPosition;
} ";








This is fine but would really only show up for 2d stuff. you may need to create a modelview and projection matrix.

gl_Position = ProjectionMatrix * ModelViewMatrix * vertexPosition;

which ProjectionMatrix and ModelViewMatrix have to be shader uniforms. So it's not that it's drawing the cube but that the transformation are not right. at least for a 3d object.

use GLM math library: http://glm.g-truc.net/
This a good math library.

This is a shader from the opengl es 2.0 quick reference card
here: http://www.khronos.org/opengles/2_X/


VERTEX SHADER

uniform mat4 mvp_matrix; // model-view-projection matrix
uniform mat3 normal_matrix; // normal matrix
uniform vec3 ec_light_dir; // light direction in eye coords
attribute vec4 a_vertex; // vertex position
attribute vec3 a_normal; // vertex normal
attribute vec2 a_texcoord; // texture coordinates
varying float v_diffuse;
varying vec2 v_texcoord;

void main(void)
{
// put vertex normal into eye coords
vec3 ec_normal = normalize(normal_matrix * a_normal);
// emit diffuse scale factor, texcoord, and position
v_diffuse = max(dot(ec_light_dir, ec_normal), 0.0);
v_texcoord = a_texcoord;
gl_Position = mvp_matrix * a_vertex;
}

FRAGMENT SHADER

precision mediump float;
uniform sampler2D t_reflectance;
uniform vec4 i_ambient;
varying float v_diffuse;
varying vec2 v_texcoord;
void main (void)
{
vec4 color = texture2D(t_reflectance, v_texcoord);
gl_FragColor = color * (vec4(v_diffuse) + i_ambient);
}





Share this post


Link to post
Share on other sites
I've changed vertexShader and fragmentShader with these:


static const char* cubeMeshVertexShader = "
attribute vec4 vertexPosition;
attribute vec4 vertexNormal;
attribute vec2 vertexTexCoord;
varying vec2 texCoord;
varying vec4 normal;
void main() {
gl_Position = gl_ProjectionMatrix * gl_ModelViewMatrix * vertexPosition;
normal = vertexNormal;
texCoord = vertexTexCoord;
} ";


static const char* cubeMeshFragmentShader = "
precision mediump float;
varying vec2 texCoord;
varying vec4 normal;
uniform sampler2D texSampler2D;
void main() {
gl_FragColor = vec4(1.0, 1.0, 1.0, 1.0);
} ";




And I see nothing.

[Edited by - VansFannel on December 8, 2010 12:52:11 PM]

Share this post


Link to post
Share on other sites
did you set the matrices themselves.
You define a matrix in c++ then pass that matrix to the shader so...

You need to make the matrices uniform and feed in the data to them:
gl_ProjectionMatrix
gl_ModelViewMatrix


uniform mat4 gl_ProjectionMatrix;
uniform mat4 gl_ModelViewMatrix;

The usual gl_ProjectionMatrix or gl_ModelViewMatrix doesn't work in opengl es 2.0 only 1.0 which mean those used to be built in functions are deprecated and won't work.

Use GLM math library to construct your matrices or some other math library. Android may already have a math lib for this reason.

This is an example of the code from glm. It's the same thing just the only 1 matrix is passed to the shader as a

uniform mat4 MVP

matrix, so you could do the same. It's no different then splitting up the matrix work.

You should really check out glm.
This is the link to example code about how the math lib works:
http://glm.g-truc.net/code.html


// glm::vec3, glm::vec4, glm::ivec4, glm::mat4
#include <glm/glm.hpp>
// glm::perspective
#include <glm/gtc/matrix_projection.hpp>
// glm::translate, glm::rotate, glm::scale
#include <glm/gtc/matrix_transform.hpp>
// glm::value_ptr
#include <glm/gtc/type_ptr.hpp>

{
glm::mat4 Projection =
glm::perspective(45.0f, 4.0f / 3.0f, 0.1f, 100.f);
glm::mat4 ViewTranslate = glm::translate(
glm::mat4(1.0f),
glm::vec3(0.0f, 0.0f, -Translate));
glm::mat4 ViewRotateX = glm::rotate(
ViewTranslate,
Rotate.y, glm::vec3(-1.0f, 0.0f, 0.0f));
glm::mat4 View = glm::rotate(
ViewRotateX,
Rotate.x, glm::vec3(0.0f, 1.0f, 0.0f));
glm::mat4 Model = glm::scale(
glm::mat4(1.0f),
glm::vec3(0.5f));
glm::mat4 ModelView = View * Model;

glUniformMatrix4fv(
LocationModelView , 1, GL_FALSE, glm::value_ptr(ModelView ));
}

glUniformMatrix4fv(
LocationProjection, 1, GL_FALSE, glm::value_ptr(Projection));
}


Share this post


Link to post
Share on other sites
You are right. The problem is with my ProjectionMatrix and ModelViewMatrix.

I don't know how to set up them, but I'm sure the problem is with them.

Thank you very much for your time and help.

If you need more information, tell me.

Share this post


Link to post
Share on other sites
if you look at my prev post that code at the bottom shows you how to make setup the projection and modelview matrices using glm math lib and there's the website to download the lib. Once you have your matrices setup pass them to shader as uniforms:

give it a try and if nothing shows up post again.

Share this post


Link to post
Share on other sites

Create an account or sign in to comment

You need to be a member in order to leave a comment

Create an account

Sign up for a new account in our community. It's easy!

Register a new account

Sign in

Already have an account? Sign in here.

Sign In Now

Sign in to follow this  

  • Forum Statistics

    • Total Topics
      628278
    • Total Posts
      2981785
  • Similar Content

    • By mellinoe
      Hi all,
      First time poster here, although I've been reading posts here for quite a while. This place has been invaluable for learning graphics programming -- thanks for a great resource!
      Right now, I'm working on a graphics abstraction layer for .NET which supports D3D11, Vulkan, and OpenGL at the moment. I have implemented most of my planned features already, and things are working well. Some remaining features that I am planning are Compute Shaders, and some flavor of read-write shader resources. At the moment, my shaders can just get simple read-only access to a uniform (or constant) buffer, a texture, or a sampler. Unfortunately, I'm having a tough time grasping the distinctions between all of the different kinds of read-write resources that are available. In D3D alone, there seem to be 5 or 6 different kinds of resources with similar but different characteristics. On top of that, I get the impression that some of them are more or less "obsoleted" by the newer kinds, and don't have much of a place in modern code. There seem to be a few pivots:
      The data source/destination (buffer or texture) Read-write or read-only Structured or unstructured (?) Ordered vs unordered (?) These are just my observations based on a lot of MSDN and OpenGL doc reading. For my library, I'm not interested in exposing every possibility to the user -- just trying to find a good "middle-ground" that can be represented cleanly across API's which is good enough for common scenarios.
      Can anyone give a sort of "overview" of the different options, and perhaps compare/contrast the concepts between Direct3D, OpenGL, and Vulkan? I'd also be very interested in hearing how other folks have abstracted these concepts in their libraries.
    • By aejt
      I recently started getting into graphics programming (2nd try, first try was many years ago) and I'm working on a 3d rendering engine which I hope to be able to make a 3D game with sooner or later. I have plenty of C++ experience, but not a lot when it comes to graphics, and while it's definitely going much better this time, I'm having trouble figuring out how assets are usually handled by engines.
      I'm not having trouble with handling the GPU resources, but more so with how the resources should be defined and used in the system (materials, models, etc).
      This is my plan now, I've implemented most of it except for the XML parts and factories and those are the ones I'm not sure of at all:
      I have these classes:
      For GPU resources:
      Geometry: holds and manages everything needed to render a geometry: VAO, VBO, EBO. Texture: holds and manages a texture which is loaded into the GPU. Shader: holds and manages a shader which is loaded into the GPU. For assets relying on GPU resources:
      Material: holds a shader resource, multiple texture resources, as well as uniform settings. Mesh: holds a geometry and a material. Model: holds multiple meshes, possibly in a tree structure to more easily support skinning later on? For handling GPU resources:
      ResourceCache<T>: T can be any resource loaded into the GPU. It owns these resources and only hands out handles to them on request (currently string identifiers are used when requesting handles, but all resources are stored in a vector and each handle only contains resource's index in that vector) Resource<T>: The handles given out from ResourceCache. The handles are reference counted and to get the underlying resource you simply deference like with pointers (*handle).  
      And my plan is to define everything into these XML documents to abstract away files:
      Resources.xml for ref-counted GPU resources (geometry, shaders, textures) Resources are assigned names/ids and resource files, and possibly some attributes (what vertex attributes does this geometry have? what vertex attributes does this shader expect? what uniforms does this shader use? and so on) Are reference counted using ResourceCache<T> Assets.xml for assets using the GPU resources (materials, meshes, models) Assets are not reference counted, but they hold handles to ref-counted resources. References the resources defined in Resources.xml by names/ids. The XMLs are loaded into some structure in memory which is then used for loading the resources/assets using factory classes:
      Factory classes for resources:
      For example, a texture factory could contain the texture definitions from the XML containing data about textures in the game, as well as a cache containing all loaded textures. This means it has mappings from each name/id to a file and when asked to load a texture with a name/id, it can look up its path and use a "BinaryLoader" to either load the file and create the resource directly, or asynchronously load the file's data into a queue which then can be read from later to create the resources synchronously in the GL context. These factories only return handles.
      Factory classes for assets:
      Much like for resources, these classes contain the definitions for the assets they can load. For example, with the definition the MaterialFactory will know which shader, textures and possibly uniform a certain material has, and with the help of TextureFactory and ShaderFactory, it can retrieve handles to the resources it needs (Shader + Textures), setup itself from XML data (uniform values), and return a created instance of requested material. These factories return actual instances, not handles (but the instances contain handles).
       
       
      Is this a good or commonly used approach? Is this going to bite me in the ass later on? Are there other more preferable approaches? Is this outside of the scope of a 3d renderer and should be on the engine side? I'd love to receive and kind of advice or suggestions!
      Thanks!
    • By nedondev
      I 'm learning how to create game by using opengl with c/c++ coding, so here is my fist game. In video description also have game contain in Dropbox. May be I will make it better in future.
      Thanks.
    • By Abecederia
      So I've recently started learning some GLSL and now I'm toying with a POM shader. I'm trying to optimize it and notice that it starts having issues at high texture sizes, especially with self-shadowing.
      Now I know POM is expensive either way, but would pulling the heightmap out of the normalmap alpha channel and in it's own 8bit texture make doing all those dozens of texture fetches more cheap? Or is everything in the cache aligned to 32bit anyway? I haven't implemented texture compression yet, I think that would help? But regardless, should there be a performance boost from decoupling the heightmap? I could also keep it in a lower resolution than the normalmap if that would improve performance.
      Any help is much appreciated, please keep in mind I'm somewhat of a newbie. Thanks!
    • By test opty
      Hi,
      I'm trying to learn OpenGL through a website and have proceeded until this page of it. The output is a simple triangle. The problem is the complexity.
      I have read that page several times and tried to analyse the code but I haven't understood the code properly and completely yet. This is the code:
       
      #include <glad/glad.h> #include <GLFW/glfw3.h> #include <C:\Users\Abbasi\Desktop\std_lib_facilities_4.h> using namespace std; //****************************************************************************** void framebuffer_size_callback(GLFWwindow* window, int width, int height); void processInput(GLFWwindow *window); // settings const unsigned int SCR_WIDTH = 800; const unsigned int SCR_HEIGHT = 600; const char *vertexShaderSource = "#version 330 core\n" "layout (location = 0) in vec3 aPos;\n" "void main()\n" "{\n" " gl_Position = vec4(aPos.x, aPos.y, aPos.z, 1.0);\n" "}\0"; const char *fragmentShaderSource = "#version 330 core\n" "out vec4 FragColor;\n" "void main()\n" "{\n" " FragColor = vec4(1.0f, 0.5f, 0.2f, 1.0f);\n" "}\n\0"; //******************************* int main() { // glfw: initialize and configure // ------------------------------ glfwInit(); glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3); glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 3); glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE); // glfw window creation GLFWwindow* window = glfwCreateWindow(SCR_WIDTH, SCR_HEIGHT, "My First Triangle", nullptr, nullptr); if (window == nullptr) { cout << "Failed to create GLFW window" << endl; glfwTerminate(); return -1; } glfwMakeContextCurrent(window); glfwSetFramebufferSizeCallback(window, framebuffer_size_callback); // glad: load all OpenGL function pointers if (!gladLoadGLLoader((GLADloadproc)glfwGetProcAddress)) { cout << "Failed to initialize GLAD" << endl; return -1; } // build and compile our shader program // vertex shader int vertexShader = glCreateShader(GL_VERTEX_SHADER); glShaderSource(vertexShader, 1, &vertexShaderSource, nullptr); glCompileShader(vertexShader); // check for shader compile errors int success; char infoLog[512]; glGetShaderiv(vertexShader, GL_COMPILE_STATUS, &success); if (!success) { glGetShaderInfoLog(vertexShader, 512, nullptr, infoLog); cout << "ERROR::SHADER::VERTEX::COMPILATION_FAILED\n" << infoLog << endl; } 
// fragment shader int fragmentShader = glCreateShader(GL_FRAGMENT_SHADER); glShaderSource(fragmentShader, 1, &fragmentShaderSource, nullptr); glCompileShader(fragmentShader); // check for shader compile errors glGetShaderiv(fragmentShader, GL_COMPILE_STATUS, &success); if (!success) { glGetShaderInfoLog(fragmentShader, 512, nullptr, infoLog); cout << "ERROR::SHADER::FRAGMENT::COMPILATION_FAILED\n" << infoLog << endl; } // link shaders int shaderProgram = glCreateProgram(); glAttachShader(shaderProgram, vertexShader); glAttachShader(shaderProgram, fragmentShader); glLinkProgram(shaderProgram); // check for linking errors glGetProgramiv(shaderProgram, GL_LINK_STATUS, &success); if (!success) { glGetProgramInfoLog(shaderProgram, 512, nullptr, infoLog); cout << "ERROR::SHADER::PROGRAM::LINKING_FAILED\n" << infoLog << endl; } glDeleteShader(vertexShader); glDeleteShader(fragmentShader); // set up vertex data (and buffer(s)) and configure vertex attributes float vertices[] = { -0.5f, -0.5f, 0.0f, // left 0.5f, -0.5f, 0.0f, // right 0.0f, 0.5f, 0.0f // top }; unsigned int VBO, VAO; glGenVertexArrays(1, &VAO); glGenBuffers(1, &VBO); // bind the Vertex Array Object first, then bind and set vertex buffer(s), //and then configure vertex attributes(s). glBindVertexArray(VAO); glBindBuffer(GL_ARRAY_BUFFER, VBO); glBufferData(GL_ARRAY_BUFFER, sizeof(vertices), vertices, GL_STATIC_DRAW); glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 3 * sizeof(float), (void*)0); glEnableVertexAttribArray(0); // note that this is allowed, the call to glVertexAttribPointer registered VBO // as the vertex attribute's bound vertex buffer object so afterwards we can safely unbind glBindBuffer(GL_ARRAY_BUFFER, 0); // You can unbind the VAO afterwards so other VAO calls won't accidentally // modify this VAO, but this rarely happens. Modifying other // VAOs requires a call to glBindVertexArray anyways so we generally don't unbind // VAOs (nor VBOs) when it's not directly necessary. 
glBindVertexArray(0); // uncomment this call to draw in wireframe polygons. //glPolygonMode(GL_FRONT_AND_BACK, GL_LINE); // render loop while (!glfwWindowShouldClose(window)) { // input // ----- processInput(window); // render // ------ glClearColor(0.2f, 0.3f, 0.3f, 1.0f); glClear(GL_COLOR_BUFFER_BIT); // draw our first triangle glUseProgram(shaderProgram); glBindVertexArray(VAO); // seeing as we only have a single VAO there's no need to // bind it every time, but we'll do so to keep things a bit more organized glDrawArrays(GL_TRIANGLES, 0, 3); // glBindVertexArray(0); // no need to unbind it every time // glfw: swap buffers and poll IO events (keys pressed/released, mouse moved etc.) glfwSwapBuffers(window); glfwPollEvents(); } // optional: de-allocate all resources once they've outlived their purpose: glDeleteVertexArrays(1, &VAO); glDeleteBuffers(1, &VBO); // glfw: terminate, clearing all previously allocated GLFW resources. glfwTerminate(); return 0; } //************************************************** // process all input: query GLFW whether relevant keys are pressed/released // this frame and react accordingly void processInput(GLFWwindow *window) { if (glfwGetKey(window, GLFW_KEY_ESCAPE) == GLFW_PRESS) glfwSetWindowShouldClose(window, true); } //******************************************************************** // glfw: whenever the window size changed (by OS or user resize) this callback function executes void framebuffer_size_callback(GLFWwindow* window, int width, int height) { // make sure the viewport matches the new window dimensions; note that width and // height will be significantly larger than specified on retina displays. glViewport(0, 0, width, height); } As you see, about 200 lines of complicated code only for a simple triangle. 
      I don't know what parts are necessary for that output. And also, what the correct order of instructions for such an output or programs is, generally. That start point is too complex for a beginner of OpenGL like me and I don't know how to make the issue solved. What are your ideas please? What is the way to figure both the code and the whole program out correctly please?
      I wish I'd read a reference that would teach me OpenGL through a step-by-step method. 
  • Popular Now