hoonsworld

OpenGL Fundamental problem with smooth normals


Hello from Germany :) I created two triangles with different vertex orders (counter-clockwise and clockwise). These orderings are user data and may not be changed. With flat shading the picture is fine, but with smooth shading I get partially or fully black triangles for the clockwise order :( The problem is that I have only one averaged normal per vertex. Is there an OpenGL statement to correct this? Do the normals always have to follow the right-hand rule? Thanks in advance. Greetings, hoon. Here is a pic: black triangle. And here is the code in JOGL (executable):
import javax.swing.*;
import javax.media.opengl.GLEventListener;
import javax.media.opengl.GLCanvas;
import javax.media.opengl.GLAutoDrawable;
import javax.media.opengl.GL;
import javax.media.opengl.glu.GLU;
import java.nio.FloatBuffer;

public class NormalProblem
        extends JFrame
        implements GLEventListener
{
    private static final boolean SMOOTH = true;
    private int angle = 0;

    public static void main(final String[] args)
    {
        new NormalProblem();
    }

    public NormalProblem()
    {
        final GLCanvas canvas = new GLCanvas();
        canvas.addGLEventListener(this);
        add(canvas);

        setSize(500, 500);
        setVisible(true);
        setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE);

        // crude render loop: isWoken() sleeps ~10 ms and always returns true
        while (isWoken())
            canvas.display();
    }

    public void display(final GLAutoDrawable drawable)
    {
        final GL gl = drawable.getGL();

        gl.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT);
        gl.glLoadIdentity();

        gl.glTranslatef(0, 0, -6);
        gl.glRotated(angle, 1, 1, 1);
        gl.glRotated(angle++, 0, 1, 0);

        /*           3
         *          / \
         *         / 1 \    triangle 1 [1, 2, 3] (counter-clockwise vertex order)
         *       1/_____\ 2
         *        \     /
         *  y      \ 2 /    triangle 2 [1, 2, 4] (clockwise vertex order)
         *  ^       \ /
         *  |        4
         *  '---> x
         */

        if (SMOOTH)
            gl.glShadeModel(GL.GL_SMOOTH);

        else
            gl.glShadeModel(GL.GL_FLAT);

        drawTriangles(gl);
        drawNormals(gl);
    }

    private void drawTriangles(final GL gl)
    {
        gl.glBegin(GL.GL_TRIANGLES);

        if (SMOOTH)
        {
            // triangle 1 [1, 2, 3]
            gl.glNormal3f(0, 0, 1);
            gl.glVertex3f(-2, 0, 0);

            gl.glNormal3f(0, 0, 1);
            gl.glVertex3f(2, 0, 0);

            gl.glNormal3f(0, 0, 1);
            gl.glVertex3f(0, 2, 0);

            // triangle 2 [1, 2, 4]
            gl.glNormal3f(0, 0, 1); // (0, 0, -1) would be correct, but I have only one averaged vertex normal
            gl.glVertex3f(-2, 0, 0);

            gl.glNormal3f(0, 0, 1); // (0, 0, -1) would be correct, but I have only one averaged vertex normal
            gl.glVertex3f(2, 0, 0);

            gl.glNormal3f(0, 0, -1);
            gl.glVertex3f(0, -2, 0);
        }

        else
        {
            // triangle 1 [1, 2, 3]
            gl.glNormal3f(0, 0, 1);
            gl.glVertex3f(-2, 0, 0);
            gl.glVertex3f(2, 0, 0);
            gl.glVertex3f(0, 2, 0);

            // triangle 2 [1, 2, 4]
            gl.glNormal3f(0, 0, -1);
            gl.glVertex3f(-2, 0, 0);
            gl.glVertex3f(2, 0, 0);
            gl.glVertex3f(0, -2, 0);
        }

        gl.glEnd();
    }

    private void drawNormals(final GL gl)
    {
        gl.glDisable(GL.GL_LIGHTING);
        gl.glBegin(GL.GL_LINES);

        if (SMOOTH)
        {
            gl.glColor3f(1, 1, 0);
            // vertex normals of triangle 1 [1, 2, 3]
            // common normal on vertex 1 for triangle 1 and 2
            gl.glVertex3f(-2, 0, 0);
            gl.glVertex3f(-2, 0, 1);
            // common normal on vertex 2 for triangle 1 and 2
            gl.glVertex3f(2, 0, 0);
            gl.glVertex3f(2, 0, 1);
            gl.glColor3f(0, 1, 0);
            gl.glVertex3f(0, 2, 0);
            gl.glVertex3f(0, 2, 1);

            gl.glColor3f(1, 0, 0);
            // vertex normals of triangle 2 [1, 2, 4]
            /* normals for vertex 1 and 2 already exist
            gl.glVertex3f(-2, 0, 0);
            gl.glVertex3f(-2, 0, -1);
            gl.glVertex3f(2, 0, 0);
            gl.glVertex3f(2, 0, -1);
            */
            gl.glVertex3f(0, -2, 0);
            gl.glVertex3f(0, -2, -1);
        }

        else
        {
            gl.glColor3f(0, 1, 0);
            // normal of triangle 1 [1, 2, 3]
            gl.glVertex3f(0, 1, 0);
            gl.glVertex3f(0, 1, 1);

            gl.glColor3f(1, 0, 0);
            // normal of triangle 2 [1, 2, 4]
            gl.glVertex3f(0, -1, 0);
            gl.glVertex3f(0, -1, -1);
        }

        gl.glEnd();
        gl.glEnable(GL.GL_LIGHTING);
    }

    public void init(final GLAutoDrawable drawable)
    {
        final GL gl = drawable.getGL();

        gl.glEnable(GL.GL_DEPTH_TEST);

        gl.glLightModeli(GL.GL_LIGHT_MODEL_TWO_SIDE, GL.GL_TRUE);

        gl.glEnable(GL.GL_LIGHT0);
        gl.glEnable(GL.GL_LIGHTING);
        gl.glLightfv(GL.GL_LIGHT0, GL.GL_AMBIENT, FloatBuffer.wrap(new float[]{0, 0, 0, 1}));
        gl.glLightfv(GL.GL_LIGHT0, GL.GL_DIFFUSE, FloatBuffer.wrap(new float[]{1, 1, 1, 1}));
        gl.glLightfv(GL.GL_LIGHT0, GL.GL_SPECULAR, FloatBuffer.wrap(new float[]{1, 1, 1, 1}));
        gl.glLightfv(GL.GL_LIGHT0, GL.GL_POSITION, FloatBuffer.wrap(new float[]{0, 0, 6, 1})); // GL_POSITION reads 4 floats; w = 1 for a positional light

        gl.glMaterialfv(GL.GL_FRONT_AND_BACK, GL.GL_AMBIENT, FloatBuffer.wrap(new float[]{0.2f, 0.1f, 0, 1}));
        gl.glMaterialfv(GL.GL_FRONT_AND_BACK, GL.GL_DIFFUSE, FloatBuffer.wrap(new float[]{0.6f, 0.2f, 0.1f, 1}));
        gl.glMaterialfv(GL.GL_FRONT_AND_BACK, GL.GL_SPECULAR, FloatBuffer.wrap(new float[]{0.6f, 0.2f, 0.1f, 1}));
        gl.glMaterialfv(GL.GL_FRONT_AND_BACK, GL.GL_EMISSION, FloatBuffer.wrap(new float[]{0, 0, 0, 1}));
        gl.glMaterialf(GL.GL_FRONT_AND_BACK, GL.GL_SHININESS, 50);

        gl.glMatrixMode(GL.GL_PROJECTION);
        gl.glLoadIdentity();
        new GLU().gluPerspective(50, (double) drawable.getWidth() / drawable.getHeight(), 1, 1000); // cast avoids integer division in the aspect ratio
        gl.glMatrixMode(GL.GL_MODELVIEW);
        gl.glLoadIdentity();

        gl.glLineWidth(3);
    }

    private boolean isWoken()
    {
        try
        {
            Thread.sleep(10);
        }

        catch (InterruptedException e)
        {
            e.printStackTrace();
        }

        return true;
    }

    public void reshape(
            final GLAutoDrawable drawable,
            final int x,
            final int y,
            final int width,
            final int height)
    {
    }

    public void displayChanged(
            final GLAutoDrawable drawable,
            final boolean mode,
            final boolean device)
    {
    }
}






[Edited by - hoonsworld on January 28, 2008 3:01:33 PM]

Yes, this is a fundamental problem in OpenGL. For the lighting to look good, the polygon winding and the normals have to be consistent. I haven't tested this, but you could probably solve the problem like this:

Draw front facing polygons:
- Enable back face culling
- Draw the scene

Draw back facing polygons:
- Flip the normals
- Enable front face culling
- Draw the scene again
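
A minimal JOGL sketch of that idea (untested; drawScene(gl, flipNormals) is a hypothetical helper that submits the geometry with optionally negated normals):

// Pass 1: front faces with the stored normals.
gl.glEnable(GL.GL_CULL_FACE);
gl.glCullFace(GL.GL_BACK);
drawScene(gl, false);   // hypothetical helper

// Pass 2: back faces with the normals negated.
gl.glCullFace(GL.GL_FRONT);
drawScene(gl, true);    // same geometry, normals flipped

gl.glDisable(GL.GL_CULL_FACE);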

Quote:

Is there an OpenGL statement to correct this?


there is also:

glFrontFace(GLenum mode);

(where mode is GL_CCW or GL_CW, i.e. counter-clockwise or clockwise respectively),
which would suit the simple, hardcoded and constrained nature of your sample code.
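
For example (untested, but matching the hardcoded data in your sample), you could switch the front-face definition around the clockwise triangle:

gl.glFrontFace(GL.GL_CCW); // default: counter-clockwise triangles are front-facing
// ... draw triangle 1 [1, 2, 3] ...
gl.glFrontFace(GL.GL_CW);  // now clockwise triangles count as front-facing
// ... draw triangle 2 [1, 2, 4] ...
gl.glFrontFace(GL.GL_CCW); // restore the default

With GL_LIGHT_MODEL_TWO_SIDE enabled, the back-facing side is then lit with the reversed normal.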

Editorial comment: it is a fundamental issue the programmer (or modeller?) should account for with regard to 3D geometry, in both OpenGL and DirectX.

Quote:
Original post by deathkrush
Yes, this is a fundamental problem in OpenGL.


I wouldn't say this is a problem in OpenGL. Polygon winding and normals need not be consistent: if the normals are correct, your 3D model can still look good even if its polygons have a lousy winding.

For hoon: when we use averaged normals, we are expecting a smooth surface. If that is not what you want, why average the normals in the first place? That polygons share the same vertices doesn't mean they must share normals. And if your artist gives you a 3D model that shares (or doesn't share) normals, just let it be.

For more details about how and when to average the normals, please refer to
http://www.xmission.com/~nate/smooth.html
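
The core idea from that page, as a rough Java sketch (all names invented for illustration): average a face normal into the vertex normal only if the neighbouring face is within a crease-angle threshold of the face being shaded:

// Returns the smoothed normal for one corner of a face: neighbouring face
// normals are averaged in only if they deviate from 'faceNormal' by less
// than 'creaseAngleDeg' degrees (all normals assumed unit length).
static float[] smoothedNormal(float[][] adjacentFaceNormals,
                              float[] faceNormal, double creaseAngleDeg)
{
    final double cosThreshold = Math.cos(Math.toRadians(creaseAngleDeg));
    float x = 0, y = 0, z = 0;
    for (float[] n : adjacentFaceNormals)
    {
        double dot = n[0] * faceNormal[0] + n[1] * faceNormal[1] + n[2] * faceNormal[2];
        if (dot >= cosThreshold) // nearly coplanar: share a smooth normal
        {
            x += n[0];
            y += n[1];
            z += n[2];
        }
    }
    float len = (float) Math.sqrt(x * x + y * y + z * z);
    return new float[]{x / len, y / len, z / len};
}

Note that faceNormal itself should be among adjacentFaceNormals, so for crease angles below 90 degrees the sum can never degenerate to zero.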

Hi

Thanks for your answers :)

Quote:
Original post by ma_hty
That polygons share the same vertices doesn't mean they must share normals.


Yes, I tried it with a second, inverted normal on the shared vertices and it works correctly :)
I thought an OpenGL statement existed for this problem :(

OK, now I will write a mesh algorithm that recognizes oppositely wound triangles via their adjacent edges and vertex order. A flipped triangle gets a boolean flag so that the inverted normal is used.
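
A sketch of the shared-edge test such an algorithm could build on (method name is illustrative): two triangles that share an edge are consistently wound exactly when they traverse that edge in opposite directions:

// Returns true if triangles 'a' and 'b' (each 3 vertex indices) traverse
// their shared edge in opposite directions, i.e. are consistently wound.
static boolean consistentlyWound(int[] a, int[] b)
{
    for (int i = 0; i < 3; i++)
    {
        int a0 = a[i], a1 = a[(i + 1) % 3];
        for (int j = 0; j < 3; j++)
        {
            int b0 = b[j], b1 = b[(j + 1) % 3];
            if (a0 == b1 && a1 == b0) return true;  // edge used as v->w and w->v
            if (a0 == b0 && a1 == b1) return false; // same direction: one is flipped
        }
    }
    return true; // no shared edge, nothing to compare
}

Starting from a seed triangle, a flood fill over the edge adjacency could then set the inverted-normal flag on every neighbour for which this test fails.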

I think it is a good idea :)
Or are there other ideas?

Here is our mini project:
gui3d.org

Best regards from Hamburg
hoon

The correct way is to obtain a 3D model with a consistent winding, instead of struggling with the winding problem. In general, I can easily construct a 3D model with a winding so ambiguous that it is impossible to fix. Therefore, no matter how much effort you spend on your routine, you cannot handle all cases.

By the way, any 3D modelling package or 3D scanner will try its best to produce a 3D model with consistent winding. Why bother?

Arr......

In case your 3D model has its back faces visible, you can tell OpenGL to flip the normal whenever a face is back-facing via

glLightModelf(GL_LIGHT_MODEL_TWO_SIDE, 1.0);

Hi ma_hty :)

In the example I already set this statement:

gl.glLightModeli(GL.GL_LIGHT_MODEL_TWO_SIDE, GL.GL_TRUE);

It works correctly, but it is not the solution to the problem :(


Yes, you are right! Any 3D modeller will try to produce consistent windings. But our program/engine works with simulation data too :) This simulation data is based on finite elements. The user has full control over the winding of each element (polygon) or element group. Here are 2 links for the background and a little simulation video:
en.wikipedia.org/wiki/Finite_element_analysis
ncac.gwu.edu

This is the reason for our effort.
Inconsistent winding is a feature of our engine, but I have to visualize it correctly.

OK, Thanks again :)

Regards from Germany
hoon

Hi :)

In our program we have to manage the windings. The windings are important for most finite element solvers. These solvers are very sophisticated programs in terms of the mathematics and physics involved.

For example: An airbag has internal pressure!
But in which direction does the pressure act?
The user has to set up this information via the windings or the finite
element normals. The following video shows an airbag simulation:
airbag deployment

This is not the only role winding plays; there are contact problems, and many others ...
In our program the user can select one or several polygons or groups and invert their winding direction.
Selection works via picking or an intelligent fence mechanism with box or polygon selection.

For flat shading this is not a problem, but smooth shading is difficult :(

I think I will write a mesh algorithm to detect the oppositely wound triangles or quads!?

Regards
hoon

If you are talking about pressure simulation, I think specifying normals alone is enough. You have no reason to control winding in this case.

I'm not quite sure what you mean by "contact problems". However, if you are talking about the relations between faces, it is definitely a bad idea to let users control winding.

As for the video you showed above, I don't see any necessity for arbitrary winding there. You can do exactly the same with consistent winding and varying normals. And very likely, it will also simplify your program and your calculations to a huge extent.

Vertex normals usually come from averaging face normals. To get the vertex normals for each frame, you can keep the contributing faces of each vertex and average their face normals after each deformation.
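
For instance (a rough sketch; the data layout is invented for illustration), with a per-vertex list of incident faces the per-frame update is a single pass:

// vertexFaces[v] lists the faces incident to vertex v (built once).
// After each deformation step, recompute the face normals, then:
for (int v = 0; v < vertexCount; v++)
{
    float x = 0, y = 0, z = 0;
    for (int f : vertexFaces[v])
    {
        x += faceNormal[f][0];
        y += faceNormal[f][1];
        z += faceNormal[f][2];
    }
    float len = (float) Math.sqrt(x * x + y * y + z * z);
    if (len > 1e-6f)
        vertexNormal[v] = new float[]{x / len, y / len, z / len};
    // else keep the previous normal: the average degenerated
}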

The simulation programs calculate the normal direction from the winding (vertex order), via the right-hand rule and the cross product.

For the airbag we do have consistent normals; it was a bad example :( But for other simulations the normals (vertex orders) can vary. The requirements of the simulation programs are very strict.

Here is a worst case:
I have 2 adjacent triangles with the following polygon normals:
triangle 1: [0, 0, 1]
triangle 2: [0, 0, -1]
The averaged vertex normal is [0, 0, 0] :(

Only the polygon winding information can help here. It is important both for the vertex normal calculation and for rendering:
for rendering, the oppositely wound triangles use the inverted vertex normal for smooth shading. This is my idea for the mesh algorithm ...
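
A tiny sketch of that idea (names are hypothetical): negate a flagged triangle's face normal before it enters the average, so [0, 0, 1] and the flipped [0, 0, -1] accumulate to [0, 0, 1] instead of cancelling to zero:

// Accumulate the face normals around one vertex, honouring the flip flags.
float x = 0, y = 0, z = 0;
for (int f : facesAroundVertex)        // hypothetical adjacency list
{
    float s = flipped[f] ? -1f : 1f;   // per-face winding flag from the mesh algorithm
    x += s * faceNormal[f][0];
    y += s * faceNormal[f][1];
    z += s * faceNormal[f][2];
}
// normalize (x, y, z); the opposing normals no longer cancel out

At draw time, a flagged triangle then sends the negated vertex normal to glNormal3f.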

Quote:

In our program the user can select one or several polygons or groups and invert their winding direction.
Selection works via picking or an intelligent fence mechanism with box or polygon selection.


Looks like the definition of the problem is coming along
(it often takes a long time to simply describe an actual problem
on a forum)

It still looks a little hazy:
So... let's see if my (sort of) paraphrasing of the problem is close:

In the program the user can select one or several polygons or groups and invert the winding direction, for physics/physical simulation purposes only?
[i.e. to actually generate different normals (based on winding, or even user-defined)] to experiment
with, say, physical simulations / 'what if' scenarios?

But you want the 'real life' visual appearance of the structure/object in question to remain... err.. realistic (as it would appear in real life?)
when smooth shaded?

If so:

I would 'conceptually' devise 2 systems:
The physical simulation system and the visual/rendering system.
Maintain separate normals for each system. Or is that too simple?

Another (less appealing) option (with the same aim, but different means) would be to store a flag (with the face or vertex; that's up to you) indicating whether the original normal (winding?) has been flipped by the user or not. When it comes time to do the physical simulation processing, check the flag to see if it is flipped, and use the flipped normal (I can only presume this is the reason it is flipped). When it comes time to do the visual rendering, use the original normal data (I presume your aim is to maintain realistic visuals). Or vice versa.
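
That second option could be as small as this (a sketch; field and method names are invented):

// One record per face: the normal derived from the original winding,
// plus the user's flip flag.
final class FaceData
{
    float[] baseNormal;   // unit normal from the consistent winding
    boolean userFlipped;  // set when the user inverts the winding

    float[] simulationNormal() // what the solver sees
    {
        return userFlipped
                ? new float[]{-baseNormal[0], -baseNormal[1], -baseNormal[2]}
                : baseNormal;
    }

    float[] renderNormal()     // what the renderer sees
    {
        return baseNormal;     // visuals stay consistent
    }
}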

Is there another way?

Let me be a bit more presumptuous:

I bet you knew all this, but you just wanted to see if there are other better ways to go about it?

Or your system does not allow this?

[Edited by - steven katic on January 31, 2008 5:26:48 PM]

Arr......

I think you are abusing the winding information.

For a smooth surface, its triangles should have a consistent winding. You shouldn't allow the user to change it in the first place; that's why you have your problem. The correct way to fix it is to fix the faulty design of the user interface. Otherwise you will get strange results for complicated 3D models no matter how much effort you spend on your correction routine, because some vital information about the geometry has been lost.

And vertex normals are for display only; in general they are not very useful for simulation. But there is nothing to stop you from running your simulation with face normals and displaying your 3D model with vertex normals.

And for averaging face normals into vertex normals, you can refer to
http://www.xmission.com/~nate/smooth.html
for more information. The way to avoid the problems of averaging is discussed there. The basic idea is simple: don't do the averaging where it is not appropriate.

Quote:

I think you are abusing the winding information.


Who, me or hoonsworld?

Why does/would the user change the winding?
(I am just assuming the user would want to be able to do something with it:
I can't imagine what, but a non-existent physics/physical simulation system is as good a place as any to use it, or at the very least it seems like a system requirement?)

Look further back in the replies and reread the reasons hoonsworld gives for allowing the user to change the winding.



You there, hoonsworld?
(EDIT: hoonsworld must have what he needs now; another happy customer.
RE: abuse of winding info: it's OK, isn't it, so long as it is not illegal abuse :))


[Edited by - steven katic on February 1, 2008 4:31:57 PM]

Hello steven and ma_hty

yesterday I was at a party and drank a lot of yummy beer :)


OK, for example, winding control (node/vertex order) is useful for difficult contact definitions in a crash simulation. Some contact definitions depend on the normal directions, and the mesh generators do not know in which direction the user needs the normals :(
See the following crash video:
Pedestrian impact

Quote:
But you want the 'real life' visual appearance of the structure/object in question to remain... err.. realistic (as it would appear in real life?)
when smooth shaded?

If so:

I would 'conceptually' devise 2 systems:
The physical simulation system and the visual/rendering system.
Maintain separate normals for each system. Or is that too simple?

Another (less appealing) option (with the same aim, but different means) would be to store a flag (with the face or vertex; that's up to you) indicating whether the original normal (winding?) has been flipped by the user or not. When it comes time to do the physical simulation processing, check the flag to see if it is flipped, and use the flipped normal (I can only presume this is the reason it is flipped). When it comes time to do the visual rendering, use the original normal data (I presume your aim is to maintain realistic visuals). Or vice versa.

Yes, this is correct :)


Here is an example with the commercial program Patran (pics). This program manages the
pre- and post-processing for the Nastran code.

The first pic shows the oppositely wound elements 7, 8 and 9 and the normally wound
elements 16, 17, 18, 25, 26 and 27. For example, element 9 has the vertex
order [9, 19, 20, 10] and element 18 has the order [19, 20, 30, 29]. They are wound in opposite directions :)

The second pic shows the polygon normals and the smooth shading. There are also two systems!




Quote:
http://www.xmission.com/~nate/smooth.html

Thanks for the link ma_hty :)

That is not the winding solution, but it is very helpful for the next step and for the quality of the appearance :)


OK, thanks for everything - now I'll start the implementation with the flipped flag and the separate normal treatment
(physical/visual)

Best regards from Hamburg :)
hoon

Quote:
Original post by hoonsworld
OK, for example, winding control (node/vertex order) is useful for difficult contact definitions in a crash simulation. Some contact definitions depend on the normal directions, and the mesh generators do not know in which direction the user needs the normals :(


Oh... people refer to your "contact problem"/"contact definitions" as collision. And I think I have repeated myself too many times: your application doesn't require an arbitrarily defined winding. Everything you claim requires winding information in fact requires normals instead.

The problems you have probably come from your problematic user interface implementation. If you want to solve your problem, that's the place to start.

Yes, this is the way! We are designing a very flexible engine for multi-disciplinary fields of application, in general for simulation, animation and cyberspace :)

Yes, in the world of simulation people say "contact problem" for collision :)
The simulation engineers calculate with spring forces when a vertex (node) penetrates a triangle or quad (finite element). Keep in mind: "A simulation is an abstract physical calculation to portray reality". A lot of the products in our lives are designed on computers. Here are two examples:
Toy
Helmet
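
As a toy illustration of why the normal direction matters for such contact definitions (completely schematic, not a real solver): the penalty ("spring") force pushes a penetrating node back along the element normal, so a flipped normal pushes it to the wrong side:

// n: outward unit normal of the element, q: a point on the element,
// p: position of the penetrating node, k: penalty stiffness
float depth = -(n[0] * (p[0] - q[0]) + n[1] * (p[1] - q[1]) + n[2] * (p[2] - q[2]));
if (depth > 0) // node is below the element plane
{
    force[0] += k * depth * n[0];
    force[1] += k * depth * n[1];
    force[2] += k * depth * n[2];
}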

Quote:
Original post by hoonsworld
...
The simulation engineers calculate with spring forces when a vertex (node) penetrates a triangle or quad (finite element).
...


That doesn't require an arbitrarily defined winding either. What's your point?

PS: Please tell me your need for arbitrarily defined winding is not just a lousy way of recording normal information.

Quote:

That doesn't require an arbitrarily defined winding either. What's your point?

PS: Please tell me your need for arbitrarily defined winding is not just a lousy way of recording normal information.


ewwh.. the level of decorum seems to have slipped a notch or two?

Hi ma_hty,

The important point you should try to see here is that the visual normal is treated the way you expect it to be treated visually. The other 'normal', 'flipped normal' or 'user/arbitrarily defined winding' that we have mentioned is a completely different animal: these terms refer to data used by the physics/physical simulation system to simulate (among other things) the behaviour of the particular type of material (that the 3D object may be made of) during contact/collision.

hoonsworld is the expert here though, not me. Sadly I suspect your tone may have put him off from responding to your questions (understandably).

Hopefully you can grasp more from the link(s) hoons provided here:

en.wikipedia.org/wiki/Finite_element_analysis

It's mandatory reading for understanding what hoons is doing:
it's all there (and more) in a much more verbose form than my distilled summary, so you may need to synthesize the data into useful info pertaining to your specific question(s) and concerns if you are really interested.
Or, if you ask nicely, hoons may be interested in discussing FEA examples with you; it is, after all, what he does (as far as this thread is concerned).

I hope that helps you.

PS. I remember you now ma_hty: The Intraobject Transparency post and the dreaded old "cull-mode sorting trick" hack. I hope you found the info you needed on that.

Cheers

[Edited by - steven katic on February 6, 2008 1:02:16 AM]

Steven Katic,

To be frank, I will not believe in something just because the name is big (let alone because some stranger claims to be an expert). I just believe in evidence. That's why I wrote you a demo program in the thread "The Intraobject Transparency".

Anyway, since you don't like my posts, I will simply stop replying to this thread. Then you are happy and I save my valuable time. Everyone wins; isn't that excellent?

Yours sincerely,
Gary

Quote:

To be frank, I will not believe in something just because the name is big (let alone because some stranger claims to be an expert). I just believe in evidence.


As do we all?

The evidence is in the authoritative information you find and research, not in anything anyone tries to convince you of. That's why references to relevant background information (hopefully legitimate and valid) are often provided, as hoons has done with FEA and as I think I did with the "cull-mode sorting trick".


