
OpenGL Skybox cubemap not showing properly


I am using a cubemap to render my skybox with OpenGL 3.3 and GLSL 330.
The problem is that it only shows a solid blue skybox.
I can see in gDEBugger that my cubemap is created properly.

Loading the image files and creating the cubemap:
[source lang="cpp"]void TextureCube::LoadTGA(std::string filename, bool mipmap)
{
// used vars
TGAFile tga[6];
memset(tga, 0, sizeof(TGAFile) * 6);
GLenum internForm;
GLenum externForm;

if(mTextureID != 0)
Dispose();

// fill in texture names
mTextures[0] = filename + "_lf.tga";
mTextures[1] = filename + "_rt.tga";
mTextures[2] = filename + "_bk.tga";
mTextures[3] = filename + "_ft.tga";
mTextures[4] = filename + "_dn.tga";
mTextures[5] = filename + "_up.tga";

// load each file
for(int i = 0; i < 6; i++)
{
if( !LoadImageTGA(mTextures[i], &tga[i]))
{
printf("Could not load %s\n", mTextures[i].c_str());

// delete previous loaded tga
for(int j = 0; j < i;j++)
{
delete []tga[i].data;
}
return;
}
}

// set data
mWidth = tga[0].width;
mHeight = tga[0].height;

// create texture and bind it
glGenTextures(1, &mTextureID);
glBindTexture(GL_TEXTURE_CUBE_MAP, mTextureID);

// check which formats to use
if(tga[0].bytesperpixel == 1)
{
internForm = GL_LUMINANCE;
externForm = GL_LUMINANCE;
}
else if(tga[0].bytesperpixel == 3)
{
internForm = GL_RGB;
externForm = GL_BGR;
}
else if(tga[0].bytesperpixel == 4)
{
internForm = GL_RGBA;
externForm = GL_BGRA;
}

// set texture state
glTexParameterf(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameterf(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameterf(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_R, GL_CLAMP_TO_EDGE);
glTexParameterf(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MAG_FILTER, GL_LINEAR);

// check if we want to generate mipmaps
if(mipmap)
{
glTexParameterf(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
glEnable(GL_TEXTURE_CUBE_MAP_SEAMLESS);
glGenerateMipmap(GL_TEXTURE_CUBE_MAP);
}
else
{
glTexParameterf(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
}

// put the data inside
//glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
glTexImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_X, 0, internForm, mWidth, mHeight, 0, externForm, GL_UNSIGNED_BYTE, tga[1].data);
glTexImage2D(GL_TEXTURE_CUBE_MAP_NEGATIVE_X, 0, internForm, mWidth, mHeight, 0, externForm, GL_UNSIGNED_BYTE, tga[0].data);
glTexImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_Y, 0, internForm, mWidth, mHeight, 0, externForm, GL_UNSIGNED_BYTE, tga[5].data);
glTexImage2D(GL_TEXTURE_CUBE_MAP_NEGATIVE_Y, 0, internForm, mWidth, mHeight, 0, externForm, GL_UNSIGNED_BYTE, tga[4].data);
glTexImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_Z, 0, internForm, mWidth, mHeight, 0, externForm, GL_UNSIGNED_BYTE, tga[3].data);
glTexImage2D(GL_TEXTURE_CUBE_MAP_NEGATIVE_Z, 0, internForm, mWidth, mHeight, 0, externForm, GL_UNSIGNED_BYTE, tga[2].data);


// unbind
glBindTexture(GL_TEXTURE_CUBE_MAP, 0);

//free memory
for(int i = 0; i < 6; i++)
{
delete []tga[i].data;
}
}[/source]

The rest:
[source lang="cpp"]class TextureCube
{
private:
std::string mTextures[6];
GLuint mTextureID;
GLuint mWidth;
GLuint mHeight;

public:
TextureCube() {}
~TextureCube() { Dispose(); }

// Create texture
GLvoid LoadTGA(std::string fileName, bool mipmap = false);

// methods
inline GLuint GetID() { return mTextureID; }
inline GLuint GetWidth() { return mWidth; }
inline GLuint GetHeight() { return mHeight; }
inline GLvoid Bind()
{
if(glIsTexture(mTextureID))
{
glBindTexture(GL_TEXTURE_CUBE_MAP, mTextureID);
}
else
{
printf("Not a valid texture name");
}
}

inline GLvoid Unbind() { glBindTexture(GL_TEXTURE_CUBE_MAP, 0); }

inline GLvoid Dispose()
{
if(mTextureID)
{
Unbind();
glDeleteTextures(1, &mTextureID);
}
}
};[/source]
Vertex shader:
[source lang="cpp"]#version 330

layout(location = 0) in vec3 inPos;
out vec3 outUV;
uniform mat4 mvpMatrix;

void main()
{
outUV = inPos;
gl_Position = mvpMatrix * vec4(inPos, 1.0);
}[/source]

Fragment shader:
[source lang="cpp"]#version 330

in vec3 UV;
out vec4 fragcolor;
uniform samplerCube cubeMap;

void main()
{
fragcolor = texture(cubeMap, UV);
}[/source]

Skybox class:
[source lang="cpp"]#ifndef SKYBOX_H_
#define SKYBOX_H_

#include <string>
#include "../System/CAbstractSystem.h"
#include "../Graphics/Buffers/VertexBuffer.h"
#include "../Graphics/TextureCube.h"
#include "../Graphics/Shader.h"
#include "Camera.h"

class Skybox
{
private:
// features
Camera* mCamera;
TextureCube mTexCube;
ShaderProgram mShader;

// buffers
VertexBuffer mVertices;
GLuint mElementsID;
GLuint vaoID;

public:
Skybox() {}

void Init(std::string fileName, Camera* camera)
{
mCamera = camera;

// vertex position in object space
GLfloat verticeAttrib[] =
{
// front
-1.0, -1.0, -1.0,
1.0, -1.0, -1.0,
1.0, 1.0, -1.0,
-1.0, 1.0, -1.0,
// top
-1.0, 1.0, -1.0,
1.0, 1.0, -1.0,
1.0, 1.0, 1.0,
-1.0, 1.0, 1.0,
// back
1.0, -1.0, 1.0,
-1.0, -1.0, 1.0,
-1.0, 1.0, 1.0,
1.0, 1.0, 1.0,
// bottom
-1.0, -1.0, 1.0,
1.0, -1.0, 1.0,
1.0, -1.0, -1.0,
-1.0, -1.0, -1.0,
// left
-1.0, -1.0, 1.0,
-1.0, -1.0, -1.0,
-1.0, 1.0, -1.0,
-1.0, 1.0, 1.0,
// right
1.0, -1.0, -1.0,
1.0, -1.0, 1.0,
1.0, 1.0, 1.0,
1.0, 1.0, -1.0,
};

GLushort elements[] = {
// front
0, 1, 2,
2, 3, 0,
// top
4, 5, 6,
6, 7, 4,
// back
8, 9, 10,
10, 11, 8,
// bottom
12, 13, 14,
14, 15, 12,
// left
16, 17, 18,
18, 19, 16,
// right
20, 21, 22,
22, 23, 20,
};

// generate vertex array object to melt all buffer objects
glGenVertexArrays(1,&vaoID);
glBindVertexArray(vaoID);

// create vertex buffer object
mVertices.Create(verticeAttrib, sizeof(verticeAttrib) / sizeof(GLfloat), GL_STATIC_DRAW);

// indices
glGenBuffers(1, &mElementsID);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, mElementsID);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(elements), elements, GL_STATIC_DRAW);

// load texture
mTexCube.LoadTGA(fileName);

// shader data
std::string vertex;
std::string fragment;

// creating shaders
CAbstractSystem::LoadTextFile("Data\\shaders\\skybox.vert", vertex);
CAbstractSystem::LoadTextFile("Data\\shaders\\skybox.frag", fragment);

// load shaders
mShader.CreateProgram(vertex, fragment);
}

void Dispose()
{
mTexCube.Dispose();
mShader.Dispose();
mVertices.Dispose();

glDeleteBuffers(1, &mElementsID);

glBindVertexArray(0);
glDeleteVertexArrays(1, &vaoID);
}

~Skybox()
{
Dispose();
}

void Draw()
{
GLint OldCullFaceMode;
glGetIntegerv(GL_CULL_FACE_MODE, &OldCullFaceMode);
GLint OldDepthFuncMode;
glGetIntegerv(GL_DEPTH_FUNC, &OldDepthFuncMode);

glCullFace(GL_FRONT);
glDepthFunc(GL_LEQUAL);

Mat4 trans = Mat4::Translate(mCamera->GetWorld().GetPosition()) * Mat4::ScaleUniform(85);
Mat4 combined = mCamera->GetProjection() * mCamera->GetView() * trans;

//get matrix uniform
mShader.Bind();
int mLoc = glGetUniformLocation(mShader.GetID(), "mvpMatrix");
glUniformMatrix4fv(mLoc, 1, GL_FALSE, (GLfloat*)&combined);

//enable vertex position
mVertices.Bind();
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, 0);
glEnableVertexAttribArray(0);

mTexCube.Bind();
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, mElementsID);
glDrawElements(GL_TRIANGLES, 36, GL_UNSIGNED_SHORT, 0);

glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
glDisableVertexAttribArray(0); // vertex positions

glCullFace(OldCullFaceMode);
glDepthFunc(OldDepthFuncMode);

mVertices.Unbind();
mTexCube.Unbind();
mShader.Unbind();
}
};

#endif[/source] Edited by Wilds

I would say the problem is in your mvpMatrix. From your code, it seems you apply a translation, which shouldn't be there.

I have no other good ideas than to make each side a different color and see whether, say, the left really comes out all red and the right all blue; if not, something is going completely wrong somewhere. I would post a picture. Are you drawing any of the scene with it? Is your glClearColor blue? Maybe the faces aren't pointing inward and it isn't drawing the skybox at all, just showing the clear color.
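A quick way to try that, sketched against the loading code above (it assumes the cube map texture has already been created and is currently bound to GL_TEXTURE_CUBE_MAP), is to upload a different 1x1 solid color per face:
[CODE]
// debug sketch: give every cube map face its own solid 1x1 color
// assumes a cube map texture is created and bound to GL_TEXTURE_CUBE_MAP
const GLubyte debugColors[6][3] =
{
    { 255,   0,   0 }, // +X red
    {   0, 255,   0 }, // -X green
    {   0,   0, 255 }, // +Y blue
    { 255, 255,   0 }, // -Y yellow
    { 255,   0, 255 }, // +Z magenta
    {   0, 255, 255 }, // -Z cyan
};

for(int i = 0; i < 6; i++)
{
    glTexImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_X + i, 0, GL_RGB, 1, 1, 0,
                 GL_RGB, GL_UNSIGNED_BYTE, debugColors[i]);
}
[/CODE]
If every face then shows up in its own color, the cube map upload and sampling are fine and the problem is elsewhere.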

Hi, the proper way to do skyboxes is the following:
Create a 1x1x1 box in Blender or whatever editor you prefer; you can just export the default cube to the OBJ format (make sure you set the normals to smooth!).
Load the six faces of your cubemap into a cubemap texture, and bind it to the GL_TEXTURE_CUBE_MAP target when you render it.
When rendering the skybox, make sure you disable depth testing; this way the skybox is rendered properly behind everything.
Then you have to pass a matrix to your shader: the inverse of your modelview matrix.
Then use these shaders:
[CODE]
[vertex shader]
//set version to your target version
#version 420 core

uniform mat4 modelview_inv; //here comes the inverse modelview matrix

in vec4 in_vertex; //here comes the vertex data (ie. the cube)

out cross_shader_data
{
    vec3 tex_coord; //this gets passed to the pixel shader to sample the cubemap
} o;

void main()
{
    o.tex_coord = mat3(modelview_inv) * in_vertex.xyz; //calculate the texture coordinate, then interpolate along the cube faces
    gl_Position = in_vertex; //here's a trick: since we don't transform the cube it will always stay at (0, 0, 0) therefore wherever you go, it stays with you
}

[pixel shader]
//again set the version
#version 420 core

uniform samplerCube texture0; //you need to use a samplerCube for cubemapping

in cross_shader_data
{
    vec3 tex_coord; //incoming texture coordinates
} i;

out vec4 color; //this gets drawn into the framebuffer

void main()
{
    color = texture(texture0, i.tex_coord); //sample the cubemap along the coordinates
}
[/CODE]

It is important to note that you either need to draw the skybox before rendering anything else (so that it doesn't cover any other objects), or you can use stenciling to optimize the whole thing, in which case you need to draw the skybox last.
In addition, you will probably need to adjust (rotate) the incoming cubemap faces (textures), because they may be in another coordinate system and therefore may not end up in the right place when rendered.

For your method: you aren't using a skybox, you are using a backdrop, because you never rotate the box. That is the only reason you need to pass the modelview inverse and so on.

Secondly: why use stenciling in your method, since you are only using one face of the cubemap (a backdrop plane)? Why not just translate it to the far plane, get rid of the stencil operations completely, and just use depth testing?

I wouldn't say that is the proper way. You are doing extra work to generate the same texture coordinates as this guy does with a static cube that gets rotated, and you need to pass extra data and perform extra operations for the inverse. Typically that method is more for a cube-mapped object in the world.

[quote]here's a trick: since we don't transform the cube it will always stay at (0, 0, 0) therefore wherever you go, it stays with you[/quote]
All you need to do to get that trick without using the inverse is to not pass the translation portion of the camera matrix, just the rotation.
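In code, stripping the translation can look something like this, sketched against the OP's #version 330 vertex shader (passing the view and projection matrices separately instead of one mvpMatrix is an assumption here; mat3() simply drops the translation column):
[CODE]
#version 330

layout(location = 0) in vec3 inPos;
out vec3 outUV;

// assumed uniforms: view and projection passed separately instead of one mvpMatrix
uniform mat4 viewMatrix;
uniform mat4 projMatrix;

void main()
{
    outUV = inPos;
    // mat4(mat3(viewMatrix)) keeps only the rotation part, so the box follows the camera
    gl_Position = projMatrix * mat4(mat3(viewMatrix)) * vec4(inPos, 1.0);
}
[/CODE]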

Nope.

Draw a 10x10x10 cube centered on the camera origin (I'm using 10 here because it's comfortably above your near clipping plane distance; adjust as required) but otherwise untransformed. Draw it [u]last[/u], after everything else. Use glDepthRange(1, 1). Disable depth writing but leave the depth test enabled.

This way you get correct positioning, early-Z, a skybox behind all objects, and minimal fillrate overhead.
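Sketched out, that setup looks roughly like this (the skybox draw call itself is a placeholder for whatever draw you already have):
[CODE]
// depth-range trick described above; DrawSkyboxCube() is a placeholder
glDepthRange(1.0, 1.0);   // force every skybox fragment onto the far plane
glDepthMask(GL_FALSE);    // don't write depth
glDepthFunc(GL_LEQUAL);   // keep the depth test on, so scene geometry stays in front

DrawSkyboxCube();         // draw the 10x10x10 cube centered on the camera

glDepthMask(GL_TRUE);     // restore state for the rest of the frame
glDepthRange(0.0, 1.0);
glDepthFunc(GL_LESS);
[/CODE]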

[quote name='dpadam450' timestamp='1344550038' post='4967928']
For your method: you aren't using a skybox, you are using a backdrop, because you never rotate the box. That is the only reason you need to pass the modelview inverse and so on.

Secondly: why use stenciling in your method, since you are only using one face of the cubemap (a backdrop plane)? Why not just translate it to the far plane, get rid of the stencil operations completely, and just use depth testing?

I wouldn't say that is the proper way. You are doing extra work to generate the same texture coordinates as this guy does with a static cube that gets rotated, and you need to pass extra data and perform extra operations for the inverse. Typically that method is more for a cube-mapped object in the world.

[quote]here's a trick: since we don't transform the cube it will always stay at (0, 0, 0) therefore wherever you go, it stays with you[/quote]
All you need to do to get that trick without using the inverse is to not pass the translation portion of the camera matrix, just the rotation.
[/quote]
Well, you'd do stenciling anyway for lighting, so it may come in handy. You would use more than one face of the cube because of perspective, but it's true that if you render it at the back of your frustum it comes out the same. I don't think it's extra work either: it's one matrix multiply versus one matrix multiply (and only one matrix gets passed either way, so it's the same amount of data :) ).

What I can conclude is that it only picks the front texture, the +Z face; I concluded this by making each face of the cubemap a different color.

I have added a link to my executable, please tell me the results.
Windows only: [url="http://www.wildrune.com/files/Gaian.rar"]download[/url]

[quote name='Wilds' timestamp='1344527994' post='4967826']
The cube coordinates are not affected by the transformation as the shader gets the untransformed object space vertices from the VBO.
[/quote]

gl_Position shouldn't be translated either, to keep the cube centered on the camera. Try Ignifex's suggestion and use [b]Mat4 combined = mCamera->GetProjection() * mCamera->GetView();[/b] without the translation part.

I translate the cube to the camera's position to keep it centered on the camera.
I tried what you suggested; the translation shouldn't affect the texture coordinates, only which faces I am seeing based on the cube's orientation.

Ah, that clears up your idea behind the code there and your method should work.
Honestly though, there are only a few things that can still go wrong:
- mCamera->getView() does not handle rotation, although I am sure you use it in other parts of your code as well.
- The translation you apply to move the cube toward the camera is wrong. Why the scale 85?
For the above two, the obvious thing to try is to render your texture coordinates as RGB and see what you get, as in the sketch below.
Although unlikely:
- Your binding of the cube map faces is still wrong. Could you set your uniform samplerCube cubeMap to 0, just to be safe?
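A minimal sketch of that visual debugging, as a temporary replacement for the fragment shader you posted (the texture coordinate input is whatever the vertex shader passes through):
[CODE]
// debug fragment shader: output the texture coordinate as a color
#version 330

in vec3 UV;
out vec4 fragcolor;

void main()
{
    // map [-1, 1] to [0, 1] so negative directions are visible too
    fragcolor = vec4(UV * 0.5 + 0.5, 1.0);
}
[/CODE]
If the whole box comes out a single flat color (or black), the coordinates never make it to the fragment shader.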

Thanks for your comment. I tried visually debugging the skybox; it seems the texture coordinate passed in is (0, 0, 0), so the faces are all black!

- mCamera->getView() does not handle rotation, although I am sure you use it in other parts of your code as well.
[b]It handles rotation, as it is the inverse of the camera's world matrix.[/b]

- The translation you apply to move the cube toward the camera is wrong. Why the scale 85?
[b]This one is also correct; the scale of 85 is just a test value.[/b]

For the above two, the obvious thing to try is to render your texture coordinates as RGB and see what you get.
Although unlikely:
- Your binding of the cube map faces is still wrong. Could you set your uniform samplerCube cubeMap to 0, just to be safe?
[b]Could you tell me how to do this? Just putting 0 after [i][u]uniform samplerCube[/u] tex[/i] is not working.[/b]
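For reference, a sampler uniform is set from the C++ side while the shader program is bound, not in the GLSL source; a minimal sketch, using the names from the Skybox class above:
[CODE]
// bind the cube map to texture unit 0 and point the sampler uniform at it
// (call this while the shader program is bound, e.g. after mShader.Bind())
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_CUBE_MAP, mTexCube.GetID());

GLint samplerLoc = glGetUniformLocation(mShader.GetID(), "cubeMap");
glUniform1i(samplerLoc, 0);   // 0 = texture unit GL_TEXTURE0
[/CODE]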

Okay, I solved it! Thanks everyone, especially Ignifex; your tip about visual debugging helped me a lot!
Good lesson learned: always name the input of the fragment shader the same as the output of the vertex shader!

[b]The problem:[/b]
In the shaders I had named the linked output and input between the vertex and fragment shaders differently.

Wrong:
out vec3 outUV; // inside vertex shader
in vec3 UV; // inside fragment shader

Good:
out vec3 UV; // inside vertex shader
in vec3 UV; // inside fragment shader
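With that rename, the shader pair from the first post becomes:
[CODE]
// vertex shader (skybox.vert)
#version 330

layout(location = 0) in vec3 inPos;
out vec3 UV;
uniform mat4 mvpMatrix;

void main()
{
    UV = inPos;
    gl_Position = mvpMatrix * vec4(inPos, 1.0);
}

// fragment shader (skybox.frag)
#version 330

in vec3 UV;
out vec4 fragcolor;
uniform samplerCube cubeMap;

void main()
{
    fragcolor = texture(cubeMap, UV);
}
[/CODE]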


