OpenGL: Problems with shadows using a Cg dot3 shader


Hello, for several days I have been trying to develop a Cg fragment and vertex shader for dot3-based bump mapping. The shaders appear to work and I get my bump-mapped object, but I have problems with the light and shadowing... Because an image tells more than a thousand words, here is one:

http://img.photobucket.com/albums/v211/Hellhound_01/shadow.jpg

What you see is a simple object. It looks like a dot3 bump-mapped object, but the shadows are too sharp at the edges, and when I move the light around the object the shadows often become translucent. The other problem is that the shading often switches too fast, and sometimes the light is suddenly on the wrong side... I think there could be a problem in my calculations; perhaps someone here can help me. Here is my vertex shader:
// input structure
struct appl2vert
{
	float3 Position      : POSITION;
	float3 Normal        : NORMAL;
	float2 TextureCoords : TEXCOORD0;
	float3 Tangents      : TEXCOORD1;
};

// output structure
struct vert2frag
{
	float4 Position             : POSITION;
	float2 TextureCoords        : TEXCOORD0;
	float3 TangentSpaceLightPos : TEXCOORD1;
};


void main(const appl2vert IN, out vert2frag OUT,
          const uniform float4x4 ModelViewProj,
          const uniform float4x4 InvModelViewMatrix,
          const uniform float4   LightPos)
{
	// transform the vertex to clip space
	OUT.Position = mul(ModelViewProj, float4(IN.Position, 1));

	// get the light from view space into object space
	float4 lightPosOS = mul(InvModelViewMatrix, LightPos);
	float3 surface2light = normalize(lightPosOS.xyz - IN.Position.xyz);

	// calculate the tangent-space matrix (TBN)
	float3 binormal = cross(IN.Normal, IN.Tangents);
	float3x3 tbn_matrix = float3x3(IN.Tangents, binormal, IN.Normal);

	// get light direction L 
	OUT.TangentSpaceLightPos = mul(tbn_matrix, surface2light.xyz);
	
	// transfer coords
	OUT.TextureCoords = IN.TextureCoords;
}
And here is my fragment shader:
// input structure
struct vert2frag
{
    float2 TextureCoords        : TEXCOORD0;
    float3 TangentSpaceLightPos : TEXCOORD1;
};

void main(const vert2frag IN, out float3 oColor : COLOR,
          const uniform float4 AmbientLight,
          const float4 LMs : TEXCOORD4,
		  const uniform sampler2D NormalMap,
		  const uniform sampler2D DecalMap )
{
	// normalize the interpolated tangent-space light direction
	float3 normTangentSpaceLightPos = normalize(IN.TangentSpaceLightPos);

	// unpack the normal map from [0,1] to [-1,1]
	float3 nmap = 2 * (tex2D(NormalMap, IN.TextureCoords).rgb - 0.5);

	// N.L, modulated by the decal color
	oColor = dot(nmap, normTangentSpaceLightPos) * tex2D(DecalMap, IN.TextureCoords).rgb;
}
And here is my OpenGL code fragment:
// Bind the programs
cgGLBindProgram(m_pCgDot3VertexShader);
cgGLBindProgram(m_pCgDot3FragmentShader);
	
// Update matrices
cgGLSetStateMatrixParameter(m_cgpMdlViewProjMatrix, CG_GL_MODELVIEW_PROJECTION_MATRIX, CG_GL_MATRIX_IDENTITY);
cgGLSetStateMatrixParameter(m_cgpInvModelViewMatrix, CG_GL_MODELVIEW_MATRIX, CG_GL_MATRIX_INVERSE);

// Update light position
cgGLSetParameter3f(m_cgpLightPos, m_pLightSource->m_pvLightPosition[0], m_pLightSource->m_pvLightPosition[1], m_pLightSource->m_pvLightPosition[2]);
	
// Enable the profiles
cgGLEnableProfile(m_pCgVertexProfile);
cgGLEnableProfile(m_pCgFragmentProfile);

// Set Decal Texture (Param1 TEXCOORDS0 as TextureCoords)
texIter = pDmgLvTexturesToMap->find(CTextureSetElement::TYPE_DECAL);

// Activate texcoord array for VBOs
glClientActiveTextureARB(GL_TEXTURE0_ARB);
glBindBufferARB(GL_ARRAY_BUFFER_ARB, getGPUTextureOffsetID());
glEnableClientState(GL_TEXTURE_COORD_ARRAY);
glTexCoordPointer(2, GL_FLOAT, 0, (char *) NULL);
glEnable(GL_TEXTURE_2D);

// Activate and bind the decal texture
glActiveTextureARB(GL_TEXTURE0_ARB);
glBindTexture(GL_TEXTURE_2D, getTextureOffsetID());

// bind texture to fragment-shader
cgGLSetTextureParameter(m_cgpBaseTexture, getTextureOffsetID());
cgGLEnableTextureParameter(m_cgpBaseTexture);

// bind tangents to vertex-shader (Param2 TEXTURECOORDS1)
for (int i = 0; i < m_pMeshesToRender->getVertexCount(); i++) {
	glMultiTexCoord3fARB(GL_TEXTURE1_ARB, m_pMeshesToRender->m_pvTangents[i].m_fX, m_pMeshesToRender->m_pvTangents[i].m_fY, m_pMeshesToRender->m_pvTangents[i].m_fZ);
}

	
// bind normal texture to fragment-shader
cgGLSetTextureParameter(m_cgpNormalMapTexture, getNormalTextureOffsetID());
cgGLEnableTextureParameter(m_cgpNormalMapTexture);
Thanks for any help, Christian

I've tested the shaders again and again and figured out that only two states of light are computed. When I place the light position on the right side of the object, I get correct lighting of the object from the right.

When I move the light position closer to the object, the reflection should normally become more intense, but no such change is computed. Likewise, when I move the light further away, the object should become darker, but nothing happens...

When I move the light position through the object to the other side, the lighting switches hard at the "zero" point to the other side.

I have no idea what could be wrong. The light position looks correctly computed in the shader. Does anyone have an idea what could be wrong?

I can see two things wrong in the vertex shader:

1) The light vector:

float4 lightPosOS = mul(InvModelViewMatrix, LightPos);
float3 surface2light = normalize(lightPosOS.xyz - IN.Position.xyz);

lightPosOS is in eye space, but IN.Position is in object space. Multiply IN.Position with the ModelView matrix so it is in eye space too.

2) The TBN matrix:

Again, the tangent and normal vectors are in object space. You must multiply them with InvModelViewMatrix so they are in eye space, since the light vector is also in eye space before you multiply it with the TBN matrix. The binormal will automatically be in eye space, since it is computed from the T and N vectors.

Generally, remember that for vector calculations to be correct, all vectors must be in the same space.

Thanks Mikeman for your fast reply. I've followed your instructions and changed the vertex shader code, but that was not the failure; nothing has changed. The problem with the lighting is still the same...

Here is the changed shader code:


// Transform vertex, tangents and normals to eye space
float4 vertPosES = mul(ModelViewMatrix, float4(IN.Position, 1));
float3 normalsES = mul(InvModelViewMatrix, float4(IN.Normal, 1));
float3 tangentsES = mul(InvModelViewMatrix, float4(IN.Tangents,1));

// Calculate the light direction L
float3 surface2light = normalize(lightPosES.xyz - vertPosES.xyz);

// Calculate binormals and TangentSpaceMatrix (TBN)
float3 binormal = cross( normalsES, tangentsES);
float3x3 tbn_matrix = float3x3( tangentsES, binormal, normalsES);

// Calculate tangent-space lightposition
OUT.TangentSpaceLightPos = mul(tbn_matrix, surface2light.xyz);


I've also corrected another failure, with no effect: the light position parameter in the vertex shader is a float4, but I had only hooked up a float3 light position. Now I send four float values from the OpenGL application.
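
For reference, the updated call now looks something like this (just a sketch; cgGLSetParameter4f is the four-float variant of the runtime call I already use above, and w = 1 marks the value as a position):

// send the light position as a full float4 (w = 1 for a point light)
cgGLSetParameter4f(m_cgpLightPos,
    m_pLightSource->m_pvLightPosition[0],
    m_pLightSource->m_pvLightPosition[1],
    m_pLightSource->m_pvLightPosition[2],
    1.0f);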

[Edited by - Hellhound on January 8, 2005 6:26:54 AM]

Quote:

float3 normalsES = mul(InvModelViewMatrix, float4(IN.Normal, 1));
float3 tangentsES = mul(InvModelViewMatrix, float4(IN.Tangents,1));


Normal and Tangent are directions, not points. Writing float4(IN.Normal, 1) sets w = 1, which is for points. That means they get affected by translations, which they shouldn't be. The correct version would be:

float3 normalsES = mul(InvModelViewMatrix, float4(IN.Normal, 0));
float3 tangentsES = mul(InvModelViewMatrix, float4(IN.Tangents,0));

Oops, a copy-and-paste failure... I've corrected it, but nothing happened; it's still the same problem. Could it be that my tangents are wrong? Here is the code of my tangent calculation:


CVector3f Object::calcTangentForTris(unsigned int index0, unsigned int index1, unsigned int index2)
{
	// texcoord deltas along the two triangle edges
	float fu21 = m_pvTextureCoords[index1].m_fX - m_pvTextureCoords[index0].m_fX;
	float fv21 = m_pvTextureCoords[index1].m_fY - m_pvTextureCoords[index0].m_fY;

	float fu31 = m_pvTextureCoords[index2].m_fX - m_pvTextureCoords[index0].m_fX;
	float fv31 = m_pvTextureCoords[index2].m_fY - m_pvTextureCoords[index0].m_fY;

	// per component: build (position delta, du, dv) vectors, cross them and
	// read the tangent component off the resulting plane equation
	CVector3f vA(m_pvVerticesCoords[index1].m_fX - m_pvVerticesCoords[index0].m_fX, fu21, fv21);
	CVector3f vB(m_pvVerticesCoords[index2].m_fX - m_pvVerticesCoords[index0].m_fX, fu31, fv31);
	CVector3f vC = CVector3f::getCrossProduct(vA, vB);

	CVector3f vTangent = CVector3f();

	if (vC.m_fX > 0.00001f)
		vTangent.m_fX = -vC.m_fY / vC.m_fX;

	vA.setVectorCoords(m_pvVerticesCoords[index1].m_fY - m_pvVerticesCoords[index0].m_fY, fu21, fv21);
	vB.setVectorCoords(m_pvVerticesCoords[index2].m_fY - m_pvVerticesCoords[index0].m_fY, fu31, fv31);
	vC = CVector3f::getCrossProduct(vA, vB);

	if (vC.m_fX > 0.00001f)
		vTangent.m_fY = -vC.m_fY / vC.m_fX;

	vA.setVectorCoords(m_pvVerticesCoords[index1].m_fZ - m_pvVerticesCoords[index0].m_fZ, fu21, fv21);
	vB.setVectorCoords(m_pvVerticesCoords[index2].m_fZ - m_pvVerticesCoords[index0].m_fZ, fu31, fv31);
	vC = CVector3f::getCrossProduct(vA, vB);

	if (vC.m_fX > 0.00001f)
		vTangent.m_fZ = -vC.m_fY / vC.m_fX;

	vTangent.normalizeVector();
	return vTangent;
}
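
For comparison, here is the textbook per-triangle tangent computation written with plain floats instead of my CVector3f class. This is only a sketch I use as a cross-check (calcTangentReference is just a throwaway name), not engine code:

#include <math.h>

// e1/e2: triangle edges in object space; (du1,dv1)/(du2,dv2): texcoord deltas
void calcTangentReference(const float e1[3], const float e2[3],
                          float du1, float dv1, float du2, float dv2,
                          float tangent[3])
{
    // solve  e1 = du1*T + dv1*B  and  e2 = du2*T + dv2*B  for the tangent T
    float det = du1 * dv2 - du2 * dv1;
    float r = (det != 0.0f) ? 1.0f / det : 0.0f; // degenerate UVs give a zero tangent

    for (int k = 0; k < 3; ++k)
        tangent[k] = r * (dv2 * e1[k] - dv1 * e2[k]);

    // normalize the result
    float len = sqrtf(tangent[0] * tangent[0] + tangent[1] * tangent[1] + tangent[2] * tangent[2]);
    if (len > 0.0f)
        for (int k = 0; k < 3; ++k)
            tangent[k] /= len;
}

If the two disagree for a simple triangle, then my calculation above is the suspect.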

I've added some additional pictures which show the lighting failure in more detail (the purple sphere represents the position of the light):

http://img.photobucket.com/albums/v211/Hellhound_01/rightLight.jpg
http://img.photobucket.com/albums/v211/Hellhound_01/leftLight.jpg
http://img.photobucket.com/albums/v211/Hellhound_01/FailureLight.jpg

The last image shows the lighting failure in detail. I've marked the edge of the "texture switch" with red ellipses. The lower part shows the texture lit from the left; the upper part shows it lit from the right...

Looks like the fault lies with the model (e.g. the normals). Try loading it in another program, e.g. one from http://www.typhoonlabs.com/

That was my thought too, so I've tested my environment with an easier model, a simple triangle where I can check the vertices and normals... The failure is the same...

Hey, I just saw that:
Quote:

// bind tangents to vertex-shader (Param2 TEXTURECOORDS1)
for (int i = 0; i < m_pMeshesToRender->getVertexCount(); i++) {
	glMultiTexCoord3fARB(GL_TEXTURE1_ARB, m_pMeshesToRender->m_pvTangents[i].m_fX, m_pMeshesToRender->m_pvTangents[i].m_fY, m_pMeshesToRender->m_pvTangents[i].m_fZ);
}


This can't be right, especially since you use VBOs. It's just glMultiTexCoord commands issued one after another, with no vertices sent to the GPU. It does nothing but constantly change the current texcoord, and in the end it "stays" at the last value.
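
Per-vertex data has to be issued either between glBegin/glEnd or through an array that the draw call actually reads. Roughly like this (only a sketch; tangents/verts stand in for your tightly packed arrays):

// immediate mode: set the texcoord *before each vertex*
glBegin(GL_TRIANGLES);
for (int i = 0; i < vertexCount; i++) {
    glMultiTexCoord3fARB(GL_TEXTURE1_ARB, tangents[i].x, tangents[i].y, tangents[i].z);
    glVertex3f(verts[i].x, verts[i].y, verts[i].z);
}
glEnd();

// or with arrays: point unit 1 at the tangents and let glDrawArrays/
// glDrawElements fetch one entry per vertex
glClientActiveTextureARB(GL_TEXTURE1_ARB);
glEnableClientState(GL_TEXTURE_COORD_ARRAY);
glTexCoordPointer(3, GL_FLOAT, 0, tangents);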

Sorry Mikeman, that line of code is obsolete; I forgot to remove it from the post. For several days now I have been using the following statements to hook the tangents up to the vertex shader:

// Hook Tangents with vertex Shader
glEnableClientState(GL_TEXTURE_COORD_ARRAY);
glClientActiveTextureARB(GL_TEXTURE1_ARB);
glTexCoordPointer(3, GL_FLOAT, 0, m_pvTangents);

Tell me if I am wrong, but I think this should work too. When I am back from work, I will try your suggestion.

Don't forget to bind your VBO buffer before calling glTexCoordPointer.
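
Something like this (a sketch; m_tangentVBO is a hypothetical buffer id holding your tangent data):

glClientActiveTextureARB(GL_TEXTURE1_ARB);
glBindBufferARB(GL_ARRAY_BUFFER_ARB, m_tangentVBO);  // bind the buffer first...
glEnableClientState(GL_TEXTURE_COORD_ARRAY);
glTexCoordPointer(3, GL_FLOAT, 0, (char *) NULL);    // ...then the pointer is an offset into it

Otherwise the pointer you pass is treated relative to whatever buffer is still bound from the decal texcoord setup.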

I don't know if that's what you want to achieve, but:

glEnableClientState(GL_TEXTURE_COORD_ARRAY); // this enables the array of a different texture unit
glClientActiveTextureARB(GL_TEXTURE1_ARB);
glTexCoordPointer(3, GL_FLOAT, 0, m_pvTangents); // than the one this call sets the pointer for

Perhaps you want:

glClientActiveTextureARB(GL_TEXTURE1_ARB);
glEnableClientState(GL_TEXTURE_COORD_ARRAY);
glTexCoordPointer(3, GL_FLOAT, 0, m_pvTangents);

I've done my duty and checked all your replies after work ;o). First, I changed the order of my calls (bind vertex, bind normal, bind textures -> render), but this had no effect...

Second, I've tested glMultiTexCoord3fARB, with one interesting result: the whole texture failure is mirrored horizontally by 180 degrees, but it is still there...

And finally I've tested the call order suggested by zedzeek. Holy sh**, I don't know why, but this order crashes my complete system. I've tested it 5 times, with 5 hard system crashes! Could it be that Cg chokes on my tangent values? These values are stored in a special vector object with the coords as public members... (I have no problems with these calls during the normal OpenGL render passes.)

It's quite common for it to crash there in nvoglnt.dll (or whatever driver you're using).

What's happening is that an array is enabled but doesn't have valid data, probably, in your case, one of the texture coord arrays.

First, make sure all the arrays you're not using are disabled. Then, I see here:

float2 TextureCoords : TEXCOORD0;
float3 Tangents : TEXCOORD1;

These two have different sizes; perhaps you're mixing them up.
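
For example, before drawing, something like this (a sketch):

// disable any texcoord arrays you are not feeding this frame
glClientActiveTextureARB(GL_TEXTURE2_ARB);
glDisableClientState(GL_TEXTURE_COORD_ARRAY);
glClientActiveTextureARB(GL_TEXTURE3_ARB);
glDisableClientState(GL_TEXTURE_COORD_ARRAY);
// and the non-texcoord arrays too, if nothing valid is bound to them
glDisableClientState(GL_COLOR_ARRAY);
glDisableClientState(GL_NORMAL_ARRAY);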

The problem with the DLL is new to me. OK, but I've checked the data and the array bounds, and they are valid...

The use of float3 and float2 comes from the kind of data: the float2 values are for the ordinary 2D texture coords, and the float3 is for the vertex tangents... When you look at the shaders there is no mixing of these values; they are used strictly separately. I have no idea what could be wrong up to this point...

Just a question... I don't really know anything about writing shaders, but don't the vert2frag structures need to be the same? The vertex shader's structure holds three pieces of info, and the fragment shader's holds only two. Just curious I guess, but it looked wrong... maybe it helps? :)

Edit: OK, I started reading about shaders, and I know how silly this is now... my bad?

[Edited by - Boruki on January 23, 2005 9:38:56 AM]
