Sign in to follow this  
d h k

[HLSL] Weird dark areas w/ simple diffuse light (screenshots included)

Recommended Posts

Hey there, I'm trying to get some super simple diffuse lighting to work (on a per-pixel level) and it looks absolutely right except that some areas of my level have a tendency to be pretty dark. Let these images explain:
As you can see in the first shot, the light refuses to brighten up that middle part of the wall there. This dark area moves with the light somewhat (second image). Sometimes it gives the impression that it might follow edges of the level geometry (ie. the faces) but it's kind of hard to tell. This is my shader:
float4x4	matWorldViewProj;
float4x4	matWorld;

float4		lightPosition;
float4		lightAmbientColor;
float4		lightDiffuseColor;

struct VS_Input
{
	float4 Position	: POSITION;
	float2 UV	: TEXCOORD0;
	float3 Normal	: NORMAL;
};

struct VS_Output
{
	float4 Position		: POSITION;
	float2 UV			: TEXCOORD0;
	float3 Normal		: TEXCOORD1;
	float3 PositionWorld	: TEXCOORD2;
};

VS_Output MyVertexShader ( VS_Input In )
{
	VS_Output Out;
    
	// transform vertex intro projection space for rendering
	Out.Position = mul ( In.Position, matWorldViewProj );
    
	// simply pass UV coordinates on
	Out.UV = In.UV;
    
	// pass the normal on
	Out.Normal = In.Normal;
    
	// get position in world space
	Out.PositionWorld = In.Position;
	
	return Out;
}

struct PS_Input
{
	float2 UV	: TEXCOORD0;
	float3 Normal	: TEXCOORD1;
	float3 Position	: TEXCOORD2;
};

struct PS_Output
{
	float4 Color	: COLOR;
};

PS_Output MyPixelShader ( PS_Input In, sampler2D tex0 )
{
	PS_Output Out;
	
	// normalize normal and bring it into world space
	In.Normal = normalize ( mul ( In.Normal, matWorld ) );
	
	// bring position into world space
	In.Position = mul ( In.Position, matWorld );
	
	// look up the diffuse color from the texture
	float4 texture_color = tex2D ( tex0, In.UV );
	
	// get the normalized vector from this pixel to light
	float3 LightVector = normalize ( lightPosition - In.Position );
	
	// calculate lighting
	float3 light = max ( dot ( LightVector, In.Normal ), 0.0f );
    
	// calculate the pixel color (ambient and diffuse)
	Out.Color.rgb = ( texture_color.rgb * lightAmbientColor.rgb ) + ( texture_color.rgb * lightDiffuseColor.rgb * light );
	
	// support transparency
	Out.Color.a = texture_color.a;
    
	return Out;
}


What I'm trying to do is, in the vertex shader, just pass on the position and normal (as TEXCOORDs so they are interpolated across the surface) and then in the pixel shader, bring them both into world space (vertex position should be in object space and the normal in normal space, right?), calculate a light vector from pixel position to light position (which is passed to the shader in world space), normalize that vector and do the dot product business between that same vector and the normal to get the lighting brightness. Before, I calculated the normal, the light vector and the position in the vertex shader and then passed these on via TEXCOORDs but that yielded exactly the same results. I moved these calculations to the pixel shader to ensure that everything was done at per-pixel and not per-vertex level. As you can probably tell, I'm still fairly new to shaders so maybe these is something obvious wrong. In any case, any help is greatly appreciated!

Share this post


Link to post
Share on other sites
Hm, my vertex normals don't really have a w-component, I'm afraid. My vertex struct contains a "D3DVECTOR normal", that only has x, y and z. And in my shader, the normals are all float3 as you can see. Will I have to add that?

Share this post


Link to post
Share on other sites
Well mathematically the product of a 3D vector and a 4x4 vector is undefined which might explain your unusual results. You might want to turn your 3D direction vector into 4D vectors and set the W component to zero for them.

Share this post


Link to post
Share on other sites
Tried that but the results are exactly the same, unfortunately.

Here's the version of the shader with explicit float4s:


float4x4 matWorldViewProj;
float4x4 matWorld;

float4 lightPosition;
float4 lightAmbientColor;
float4 lightDiffuseColor;
float lightIntensity;
float lightCutoff;

struct VS_Input
{
float4 Position : POSITION;
float2 UV : TEXCOORD0;
float3 Normal : NORMAL;
};

struct VS_Output
{
float4 Position : POSITION;
float2 UV : TEXCOORD0;
float4 Normal : TEXCOORD1;
float4 PositionWorld : TEXCOORD2;
};

VS_Output MyVertexShader ( VS_Input In )
{
VS_Output Out;

// transform vertex intro projection space for rendering
Out.Position = mul ( In.Position, matWorldViewProj );

// simply pass UV coordinates on
Out.UV = In.UV;

// pass the normal on
Out.Normal.xyz = In.Normal.xyz;

// but make sure the fourth component is 0
Out.Normal.w = 0;

// get position in world space
Out.PositionWorld = In.Position;

return Out;
}

struct PS_Input
{
float2 UV : TEXCOORD0;
float4 Normal : TEXCOORD1;
float4 Position : TEXCOORD2;
};

struct PS_Output
{
float4 Color : COLOR;
};

PS_Output MyPixelShader ( PS_Input In, sampler2D tex0 )
{
PS_Output Out;

In.Normal.w = 0.0f;

// normalize normal and bring it into world space
In.Normal = normalize ( mul ( In.Normal, matWorld ) );

In.Normal.w = 0.0f;

In.Position.w = 0.0f;

// bring position into world space
In.Position = mul ( In.Position, matWorld );

In.Position.w = 0.0f;

// look up the diffuse color from the texture
float4 texture_color = tex2D ( tex0, In.UV );

float4 LightVector;

LightVector.w = 0.0f;

// get the normalized vector from this pixel to light
LightVector = normalize ( lightPosition - In.Position );

LightVector.w = 0.0f;

// calculate lighting
float3 light = max ( dot ( LightVector, In.Normal ), 0.0f );

// calculate the pixel color (ambient and diffuse)
Out.Color.rgb = ( texture_color.rgb * lightAmbientColor.rgb ) + ( texture_color.rgb * lightDiffuseColor.rgb * light );

// support transparency
Out.Color.a = texture_color.a;

return Out;
}

technique EntryPoint
{
pass SinglePass
{
VertexShader = compile vs_2_0 MyVertexShader ( );
PixelShader = compile ps_2_0 MyPixelShader ( );
}
}



Any more ideas, anybody?

Share this post


Link to post
Share on other sites
If your multiplying with "matWorld" does that contain translation? you should only multiply a normal by the upper 3x3 portion of a matrix so it is only affected by rotation.

Share this post


Link to post
Share on other sites
"matWorld" is this:


// get world matrix and send it to the shader
device->GetTransform ( D3DTS_WORLD, &world_matrix );
SendMatrixToShader ( "matWorld", effect, world_matrix );


So I it does contain translation (and scaling) which I don't want to affect the normals with.

What is the best way to multiply a vector with the top-left 3x3 portion of a float4x4 in HLSL? I won't have to do it manually, right?

Share this post


Link to post
Share on other sites
Number of ways you could do it, make another matrix CPU side, that is the same as matWorld but without scale and translation so only applying any rotations when u make the matrix.

If you make the translation portion of the matrix (0,0,0) then you remove translations and can still multiply with a 4x4 matrix... to remove scale from the matrix I'm not 100% sure how to do that with just the matrix itself... if you have formulated matWorld from something like rotation * translation * scale, you can multiply with the inverse of the scale matrix and that will remove the scaling bits. but then again if you've done that you can just pass the rotation matrix in directly.

I usually just make a 3x3 matrix and pass to the shader, because it saves reconstructing a 3x3 from a 4x4 for each vertex or each pixel depending on where your doing it. I use 3x3 because my Normals are 3 component...there is no need to have a normal with a W unless you want to use the W component for something else or pack in some info like handedness,

Share this post


Link to post
Share on other sites
In.Position.w = 0.0f;

// bring position into world space
In.Position = mul ( In.Position, matWorld );

In.Position.w = 0.0f;




Positions need to have their W component set to 1.
Direction vectors need to have their W component set to 0

In.position will not get transformed by the translation component of matWorld if you set its W component to zero.

Share this post


Link to post
Share on other sites
Hm, I have read up on the whole thing and, having read this and this article, I now understand the following:

Quote:

The purpose of transforming a normal by the world matrix is to rotate it so that it's a direction in world-space. This is typically done by only using the 3x3 portion of the world matrix, since this includes all of the rotation data and none of translation (you don't want to translate a normal).

The 3x3 portion of the matrix also can contain scaling data. When it contains scaling data, the normal is scaled by the scaling factor. As long as the scale is uniform (which means that xScale == yScale == zScale), the scaling is okay provided your normalize the resultant normal vector before using it. However if the scale is non-uniform, this won't work. The solution in the case of non-uniform scaling is to calculatate the inverse transpose outside the shader, pass it in, and then transform normals (and also tangents/bitangents, if necessary) by that matrix.

So in summary: if you're using no scaling or uniform scaling in your world matrix you can use the world matrix, if you're using non-uniform scaling then you need to use the inverse transpose.


To be on the safe side, my new plan is to send the inverse transpose to the shader and multiply my normals with that. This is my new shader code:


float4x4 matWorldViewProj;
float4x4 matWorld;
float4x4 matWorldIT;

float4 lightPosition;
float4 lightAmbientColor;
float4 lightDiffuseColor;
float lightIntensity;
float lightCutoff;

struct VS_Input
{
float4 Position : POSITION;
float2 UV : TEXCOORD0;
float3 Normal : NORMAL;
};

struct VS_Output
{
float4 Position : POSITION;
float2 UV : TEXCOORD0;
float4 Normal : TEXCOORD1;
float4 PositionWorld : TEXCOORD2;
};

VS_Output MyVertexShader ( VS_Input In )
{
VS_Output Out;

// transform vertex into projection space for rendering
Out.Position = mul ( In.Position, matWorldViewProj );

// simply pass UV coordinates on
Out.UV = In.UV;

// pass the normal on
Out.Normal.xyz = In.Normal.xyz;

Out.Normal.w = 0.0f;

// get position in world space
Out.PositionWorld = In.Position;

return Out;
}

struct PS_Input
{
float2 UV : TEXCOORD0;
float4 Normal : TEXCOORD1;
float4 Position : TEXCOORD2;
};

struct PS_Output
{
float4 Color : COLOR;
};

PS_Output MyPixelShader ( PS_Input In, sampler2D tex0 )
{
PS_Output Out;

In.Normal.w = 0.0f;

// normalize normal and multiply it by the
// inverse transpose of the world matrix
In.Normal = normalize ( mul ( In.Normal, matWorldIT ) );

In.Position.w = 1.0f;

// bring position into world space
In.Position = mul ( In.Position, matWorld );

// look up the diffuse color from the texture
float4 texture_color = tex2D ( tex0, In.UV );

float4 lightPos = lightPosition;

lightPos.w = 1.0f;

// get the normalized vector from this pixel to light
float3 LightVector = normalize ( lightPos - In.Position );

// calculate lighting
float3 light = max ( dot ( LightVector, In.Normal ), 0.0f );

// calculate the pixel color (ambient and diffuse)
Out.Color.rgb = ( texture_color.rgb * lightAmbientColor.rgb ) + ( texture_color.rgb * lightDiffuseColor.rgb * light );

// support transparency
Out.Color.a = texture_color.a;

return Out;
}





In my application, what I do before rendering a model is I take the desired position, rotation and scale for that model, multiply these together and call "device->SetTransform ()" for the world matrix in order to place it. Then I retrieve the world-, view- and projection matrices and send the world matrix and the world-view-projection matrix to the shader. Then I do this:


D3DXMATRIX worldIT_matrix;
device->GetTransform ( D3DTS_WORLD, &worldIT_matrix );
D3DXMatrixInverse ( &worldIT_matrix, NULL, &worldIT_matrix );


...to calculate the inverse transpose of the world matrix and send that to the shader ("matWorldIT").

What this is does it looks almost like the lighting flipped but not entirely. Hard to explain but it's definitely still absolutely wrong.

Any more input/ideas? I don't really need non-uniform scaling at this point, so I also tried just casting the matWorld to (float3x3) for the multiplication with the normal but that doesn't pass my compiler (a post in one of the discussions I linked to earlier said you could).

Share this post


Link to post
Share on other sites
Quote:

I also tried just casting the matWorld to (float3x3) for the multiplication with the normal but that doesn't pass my compiler


normalize ( mul ( In.Normal.xyz, (float3x3)matWorldIT ) );

should work

Also you're doing a bunch of operations with float4 ... like this:

// get the normalized vector from this pixel to light
float3 LightVector = normalize ( lightPos - In.Position );


lightPos and In.Position are both float4, not sure if the normalize returns what you expect or if it's your problem tho :)

Try to use your original pixel shader (first post) but change the following:

// normalize normal and bring it into world space
In.Normal = normalize ( mul ( In.Normal, matWorld ) );

// bring position into world space
In.Position = mul ( In.Position, matWorld );

...

// get the normalized vector from this pixel to light
float3 LightVector = normalize ( lightPosition - In.Position );


for

// normalize normal and bring it into world space
In.Normal = normalize ( mul ( In.Normal.xyz, (float3x3)matWorld ) );

// bring position into world space
In.Position = mul ( float4(In.Position.xyz,1), matWorld );

...

// get the normalized vector from this pixel to light
float3 LightVector = normalize ( lightPosition.xyz - In.Position );



[Edited by - c3Dp on March 17, 2010 11:37:40 PM]

Share this post


Link to post
Share on other sites
Thanks for the help, guys. Really appreciated so far!

This is the current shader, produces exactly the same problem I described in the OP though:


float4x4 matWorldViewProj;
float4x4 matWorld;

float4 lightPosition;
float4 lightAmbientColor;
float4 lightDiffuseColor;

struct VS_Input
{
float4 Position : POSITION;
float2 UV : TEXCOORD0;
float3 Normal : NORMAL;
};

struct VS_Output
{
float4 Position : POSITION;
float2 UV : TEXCOORD0;
float3 Normal : TEXCOORD1;
float3 PositionWorld : TEXCOORD2;
};

VS_Output MyVertexShader ( VS_Input In )
{
VS_Output Out;

// transform vertex intro projection space for rendering
Out.Position = mul ( In.Position, matWorldViewProj );

// simply pass UV coordinates on
Out.UV = In.UV;

// pass the normal on
Out.Normal = In.Normal;

// get position in world space
Out.PositionWorld = In.Position;

return Out;
}

struct PS_Input
{
float2 UV : TEXCOORD0;
float3 Normal : TEXCOORD1;
float3 Position : TEXCOORD2;
};

struct PS_Output
{
float4 Color : COLOR;
};

PS_Output MyPixelShader ( PS_Input In, sampler2D tex0 )
{
PS_Output Out;

// normalize normal and bring it into world space
In.Normal = normalize ( mul ( In.Normal.xyz, (float3x3)matWorld ) );

// bring position into world space
In.Position = mul ( float4(In.Position.xyz,1), matWorld );

// look up the diffuse color from the texture
float4 texture_color = tex2D ( tex0, In.UV );

// get the normalized vector from this pixel to light
float3 LightVector = normalize ( lightPosition.xyz - In.Position );

// calculate lighting
float3 light = max ( dot ( LightVector, In.Normal ), 0.0f );

// calculate the pixel color (ambient and diffuse)
Out.Color.rgb = ( texture_color.rgb * lightAmbientColor.rgb ) + ( texture_color.rgb * lightDiffuseColor.rgb * light );

// support transparency
Out.Color.a = texture_color.a;

return Out;
}

technique EntryPoint
{
pass SinglePass
{
VertexShader = compile vs_2_0 MyVertexShader ( );
PixelShader = compile ps_2_0 MyPixelShader ( );
}
}




Any more ideas?

Share this post


Link to post
Share on other sites
Quote:
Original post by d h k
Thanks for the help, guys. Really appreciated so far!



Your shader is correct, renders fine, but you should take a careful look at your
normals in the mesh. You should for example run a smooth modifier in max on it, that will smooth large angels and not smooth sharp angels, the result is perfect.

You seem to have normals input(explicit art) data not correct.

Share this post


Link to post
Share on other sites

Create an account or sign in to comment

You need to be a member in order to leave a comment

Create an account

Sign up for a new account in our community. It's easy!

Register a new account

Sign in

Already have an account? Sign in here.

Sign In Now

Sign in to follow this