[SOLVED] per pixel lighting showing as silhouette

Started by
8 comments, last by fkhan 13 years, 6 months ago
I have a basic per-pixel lighting setup and am using the normals in view space.
The model is lit fine if I pass tangents/binormal information in the vertex stream whereas just passing the normal data renders a silhouette.

Screenshot

// vertex shader
PixelLightingVSOutputVc VSBasicPixelLightingNmVc(VSInputNmVc vin)
{
	PixelLightingVSOutputVc output;
	
	// Transform vertex position into projection space
	output.Position = mul(mul(mul(vin.Position, World), View), Projection);
	output.Diffuse = vin.Diffuse;
	// NOTE(review): mul(float3, float4x4) relies on fxc implicitly truncating
	// the matrix (warning X3206); presumably (float3x3)View is intended — confirm.
	float3 vNormal = mul(vin.Normal, View);
	float3 PosViewr = mul(vin.Position, View);
	// NOTE(review): PosViewr is a view-space position, but EyePosition looks like
	// a world-space constant (the later world-space rewrite in this thread treats
	// it that way) — subtracting across spaces gives a wrong view direction.
	float3 ViewDirection = (PosViewr - EyePosition);
	
	output.NormalVS = vNormal;
	// Reflection vector of the eye ray about the vertex normal (for env mapping).
	output.Reflect = reflect(normalize(ViewDirection), normalize(vNormal));
	
	return output;
}
// pixel shader
float4 PSBasicPixelLightingVc(PixelLightingPSInputVc pin) : COLOR
{
	// NOTE(review): pin.Position carries the POSITION semantic, i.e. the
	// projection-space position — subtracting it from EyePosition mixes
	// coordinate spaces. A world/view-space position must be passed through a
	// TEXCOORD interpolator instead (this is the bug identified below in the
	// thread, and what the reposted version fixes).
	float3 posToEye = EyePosition - pin.Position.xyz;
	
	float3 N = normalize(pin.NormalVS);
	float3 E = normalize(posToEye);
	
	// Accumulated diffuse/specular from all lights for this pixel.
	ColorPair lightResult = ComputePerPixelLights(E, N);
	float4 diffuseColor = pin.Diffuse;
	float4 reflectMat = ReflectionColor;
	//AmbientColor is ambience of model
	float4 ambient= float4(AmbientColor * AmbientLightColor,1);
	// NOTE(review): diffuseColor is applied twice (once as a factor, once inside
	// the float4 constructor) — squaring the vertex colour; confirm intended.
	float4 diffuse =  diffuseColor * reflectMat * float4(lightResult.Diffuse * diffuseColor.xyz, 1);
	float4 color = ambient + diffuse + float4(lightResult.Specular, 0);
	
	return color;
}


[Edited by - fkhan on October 1, 2010 8:09:36 AM]
Advertisement
hmm, I tried transforming normals into world space instead of view space, but still get a silhouette.
Quote:float3 posToEye = EyePosition - pin.Position.xyz;

This seems strange, isn't pin.Position the projected position?
I guess it should be in the same coordinate space as EyePosition.
Quote:Original post by Erik Rufelt
Quote:float3 posToEye = EyePosition - pin.Position.xyz;

This seems strange, isn't pin.Position the projected position?
I guess it should be in the same coordinate space as EyePosition.


yes, but I changed it to world space.

float3 wNormal = mul(vin.Normal, World);
float4 PosWorldr = mul(vin.Position, World);

So do you use something like EyePosition - pin.PosWorldr then?
Perhaps I'm misunderstanding your code, but pin.Position will be in projected screen-space, so whether you use view or world space you need to pass another position to the pixel-shader apart from the projected position.
Quote:Original post by Erik Rufelt
So do you use something like EyePosition - pin.PosWorldr then?
Perhaps I'm misunderstanding your code, but pin.Position will be in projected screen-space, so whether you use view or world space you need to pass another position to the pixel-shader apart from the projected position.


I'll repost my shader code after I switched to world space.

// Vertex input: object-space position/normal plus a per-vertex colour.
struct VSInputNmVc
{
	float4	Position	: POSITION;
	float3	Normal		: NORMAL;
	float4  Diffuse		: COLOR0;
};

// VS -> PS interpolators. The world-space position and normal travel in
// TEXCOORDs because the POSITION semantic holds the projected position and
// is not readable in the pixel shader.
struct PixelLightingVSOutputVc
{
	float4	Position	: POSITION;
	float3  PositionWS	: TEXCOORD0;
	float3	NormalWS	: TEXCOORD1;
	float4	Diffuse		: COLOR0;
	float3	Reflect		: TEXCOORD2;
};

// Vertex shader: projects the vertex and forwards world-space position,
// normal and a reflection vector for per-pixel lighting.
PixelLightingVSOutputVc VSBasicPixelLightingNmVc(VSInputNmVc vin)
{
	PixelLightingVSOutputVc output;

	// Transform vertex position into projection space.
	output.Position = mul(mul(mul(vin.Position, World), View), Projection);
	output.Diffuse = vin.Diffuse;

	// Rotate the normal by the upper-left 3x3 of World only; the explicit cast
	// avoids fxc's implicit-truncation warning (X3206) on mul(float3, float4x4).
	// NOTE(review): for non-uniformly scaled models this should use the
	// inverse-transpose of World — confirm the models are uniformly scaled.
	float3 wNormal = mul(vin.Normal, (float3x3)World);
	float3 PosWorldr = mul(vin.Position, World).xyz;
	// Incident ray from the eye toward the surface (the direction reflect() expects).
	float3 ViewDirection = PosWorldr - EyePosition;

	output.NormalWS = wNormal;
	output.PositionWS = PosWorldr;
	output.Reflect = reflect(normalize(ViewDirection), normalize(wNormal));

	return output;
}

// Pixel shader: ambient + per-pixel diffuse/specular from three directional
// lights and one spot light, modulated by the vertex colour.
float4 PSBasicPixelLightingVc(PixelLightingVSOutputVc pin) : COLOR
{
	// BUGFIX: the eye vector must point from the surface TOWARD the eye,
	// otherwise the Blinn-Phong half vector H = normalize(E + L) is built for
	// the wrong hemisphere and the specular term is wrong. The previous
	// version had this subtraction reversed (the first post had it this way).
	float3 posToEye = EyePosition - pin.PositionWS;

	float3 N = normalize(pin.NormalWS);
	float3 E = normalize(posToEye);

	ColorPair lightResult = ComputePerPixelLights(E, N);
	float4 diffuseColor = pin.Diffuse;
	float4 reflectMat = ReflectionColor;
	// AmbientColor is the ambience of the model.
	// NOTE(review): ComputePerPixelLights also seeds its diffuse accumulator
	// with AmbientLightColor, so ambient is effectively applied twice —
	// confirm this is intended.
	float4 ambient = float4(AmbientColor * AmbientLightColor, 1);
	// NOTE(review): diffuseColor is applied twice (once as a factor, once
	// inside the float4 constructor) — squaring the vertex colour; verify.
	float4 diffuse = diffuseColor * reflectMat * float4(lightResult.Diffuse * diffuseColor.xyz, 1);
	float4 color = ambient + diffuse + float4(lightResult.Specular, 0);

	return color;
}

// Helper: accumulates diffuse and specular contributions from three
// directional lights and a spot light using the Blinn-Phong model.
//   E - unit vector from the surface toward the eye
//   N - unit surface normal
ColorPair ComputePerPixelLights(float3 E, float3 N)
{
	ColorPair result;
	result.Diffuse = AmbientLightColor;
	result.Specular = 0;

	// Light0
	float3 L = -DirLight0Direction;         // surface-to-light direction
	float3 H = normalize(E + L);            // Blinn-Phong half vector
	float dt = max(0, dot(L, N));           // Lambert factor
	result.Diffuse += DirLight0DiffuseColor * dt;
	if (dt != 0)
		result.Specular += DirLight0SpecularColor * pow(max(0, dot(H, N)), SpecularPower);

	// Light1
	L = -DirLight1Direction;
	H = normalize(E + L);
	dt = max(0, dot(L, N));
	result.Diffuse += DirLight1DiffuseColor * dt;
	if (dt != 0)
		result.Specular += DirLight1SpecularColor * pow(max(0, dot(H, N)), SpecularPower);

	// Light2
	L = -DirLight2Direction;
	H = normalize(E + L);
	dt = max(0, dot(L, N));
	result.Diffuse += DirLight2DiffuseColor * dt;
	if (dt != 0)
		result.Specular += DirLight2SpecularColor * pow(max(0, dot(H, N)), SpecularPower);

	// SpotLight0
	L = -SpotLightDirection;
	H = normalize(E + L);
	dt = max(0, dot(L, N));
	result.Diffuse += SpotLightDiffuseColor * dt;
	if (dt > 0)
	{
		// NOTE(review): L is just -SpotLightDirection, so this dot product is
		// -|SpotLightDirection|^2 (a constant <= 0) and the cone test below can
		// never pass. A correct spot cone compares SpotLightDirection against
		// the vector from the light POSITION to the pixel, which this shader
		// does not receive — confirm and fix upstream.
		float spotEffect = dot(SpotLightDirection, L);
		if (spotEffect > cos(Theta)) // inside the inner cone
		{
			spotEffect = pow(spotEffect, FallOff);
			float att = spotEffect / (SpotLightAttenuation0 +
				SpotLightAttenuation1 * Range +
				SpotLightAttenuation2 * Range * Range);
			result.Diffuse += att * (SpotLightDiffuseColor * spotEffect);
		}
		result.Specular += SpotLightSpecularColor * pow(max(0, dot(H, N)), SpecularPower);
	}

	result.Diffuse += EmissiveColor;
	result.Specular *= SpecularColor;

	return result;
}


Hopefully it's not too much code to go through.
ScreenShot
Any opinions? I have tried all that I can think of...
I got it! and the reason is so silly that I won't bother going into it.
Glad to hear you got it resolved, but you could still post what the issue was in case someone else runs into a similar problem.
In Collada, there is a model transform that defines the model's position in the world. After loading the vertex data, I was baking that transform into the vertex positions — and, for no good reason, into the normals as well. Since the shader then multiplies the normals by World again, they were effectively transformed twice and ended up pointing the wrong way, which rendered the model as a silhouette.

This topic is closed to new replies.

Advertisement