Sign in to follow this  
RaydenUni

Deferred rendering point light artifacts

Recommended Posts

RaydenUni    110
I am writing a deferred renderer for a game and while I have my whole G-buffer, shading, and final pass set up, and I got directional lighting working, I'm getting strange artifacts with a point light. I have taken a series of screenshots to show you what I have so far and then the relevant HLSL code. Maybe someone has run into this problem. This is the final output, colored, shaded, with 1 point light. This is the albedo map from the G-buffer pass. The normalized normals map from the albedo pass. The values are transformed from [-1,1] to [0,1] which is why it is all pastels. To prove that my normals are working properly, here it is rendered without it being normalized, so values are [-1,1]. Here is my depth map from the G-buffer. It is of course, super dark, so here it is scaled up 10 times so you can see it. A closer shot so you can see better. A different model that shows it off better. The output from the shading pass, 1 point light off to the right. Output from a shading pass with one direction light of <0,-1,1>. Pretty strange isn't it? Now for some code. This is the pass rendering the G-buffer, albedo, depth, normal.
//constant buffer or external variables
cbuffer Variables
{
	matrix World;       //object -> world transform
	matrix Projection;  //camera projection transform
	matrix View;        //world -> camera (view) transform
};
//Skinning palette: one matrix per joint, indexed by BLENDINDICES (max 40 joints).
matrix Joints[40];

//texture
//Albedo texture array; the third texcoord component selects the array slice.
Texture2DArray baseTexture;

//Trilinear filtering with wrap addressing, used for the albedo lookup.
SamplerState samLinear
{
    Filter = MIN_MAG_MIP_LINEAR;
    AddressU = Wrap;
    AddressV = Wrap;
};

//Vertex layout for static (non-animated) meshes.
struct VS_INPUT
{
    float3 Pos : POSITION;
    float3 Normal : NORMAL;
    float3 TexCoord : TEXCOORD;   //third component = texture-array slice
};
//Vertex layout for skinned meshes: adds joint indices and blend weights.
struct VS_INPUT_ANIM
{
    float3 Pos : POSITION;
    float3 Normal : NORMAL;
    float3 TexCoord : TEXCOORD;
    float4 BIndex : BLENDINDICES0;   //up to four joint indices per vertex
    float4 BWeight : BLENDWEIGHT0;   //matching blend weights (should sum to 1)
};

//Vertex-shader output / pixel-shader input.
struct PS_INPUT
{
    float4 Pos : SV_POSITION;
    float3 Normal : NORMAL;          //world-space normal (normalized later)
    float4 ScreenPos : TEXCOORD2;    //copy of clip-space position for depth
    float3 TexCoord : TEXCOORD;
};

//MRT output: one target each for albedo, encoded normal, and depth.
struct PS_OUTPUT
{
	float4 Color	: COLOR0;
	float4 Normal	: COLOR1;
	float4 Depth	: COLOR2;
};


//------- Technique: MultipleTargets --------
//--- VERTEX SHADERS ---
//-- Vertex shader for static meshes --
//-- Vertex shader for static meshes --
//Transforms position to clip space, normal to world space, and forwards
//the clip-space position so the pixel shader can derive depth.
PS_INPUT VS_Static(VS_INPUT input)
{
	PS_INPUT output = (PS_INPUT)0;
	
	float4x4 WorldViewProjection = mul(mul(World, View), Projection);
	output.Pos = mul(float4(input.Pos,1), WorldViewProjection);
	//Transform the normal by the rotation/scale part of World only.
	//The explicit float3x3 cast replaces the implicit matrix truncation
	//the compiler performed for mul(float3, float4x4).
	//NOTE(review): if World ever carries non-uniform scale, this should
	//use the inverse-transpose of World instead.
	output.Normal = mul(input.Normal, (float3x3)World);

	//Clip-space position, consumed by the pixel shader's depth output.
	output.ScreenPos = output.Pos;
	output.TexCoord = input.TexCoord;
	return output;
}

//-- Vertex shader for animated meshes --
//-- Vertex shader for animated meshes --
//Blends position AND normal across up to four joints, then runs the
//standard static-mesh transform on the skinned vertex.
PS_INPUT VS_Animate(VS_INPUT_ANIM input)
{
	VS_INPUT output = (VS_INPUT)0;
	float4 pos = float4(input.Pos,1);
	output.Pos  = input.BWeight.x*mul(pos,Joints[input.BIndex.x]);
	output.Pos += input.BWeight.y*mul(pos,Joints[input.BIndex.y]);
	output.Pos += input.BWeight.z*mul(pos,Joints[input.BIndex.z]);
	output.Pos += input.BWeight.w*mul(pos,Joints[input.BIndex.w]);
	//Fix: the normal must be skinned too, otherwise animated meshes are
	//lit with their bind-pose normals. Only the rotation/scale part of
	//each joint matrix applies to normals, hence the float3x3 casts.
	output.Normal  = input.BWeight.x*mul(input.Normal,(float3x3)Joints[input.BIndex.x]);
	output.Normal += input.BWeight.y*mul(input.Normal,(float3x3)Joints[input.BIndex.y]);
	output.Normal += input.BWeight.z*mul(input.Normal,(float3x3)Joints[input.BIndex.z]);
	output.Normal += input.BWeight.w*mul(input.Normal,(float3x3)Joints[input.BIndex.w]);
	output.TexCoord = input.TexCoord;
	return VS_Static(output);
}

//--- PIXEL SHADERS ---
//-- One pixel shader for both, outputs to three render targets.
//--- PIXEL SHADERS ---
//-- One pixel shader for both techniques; fills the three G-buffer
//   render targets (albedo, encoded normal, depth).
PS_OUTPUT PS(PS_INPUT input) : SV_Target
{
	PS_OUTPUT output = (PS_OUTPUT)0;
	
	//-Albedo-
	//Explicit .rgb: Sample() returns float4; avoid the implicit
	//float4 -> float3 truncation the original relied on.
	output.Color.rgb = baseTexture.Sample(samLinear, input.TexCoord).rgb;
	output.Color.a = 1;
	
	//-Normal-
	//Normals are from [-1,1]. To store in a texture, it needs to be [0-1]
	//Thus (n/2 + 1/2)
	output.Normal.xyz = input.Normal/2.0f + 0.5f;
	output.Normal.a = 1;
	
	//-Depth-
	//Post-projection depth: divide the homogeneous z by w, stored
	//inverted as (1 - z/w). The lighting pass undoes this encoding.
	//NOTE(review): this value needs a high-precision (float) render
	//target; an RGBA8 target quantizes depth and produces the banding
	//artifacts described in this thread (see the final post).
	output.Depth = 1 - input.ScreenPos.z/input.ScreenPos.w;
	output.Depth.a = 1;
	
	return output;
}

//G-buffer technique for static meshes.
technique10 Render
{
	pass P0
	{
		SetVertexShader( CompileShader( vs_4_0, VS_Static() ) );
        SetGeometryShader( NULL );
        SetPixelShader( CompileShader( ps_4_0, PS() ) );
	}
}
//G-buffer technique for skinned meshes; shares the pixel shader above.
technique10 Animate
{
	pass P0
	{
		SetVertexShader( CompileShader( vs_4_0, VS_Animate() ) );
        SetGeometryShader( NULL );
        SetPixelShader( CompileShader( ps_4_0, PS() ) );
	}
}



This is the second, lighting pass. There is one pass per light and they all render to the same render target using blending.
#include "phong.fxh"

//cbuffer GeneralInputs
//{
	matrix xViewProjectionInv;   //inverse of View*Projection, for unprojection
	Texture2D xNormalMap;        //G-buffer: encoded world-space normals
	Texture2D xDepthMap;         //G-buffer: (1 - z/w) depth
//}

//these should be inputs
/*
float xLightStrength;
float3 xLightPosition;
float3 xConeDirection;
float xConeAngle;
float xConeDecay;
*/


//enum - to match the Light.h LightType enum
#define LT_AMBIENT 0
#define LT_DIRECTIONAL 1
#define LT_POINT 2
#define LT_SPOT 3
#define LT_QUANTITY 4

//World-space camera position, used to build the view vector.
float3 CameraPosition;

//Per-light parameters; one lighting pass is drawn per light.
cbuffer Light
{
	int LightType;        //one of the LT_* values above
	float3 LAmbient;
	float3 LDiffuse;
	float3 LSpecular;
	
	//attenuation
	//gives us some more control over lights
	//	m = I/(C + L*d + Q*d*d)
	//
	//distance			//D
	float LConstant;	//C
	float LLinear;		//L
	float LQuadratic;	//Q
	float LIntensity;	//I
	
	//directional
	float3 LAngle;
	float LCosAngle;
	float LSinAngle;
	float LExponent;      //specular exponent fed to lit()
	
	//geometry
	float3 LPosition;     //world-space position (point/spot lights)
	float3 LDirection;    //world-space direction (directional/spot lights)
};

//Sampler for the G-buffer lookups in the lighting pass.
SamplerState samLinear
{
    Filter = MIN_MAG_MIP_LINEAR;
    AddressU = Wrap;
    AddressV = Wrap;
};

//Full-screen quad vertex: position already in clip space.
struct VS_INPUT
{
	float4 pos: POSITION0;
	float2 texCoord: TEXCOORD0;
};

struct PS_INPUT
{
	float4 Position		: SV_POSITION;
	float2 TexCoord		: TEXCOORD0;
};

//------- Technique: DeferredSpotLight --------
//Pass-through vertex shader for the full-screen lighting quad.
//The quad's positions are already in clip space; only Y is flipped
//to match the D3D10 clip-space convention.
PS_INPUT LightVS(VS_INPUT input)
{
	PS_INPUT vsOut = (PS_INPUT)0;

	vsOut.Position = float4(input.pos.x, -input.pos.y, input.pos.z, input.pos.w);
	vsOut.TexCoord = input.texCoord;

	return vsOut;
}

/*
float4 ConeLightPS(PS_INPUT input) : SV_Target
{
	float4 output = (float)0;
	
	
	float3 lightDirection = normalize(worldPos - xLightPosition);
	float coneDot = dot(lightDirection, normalize(xConeDirection));
	bool coneCondition = coneDot >= xConeAngle;
	
	float shading = 0;
	if (coneCondition)
	{
		float coneAttenuation = pow(abs(coneDot), abs(xConeDecay));
		shading = dot(normal, -lightDirection);
		shading *= xLightStrength;
		shading *= coneAttenuation;
	}
	
	output.rgb = shading;
	output.a = 1;
	
	return output;
}
*/

//Flat ambient contribution: constant color, opaque alpha.
float4 AmbientLight() : SV_Target
{
	float4 result;
	result.rgb = LAmbient;
	result.a = 1.0f;
	return result;
}

//-- Directional light: diffuse + specular, no distance attenuation.
//   lightVector: the light's direction (LDirection is passed in)
//   viewVector:  normalized direction from the pixel toward the camera
//   normal:      normalized world-space surface normal
float4 DirectionalLight(float3 lightVector, float3 viewVector, float3 normal) : SV_Target
{
	float4 output = (float)0;
	
	//LIGHTING
	//NOTE(review): LDiffuse is passed into CalculateDiffuse AND multiplied
	//again below - confirm against phong.fxh that this is intentional.
	float3 diffuse = CalculateDiffuse(float3(1,1,1) , LDiffuse, normal, normalize(lightVector));
	float3 specular = CalculateSpecular(viewVector, lightVector, normal);
	output.rgb = LAmbient + diffuse * LDiffuse + specular * LSpecular;
	output.a = 1;
	//LIGHTING
	
	//ret lit(n_dot_l, n_dot_h, m)  (alternative path, currently unused)
	//returns the lighting coefficient vector
	//http://msdn.microsoft.com/en-us/library/bb509619(VS.85).aspx
	float nDotL = dot(normal, lightVector);
	//Fix: n_dot_h is N.H where H is the NORMALIZED half-vector. The
	//original assigned the float3 (L+V)/2 to a float, silently keeping
	//only the x component.
	float nDotH = dot(normal, normalize(lightVector + viewVector));
	float3 coVec = lit(nDotL, nDotH, LExponent);
	//output.rgb = coVec.x * LAmbient + coVec.y * LDiffuse + coVec.z * LSpecular;
		
	return output;
}

//-- Point light: diffuse + specular with distance-based attenuation.
//   position:   world-space position of the shaded pixel
//   viewVector: normalized direction from the pixel toward the camera
//   normal:     normalized world-space surface normal
float4 PointLight(float3 position, float3 viewVector, float3 normal) : SV_TARGET
{
	float4 output = (float)0;
	//light vec = light - object pos
	float3 lightVector = LPosition - position;
	float dist = length(lightVector);
	lightVector = normalize(lightVector);
	
	//attenuation: m = I / (C + L*d + Q*d*d)
	//dist*dist instead of pow(dist, 2): cheaper and exact.
	float m = LIntensity / (LConstant + LLinear * dist + LQuadratic * (dist * dist));
	
	//LIGHTING
	//NOTE(review): LDiffuse is passed into CalculateDiffuse AND multiplied
	//again below - confirm against phong.fxh that this is intentional.
	float3 diffuseValue = CalculateDiffuse(float3(1,1,1) , LDiffuse, normal, lightVector);
	float3 specularValue = CalculateSpecular(viewVector, lightVector, normal);
	output.rgb = LAmbient + diffuseValue * LDiffuse + specularValue * LSpecular;
	output.a = 1;
	//LIGHTING
	
	//Attenuation factor m is computed but currently NOT applied;
	//re-enable by uncommenting:
	//output.rgb *= m;
		
	//ret lit(n_dot_l, n_dot_h, m)  (alternative path, currently unused)
	//returns the lighting coefficient vector
	//http://msdn.microsoft.com/en-us/library/bb509619(VS.85).aspx
	float nDotL = dot(normal, lightVector);
	//Fix: n_dot_h is N.H where H is the NORMALIZED half-vector. The
	//original assigned the float3 (L+V)/2 to a float, silently keeping
	//only the x component.
	float nDotH = dot(normal, normalize(lightVector + viewVector));
	float3 coVec = lit(nDotL, nDotH, LExponent);
	//output.rgb = coVec.x * LAmbient + coVec.y * LDiffuse + coVec.z * LSpecular;

	return output;
}

//-- Deferred lighting pass: reconstructs the world-space position and
//   normal from the G-buffer, then dispatches on the bound light type.
float4 LightPS(PS_INPUT input) : SV_TARGET
{
	float4 output = (float)0;
	
	//NORMAL: stored as [0,1] in the G-buffer; expand back to [-1,1].
	float3 normal = xNormalMap.Sample(samLinear, input.TexCoord.xy).rgb;
	normal = normal * 2.0f - 1.0f;
	normal = normalize(normal);
	
	//WORLD POSITION: undo the (1 - z/w) encoding written by the G-buffer pass.
	float depth = (1 - xDepthMap.Sample(samLinear, input.TexCoord.xy).r) / 1;
	
	//Rebuild the clip-space position from the screen texcoord plus depth,
	//then unproject to world space with the inverse view-projection.
	float4 screenPos;
	screenPos.x = input.TexCoord.x * 2.0f - 1.0f;
	screenPos.y = -(input.TexCoord.y * 2.0f - 1.0f);
	screenPos.z = depth;
	screenPos.w = 1.0f;
	
	float4 worldPos = mul(screenPos, xViewProjectionInv);
	worldPos /= worldPos.w;
	
	//VIEW VECTOR: from the shaded point toward the camera.
	//Explicit .xyz swizzles below avoid the implicit float4 -> float3
	//truncation warnings the original produced.
	float3 viewVec = CameraPosition - worldPos.xyz;
	viewVec = normalize(viewVec);
	
	switch (LightType)
	{
		case LT_AMBIENT:
			return AmbientLight();
			
		case LT_DIRECTIONAL:
			output = DirectionalLight(LDirection, viewVec, normal);
			break;
		
		case LT_POINT:
			output = PointLight(worldPos.xyz, viewVec, normal);
			break;
		
		case LT_SPOT:       //not implemented yet
			break;
		
		case LT_QUANTITY:   //enum sentinel, never a real light
			break;
	}
	return output;
}

//Additive blend so that successive per-light passes accumulate into the
//same render target (currently not bound - see the commented SetBlendState).
BlendState blend
{
	BlendEnable[0] = TRUE;
	SrcBlend = ONE;
	Destblend = ONE;
	BlendOP = ADD;
	SrcBlendAlpha = SRC_ALPHA;
	DestBlendAlpha = INV_SRC_ALPHA;
	BlendOpAlpha = ADD;
	RenderTargetWriteMask[0] = 0x0F;
};

//Lighting technique: one full-screen pass per light.
technique10 ShadingTechnique
{
	pass P0
	{
		//SetBlendState(blend, float4(0.0f, 0.0f, 0.0f, 0.0f), 0xffffffff);
		
		SetVertexShader( CompileShader( vs_4_0, LightVS() ) );
		SetGeometryShader( NULL );
		SetPixelShader( CompileShader( ps_4_0, LightPS() ) );
		
	}
}




My first thought is that this looks like a precision issue. I'm using R32G32B32A32 float for all values. One strange thing is that if I scale the depth output from the first pass and then unscale it in the second pass, my point light doesn't render at all. Not sure if this is relevant, but it's something I discovered. I tried debugging the HLSL in PIX but it doesn't want to step through properly for me.

Share this post


Link to post
Share on other sites
MJP    19753
Quote:
Original post by RaydenUni
I tried debugging the HLSL in PIX but it doesn't want to step through properly for me.


What do you mean by that? Does it appear as though it's "jumping around" from one instruction to another? If this is the case, then you're probably compiling the shader with optimizations enabled (which will reorder instructions).

Share this post


Link to post
Share on other sites
RaydenUni    110
I compile the shader with this:

[source "c"]
#ifdef _DEBUG
// Set the D3D10_SHADER_DEBUG flag to embed debug information in the shaders.
dwShaderFlags |= D3D10_SHADER_DEBUG;
// Fix: also skip optimizations in debug builds. D3D10_SHADER_DEBUG alone
// still lets the compiler reorder instructions, which is why PIX appears
// to skip lines and lose variables when stepping through the shader.
dwShaderFlags |= D3D10_SHADER_SKIP_OPTIMIZATION;
#endif



The issue is that it skips lines and doesn't give me info on certain variables that definitely are set. Stuff like the normal or the light vector.

I have new info: I'm not getting the world position properly. I changed my pass2 vertex shader to this:

[source "c"]
//Pixel-shader input: clip-space position is forwarded in ScreenPos so the
//pixel shader can unproject without relying on the texture coordinates.
struct PS_INPUT
{
	float4 Position  : SV_POSITION;
	float4 ScreenPos : TEXCOORD1;
	float2 TexCoord  : TEXCOORD0;
};

//------- Technique: DeferredSpotLight --------
//Pass-through vertex shader for the full-screen quad; flips Y for the
//D3D10 clip-space convention and duplicates the position into ScreenPos.
PS_INPUT LightVS(VS_INPUT input)
{
	PS_INPUT vsOut = (PS_INPUT)0;

	vsOut.Position = float4(input.pos.x, -input.pos.y, input.pos.z, input.pos.w);
	vsOut.TexCoord = input.texCoord;
	vsOut.ScreenPos = vsOut.Position;
	return vsOut;
}



And the unproject is now like this:

[source "c"]
float4 LightPS(PS_INPUT input) : SV_TARGET
{
float4 output = (float)0;

//NORMAL
float3 normal = xNormalMap.Sample(samLinear, input.TexCoord.xy).rgb;
normal = normal * 2.0f - 1.0f;
normal = normalize(normal);

//WORLD POSITION
/*
float depth = (1 - xDepthMap.Sample(samLinear, input.TexCoord.xy).r) / 1;

float4 screenPos;
screenPos.x = input.TexCoord.x * 2.0f - 1.0f;
screenPos.y = -(input.TexCoord.y * 2.0f - 1.0f);
screenPos.xy /= screenPos.w;
screenPos.z = depth;
screenPos.w = 1.0f;

float4 worldPos = mul(screenPos, xViewProjectionInv);
worldPos /= worldPos.w;
*/



////new version using screenpos instead of texcoords
input.ScreenPos.xy /= input.ScreenPos.w;
float depth = (1 - xDepthMap.Sample(samLinear, input.TexCoord.xy).r) / 1;

float4 screenPos;
screenPos.x = input.ScreenPos.x;
screenPos.y = input.ScreenPos.y;
screenPos.z = depth;
screenPos.w = 1.0f;

float4 worldPos = mul(screenPos, xViewProjectionInv);
worldPos /= worldPos.w;


////LIGHT VECTOR
////lightPos - shadingPos
//float3 lightVec = LPosition - worldPos;
//lightVec = normalize(lightVec);

//VIEW VECTOR
//camPos - shadingPos
float3 viewVec = CameraPosition - worldPos;
viewVec = normalize(viewVec);




When I output "output.rgb = worldPos;" the whole screen is the same shade of green, getting darker and lighter as I move the camera closer to and farther from the plane. Since these are supposed to be world coordinates, the color shouldn't change when I move the camera.

Share this post


Link to post
Share on other sites
MJP    19753
Yeah you need to disable optimizations if you want to be able to step through line by line. Add D3D10_SHADER_SKIP_OPTIMIZATION to your compile flags to do this.

Also I wrote a bit about reconstructing position from depth here.

Share this post


Link to post
Share on other sites
RaydenUni    110
A few days later I have determined that it was a depth precision thing. I was storing depth into an RGBA8, and I thought I had already changed it to be higher.

Share this post


Link to post
Share on other sites

Create an account or sign in to comment

You need to be a member in order to leave a comment

Create an account

Sign up for a new account in our community. It's easy!

Register a new account

Sign in

Already have an account? Sign in here.

Sign In Now

Sign in to follow this