[Solved] Deferred Rendered Spotlight Problem

Hello,

I've been trying to implement a simple deferred shader with a spotlight.

The original forward rendered image looks like this:

[Image: gbdlight0.th.jpg, the forward rendered result]

But when I transfer the lighting code into my deferred shader, it looks like this:

[Image: gbdlight1.th.jpg, the deferred rendered result]

Here are the normal and position buffers I use, both normalized:

[Image: gbdlight2.th.jpg, the normal buffer]
[Image: gbdlight3.th.jpg, the position buffer]

The code for GBuffer.hlsl:


#pragma pack_matrix(row_major)
Texture2D Map : register(t0);

cbuffer Matrix : register(b0)
{
    matrix world;
    matrix view;
    matrix proj;
};

SamplerState TextureSampler
{
    Filter = MIN_MAG_MIP_LINEAR;
    AddressU = Wrap;
    AddressV = Wrap;
};

struct VS_IN
{
    float4 pos : POSITION;
    float3 normal : NORMAL;
    float2 tex0 : TEXCOORD;
};

struct PS_IN
{
    float4 pos : SV_POSITION;
    float2 tex0 : TEXCOORD0;
    float3 normal : TEXCOORD1;
    float4 worldPos : TEXCOORD2;
};

PS_IN VS_Effect(VS_IN vertex)
{
    PS_IN vsOut = (PS_IN)0;
    float4 posWorld = mul(vertex.pos, world);
    float4 viewPos = mul(posWorld, view);
    vsOut.pos = mul(viewPos, proj);
    vsOut.tex0 = vertex.tex0;
    vsOut.normal = normalize(mul(vertex.normal, (float3x3)world));
    vsOut.worldPos = posWorld;
    return vsOut;
}

// Albedo target
float4 PS_Color(PS_IN input) : SV_TARGET
{
    float4 color = float4(0, 0, 0, 0);
    float4 texMap = Map.Sample(TextureSampler, input.tex0);
    color = texMap;
    return color;
}

// Normal target
float4 PS_Normal(PS_IN input) : SV_TARGET
{
    float4 normal = float4(input.normal, 1);
    return normal;
}

// World-position target
float4 PS_WorldPos(PS_IN input) : SV_TARGET
{
    float4 color = normalize(input.worldPos);
    return color;
}


The code for Combined.hlsl:


#pragma pack_matrix(row_major)

Texture2D ColorMap : register(t0);
Texture2D NormalMap : register(t1);
Texture2D WorldPosMap : register(t2);

cbuffer Light : register(b0)
{
    float4 lightPos;
    float4 lightDir;
    float4 lightColor;
    float lightRad;
    float lightInt;
};

cbuffer Param : register(b1)
{
    float4 camPos;
    float4 diffColor;
};

SamplerState TextureSampler
{
    Filter = MIN_MAG_MIP_POINT;
    AddressU = Wrap;
    AddressV = Wrap;
};

struct VS_IN
{
    float4 pos : POSITION;
    float2 tex0 : TEXCOORD;
};

struct PS_IN
{
    float4 pos : SV_POSITION;
    float2 tex0 : TEXCOORD;
};

PS_IN VS_Effect(uint id : SV_VertexID)
{
    // Form a full-screen triangle from the vertex ID alone.
    float2 pos = float2(id == 1 ? 2 : 0, id == 2 ? 2 : 0);
    PS_IN output;
    output.pos = float4(pos.x * 2 - 1, 1 - pos.y * 2, 0, 1);
    output.tex0 = pos.xy;
    return output;
}

float4 PS_Effect(PS_IN input) : SV_TARGET
{
    float4 color = { 0, 0, 0, 0 };
    float4 colorMap = ColorMap.Sample(TextureSampler, input.tex0);
    float4 normal = normalize(NormalMap.Sample(TextureSampler, input.tex0));
    float4 worldPos = WorldPosMap.Sample(TextureSampler, input.tex0);

    float4 L = normalize(lightPos - worldPos);
    float4 D = normalize(lightDir);

    // Only light pixels inside the spotlight cone.
    if (dot(-L, D) > 0.9f)
    {
        float4 N = normal;
        float lambertTerm = max(dot(N, L), 0);

        if (lambertTerm > 0)
        {
            color += colorMap * lightColor * diffColor * lambertTerm;
        }
    }
    return color;
}


What am I doing wrong?
Okay, I've figured out what the problem was.

I used the same texture format for the normal and world-position render targets as I use for my quad texture:

DXGI_FORMAT_R8G8B8A8_UNORM

This format clamped my values to [0,1].

I have changed the format to:

DXGI_FORMAT_R16G16B16A16_FLOAT

This format doesn't clamp my values. It uses twice the memory (about 14 MB extra for a 1280x720 image), but I guess it's a sacrifice I'll have to make. Unless someone can suggest a more efficient format, or a more efficient way of producing my normal and position buffers?
Just map your normal components so they fit in [0,1]: add 1, then multiply by 0.5.

Then reverse the mapping when you're reading the data.
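A minimal HLSL sketch of that remap, reusing the PS_Normal and NormalMap names from your shaders:

// Encode in the G-buffer pass: map each component from [-1,1] into [0,1].
float4 PS_Normal(PS_IN input) : SV_TARGET
{
    return float4(input.normal * 0.5f + 0.5f, 1);
}

// Decode in the lighting pass: map back from [0,1] to [-1,1].
float3 N = normalize(NormalMap.Sample(TextureSampler, input.tex0).xyz * 2.0f - 1.0f);

That fits in your original 8-bit UNORM target, at the cost of some precision.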

Thank you - now I understand why my normals look different from a lot of other developers' normals.

Is it a similar thing for mapping my world coordinates, except that it involves the maximum scene boundaries?
Storing normals as XYZ in an 8-bit texture will give you pretty poor results, unless you do some magic to remap the normals. Even 10-bit formats generally won't give you enough precision for specular lighting. However, there are various methods for packing normals that can give better results.

As for position...don't bother storing position in a texture. Just reconstruct it from your depth buffer. It will give you better precision, and you won't have to waste another G-Buffer texture on it.
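A minimal sketch of that reconstruction, assuming a hypothetical DepthMap bound in place of WorldPosMap and an invViewProj matrix (the inverse of view * proj) added to one of your constant buffers:

Texture2D DepthMap : register(t2); // hypothetical: bound where WorldPosMap used to be

float4 ReconstructWorldPos(float2 tex0)
{
    // Hardware depth in [0,1], as written during the G-buffer pass.
    float depth = DepthMap.Sample(TextureSampler, tex0).r;
    // Rebuild the clip-space position from the texcoord and the sampled depth.
    float4 clipPos = float4(tex0.x * 2 - 1, 1 - tex0.y * 2, depth, 1);
    // Undo the view and projection transforms, then the perspective divide.
    float4 worldPos = mul(clipPos, invViewProj);
    return worldPos / worldPos.w;
}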
