Deferred Rendering

So I'm having quite a bit of trouble converting my deferred rendering code (well, not mine exactly; I used Catalin Zima's tutorial) from XNA 4 to SlimDX's D3D9.

DeferredRenderer.cs:
[source lang="csharp"]
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using SlimDX;
using SlimDX.Direct3D9;
namespace VoxelSubmarineGameSlimDXTest.DeferredRendering
{
    public static class DeferredRenderer
    {
        private static Sprite sprite;
        private static Device device;

        private static Texture colorRT;
        private static Texture normalRT;
        private static Texture depthRT;
        private static Texture lightRT;

        private static Surface colorRTSurface;
        private static Surface normalRTSurface;
        private static Surface depthRTSurface;
        private static Surface lightRTSurface;

        private static Effect clearGBufferEffect;
        private static Effect renderGBufferEffect;
        private static Effect pointLightEffect;
        private static Effect directionalLightEffect;
        private static Effect combineFinalEffect;

        private static Vector2 halfPixel;
        private static Mesh sphereMesh;

        /// <summary>
        /// A:0 R:0 G:0 B:0
        /// </summary>
        private static Color4 colorTransparent = new Color4(0.0f, 0.0f, 0.0f, 0.0f);

        private static Surface backBuffer;

        public static void Init(Device argDevice)
        {
            device = argDevice;
            QuadRenderer.Init(device);

            // Read the back buffer size once and dispose the temporary surface reference.
            int tmpWidth, tmpHeight;
            using (Surface bb = device.GetBackBuffer(0, 0))
            {
                tmpWidth = bb.Description.Width;
                tmpHeight = bb.Description.Height;
            }

            halfPixel = new Vector2(0.5f / (float)tmpWidth, 0.5f / (float)tmpHeight);

            colorRT = new Texture(device, tmpWidth, tmpHeight, 1, Usage.RenderTarget, Format.A8R8G8B8, Pool.Default);
            normalRT = new Texture(device, tmpWidth, tmpHeight, 1, Usage.RenderTarget, Format.A8R8G8B8, Pool.Default);
            depthRT = new Texture(device, tmpWidth, tmpHeight, 1, Usage.RenderTarget, Format.R32F, Pool.Default);
            lightRT = new Texture(device, tmpWidth, tmpHeight, 1, Usage.RenderTarget, Format.A8R8G8B8, Pool.Default);

            colorRTSurface = colorRT.GetSurfaceLevel(0);
            normalRTSurface = normalRT.GetSurfaceLevel(0);
            depthRTSurface = depthRT.GetSurfaceLevel(0);
            lightRTSurface = lightRT.GetSurfaceLevel(0);

            clearGBufferEffect = Effect.FromFile(device, GameResources.ClearGBufferShaderPath, ShaderFlags.None);
            renderGBufferEffect = Effect.FromFile(device, GameResources.RenderGBufferShaderPath, ShaderFlags.None);
            pointLightEffect = Effect.FromFile(device, GameResources.PointLightShaderPath, ShaderFlags.None);
            directionalLightEffect = Effect.FromFile(device, GameResources.DirectionalLightShaderPath, ShaderFlags.None);
            combineFinalEffect = Effect.FromFile(device, GameResources.CombineFinalShaderPath, ShaderFlags.None);

            sphereMesh = Mesh.CreateSphere(device, 1.0f, 8, 8);
            sprite = new Sprite(device);
        }

        public static void CleanUp()
        {
            clearGBufferEffect.Dispose();
            renderGBufferEffect.Dispose();
            pointLightEffect.Dispose();
            directionalLightEffect.Dispose();
            combineFinalEffect.Dispose();
            colorRTSurface.Dispose();
            normalRTSurface.Dispose();
            depthRTSurface.Dispose();
            lightRTSurface.Dispose();
            colorRT.Dispose();
            normalRT.Dispose();
            depthRT.Dispose();
            lightRT.Dispose();
            sphereMesh.Dispose();
            sprite.Dispose();
        }

        private static void ClearGBuffer()
        {
            clearGBufferEffect.Begin();
            clearGBufferEffect.BeginPass(0);
            QuadRenderer.Render(new Vector2(-1.0f, -1.0f), new Vector2(1.0f, 1.0f));
            clearGBufferEffect.EndPass();
            clearGBufferEffect.End();
        }

        private static void RenderGBuffer()
        {
            int tmpTextureTileSizeReal = 16;
            int tmpTextureTilesX = World.World.Tileset.GetLevelDescription(0).Width / tmpTextureTileSizeReal;
            int tmpTextureTilesY = World.World.Tileset.GetLevelDescription(0).Height / tmpTextureTileSizeReal;
            float tmpTextureTileSize = (float)tmpTextureTileSizeReal / (float)World.World.Tileset.GetLevelDescription(0).Width;

            renderGBufferEffect.SetValue("World", Matrix.Identity);
            renderGBufferEffect.SetValue("View", World.World.GameCamera.View);
            renderGBufferEffect.SetValue("Projection", World.World.GameCamera.Projection);
            renderGBufferEffect.SetTexture("Texture", World.World.Tileset);
            renderGBufferEffect.SetValue("cameraPosition", World.World.GameCamera.Position);
            renderGBufferEffect.SetValue("halfPixel", halfPixel);
            renderGBufferEffect.SetValue("textureTilesX", tmpTextureTilesX);
            renderGBufferEffect.SetValue("textureTilesY", tmpTextureTilesY);
            renderGBufferEffect.SetValue("textureTileSize", tmpTextureTileSize);

            renderGBufferEffect.Begin();
            renderGBufferEffect.BeginPass(0);
            // Draw the whole scene here
            World.World.GameMap.RenderChunks();
            renderGBufferEffect.EndPass();
            renderGBufferEffect.End();
        }

        private static void DrawDirectionalLight(Vector3 argLightDirection, Color3 argColor)
        {
            directionalLightEffect.SetTexture("colorMap", colorRT);
            directionalLightEffect.SetTexture("normalMap", normalRT);
            directionalLightEffect.SetTexture("depthMap", depthRT);
            directionalLightEffect.SetValue("lightDirection", argLightDirection);
            directionalLightEffect.SetValue("Color", argColor);
            directionalLightEffect.SetValue("cameraPosition", World.World.GameCamera.Position);
            directionalLightEffect.SetValue("InvertViewProjection",
                Matrix.Invert(World.World.GameCamera.View * World.World.GameCamera.Projection));
            directionalLightEffect.SetValue("halfPixel", halfPixel);

            directionalLightEffect.Begin();
            directionalLightEffect.BeginPass(0);
            QuadRenderer.Render(new Vector2(-1.0f, -1.0f), new Vector2(1.0f, 1.0f));
            directionalLightEffect.EndPass();
            directionalLightEffect.End();
        }

        /*private static void RenderPointLight(Vector3 argPosition)
        {
            sphereMesh.DrawSubset(0);
        }*/

        private static void RenderLights()
        {
            DrawDirectionalLight(new Vector3(0.5f, -0.5f, 0.5f), new Color3(1.0f, 0.5f, 0.0f));
            //DrawDirectionalLight(new Vector3(0.5f, 0.5f, -0.5f), new Color3(0.0f, 0.0f, 0.5f));
            //DrawDirectionalLight(new Vector3(-0.5f, -0.5f, -0.5f), new Color3(0.0f, 1.0f, 0.0f));
        }

        private static void CombineFinal()
        {
            combineFinalEffect.SetTexture("colorMap", colorRT);
            combineFinalEffect.SetTexture("lightMap", lightRT);
            combineFinalEffect.SetTexture("depthMap", depthRT);
            combineFinalEffect.SetValue("halfPixel", halfPixel);
            combineFinalEffect.SetValue("cameraPosition", World.World.GameCamera.Position);
            combineFinalEffect.SetValue("InvertViewProjection",
                Matrix.Invert(World.World.GameCamera.View * World.World.GameCamera.Projection));

            combineFinalEffect.Begin();
            combineFinalEffect.BeginPass(0);
            QuadRenderer.Render(new Vector2(-1.0f, -1.0f), new Vector2(1.0f, 1.0f));
            combineFinalEffect.EndPass();
            combineFinalEffect.End();
        }

        public static void RenderFrame()
        {
            // Bind the three G-Buffer targets (MRT) and remember the back buffer.
            backBuffer = device.GetRenderTarget(0);
            device.SetRenderTarget(0, colorRTSurface);
            device.SetRenderTarget(1, normalRTSurface);
            device.SetRenderTarget(2, depthRTSurface);

            device.Clear(ClearFlags.All, colorTransparent, 1.0f, 0);
            // Clear the GBuffer here
            ClearGBuffer();
            // Render the GBuffer here
            RenderGBuffer();

            // Switch to the light accumulation target and add the lights together.
            device.SetRenderTarget(0, lightRTSurface);
            device.SetRenderTarget(1, null);
            device.SetRenderTarget(2, null);
            device.Clear(ClearFlags.All, colorTransparent, 1.0f, 0);
            device.SetRenderState(RenderState.AlphaBlendEnable, true);
            device.SetRenderState(RenderState.SourceBlend, Blend.One);
            device.SetRenderState(RenderState.SourceBlendAlpha, Blend.One);
            device.SetRenderState(RenderState.DestinationBlend, Blend.One);
            device.SetRenderState(RenderState.DestinationBlendAlpha, Blend.One);
            // Render the lights
            RenderLights();

            device.SetRenderTarget(0, backBuffer);
            device.SetRenderState(RenderState.AlphaBlendEnable, false);
            // Combine the render target data into the final image
            CombineFinal();

            // Release the temporary reference returned by GetRenderTarget.
            backBuffer.Dispose();

            /*sprite.Begin(SpriteFlags.None);
            sprite.Draw(lightRT, new Color4(1.0f, 1.0f, 1.0f, 1.0f));
            sprite.End();*/
        }
    }
}
[/source]

I can post the shader files here too, but there are about four of them, so I'll only do that if somebody asks, so as not to clutter the place. I basically copied them from the XNA example, and since HLSL is exactly the same in XNA and SlimDX, I didn't have to change them at all. They worked in XNA, so they should work here.

I'm getting this with the code above:
[screenshot of the broken output]

So the question is: what am I doing wrong and how do I fix it?

I'm using the Express edition of VS, so debugging this is quite difficult. I tried using PIX, but I'm not very experienced with it (or with D3D) and it didn't really turn up anything useful.
Do you get anything useful in the G-Buffer? Try drawing the contents of these render targets to the screen for real-time visualization:

colorRTSurface
normalRTSurface
depthRTSurface

You can also save them to disk and view them offline. Do they contain the expected results?
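
For the real-time version, something along these lines should work (a rough, untested sketch; it reuses the Sprite you already create in DeferredrRenderer's Init, and "screenWidth" stands in for your back buffer width in pixels):

[source lang="csharp"]
// Blit the three G-Buffer textures at quarter size across the top of the
// screen, after the frame has been composed into the back buffer.
sprite.Begin(SpriteFlags.None);
sprite.Transform = Matrix.Scaling(0.25f, 0.25f, 1.0f);
sprite.Draw(colorRT, new Color4(1.0f, 1.0f, 1.0f, 1.0f));
sprite.Transform = Matrix.Scaling(0.25f, 0.25f, 1.0f) * Matrix.Translation(screenWidth * 0.25f, 0.0f, 0.0f);
sprite.Draw(normalRT, new Color4(1.0f, 1.0f, 1.0f, 1.0f));
sprite.Transform = Matrix.Scaling(0.25f, 0.25f, 1.0f) * Matrix.Translation(screenWidth * 0.5f, 0.0f, 0.0f);
sprite.Draw(depthRT, new Color4(1.0f, 1.0f, 1.0f, 1.0f));
sprite.End();
[/source]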
They do, in fact. Here they are (depth not included, since it's mostly white, but it was the same in XNA and it worked):

colorRT: [screenshot]

normalRT: [screenshot]
Hmm, after doing this:

[source lang="csharp"]
Surface.ToFile(colorRTSurface, "colorRTSurface.png", ImageFileFormat.Png);
Surface.ToFile(normalRTSurface, "normalRTSurface.png", ImageFileFormat.Png);
Surface.ToFile(depthRTSurface, "depthRTSurface.png", ImageFileFormat.Png);
[/source]

I get some weird results. I've attached the images.
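
Though thinking about it, the depth target probably can't survive a PNG round-trip anyway: R32F has to be squashed into 8-bit channels, so at least that one is expected to look strange. If I want to inspect the raw float data, saving to DDS should preserve it (guessing here):

[source lang="csharp"]
// DDS keeps the R32F data as-is instead of clamping it into 8-bit PNG channels.
Surface.ToFile(depthRTSurface, "depthRTSurface.dds", ImageFileFormat.Dds);
[/source]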
I assume the QuadRenderer works correctly. Shouldn't the light direction vector be normalized?
I believe it does. When I tried passing 0.5f instead of 1.0f to the QuadRenderer, it covered half of the screen, so that works at least.
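
That sanity check was just this, in case it matters:

[source lang="csharp"]
// A half-sized quad, centered on the screen – it covered half the screen
// in each axis, so the quad geometry itself seems fine.
QuadRenderer.Render(new Vector2(-0.5f, -0.5f), new Vector2(0.5f, 0.5f));
[/source]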

Here's the class, but I assume it works fine:

[source lang="csharp"]
using System;
using System.Collections.Generic;
using SlimDX;
using SlimDX.Direct3D9;

namespace VoxelSubmarineGameSlimDXTest.DeferredRendering
{
    using VertexPositionTexture = VertexStructures.VertexPositionTexture;

    public class QuadRenderer
    {
        private static Device device;
        private static VertexPositionTexture[] verts = null;
        private static short[] ib = null;

        public static void Init(Device argDevice)
        {
            device = argDevice;

            ib = new short[] { 0, 1, 2, 2, 3, 0 };
        }

        public static void Render(Vector2 v1, Vector2 v2)
        {
            // Note: D3D9 sampler state types are indices, not bit flags, so they
            // can't be OR-ed together; each state has to be set individually.
            device.SetSamplerState(0, SamplerState.MagFilter, TextureFilter.Point);
            device.SetSamplerState(0, SamplerState.MinFilter, TextureFilter.Point);
            device.SetSamplerState(0, SamplerState.MipFilter, TextureFilter.Point);
            device.SetSamplerState(0, SamplerState.AddressU, TextureAddress.Wrap);
            device.SetSamplerState(0, SamplerState.AddressV, TextureAddress.Wrap);

            verts = new VertexPositionTexture[]
            {
                new VertexPositionTexture(new Vector3(v2.X, v1.Y, 1), new Vector2(1, 1)),
                new VertexPositionTexture(new Vector3(v1.X, v1.Y, 1), new Vector2(0, 1)),
                new VertexPositionTexture(new Vector3(v1.X, v2.Y, 1), new Vector2(0, 0)),
                new VertexPositionTexture(new Vector3(v2.X, v2.Y, 1), new Vector2(1, 0))
            };

            device.VertexDeclaration = VertexStructures.VertexPositionTextureDeclaration;
            device.VertexFormat = VertexPositionTexture.Format;

            device.DrawIndexedUserPrimitives<short, VertexPositionTexture>(PrimitiveType.TriangleList,
                0, 4, 2, ib, Format.Index16, verts, VertexPositionTexture.SizeInBytes);
        }
    }
}
[/source]

As for the light direction vector: it's normalized in the shaders themselves, but I believe things which are within the 0.0 to 1.0 range don't have to be normalized, since that's basically what normalizing does?
Scratch that: when I changed the VertexFormat to VertexFormat.None in the QuadRenderer class, everything seems to work.

What I would like to know is why, and if you have a good link on the subject for me to read, it would be greatly appreciated. :)
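
My current guess, for what it's worth: in D3D9 the FVF code and the vertex declaration are two mutually exclusive ways of describing the vertex layout, and setting a non-zero FVF after SetVertexDeclaration makes the device build a new declaration from the FVF bits, throwing away the custom one. An FVF of 0 (VertexFormat.None) leaves the custom declaration in effect:

[source lang="csharp"]
// Keep the custom declaration active; a non-zero FVF assigned afterwards
// would replace it with one derived from the FVF bits.
device.VertexDeclaration = VertexStructures.VertexPositionTextureDeclaration;
device.VertexFormat = VertexFormat.None;
[/source]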
Can you post the code for directionalLightEffect and combineFinalEffect?

And as for "but I believe things which are within the 0.0 to 1.0 range don't have to be normalized, since that's basically what normalizing does"...

Normalizing makes a vector unit length. Even if the individual XYZ components are in the 0.0 to 1.0 range, that doesn't mean the vector's length == 1.
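
Take the direction from your RenderLights as an example: (0.5, -0.5, 0.5) has length sqrt(0.25 + 0.25 + 0.25) = sqrt(0.75) ≈ 0.866, so it isn't unit length even though every component is within the -1.0 to 1.0 range; normalized, it becomes roughly (0.577, -0.577, 0.577).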
DirectionalLight.fx

[source]
float3 lightDirection;
float3 Color;
float3 cameraPosition;
float4x4 InvertViewProjection;
float2 halfPixel;

texture colorMap;
texture normalMap;
texture depthMap;

sampler colorSampler = sampler_state
{
    Texture = (colorMap);
    AddressU = WRAP;
    AddressV = WRAP;
    MagFilter = LINEAR;
    MinFilter = LINEAR;
    MipFilter = LINEAR;
};

sampler depthSampler = sampler_state
{
    Texture = (depthMap);
    AddressU = WRAP;
    AddressV = WRAP;
    MagFilter = LINEAR;
    MinFilter = LINEAR;
    MipFilter = LINEAR;
};

sampler normalSampler = sampler_state
{
    Texture = (normalMap);
    AddressU = WRAP;
    AddressV = WRAP;
    MagFilter = LINEAR;
    MinFilter = LINEAR;
    MipFilter = LINEAR;
};

struct VertexShaderInput
{
    float3 Position : POSITION0;
    float2 TexCoord : TEXCOORD0;
};

struct VertexShaderOutput
{
    float4 Position : POSITION0;
    float2 TexCoord : TEXCOORD0;
};

VertexShaderOutput VertexShaderFunction(VertexShaderInput input)
{
    VertexShaderOutput output;

    output.Position = float4(input.Position, 1);
    output.TexCoord = input.TexCoord - halfPixel;

    return output;
}

float4 PixelShaderFunction(VertexShaderOutput input) : COLOR0
{
    // Get normal data from the normalMap
    float4 normalData = tex2D(normalSampler, input.TexCoord);

    // Transform normal back to [-1, 1] range
    float3 normal = 2.0f * normalData.xyz - 1.0f;

    // Get specular power and transform it into [0, 255] range
    float specularPower = normalData.a * 255;

    // Get specular intensity from the colorMap
    float specularIntensity = tex2D(colorSampler, input.TexCoord).a;

    // Read depth
    float depthVal = tex2D(depthSampler, input.TexCoord).r;

    // Reconstruct the clip-space position from the screen coordinate and depth
    float4 position;
    position.x = input.TexCoord.x * 2.0f - 1.0f;
    position.y = -(input.TexCoord.y * 2.0f - 1.0f);
    position.z = depthVal;
    position.w = 1.0f;

    // Transform to world space
    position = mul(position, InvertViewProjection);
    position /= position.w;

    // Surface-to-light vector
    float3 lightVector = -normalize(lightDirection);

    // Compute diffuse light
    float NdL = max(0, dot(normal, lightVector));
    float3 diffuseLight = NdL * Color.rgb;

    // Reflection vector
    float3 reflectionVector = normalize(reflect(lightVector, normal));

    // Camera-to-surface vector
    float3 directionToCamera = normalize(cameraPosition - position);

    // Compute specular light
    float specularLight = specularIntensity * pow(saturate(dot(
        reflectionVector, directionToCamera)), specularPower);

    float4 outColor = float4(diffuseLight, specularLight);

    return saturate(outColor);
}

technique Technique1
{
    pass Pass1
    {
        VertexShader = compile vs_3_0 VertexShaderFunction();
        PixelShader = compile ps_3_0 PixelShaderFunction();
    }
}
[/source]

CombineFinal.fx

[source]
texture colorMap;
texture lightMap;
texture depthMap;

float2 halfPixel;
float3 cameraPosition;
float4x4 InvertViewProjection;

float4 FogColor = float4(0.0f, 0.05f, 0.1f, 0.0f);

sampler colorSampler = sampler_state
{
    Texture = (colorMap);
    AddressU = WRAP;
    AddressV = WRAP;
    MagFilter = LINEAR;
    MinFilter = LINEAR;
    MipFilter = LINEAR;
};

sampler lightSampler = sampler_state
{
    Texture = (lightMap);
    AddressU = WRAP;
    AddressV = WRAP;
    MagFilter = LINEAR;
    MinFilter = LINEAR;
    MipFilter = LINEAR;
};

sampler depthSampler = sampler_state
{
    Texture = (depthMap);
    AddressU = WRAP;
    AddressV = WRAP;
    MagFilter = LINEAR;
    MinFilter = LINEAR;
    MipFilter = LINEAR;
};

struct VertexShaderInput
{
    float3 Position : POSITION0;
    float2 TexCoord : TEXCOORD0;
};

struct VertexShaderOutput
{
    float4 Position : POSITION0;
    float2 TexCoord : TEXCOORD0;
};

VertexShaderOutput VertexShaderFunction(VertexShaderInput input)
{
    VertexShaderOutput output;

    output.Position = float4(input.Position, 1);
    output.TexCoord = input.TexCoord - halfPixel;

    return output;
}

float4 PixelShaderFunction(VertexShaderOutput input) : COLOR0
{
    float3 diffuseColor = tex2D(colorSampler, input.TexCoord).rgb;
    float4 light = tex2D(lightSampler, input.TexCoord);
    float3 diffuseLight = light.rgb;
    float specularLight = light.a;

    // Read depth
    float depthVal = tex2D(depthSampler, input.TexCoord).r;

    // Reconstruct the clip-space position from the screen coordinate and depth
    float4 position;
    position.x = input.TexCoord.x * 2.0f - 1.0f;
    position.y = -(input.TexCoord.y * 2.0f - 1.0f);
    position.z = depthVal;
    position.w = 1.0f;

    // Transform to world space
    position = mul(position, InvertViewProjection);
    position /= position.w;

    // Linear distance fog, fully fogged at 500 world units from the camera
    float Fog = 1.0f - (length(position - cameraPosition) / 500.0f);
    Fog = clamp(Fog, 0.0f, 1.0f);
    float4 Color = float4((diffuseColor * diffuseLight + diffuseColor * specularLight), 0.0f);
    Color = lerp(FogColor, Color, Fog);

    return saturate(Color);
}

technique Technique1
{
    pass Pass1
    {
        VertexShader = compile vs_3_0 VertexShaderFunction();
        PixelShader = compile ps_3_0 PixelShaderFunction();
    }
}
[/source]
