[D3D10, SSAO] Need help with finishing SSAO implementation

Hi Everyone!

I'm seeking some help with implementing SSAO. I'm basing it on John Chapman's tutorial -> http://www.john-chap...ontent.php?id=8 . The tutorial seems quite simple, and I think I've implemented each part of it correctly. Unfortunately, the results show that something is definitely wrong. If you could help, I'd appreciate it. So, here it goes:

HLSL code (Shader Model 4.0, D3D10)

The first pass stores:
normalized linear depth
view-space normals
colour

[source]struct VS_I_POS_TEX
{
float4 position : POSITION;
float2 textureCoordinates : TEXCOORD;
};
//-------------------------------------------------------------------------
struct VS_O_POS_TEX_EYE
{
float4 position : SV_POSITION;
float2 textureCoordinates : TEXCOORD0;
float3 eyePosition : TEXCOORD1;
};
//-------------------------------------------------------------------------
struct PS_I_POS_TEX_EYE
{
float4 position : SV_POSITION;
float2 textureCoordinates : TEXCOORD0;
float3 eyePosition : TEXCOORD1;
};
//-------------------------------------------------------------------------
struct PS_O_COLOR_NORM_DEPTH
{
float4 color : SV_Target0;
float4 normal : SV_Target1;
float normalizedLinearDepth : SV_Target2;
};

VS_O_POS_TEX_EYE VS_MRTRendering(VS_I_POS_TEX vsInput)
{
VS_O_POS_TEX_EYE vsOutput = (VS_O_POS_TEX_EYE)0;

// Transform into view space first and keep that position around;
// the pixel shader derives normals and depth from it.
vsOutput.position = mul(vsInput.position, matrix_view);
vsOutput.eyePosition = vsOutput.position.xyz;
vsOutput.position = mul(vsOutput.position, matrix_projection);
vsOutput.textureCoordinates = vsInput.textureCoordinates;

return vsOutput;
}
//-------------------------------------------------------------------------
PS_O_COLOR_NORM_DEPTH PS_MRTRendering(PS_I_POS_TEX_EYE psInput)
{
PS_O_COLOR_NORM_DEPTH psOutput = (PS_O_COLOR_NORM_DEPTH)0;

if(!disableTextures) psOutput.color = texture_diffuse.Sample(Sampler_anisotropic, psInput.textureCoordinates);
else psOutput.color = float4(1.0f, 1.0f, 1.0f, 1.0f);

// Face normal from the screen-space derivatives of the view-space position.
// NOTE: the normal is written out signed, so this needs a float render
// target; a UNORM target would clamp the negative components to zero.
psOutput.normal.xyz = normalize(cross(ddx(psInput.eyePosition.xyz), ddy(psInput.eyePosition.xyz)));
// View-space depth, normalized to [0, 1] by the far clip distance.
psOutput.normalizedLinearDepth = psInput.eyePosition.z / farClip;

return psOutput;
}[/source]
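
One thing worth double-checking on this pass: if the normal target happens to be a UNORM format (e.g. DXGI_FORMAT_R8G8B8A8_UNORM), the signed normals have to be packed into [0, 1] on write and unpacked on read, or all negative components get clamped to zero. A minimal sketch of that convention, in case it applies to your setup (this is an assumption about the render targets, not something the shaders above show):

[source]// G-buffer pass: pack the signed view-space normal into [0, 1]
psOutput.normal.xyz = normalize(cross(ddx(psInput.eyePosition.xyz), ddy(psInput.eyePosition.xyz))) * 0.5f + 0.5f;

// Occlusion pass: unpack back to [-1, 1] before use
float3 normal = normalize(texture_normals.Sample(Sampler_point, psInput.textureCoordinates).xyz * 2.0f - 1.0f);[/source]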

And the second part computes the occlusion:

[source]struct VS_I_POS_TEX_CORID
{
float3 position : POSITION;
float2 textureCoordinates : TEXCOORD;
float cornerId : CORNERID;
};
struct VS_O_POS_TEX_CORID_VSRAY
{
float4 position_SS : SV_POSITION;
float2 textureCoordinates : TEXCOORD0;
float cornerId : CORNERID;
float3 frustumPointCoordinateVS : TEXCOORD1;
};
VS_O_POS_TEX_CORID_VSRAY VS_RenderOcclusion(VS_I_POS_TEX_CORID vsInput)
{
VS_O_POS_TEX_CORID_VSRAY vsOutput = (VS_O_POS_TEX_CORID_VSRAY)0;

// Full-screen quad: positions are already in clip space.
vsOutput.position_SS = float4(vsInput.position, 1.0f);
vsOutput.textureCoordinates = vsInput.textureCoordinates.xy;
// Pick the view-space far-frustum corner for this vertex; interpolation
// turns it into a per-pixel view ray.
vsOutput.frustumPointCoordinateVS = aFrustumCornerPosition_VS[vsInput.cornerId];
return vsOutput;
}
//-------------------------------------------------------------------------
float PS_RenderOcclusion(VS_O_POS_TEX_CORID_VSRAY psInput) : SV_Target
{
// Reconstruct the view-space position by scaling the interpolated view ray
// by the normalized linear depth stored in the G-buffer.
float3 viewRay = float3(psInput.frustumPointCoordinateVS.xy, farClip);
float3 origin = viewRay * (float)texture_linearDepth.Sample(Sampler_point, psInput.textureCoordinates);

//I've tested it, and it seems the reconstruction works correctly.
//Here's how I tested it:
//I took viewRay.xy, multiplied it by the projection matrix, and then divided by the w component.
//Then I did some scaling and compared it to psInput.position.xy.
//They match, so this part looks correct to me.
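//For reference, a sketch of that round trip (the scaling step is the usual
//NDC-to-texture-space mapping, which flips y in D3D):
//  float4 reproj = mul(float4(origin, 1.0f), matrix_projection);
//  reproj.xy /= reproj.w;                              // back to NDC
//  reproj.xy = reproj.xy * float2(0.5f, -0.5f) + 0.5f; // to texture space
//  reproj.xy should now match psInput.textureCoordinates
//  (equivalently, psInput.position.xy divided by the viewport size)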


float3 normal = texture_normals.Sample(Sampler_point, psInput.textureCoordinates).xyz;

normal = normalize(normal);

// Unpack the random xy vector from [0, 1] to [-1, 1]; z stays 0 so the
// noise only rotates the kernel around the normal.
float3 noiseSample = float3(texture_noiseSamples.Sample(Sampler_point, psInput.textureCoordinates * noiseScale).xy * 2.0f - 1.0f, 0.0f);

// Gram-Schmidt: build an orthonormal basis around the normal, randomly
// rotated by the noise vector.
float3 tangent = normalize(noiseSample - normal * dot(noiseSample, normal));

float3 bitangent = cross(normal, tangent);

// The rows of tbn are the basis vectors, so mul(v, tbn) maps tangent space
// to view space directly; no transpose is needed here.
float3x3 tbn = float3x3(tangent, bitangent, normal);

float occlusion = 0.0f;

int samplesCount = 16;
float radius = 0.5f;

for(int i = 0; i < samplesCount; ++i)
{
// Rotate the i-th kernel sample into view space and place it around the
// reconstructed position. ("sample" is a reserved word in HLSL, hence the
// longer name.)
float3 samplePosition = mul(aRandomSamples[i], tbn);

samplePosition = samplePosition * radius + origin;

// Project the sample to find where its depth is stored in the G-buffer.
float4 offset = float4(samplePosition, 1.0);

offset = mul(offset, matrix_projection);
offset.xy /= offset.w;
// In D3D, NDC y points up but texture v points down, so flip y here.
offset.xy = offset.xy * float2(0.5, -0.5) + 0.5;

float sample_depth = (float)texture_linearDepth.Sample(Sampler_point, offset.xy);

// The sample counts as occluded if the stored surface lies in front of it.
occlusion += (sample_depth <= samplePosition.z ? 1.0 : 0.0);
}

occlusion = 1.0 - (occlusion / samplesCount);

return occlusion;
}[/source]
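
For what it's worth, Chapman's tutorial also weights each sample by a range check, so geometry far behind a sample point doesn't darken it; without it you tend to get halos around depth discontinuities. A sketch of the end of the loop body with that added (same variable names as above; note the stored depth has to be scaled back to view units here, which is also where the correction further down comes in):

[source]float sample_depth = (float)texture_linearDepth.Sample(Sampler_point, offset.xy) * farClip;

// Ignore potential occluders that are further away than the kernel radius.
float rangeCheck = abs(origin.z - sample_depth) < radius ? 1.0 : 0.0;
occlusion += (sample_depth <= samplePosition.z ? 1.0 : 0.0) * rangeCheck;[/source]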

That's mainly it. Here are the results (a full-screen quad showing only the occlusion texture):

fZTs.jpeg

It's the Sponza model, and the camera is looking up from the floor towards the sky. Only looking at the sky shows any results.

Well, what do you think? I have no idea what could possibly be wrong with this implementation.




Ok. I found one mistake.

On this line:
[source]float sample_depth = (float)texture_linearDepth.Sample(Sampler_point, offset.xy);[/source]

I forgot to multiply by the farClip value. Even after that correction, the output is far from OK.
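
For clarity, the corrected line reads:

[source]// scale the stored [0, 1] depth back into view-space units
float sample_depth = (float)texture_linearDepth.Sample(Sampler_point, offset.xy) * farClip;[/source]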

Actual output:
g10G.jpeg


