Sign in to follow this  
Bombshell93

My first SSAO implementation

Recommended Posts

I think I'm still suffering from haloing, plus some artifacts on spheres causing dark/light rings, but I'm still over the moon that I've got it working to some degree. SSAO baffled me until, in a moment of doing nothing, it fit itself together! The best feeling in the world is jumping over a programming hurdle.
Old Version
[spoiler]
myfirstssao_by_pushbombshell-d60lhx8.png

// --- SSAO parameters (set by the application) -------------------------------
#define SAMPLECOUNT 8
float PI = 3.14159265f;

float4x4 WVP; //View Projection Matrix: world position -> clip space (for re-projecting sample points)
float4x4 WVPI; //Inverse View Projection Matrix: clip space + depth -> world position

float3 sampleVectors[SAMPLECOUNT]; // randomized kernel offsets, supplied per frame by the application
float sampleRange; // world-space sampling radius of the AO kernel
float depthBias = 0.002f; // minimum depth gap before a sample counts as an occluder (reduces self-occlusion acne)

float2 GBufferSize; // render-target size in pixels; used for the half-pixel UV offset and noise tiling

sampler NormalSampler : register(s2); // G-buffer: xy = spheremap-encoded normal, zw = packed 16-bit depth
sampler NoiseSampler : register(s3); // small tiling noise texture used to randomly rotate the kernel per pixel

// Full-screen-quad vertex: position is authored directly in clip space.
struct VertexShaderInput
{
    float4 Position : POSITION0;
	float2 UV : TEXCOORD0;
};

// Interpolants handed to the pixel shader.
struct VertexShaderOutput
{
    float4 Position : POSITION0;
	float2 UV : TEXCOORD0;         // half-pixel-corrected G-buffer texture coordinate
	float4 ProjectPos : TEXCOORD1; // clip-space position, reused for world-position reconstruction
};

// Pass-through vertex shader for the full-screen AO quad. The quad's vertices
// arrive already in clip space, so no projection is applied; the clip-space
// position is also forwarded so the pixel shader can reconstruct world position.
VertexShaderOutput VertexShaderFunction(VertexShaderInput input)
{
    VertexShaderOutput output;

	output.Position = input.Position;
	output.ProjectPos = input.Position;

	// Clip space -> texture space (Y flipped), then nudge by half a texel so
	// texel centers line up with pixel centers (Direct3D 9 rasterization rule).
	float2 ndc = input.Position.xy / input.Position.w;
	output.UV = ndc * float2(0.5f, -0.5f) + 0.5f;
	output.UV += float2(1.0f / GBufferSize.x, 1.0f / GBufferSize.y) * 0.5f;

    return output;
}

// Reassemble a depth value that was split across two 8-bit G-buffer channels:
// x carries the high part (weight 1), y the low part (weight 1/256).
float unpack(float2 packed)
{
	const float2 conversion = float2(1.0f, 1.0f / 256.0f);
	return dot(packed, conversion);
}

// Decode a spheremap-transform compressed normal (2 stored components -> 3).
// Third-party technique (credited by the author to a CryEngine 3 approach).
float3 decode (float2 enc)
{
    float4 nn = float4(enc, 0, 0) * float4(2,2,0,0) + float4(-1,-1,1,-1);
    float l = dot(nn.xyz,-nn.xyw);
    nn.z = l;
    nn.xy *= sqrt(l);
    return nn.xyz * 2 + float3(0,0,-1);
}

// SSAO pixel shader (first version — the post's later revision fixes the
// occlusion-weighting mistake noted in the sample loop below).
float4 PixelShaderFunction(VertexShaderOutput input) : COLOR0
{
	// G-buffer fetch for this pixel: xy = encoded normal, zw = packed depth.
	float4 normalSample = tex2D(NormalSampler, input.UV);
	// Tile the noise texture across the screen so the rotation varies per pixel.
	float noiseSample = tex2D(NoiseSampler, input.UV * (float2(GBufferSize.x, GBufferSize.y) / 8)).r;
	float3 normal = decode(normalSample.xy); //Spheremap Transform normal compression, not my own work, based off a Cry3 implementation
	float depth = unpack(normalSample.zw); //nature of the game does not require a wide depth range so it has been compressed into 16-bit
	
	// Reconstruct world-space position from clip-space XY plus the stored depth.
	float4 position = float4(input.ProjectPos.xy / input.ProjectPos.w, depth, 1);
	position = mul(position, WVPI);
	position /= position.w; // perspective divide back to world space
	
	float4 output = float4(1,1,1,1); // start fully unoccluded; occlusion is subtracted per sample
	
	float angle = noiseSample * PI * 2; //convert [0-1] range noise into radians
	float cosAngle = 1-cos(angle); // NOTE: holds (1 - cos); the matrix below folds this into the Rodrigues axis-angle form
	float sinAngle = sin(angle);
	
	float3 unit = normalize(normal); // rotation axis; kept in its own variable in case it needs to change
	
	float3x3 rotationMat = float3x3( // axis-angle (Rodrigues) rotation about 'unit' by 'angle'
		1 + cosAngle * (unit.x * unit.x - 1), 
			-unit.z * sinAngle + cosAngle * unit.x * unit.y, 
				unit.y * sinAngle + cosAngle * unit.x * unit.z,
		unit.z * sinAngle + cosAngle * unit.x * unit.y, 
			1 + cosAngle * (unit.y * unit.y - 1), 
				-unit.x * sinAngle + cosAngle * unit.y * unit.z,
		-unit.y * sinAngle + cosAngle * unit.x * unit.z, 
			unit.x * sinAngle + cosAngle * unit.y * unit.z, 
				1 + cosAngle * (unit.z * unit.z - 1)
		);
		
	for (int i = 0; i < SAMPLECOUNT; i++)
	{
			//flip the sample vector into the hemisphere around the surface normal
		float3 sampleVector = dot(sampleVectors[i], normal) < 0 ? -sampleVectors[i] : sampleVectors[i];
		sampleVector *= sampleRange; // scale to the AO sampling radius
			//rotate the sample vector by the random angle around the normal
		sampleVector = mul(sampleVector, rotationMat);
			//get sample vector's world position > projected position > UV
		float4 samplePosition = mul(float4(position.xyz + sampleVector, 1), WVP);
		float2 sampleUV = (samplePosition.xy / samplePosition.w) * float2(0.5f, -0.5f) + 0.5f;
		sampleUV += float2(1/GBufferSize.x, 1/GBufferSize.y) * 0.5f; // half-pixel offset
			//depth stored at the sample's screen position
		float sample = unpack(tex2D(NormalSampler, sampleUV).zw);
			// NOTE(review): weights occlusion by the SAMPLE DIRECTION rather than
			// the occluder's stored normal — the author's later revision identifies
			// this as the cause of the ring artifacts on curved surfaces.
		float mod = dot(normalize(sampleVector), normal);
			//if the stored surface is closer to the camera than this pixel, accumulate occlusion
		if (sample < depth - depthBias)
		output -= (saturate(1 - ((depth - depthBias) - sample)) * mod) / SAMPLECOUNT;
	}
	
	return output;
}

// Single full-screen pass; shader model 3.0 is required for the dynamic loop
// and the number of instructions in the pixel shader.
technique Technique1
{
    pass Pass1
    {
        VertexShader = compile vs_3_0 VertexShaderFunction();
        PixelShader = compile ps_3_0 PixelShaderFunction();
    }
}

[/spoiler]
EDIT:
Okay, so I kept working on it and I've got it looking better without the bugs too. I made some silly mistakes; after correcting them I've had to go down to 6 samples, but I'm more than happy with the quality of the occlusion. I'm using a Min blend state so only the darkest value comes through, and I'm running multiple passes, giving a smoother-looking final occlusion.
myfirstssao_by_pushbombshell-d60lhx8.png

As you can see, the haloing issue isn't as glaring, and the samples' normals are now actual normals and not the sample's direction (a mistake on my part), which has fixed the issue with artifacts on rounded surfaces.
 

// --- SSAO parameters (set by the application) -------------------------------
#define SAMPLECOUNT 6

float4x4 WVP; //View Projection Matrix: world position -> clip space (for re-projecting sample points)
float4x4 WVPI; //Inverse View Projection Matrix: clip space + depth -> world position

float3 sampleVectors[SAMPLECOUNT]; // randomized kernel offsets, supplied per frame by the application
float sampleRange; // world-space sampling radius of the AO kernel
float depthBias = 0.0f; // minimum depth gap before a sample counts as an occluder (0 here: acne handled by the normal weighting)
float PI = 3.14159265f;

float2 GBufferSize; // render-target size in pixels; used for the half-pixel UV offset and noise tiling

sampler NormalSampler : register(s2); // G-buffer: xy = spheremap-encoded normal, zw = packed 16-bit depth
sampler NoiseSampler : register(s3); // small tiling noise texture used to randomly rotate the kernel per pixel

// Full-screen-quad vertex: position is authored directly in clip space.
struct VertexShaderInput
{
    float4 Position : POSITION0;
	float2 UV : TEXCOORD0;
};

// Interpolants handed to the pixel shader.
struct VertexShaderOutput
{
    float4 Position : POSITION0;
	float2 UV : TEXCOORD0;         // half-pixel-corrected G-buffer texture coordinate
	float4 ProjectPos : TEXCOORD1; // clip-space position, reused for world-position reconstruction
};

// Vertex shader for the full-screen AO quad. Positions are already in clip
// space, so they pass straight through; the clip-space position is duplicated
// into ProjectPos for the pixel shader's world-position reconstruction.
VertexShaderOutput VertexShaderFunction(VertexShaderInput input)
{
    VertexShaderOutput output;

	output.Position = input.Position;
	output.ProjectPos = input.Position;

	// Map clip-space XY to texture space (flipping Y), then add half a texel
	// so texel centers match pixel centers (Direct3D 9 sampling convention).
	float2 screenPos = input.Position.xy / input.Position.w;
	output.UV = screenPos * float2(0.5f, -0.5f) + 0.5f;
	output.UV += float2(1.0f / GBufferSize.x, 1.0f / GBufferSize.y) * 0.5f;

    return output;
}

// Reassemble a depth value that was split across two 8-bit G-buffer channels:
// x carries the high part (weight 1), y the low part (weight 1/256).
float unpack(float2 packed)
{
	const float2 conversion = float2(1.0f, 1.0f / 256.0f);
	return dot(packed, conversion);
}

// Decode a spheremap-transform compressed normal (2 stored components -> 3).
// Third-party technique (credited by the author to a CryEngine 3 approach).
float3 decode (float2 enc)
{
    float4 nn = float4(enc, 0, 0) * float4(2,2,0,0) + float4(-1,-1,1,-1);
    float l = dot(nn.xyz,-nn.xyw);
    nn.z = l;
    nn.xy *= sqrt(l);
    return nn.xyz * 2 + float3(0,0,-1);
}

// SSAO pixel shader (revised version): accumulates occlusion from SAMPLECOUNT
// randomized kernel samples, weighting each by how much the occluder's stored
// normal differs from this pixel's normal.
float4 PixelShaderFunction(VertexShaderOutput input) : COLOR0
{
	// G-buffer fetch for this pixel: xy = encoded normal, zw = packed depth.
	float4 normalSample = tex2D(NormalSampler, input.UV);
	// Tile the noise texture across the screen so the rotation varies per pixel.
	float noiseSample = tex2D(NoiseSampler, input.UV * (float2(GBufferSize.x, GBufferSize.y) / 8)).r;
	float3 normal = decode(normalSample.xy); //Spheremap Transform normal compression, not my own work, based off a Cry3 implementation
	float depth = unpack(normalSample.zw); //nature of the game does not require a wide depth range so it has been compressed into 16-bit

	// Reconstruct world-space position from clip-space XY plus the stored depth.
	float4 position = float4(input.ProjectPos.xy / input.ProjectPos.w, depth, 1);
	position = mul(position, WVPI);
	position /= position.w; // perspective divide back to world space

	float4 output = float4(1,1,1,1); // start fully unoccluded; occlusion is subtracted per sample

	float angle = noiseSample * PI * 2; //convert [0-1] range noise into radians
	float cosAngle = 1-cos(angle); // NOTE: holds (1 - cos); the matrix below folds this into the Rodrigues axis-angle form
	float sinAngle = sin(angle);

	float3 unit = normalize(normal); // rotation axis

	float3x3 rotationMat = float3x3( // axis-angle (Rodrigues) rotation about 'unit' by 'angle'
		1 + cosAngle * (unit.x * unit.x - 1), 
			-unit.z * sinAngle + cosAngle * unit.x * unit.y, 
				unit.y * sinAngle + cosAngle * unit.x * unit.z,
		unit.z * sinAngle + cosAngle * unit.x * unit.y, 
			1 + cosAngle * (unit.y * unit.y - 1), 
				-unit.x * sinAngle + cosAngle * unit.y * unit.z,
		-unit.y * sinAngle + cosAngle * unit.x * unit.z, 
			unit.x * sinAngle + cosAngle * unit.y * unit.z, 
				1 + cosAngle * (unit.z * unit.z - 1)
		);

	for (int i = 0; i < SAMPLECOUNT; i++)
	{
		// Rotate the precomputed offset around the normal, flip it into the
		// hemisphere facing the surface, and scale to the AO radius.
		float3 sampleVector = mul(sampleVectors[i], rotationMat);
		sampleVector = dot(sampleVector, normal) < 0 ? -sampleVector : sampleVector;
		sampleVector *= sampleRange;

		// Sample's world position -> clip space -> G-buffer UV (half-pixel corrected).
		float4 samplePosition = mul(float4(position.xyz + sampleVector, 1), WVP);
		float2 sampleUV = (samplePosition.xy / samplePosition.w) * float2(0.5f, -0.5f) + 0.5f;
		sampleUV += float2(1/GBufferSize.x, 1/GBufferSize.y) * 0.5f;

		// Single G-buffer fetch shared by the depth test and the normal weight
		// (the previous revision fetched the same texel twice).
		float4 occluder = tex2D(NormalSampler, sampleUV);
		float sampleDepth = unpack(occluder.zw); // 'sample' avoided: reserved keyword in SM4+ HLSL

		// Weight by how much the occluder faces away from this surface; coplanar
		// geometry (dot == 1) contributes nothing, suppressing self-occlusion.
		float weight = 1 - dot(decode(occluder.xy), normal);

		// Occlude only when the stored surface is closer to the camera.
		// TODO(review): there is no range check on (depth - sampleDepth), so
		// distant foreground silhouettes still occlude — the likely cause of
		// the remaining haloing; rejecting samples beyond sampleRange in depth
		// is the usual fix.
		if (sampleDepth < depth - depthBias)
		{
			output -= (saturate(1 - ((depth - depthBias) - sampleDepth)) * weight) / SAMPLECOUNT;
		}
	}

	return output;
}

// Single full-screen pass; shader model 3.0 is required for the dynamic loop
// and the number of instructions in the pixel shader.
technique Technique1
{
    pass Pass1
    {
        VertexShader = compile vs_3_0 VertexShaderFunction();
        PixelShader = compile ps_3_0 PixelShaderFunction();
    }
}
 

 

I'm mostly concerned about the haloing artifacts but the odd occlusion on spheres is a bit worrying too.

Any and all comments are greatly appreciated,
Thanks for reading,
Bombshell

Share this post


Link to post
Share on other sites

Create an account or sign in to comment

You need to be a member in order to leave a comment

Create an account

Sign up for a new account in our community. It's easy!

Register a new account

Sign in

Already have an account? Sign in here.

Sign In Now

Sign in to follow this