So, i'm currently trying to implement an SSAO shader from THIS tutorial and i'm running into a few issues here.
Now, this SSAO method requires view space positions and normals. I'm storing the normals in my deferred renderer in world-space so i had to do a conversion and reconstruct the position from the depth buffer.
And something there goes horribly wrong (which has probably to do with worldspace to viewspace transformations).
(here is the full shader source code if someone wants to take a look at it)
#version 330
in vec2 vTexcoord;
out vec4 outputF;
uniform sampler2D sNormals;
uniform sampler2D sDepth;//for reconstructing world position
uniform sampler2D sNoise;
uniform mat4 uInverseViewProjectionBiased;//for position reconstruction
uniform mat4 uProjection; // camera projection which was used to render the buffers
uniform vec3 samples[64];
uniform mat4 uViewMatrix; // transforming coordinates to viewspace
uniform mat4 uNormalViewMatrix; // transforming normals to viewspace (same as viewmatrix, but without position transform)
uniform float uZnear;
uniform float uZfar;
int kernelSize = 64;
float radius = 0.5;
float bias = 0.025;
// tile noise texture over screen based on screen dimensions divided by noise size
const vec2 noiseScale = vec2(1280.0/4.0, 720.0/4.0);
//-----------------------
//returns world space coordinates from depthbuffer
vec3 depthToWorld(sampler2D depthMap,vec2 texcoord,mat4 biasedInverseProjView){
float depth = texture2D(depthMap,texcoord).r;
vec4 position = vec4(texcoord* 2.0 - 1.0,depth* 2.0 - 1.0,1.0);
position = ((biasedInverseProjView)*position);
return vec3(position/position.w);
}
void main()
{
//view space position and normal
vec3 fragPos = (uViewMatrix*vec4(depthToWorld(sDepth,vTexcoord,uInverseViewProjectionBiased),1.0f)).xyz;
fragPos.z = -fragPos.z;//multiplying with viewmatrix results in z coordinate being negative. multiply with -1 to make it positive
vec3 normal = ((uNormalViewMatrix*vec4(normalize(texture2D(sNormals, vTexcoord).rgb),1.0)).xyz);
vec3 randomVec = normalize(texture2D(sNoise, vTexcoord * noiseScale).xyz);
// create TBN change-of-basis matrix: from tangent-space to view-space
vec3 tangent = normalize(randomVec - normal * dot(randomVec, normal));
vec3 bitangent = (cross(normal, tangent));
mat3 TBN = mat3(tangent, bitangent, normal);
// iterate over the sample kernel and calculate occlusion factor
float occlusion = 0.0;
for(int i = 0; i < kernelSize; i++)
{
// get sample position
vec3 sample = TBN * samples[i]; // from tangent to view-space
sample = sample * radius + fragPos;
// project sample position (to sample texture) (to get position on screen/texture)
vec4 offset = vec4(sample, 1.0);
offset = uProjection * offset; // from view to clip-space
offset.xyz /= offset.w; // perspective divide
offset.xyz = offset.xyz * 0.5 + 0.5; // transform to range 0.0 - 1.0
// get sample depth
float sampleDepth = -(uViewMatrix*vec4(depthToWorld(sDepth,offset.xy,uInverseViewProjectionBiased),1.0f)).z;//here again, depth is negative so multiply by -1
// range check & accumulate
float rangeCheck= abs(fragPos.z - sampleDepth) < radius ? 1.0 : 0.0;
occlusion += (sampleDepth >= sample.z ? 1.0 : 0.0) * rangeCheck;
}
occlusion = 1.0 - (occlusion / kernelSize);
outputF = vec4(occlusion,occlusion,occlusion,1.0f);
}
Now, i suspect that the normals are the culprit.
vec3 normal = ((uNormalViewMatrix*vec4(normalize(texture2D(sNormals, vTexcoord).rgb),1.0)).xyz);
"sNormals" is a 2D texture which stores the normals in world space in a RGB FP16 buffer.
Now i can't use the camera viewspace matrix to transform the normals into viewspace as the cameras position isn't set at (0,0,0), thus skewing the result.
So what i did is to create a new viewmatrix specifically for this normal without the position at vec3(0,0,0);
//"camera" is the camera which was used for rendering the normal buffer
renderer.setUniform4m(ressources->shaderSSAO->getUniform("uNormalViewMatrix"),
glmExt::createViewMatrix(glm::vec3(0,0,0),camera.getForward(),camera.getUp())//parameters are (position,forwardVector,upVector)
);
Though i have the feeling this is the wrong approach. Is this right or is there a better/correct way of transforming a world space normal into viewspace?