Hey Guys,

I'm doing Exercise5 Ch22 SSAO on Frank Luna's DX11 book, I used DXGI_FORMAT_R8G8B8A8_UNORM to replace DXGI_FORMAT_R16G16B16A16_FLOAT when building normalDepth texture map.

When using DXGI_FORMAT_R16G16B16A16_FLOAT, I store view space normal to RGB channel, the alpha channel stores the view space depth(z-coordinate). Now using DXGI_FORMAT_R8G8B8A8_UNORM, I store normal vector x- and y- coordinate to RG channel, and BA combined store 16-bit depth value.

I construct the normal z-coordinate by nz = -sqrt(1-x^2-y^2).

To store the view space depth over two 8-bit UNORM channels, I normalized z to [0, 1] by dividing by the far plane depth zFar. Then I used a little tricks to save 8 most and 8 least significant digits to BA 16-bit channels(following code below).

When rendering normal and depth values of the scene to the DXGI_FORMAT_R8G8B8A8_UNORM 2D texture, the main code is

cbuffer cbPerScene { float gZFar; }; struct VertexIn { float3 PosL : POSITION; float3 NormalL : NORMAL; float2 Tex : TEXCOORD; }; struct VertexOut { float4 PosH : SV_POSITION; float3 PosV : POSITION; float3 NormalV : NORMAL; float2 Tex : TEXCOORD0; }; VertexOut VS(VertexIn vin) { VertexOut vout; // Transform to view space. vout.PosV = mul(float4(vin.PosL, 1.0f), gWorldView).xyz; vout.NormalV = mul(vin.NormalL, (float3x3)gWorldInvTransposeView); // Transform to homogeneous clip space. vout.PosH = mul(float4(vin.PosL, 1.0f), gWorldViewProj); // Output vertex attributes for interpolation across triangle. vout.Tex = mul(float4(vin.Tex, 0.0f, 1.0f), gTexTransform).xy; return vout; } float4 PS(VertexOut pin, uniform bool gAlphaClip) : SV_Target { // Interpolating normal can unnormalize it, so normalize it. pin.NormalV = normalize(pin.NormalV); if(gAlphaClip) { float4 texColor = gDiffuseMap.Sample( samLinear, pin.Tex ); clip(texColor.a - 0.1f); } float4 normalDepth = float4(0, 0, 0, 0); normalDepth.rg = pin.NormalV.rg; float depth = pin.PosV.b; float z = depth / gZFar; normalDepth.ba = float2(z, frac(256.0f*z)); return normalDepth; } technique11 NormalDepth { pass P0 { SetVertexShader( CompileShader( vs_5_0, VS() ) ); SetGeometryShader( NULL ); SetPixelShader( CompileShader( ps_5_0, PS(false) ) ); } }

When using this DXGI_FORMAT_R8G8B8A8_UNORM texture to build SSAO, the main code is

cbuffer cbPerFrame { float4x4 gViewToTexSpace; // Proj*Texture float4 gOffsetVectors[14]; float4 gFrustumCorners[4]; float gZFar; // Coordinates given in view space. float gOcclusionRadius = 0.5f; float gOcclusionFadeStart = 0.2f; float gOcclusionFadeEnd = 2.0f; float gSurfaceEpsilon = 0.05f; }; Texture2D gNormalDepthMap; Texture2D gRandomVecMap; SamplerState samNormalDepth { Filter = MIN_MAG_LINEAR_MIP_POINT; // Set a very far depth value if sampling outside of the NormalDepth map // so we do not get false occlusions. AddressU = BORDER; AddressV = BORDER; BorderColor = float4(0.0f, 0.0f, 0.0f, 1e5f); }; SamplerState samRandomVec { Filter = MIN_MAG_LINEAR_MIP_POINT; AddressU = WRAP; AddressV = WRAP; }; struct VertexIn { float3 PosL : POSITION; float3 ToFarPlaneIndex : NORMAL; float2 Tex : TEXCOORD; }; struct VertexOut { float4 PosH : SV_POSITION; float3 ToFarPlane : TEXCOORD0; float2 Tex : TEXCOORD1; }; VertexOut VS(VertexIn vin) { VertexOut vout; // Already in NDC space. vout.PosH = float4(vin.PosL, 1.0f); // We store the index to the frustum corner in the normal x-coord slot. vout.ToFarPlane = gFrustumCorners[vin.ToFarPlaneIndex.x].xyz; // Pass onto pixel shader. vout.Tex = vin.Tex; return vout; } // Determines how much the sample point q occludes the point p as a function // of distZ. float OcclusionFunction(float distZ) { // // If depth(q) is "behind" depth(p), then q cannot occlude p. Moreover, if // depth(q) and depth(p) are sufficiently close, then we also assume q cannot // occlude p because q needs to be in front of p by Epsilon to occlude p. // // We use the following function to determine the occlusion. // // // 1.0 -------------\ // | | \ // | | \ // | | \ // | | \ // | | \ // | | \ // ------|------|-----------|-------------|---------|--> zv // 0 Eps z0 z1 // float occlusion = 0.0f; if(distZ > gSurfaceEpsilon) { float fadeLength = gOcclusionFadeEnd - gOcclusionFadeStart; // Linearly decrease occlusion from 1 to 0 as distZ goes // from gOcclusionFadeStart to gOcclusionFadeEnd. occlusion = saturate( (gOcclusionFadeEnd-distZ)/fadeLength ); } return occlusion; } float4 PS(VertexOut pin, uniform int gSampleCount) : SV_Target { // p -- the point we are computing the ambient occlusion for. // n -- normal vector at p. // q -- a random offset from p. // r -- a potential occluder that might occlude p. // Get viewspace normal and z-coord of this pixel. The tex-coords for // the fullscreen quad we drew are already in uv-space. float4 normalDepth = gNormalDepthMap.SampleLevel(samNormalDepth, pin.Tex, 0.0f); float2 nxy = normalDepth.rg; float nz = sqrt(1 - pow(nxy.r, 2) - pow(nxy.g, 2)); nz = -nz; float3 n = float3(nxy, nz); float pz = normalDepth.b + normalDepth.a/256.0f; pz *= gZFar; // // Reconstruct full view space position (x,y,z). // Find t such that p = t*pin.ToFarPlane. // p.z = t*pin.ToFarPlane.z // t = p.z / pin.ToFarPlane.z // float3 p = (pz/pin.ToFarPlane.z)*pin.ToFarPlane; // Extract random vector and map from [0,1] --> [-1, +1]. float3 randVec = 2.0f*gRandomVecMap.SampleLevel(samRandomVec, 4.0f*pin.Tex, 0.0f).rgb - 1.0f; float occlusionSum = 0.0f; // Sample neighboring points about p in the hemisphere oriented by n. [unroll] for(int i = 0; i < gSampleCount; ++i) { // Are offset vectors are fixed and uniformly distributed (so that our offset vectors // do not clump in the same direction). If we reflect them about a random vector // then we get a random uniform distribution of offset vectors. float3 offset = reflect(gOffsetVectors[i].xyz, randVec); // Flip offset vector if it is behind the plane defined by (p, n). float flip = sign( dot(offset, n) ); // Sample a point near p within the occlusion radius. float3 q = p + flip * gOcclusionRadius * offset; // Project q and generate projective tex-coords. float4 projQ = mul(float4(q, 1.0f), gViewToTexSpace); projQ /= projQ.w; // Find the nearest depth value along the ray from the eye to q (this is not // the depth of q, as q is just an arbitrary point near p and might // occupy empty space). To find the nearest depth we look it up in the depthmap. float2 rz = gNormalDepthMap.SampleLevel(samNormalDepth, projQ.xy, 0.0f).ba; float rpz = rz.r + rz.g/256.0f; rpz *= gZFar; // Reconstruct full view space position r = (rx,ry,rz). We know r // lies on the ray of q, so there exists a t such that r = t*q. // r.z = t*q.z ==> t = r.z / q.z float3 r = (rpz / q.z) * q; // // Test whether r occludes p. // * The product dot(n, normalize(r - p)) measures how much in front // of the plane(p,n) the occluder point r is. The more in front it is, the // more occlusion weight we give it. This also prevents self shadowing where // a point r on an angled plane (p,n) could give a false occlusion since they // have different depth values with respect to the eye. // * The weight of the occlusion is scaled based on how far the occluder is from // the point we are computing the occlusion of. If the occluder r is far away // from p, then it does not occlude it. // float distZ = p.z - r.z; float dp = max(dot(n, normalize(r - p)), 0.0f); float occlusion = dp * OcclusionFunction(distZ); occlusionSum += occlusion; } occlusionSum /= gSampleCount; float access = 1.0f - occlusionSum; // Sharpen the contrast of the SSAO map to make the SSAO affect more dramatic. return saturate(pow(access, 4.0f)); } technique11 Ssao { pass P0 { SetVertexShader( CompileShader( vs_5_0, VS() ) ); SetGeometryShader( NULL ); SetPixelShader( CompileShader( ps_5_0, PS(14) ) ); } }

When I check the SSAO texture before bluring with camera to an angle, the image is

and then I move camera to the right, the image is

Basically, when I move camera, the black and white areas vary heavily in the SSAO image.It's like getting an annoying amount of halo-ing on these surfaces.

The image below is the original SSAO image before bluring using DXGI_FORMAT_R16G16B16A16_FLOAT

The false display has something to do with view position and orientation, I tried to modify the cosntants value in OcclusionFunction, such as gOcclusionRadius, but it didn't work, not apparently..

How can I wipe out the wrong dark display when it's not occluded? What could be causing this?

Thank you very much.