# DX11 DX11 SSAO - Is this right? Again...

## Recommended Posts

Migi0027    4628

Hi guys,

this is kind of a repost and I am sorry for that, but this just isn't really working — or at least I don't think it is. Here are the results:

Without SSAO diffuse:

Normals:

Depth:

With SSAO:

Now some HLSL code:

// Shader resource inputs for the post-processing pass.
Texture2D t_dffmap : register(t0);     // scene color (diffuse)
Texture2D t_depthmap : register(t1);   // depth pre-pass output (z/w replicated into rgb)
Texture2D t_normalmap : register(t2);  // packed normals (stored as 0.5*n + 0.5)
Texture2D t_random : register(t3);     // tiling random-vector texture for SSAO
Texture2D t_blmextract : register(t4); // bloom-extract render target
SamplerState ss;                       // shared sampler for all of the above

// Per-frame post-processing parameters (b0). Each toggle is a float compared
// against 1 in the shader.
cbuffer PARAMSBUFFER : register(b0)
{
float time;                // elapsed time; drives the pixel-distortion effect
float hblur;               // 1 = horizontal 9-tap blur
float bloomExtract;        // 1 = bloom bright-pass only (early return)
float bloom;               // 1 = composite bloom over the base image
float pixelDisortion;      // 1 = pixel distortion effect (sic: "Disortion")
float pixelDisorterAmount; // distortion strength
float ssao;                // 1 = apply SSAO
float space;               // padding to a 16-byte multiple
};

// Bloom tuning constants (b1), consumed by the bright-pass and the composite.
cbuffer BloomBuffer : register(b1)
{
float BloomThreshold;  // brightness cutoff for the bright-pass
float BloomSaturation; // saturation applied to the bloom layer
float BaseSaturation;  // saturation applied to the base image
float BloomIntensity;  // bloom layer gain
float BaseIntensity;   // base image gain
};

// SSAO tuning constants (b2).
// NOTE(review): the reference implementation also carries a sample radius
// (e.g. g_sample_rad) used to build `coord1` in the SSAO loop; it is missing
// here, which matches the undefined `coord1` further down -- confirm against
// the original file.
cbuffer SSAOBuffer : register(b2)
{
float g_scale;     // scales distance in the occlusion falloff
float g_bias;      // subtracted from the cosine term to avoid self-occlusion
float g_intensity; // overall occlusion strength
};

// Vertex-to-pixel payload for the fullscreen post-process pass.
struct VS_Output
{
float4 Pos : SV_POSITION;
float2 Tex : TEXCOORD0;      // base screen UV
// Precomputed UVs for the 9 horizontal blur taps (only written when hblur == 1).
float2 texCoord1 : TEXCOORD1;
float2 texCoord2 : TEXCOORD2;
float2 texCoord3 : TEXCOORD3;
float2 texCoord4 : TEXCOORD4;
float2 texCoord5 : TEXCOORD5;
float2 texCoord6 : TEXCOORD6;
float2 texCoord7 : TEXCOORD7;
float2 texCoord8 : TEXCOORD8;
float2 texCoord9 : TEXCOORD9;
};

// NOTE(review): the function signature was lost in the forum paste; from the
// use of `id` below this is the fullscreen vertex shader, presumably
// `VS_Output VShader(uint id : SV_VertexID)` -- confirm against the original.
{
VS_Output Output;
// Fullscreen-triangle trick: derive UV and clip-space position directly from
// the vertex id (ids 0..2 cover the whole screen with one oversized triangle).
Output.Tex = float2((id << 1) & 2, id & 2);
Output.Pos = float4(Output.Tex * float2(2,-2) + float2(-1,1), 0, 1);

if (hblur == 1)
{
// NOTE(review): screen width is hard-coded to 800; this should come from a
// constant buffer so the blur tap spacing stays correct at other resolutions.
float texelSize = 1.0f / 800;

// Create UV coordinates for the pixel and its four horizontal neighbors on either side.
Output.texCoord1 = Output.Tex + float2(texelSize * -4.0f, 0.0f);
Output.texCoord2 = Output.Tex + float2(texelSize * -3.0f, 0.0f);
Output.texCoord3 = Output.Tex + float2(texelSize * -2.0f, 0.0f);
Output.texCoord4 = Output.Tex + float2(texelSize * -1.0f, 0.0f);
Output.texCoord5 = Output.Tex + float2(texelSize *  0.0f, 0.0f);
Output.texCoord6 = Output.Tex + float2(texelSize *  1.0f, 0.0f);
Output.texCoord7 = Output.Tex + float2(texelSize *  2.0f, 0.0f);
Output.texCoord8 = Output.Tex + float2(texelSize *  3.0f, 0.0f);
Output.texCoord9 = Output.Tex + float2(texelSize *  4.0f, 0.0f);
}

// NOTE(review): texCoord1..texCoord9 are left unwritten when hblur != 1.
// HLSL requires every output to be fully written before return, so this can
// fail to compile (or yield undefined values) -- initialize them to Output.Tex.
return Output;
}

// Helper for modifying the saturation of a color.
// NOTE(review): the signature was lost in the paste; from the call sites below
// it is presumably `float4 AdjustSaturation(float4 color, float saturation)`.
{
// The constants 0.3, 0.59, and 0.11 are luma weights: the human eye is most
// sensitive to green light, and least to blue.
float grey = dot(color, float3(0.3, 0.59, 0.11));

// saturation = 0 -> greyscale, 1 -> unchanged, >1 -> oversaturated.
return lerp(grey, color, saturation);
}

// Ambient Occlusion Stuff --------------------------------------------------

// Fetch the "position" used by the SSAO math at the given screen UV.
// NOTE(review): the depth pre-pass below writes perspective depth z/w
// replicated into rgb, so this actually returns (d, d, d) -- a non-linear
// scalar, not a view-space position. doAmbientOcclusion() then measures
// lengths and directions on it as if it were a 3D point, which matches the
// distance-dependent darkening discussed in this thread. Reconstructing the
// view-space position from depth (e.g. via the inverse projection matrix)
// is the fix Jason Z outlines.
float3 getPosition(in float2 uv)
{
return t_depthmap.Sample(ss, uv).xyz;
}

// Fetch and decode the surface normal from the normal G-buffer.
// The pre-pass stores components remapped into [0,1] (0.5 * n + 0.5), so
// undo that mapping and renormalize before use.
float3 getNormal(in float2 uv)
{
    float3 encoded = t_normalmap.Sample(ss, uv).xyz;
    float3 unpacked = encoded * 2.0f - 1.0f;
    return normalize(unpacked);
}

// Fetch a per-pixel random 2D direction by tiling the random texture across
// the screen, decoded from [0,1] to [-1,1].
// NOTE(review): the screen size (800x600) and random-texture size (64x64) are
// hard-coded here; move them into a constant buffer so the tiling survives a
// resolution change.
float2 getRandom(in float2 uv)
{
return normalize(t_random.Sample(ss, float2(800, 600) * uv / float2(64, 64)).xy * 2.0f - 1.0f);
}

// Occlusion contributed by one sample point.
//   tcoord - UV of the pixel being shaded
//   uv     - screen-space offset to the sample
//   p      - position of the shaded pixel
//   cnorm  - normal of the shaded pixel
// A sample occludes more the closer it sits to the surface (distance falloff
// scaled by g_scale) and the more directly it lies in front of it (cosine
// term, offset by g_bias to suppress self-occlusion), scaled by g_intensity.
float doAmbientOcclusion(in float2 tcoord, in float2 uv, in float3 p, in float3 cnorm)
{
    float3 delta = getPosition(tcoord + uv) - p;
    float3 dir = normalize(delta);
    float falloff = 1.0 / (1.0 + length(delta) * g_scale);
    float facing = max(0.0, dot(cnorm, dir) - g_bias);
    return facing * falloff * g_intensity;
}

// End

// NOTE(review): the signature was lost in the paste; from the body this is the
// post-process pixel shader, presumably `float4 PShader(VS_Output input) : SV_Target`.
{
// Bloom bright-pass runs on its own and returns early.
if (bloomExtract == 1)
{
// Look up the original image color.
float4 c = t_dffmap.Sample(ss, input.Tex);

// Adjust it to keep only values brighter than the specified threshold.
return saturate((c - BloomThreshold) / (1 - BloomThreshold));
}

// NOTE(review): starting the accumulator at white and *adding* the blur taps
// below overshoots 1.0; blur accumulators normally start at float4(0,0,0,0).
float4 color = float4(1.0f, 1.0f, 1.0f, 1.0f);

if (pixelDisortion == 1)
{
// Distortion factor
float NoiseX = pixelDisorterAmount * (time/1000) * sin(input.Tex.x * input.Tex.y+time/1000);
NoiseX=fmod(NoiseX,8) * fmod(NoiseX,4);

// Use our distortion factor to compute how much it will affect each
// texture coordinate
float DistortX = fmod(NoiseX,5);
float DistortY = fmod(NoiseX,5+0.002);

// Create our new texture coordinate based on our distortion factor
// NOTE(review): this *replaces* the coordinate rather than offsetting it
// (input.Tex + distortion), so the whole screen samples nearly one texel.
input.Tex = float2(DistortX,DistortY);
}

float4 dffMAP = t_dffmap.Sample(ss, input.Tex);

if (hblur == 1)
{
float weight0, weight1, weight2, weight3, weight4;
float normalization;

// Create the weights that each neighbor pixel will contribute to the blur.
weight0 = 1.0f;
weight1 = 0.9f;
weight2 = 0.55f;
weight3 = 0.18f;
weight4 = 0.1f;

// Create a normalized value to average the weights out a bit.
normalization = (weight0 + 2.0f * (weight1 + weight2 + weight3 + weight4));

// Normalize the weights.
weight0 = weight0 / normalization;
weight1 = weight1 / normalization;
weight2 = weight2 / normalization;
weight3 = weight3 / normalization;
weight4 = weight4 / normalization;

// Add the nine horizontal pixels to the color by the specific weight of each.
color += t_dffmap.Sample(ss, input.texCoord1) * weight4;
color += t_dffmap.Sample(ss, input.texCoord2) * weight3;
color += t_dffmap.Sample(ss, input.texCoord3) * weight2;
color += t_dffmap.Sample(ss, input.texCoord4) * weight1;
color += t_dffmap.Sample(ss, input.texCoord5) * weight0;
color += t_dffmap.Sample(ss, input.texCoord6) * weight1;
color += t_dffmap.Sample(ss, input.texCoord7) * weight2;
color += t_dffmap.Sample(ss, input.texCoord8) * weight3;
color += t_dffmap.Sample(ss, input.texCoord9) * weight4;
}
else
color *= dffMAP;

if (ssao == 1)
{
// Apply SSAO

// Four axis-aligned sample directions; the reference implementation rotates
// them per-pixel by `rand` via reflect() when building coord1.
const float2 vec[4] = {float2(1,0),float2(-1,0),
float2(0,1),float2(0,-1)};

float3 p = getPosition(input.Tex);
float3 n = getNormal(input.Tex);
float2 rand = getRandom(input.Tex);

float ao = 0.0f;

//**SSAO Calculation**//
int iterations = 1;
for (int j = 0; j < iterations; ++j)
{
// NOTE(review): `coord1` is never declared -- the reference implementation
// defines it here as `float2 coord1 = reflect(vec[j], rand) * sampleRadius;`.
// As posted this should not even compile, so the running shader must differ
// from this paste; the sample-radius constant is also missing from SSAOBuffer.
// coord2 is coord1 rotated 45 degrees (0.707 ~= cos/sin 45) to vary the taps.
float2 coord2 = float2(coord1.x*0.707 - coord1.y*0.707,
coord1.x*0.707 + coord1.y*0.707);

ao += doAmbientOcclusion(input.Tex,coord1*0.25, p, n);
ao += doAmbientOcclusion(input.Tex,coord2*0.5, p, n);
ao += doAmbientOcclusion(input.Tex,coord1*0.75, p, n);
ao += doAmbientOcclusion(input.Tex,coord2, p, n);
}
ao/=(float)iterations*4.0;
// NOTE(review): `ao` accumulates *occlusion*, so multiplying by it directly
// drives unoccluded pixels (ao ~ 0) toward black. The conventional composite
// is `color.rgb *= saturate(1.0f - ao);` -- likely part of the darkening bug.
color.rgb *= ao;
}

if(bloom == 1)
{
// Look up the bloom and original base image colors.
float4 cbloom = t_blmextract.Sample(ss, input.Tex);
float4 base = color;

// Adjust color saturation and intensity.
cbloom = AdjustSaturation(cbloom, BloomSaturation) * BloomIntensity;
base = AdjustSaturation(base, BaseSaturation) * BaseIntensity;

// Darken down the base image in areas where there is a lot of bloom,
// to prevent things looking excessively burned-out.
base *= (1 - saturate(cbloom));

// Combine the two images.
color = base + cbloom;
}

return color;
}


The variables for the SSAO is the following:

SSAOParameters.g_scale = 1;
SSAOParameters.g_intensity = 1;
SSAOParameters.g_bias = 0.001f;

Now, what on earth, if anything, am I doing wrong?

Thank You
Edited by Migi0027

##### Share on other sites
Jason Z    6434

Do you have an image with SSAO enabled?  The only thing that looks odd is the fact that your shadow isn't filled at all, but that isn't really relevant to SSAO at all.

##### Share on other sites
Migi0027    4628

Sorry, some kind of error occurred so the rest of the post wasn't shown. Please re-read the post, or at least the bottom.

Sorry

##### Share on other sites
Migi0027    4628

Do you have an image with SSAO enabled?  The only thing that looks odd is the fact that your shadow isn't filled at all, but that isn't really relevant to SSAO at all.

Ohh and the box, it's because receive shadows (variable) has been disabled for that box, I was just unlucky.

##### Share on other sites
Jason Z    6434

The basics of SSAO appear to be generally working, as the samples tend to be darker near a corner.  However, you seem to have some sort of a depth bias in your calculations, because further into the scene things are getting darker.  If you look at the floor plane then you can see the difference between the foreground and the background, even though they have the exact same occluders in their local areas.

When you select the samples to take, are they being done in screen space or are they reprojected into world space?

##### Share on other sites
Migi0027    4628

The darkness in the scene with SSAO, can it be because of my depth buffer?

I'm just trying to find a possible solution for this problem.

##### Share on other sites
Migi0027    4628

Ohh, this might be useful:

The shader for normal and depth rendering:

// Per-object constants for the depth/normal pre-pass (b0).
cbuffer ConstantObjectBuffer : register (b0)
{
matrix worldMatrix;
matrix viewMatrix;
matrix projectionMatrix;

float state;     // output selector: 5 or 2 = depth, 6 = normals (see pixel shader)
float _instance; // 1 = apply the per-instance translation in the vertex shader
float _alphamap; // alpha-map toggle; not used in the code shown in this excerpt
};

// Vertex-to-pixel payload for the depth/normal pre-pass.
struct VOut
{
float4 position : SV_POSITION;
float4 depthPosition : TEXTURE0; // copy of the clip-space position, for z/w in the pixel shader
float4 normal : NORMAL;          // NOTE(review): passed through untransformed (object space)
float2 texcoord : TEXCOORD;
float Depth : DEPTH;             // raw clip-space z; unused by the pixel shader shown below
};

// Alpha-mask texture and shared sampler (the _alphamap path is not shown here).
Texture2D t_alphamap;
SamplerState ss;

// Vertex shader for the depth/normal pre-pass: transforms the vertex and
// forwards the data the pixel shader needs to emit either depth or normals.
VOut VShader(float4 position : POSITION, float4 normal : NORMAL, float2 texcoord : TEXCOORD, float3 instancePosition : INSTANCEPOS)
{
VOut output;

// Per-instance translation, applied in object space before the world transform.
if (_instance == 1)
{
position.x += instancePosition.x;
position.y += instancePosition.y;
position.z += instancePosition.z;
}

// Force w = 1 so the vertex behaves as a point under the transforms below.
position.w = 1.0f;
output.texcoord = texcoord;

// Calculate the position of the vertex against the world, view, and projection matrices.
output.position = mul(position, worldMatrix);
output.position = mul(output.position, viewMatrix);
output.position = mul(output.position, projectionMatrix);

// NOTE(review): the normal is forwarded in object space. The SSAO pass
// decodes it as if it were view-space; rotating it by the world and view
// matrices here (with w forced to 0 so translation is ignored) would make
// the two passes agree.
output.normal = normal;

// Store the position value in a second input value for depth value calculations.
output.depthPosition = output.position;
// Raw projection-space z (before the perspective divide).
output.Depth = output.position.z;

return output;
}

// NOTE(review): the signature was lost in the paste; presumably
// `float4 PShader(VOut input) : SV_Target` for the depth/normal pre-pass.
{
float4 color = float4(1,1,1,1);

if (state == 5 || state == 2) // DEPTH
{
// Perspective (post-projection) depth. NOTE(review): z/w is non-linear in
// view distance, which matches Jason Z's diagnosis in this thread -- the
// SSAO pass reads this back as if it were a linear position, skewing far
// pixels toward occlusion. Writing linear view-space depth (or the full
// view-space position) here is the usual fix.
float depthValue = input.depthPosition.z / input.depthPosition.w;

color = float4(depthValue, depthValue, depthValue, 1);
}
else if (state == 6) // NORMALS
{
// Pack the [-1,1] normal into [0,1] for storage (decoded by getNormal()).
// NOTE(review): despite the variable name, input.normal was never
// transformed in the vertex shader, so this encodes the object-space
// normal, not a view-space one. Also, the float4 expression is implicitly
// truncated to float3 here.
float3 viewSpaceNormalizedNormals = 0.5 * normalize (input.normal) + 0.5;
color = float4(viewSpaceNormalizedNormals, 1);
}

return color;
}


Don't worry about the ifs, what's really important is if I'm mapping the depth and normal buffers correctly?

##### Share on other sites
Jason Z    6434

It looks like you are using perspective depth, which would probably explain the increasing darkness as you move into the scene.  You are using screen space offsets from the pixel being processed, which aren't linear with z, so your pixels that are far away are skewed toward being occluded.

What you should be doing is to find the linear space position of the current pixel, then add a linear space vector offset to it, then reproject that point back to projection space and sample the point.  That would (most likely) make the darkening go away.

##### Share on other sites
Migi0027    4628

Sorry, but how would I achieve that Jason Z?

Some of these topics are still quite new for me as this is self taught, so I'm still trying to understand every single bit.

##### Share on other sites
Jason Z    6434

Are you familiar with the various spaces in the rendering pipeline?  For example, your transform typically goes from object (model) space to world space to view space, and these are all simple transformations that just change the orientation and the location of the origin relative to the previous space.

The projection matrix is different though, since it warps the geometry of the scene so that a frustum shaped chunk of the scene fits into a cube.  This non linear behavior is what I suspect is your issue.

So the steps in the process that you need to implement in order to find out if this is the case are all in your shader:

1. For the pixel currently being calculated, find its view space position.  You will need to instrument your shader for this - either passing the view space position in your attributes, or passing an inverse projection matrix in your constant buffers.
2. When you do the offsets in your depth samples, these are now being applied to that view space position.  They will also be in your regular units as well (i.e. meters or whatever unit you use), so it is more logical to think about how large the radius is.
3. However, to look up where that 3D view-space offset location lands in your depth buffer, you need to re-project the point and find its location in the depth buffer.  This can either use the projection matrix, or you can just do the simple math version on the xy coordinates (since those are what is needed to find the depth buffer location).

Have you tried to implement any of these steps yet?  If so, which ones are you getting hung up on?

##### Share on other sites
Migi0027    4628

So I pass the inverse projection matrix to the post process shader (with ssao)?

And then I'm stuck in the 3rd step. So I'm supposed to somehow edit this:

const float2 vec[4] = {float2(1,0),float2(-1,0),
float2(0,1),float2(0,-1)};

float3 p = getPosition(input.Tex);
float3 n = getNormal(input.Tex);
float2 rand = getRandom(input.Tex);

float ao = 0.0f;

//**SSAO Calculation**//
int iterations = 1;
for (int j = 0; j < iterations; ++j)
{
float2 coord2 = float2(coord1.x*0.707 - coord1.y*0.707,
coord1.x*0.707 + coord1.y*0.707);

ao += doAmbientOcclusion(input.Tex,coord1*0.25, p, n);
ao += doAmbientOcclusion(input.Tex,coord2*0.5, p, n);
ao += doAmbientOcclusion(input.Tex,coord1*0.75, p, n);
ao += doAmbientOcclusion(input.Tex,coord2, p, n);
}
ao/=(float)iterations*4.0;
color.rgb *= ao;


But exactly how?

##### Share on other sites
Jason Z    6434

I'm sorry, but I am not going to write the shader for you.  Do you have specific questions about how it works?

##### Share on other sites
Migi0027    4628

It's more that I don't understand exactly what I'm supposed to do...

##### Share on other sites
Jason Z    6434

Do you have specific questions about how it works?

That's why I'm asking you if you have any specific questions about how it works!  That is also why I listed the process in steps, so that you can direct questions about a particular portion of the process.  You need to think about each step, and ask us a question about it - there are many people here willing to help, but I doubt anyone is going to just write the shader for you and say here is your solution.

If you have absolutely no idea what those process steps mean, then ask a question about them, don't ask for a code example showing it.

##### Share on other sites
Migi0027    4628

Sorry for the trouble!

It's in the step 3:

However, to look up where that 3D view-space offset location lands in your depth buffer, you need to re-project the point and find its location in the depth buffer.  This can either use the projection matrix, or you can just do the simple math version on the xy coordinates (since those are what is needed to find the depth buffer location).

So how can I re-project a certain point and then find its position in my depth buffer?

##### Share on other sites
Jason Z    6434

In this case, you can either directly use a projection matrix (which must be by itself with no view matrix multiplied) that is supplied through a constant buffer, or you can do some of the math that the projection matrix normally does in your own code.  The latter is more efficient, since you are only worried about the xy coordinates so you know where to sample the buffer.

So to do the math on only the xy coordinates, try taking a look at the formula for the projection matrix that you are using, and write out the equation for only the x and y.  This will guide you on what math is required to get back to clip-space coordinates.  Once you have these clip-space coordinates, you just need to remap them to texture coordinates and sample the texture.

If you aren't too familiar with matrix math, then check out the Wikipedia page for how a vector is multiplied by a matrix, and give it a shot.  You can always post questions here if something isn't clear to you.

## Create an account

Register a new account

• ### Similar Content

• Hello!

A have an issue with my point light shadows realisation.

First of all, the pixel shader path:
<source lang="Cpp">
//....
float3 toLight = plPosW.xyz - input.posW;
float3 fromLight = -toLight;
//...
float depthL = abs(fromLight.x);
if(depthL < abs(fromLight.y))
depthL = abs(fromLight.y);
if(depthL < abs(fromLight.z))
depthL = abs(fromLight.z);
float4 pH = mul(float4(0.0f, 0.0f, depthL, 1.0f), lightProj);
pH /= pH.w;
isVisible = lightDepthTex.SampleCmpLevelZero(lightDepthSampler, normalize(fromLight), pH.z).x;
</source>

lightProj matrix creation
<source lang="Cpp">
Matrix4x4 projMat = Matrix4x4::PerspectiveFovLH(0.5f * Pi, 0.01f, 1000.0f, 1.0f);
</source>
thats how i create Depth cube texture

<source lang="Cpp">
viewport->TopLeftX = 0.0f;
viewport->TopLeftY = 0.0f;
viewport->Width    = static_cast<float>(1024);
viewport->Height   = static_cast<float>(1024);
viewport->MinDepth = 0.0f;
viewport->MaxDepth = 1.0f;
D3D11_TEXTURE2D_DESC textureDesc;
textureDesc.Width = 1024;
textureDesc.Height = 1024;
textureDesc.MipLevels = 1;
textureDesc.ArraySize = 6;
textureDesc.Format = DXGI_FORMAT_R24G8_TYPELESS;
textureDesc.SampleDesc.Count = 1;
textureDesc.SampleDesc.Quality = 0;
textureDesc.Usage = D3D11_USAGE_DEFAULT;
textureDesc.CPUAccessFlags = 0;
textureDesc.MiscFlags = D3D11_RESOURCE_MISC_TEXTURECUBE;
ID3D11Texture2D* texturePtr;
HR(DeviceKeeper::GetDevice()->CreateTexture2D(&textureDesc, NULL, &texturePtr));
for(int i = 0; i < 6; ++i){
D3D11_DEPTH_STENCIL_VIEW_DESC dsvDesc;
dsvDesc.Flags = 0;
dsvDesc.Format = DXGI_FORMAT_D24_UNORM_S8_UINT;
dsvDesc.ViewDimension = D3D11_DSV_DIMENSION_TEXTURE2DARRAY;
dsvDesc.Texture2DArray = D3D11_TEX2D_ARRAY_DSV{0, i, 1};
ID3D11DepthStencilView *outDsv;
HR(DeviceKeeper::GetDevice()->CreateDepthStencilView(texturePtr, &dsvDesc, &outDsv));
edgeDsv = outDsv;
}
srvDesc.Format = DXGI_FORMAT_R24_UNORM_X8_TYPELESS;
srvDesc.ViewDimension = D3D11_SRV_DIMENSION_TEXTURECUBE;
srvDesc.TextureCube = D3D11_TEXCUBE_SRV{0, 1};
</source>
then i create six target oriented cameras and finally draw scene to cube depth according to each camera
I will be very gratefull for any help!
P.s sorry for my poor inglish)

• By isu diss
HRESULT FBXLoader::Open(HWND hWnd, char* Filename) { HRESULT hr = S_OK; if (FBXM) { FBXIOS = FbxIOSettings::Create(FBXM, IOSROOT); FBXM->SetIOSettings(FBXIOS); FBXI = FbxImporter::Create(FBXM, ""); if (!(FBXI->Initialize(Filename, -1, FBXIOS))) MessageBox(hWnd, (wchar_t*)FBXI->GetStatus().GetErrorString(), TEXT("ALM"), MB_OK); FBXS = FbxScene::Create(FBXM, "MCS"); if (!FBXS) MessageBox(hWnd, TEXT("Failed to create the scene"), TEXT("ALM"), MB_OK); if (!(FBXI->Import(FBXS))) MessageBox(hWnd, TEXT("Failed to import fbx file content into the scene"), TEXT("ALM"), MB_OK); if (FBXI) FBXI->Destroy(); FbxNode* MainNode = FBXS->GetRootNode(); int NumKids = MainNode->GetChildCount(); FbxNode* ChildNode = NULL; for (int i=0; i<NumKids; i++) { ChildNode = MainNode->GetChild(i); FbxNodeAttribute* NodeAttribute = ChildNode->GetNodeAttribute(); if (NodeAttribute->GetAttributeType() == FbxNodeAttribute::eMesh) { FbxMesh* Mesh = ChildNode->GetMesh(); NumVertices = Mesh->GetControlPointsCount();//number of vertices MyV = new FBXVTX[NumVertices]; for (DWORD j = 0; j < NumVertices; j++) { FbxVector4 Vertex = Mesh->GetControlPointAt(j);//Gets the control point at the specified index. 
MyV[j].Position = XMFLOAT3((float)Vertex.mData[0], (float)Vertex.mData[1], (float)Vertex.mData[2]); } NumIndices = Mesh->GetPolygonVertexCount();//number of indices; for cube 20 MyI = new DWORD[NumIndices]; MyI = (DWORD*)Mesh->GetPolygonVertices();//index array NumFaces = Mesh->GetPolygonCount(); MyF = new FBXFACEX[NumFaces]; for (int l=0;l<NumFaces;l++) { MyF[l].Vertices[0] = MyI[4*l]; MyF[l].Vertices[1] = MyI[4*l+1]; MyF[l].Vertices[2] = MyI[4*l+2]; MyF[l].Vertices[3] = MyI[4*l+3]; } UV = new XMFLOAT2[NumIndices]; for (int i = 0; i < Mesh->GetPolygonCount(); i++)//polygon(=mostly rectangle) count { FbxLayerElementArrayTemplate<FbxVector2>* uvVertices = NULL; Mesh->GetTextureUV(&uvVertices); for (int j = 0; j < Mesh->GetPolygonSize(i); j++)//retrieves number of vertices in a polygon { FbxVector2 uv = uvVertices->GetAt(Mesh->GetTextureUVIndex(i, j)); UV[4*i+j] = XMFLOAT2((float)uv.mData[0], (float)uv.mData[1]); } } } } } else MessageBox(hWnd, TEXT("Failed to create the FBX Manager"), TEXT("ALM"), MB_OK); return hr; } I've been trying to load fbx files(cube.fbx) into my programme. but I get this. Can someone pls help me?

• Hi Guys,
I am having a bit of a problem with a dynamic texture.
It is creating without error and I am attempting to initialize the first pixel to white to make sure I am mapping correctly. But when I draw the texture to the quad it displays the whole quad white (instead of just one pixel).
This is how I am creating, mapping, and setting the first pixel to white. But as mentioned, when I draw the quad, the entire quad is white.

// Create dynamic texture D3D11_TEXTURE2D_DESC textureDesc = { 0 }; textureDesc.Width = 2048; textureDesc.Height = 2048; textureDesc.MipLevels = 1; textureDesc.ArraySize = 1; textureDesc.Format = DXGI_FORMAT_B8G8R8A8_UNORM; textureDesc.SampleDesc.Count = 1; textureDesc.Usage = D3D11_USAGE_DYNAMIC; textureDesc.BindFlags = D3D11_BIND_SHADER_RESOURCE; textureDesc.CPUAccessFlags = D3D11_CPU_ACCESS_WRITE; textureDesc.MiscFlags = 0; HRESULT result = d3dDevice->CreateTexture2D(&textureDesc, NULL, &textureDynamic); if (FAILED(result)) return -1; result = d3dDevice->CreateShaderResourceView(textureDynamic, 0, &textureRV); if (FAILED(result)) return -2; D3D11_MAPPED_SUBRESOURCE resource; if (FAILED(d3dContext->Map(textureDynamic, 0, D3D11_MAP_WRITE_DISCARD, 0, &resource))) return -1; memset(resource.pData, 255, 4); d3dContext->Unmap(textureDynamic, 0);
Hopefully I have just made an oversight somewhere.
Any assistance would be greatly appreciated
(If I change the 255 value to 128 the quad then turns grey, so the mapping is definitely doing something. Just can't work out why it is colouring the whole quad and not the first pixel)

• Just a really quick question - is there any overhead to using DrawIndexedInstanced even for geometry you just render once vs using DrawIndexed? Or is the details obfuscated by the graphics driver?
I would assume no but you never know
• By isu diss
I'm trying to code Rayleigh part of Nishita's model (Display Method of the Sky Color Taking into Account Multiple Scattering). I get black screen no colors. Can anyone find the issue for me?