
DX11 SSAO - Is this right? Again...

Migi0027

Hi guys,

 

this is kind of a repost and I'm sorry for that, but this just isn't really working - or at least I don't think it is. Here are the results:

 

Without SSAO (diffuse):

 

[image: 1eubli.png]

 

Normals:

[image: LgJBxqq.png]

 

Depth:

[image: UeZqRES.png]

 

With SSAO:

[image: FtCntNn.png]

 

Now some HLSL code:

Texture2D t_dffmap : register(t0);
Texture2D t_depthmap : register(t1);
Texture2D t_normalmap : register(t2);
Texture2D t_random : register(t3);
Texture2D t_blmextract : register(t4);
SamplerState ss;

cbuffer PARAMSBUFFER : register(b0)
{
	float time;
	float hblur;
	float bloomExtract;
	float bloom;
	float pixelDisortion;
	float pixelDisorterAmount;
	float ssao;
	float space;
};

cbuffer BloomBuffer : register(b1)
{
	float BloomThreshold;
	float BloomSaturation;
	float BaseSaturation;
	float BloomIntensity;
	float BaseIntensity;
};

cbuffer SSAOBuffer : register(b2)
{
	float g_scale;
	float g_bias;
	float g_sample_rad;
	float g_intensity;
};

struct VS_Output
{  
    float4 Pos : SV_POSITION;              
    float2 Tex : TEXCOORD0;
    float2 texCoord1 : TEXCOORD1;
    float2 texCoord2 : TEXCOORD2;
    float2 texCoord3 : TEXCOORD3;
    float2 texCoord4 : TEXCOORD4;
    float2 texCoord5 : TEXCOORD5;
    float2 texCoord6 : TEXCOORD6;
    float2 texCoord7 : TEXCOORD7;
    float2 texCoord8 : TEXCOORD8;
    float2 texCoord9 : TEXCOORD9;
};
 
VS_Output VShader(uint id : SV_VertexID)
{
    VS_Output Output;

    // Generate a fullscreen triangle from the vertex ID (no vertex buffer bound).
    Output.Tex = float2((id << 1) & 2, id & 2);
    Output.Pos = float4(Output.Tex * float2(2,-2) + float2(-1,1), 0, 1);

	if (hblur == 1)
	{
		float texelSize = 1.0f / 800;

		// Create UV coordinates for the pixel and its four horizontal neighbors on either side.
		Output.texCoord1 = Output.Tex + float2(texelSize * -4.0f, 0.0f);
		Output.texCoord2 = Output.Tex + float2(texelSize * -3.0f, 0.0f);
		Output.texCoord3 = Output.Tex + float2(texelSize * -2.0f, 0.0f);
		Output.texCoord4 = Output.Tex + float2(texelSize * -1.0f, 0.0f);
		Output.texCoord5 = Output.Tex + float2(texelSize *  0.0f, 0.0f);
		Output.texCoord6 = Output.Tex + float2(texelSize *  1.0f, 0.0f);
		Output.texCoord7 = Output.Tex + float2(texelSize *  2.0f, 0.0f);
		Output.texCoord8 = Output.Tex + float2(texelSize *  3.0f, 0.0f);
		Output.texCoord9 = Output.Tex + float2(texelSize *  4.0f, 0.0f);
	}
	
    return Output;
}

// Helper for modifying the saturation of a color.
float4 AdjustSaturation(float4 color, float saturation)
{
    // The constants 0.3, 0.59, and 0.11 are chosen because the
    // human eye is more sensitive to green light, and less to blue.
    float grey = dot(color.rgb, float3(0.3, 0.59, 0.11));

    return lerp(grey, color, saturation);
}

// Ambient Occlusion Stuff --------------------------------------------------

float3 getPosition(in float2 uv)
{
	// Note: the depth target stores z/w replicated across rgb (see the depth
	// pass later in the thread), so this returns a grayscale depth triplet
	// rather than a true position.
	return t_depthmap.Sample(ss, uv).xyz;
}

float3 getNormal(in float2 uv)
{
	return normalize(t_normalmap.Sample(ss, uv).xyz * 2.0f - 1.0f);
}

float2 getRandom(in float2 uv)
{
	// Tile the 64x64 random-vector texture across the 800x600 screen.
	return normalize(t_random.Sample(ss, float2(800, 600) * uv / float2(64, 64)).xy * 2.0f - 1.0f);
}

float doAmbientOcclusion(in float2 tcoord, in float2 uv, in float3 p, in float3 cnorm)
{
	// Vector from the current point to the sampled neighbour.
	float3 diff = getPosition(tcoord + uv) - p;
	const float3 v = normalize(diff);
	const float d = length(diff) * g_scale;

	// Occlusion grows with the alignment of the normal and the direction to
	// the occluder, and falls off with distance.
	return max(0.0, dot(cnorm, v) - g_bias) * (1.0 / (1.0 + d)) * g_intensity;
}

// End

float4 PShader(VS_Output input) : SV_TARGET
{
	if (bloomExtract == 1)
	{
		// Look up the original image color.
		float4 c = t_dffmap.Sample(ss, input.Tex);

		// Adjust it to keep only values brighter than the specified threshold.
		return saturate((c - BloomThreshold) / (1 - BloomThreshold));
	}
	
	float4 color = float4(1.0f, 1.0f, 1.0f, 1.0f);
	
	if (pixelDisortion == 1)
	{
		// Distortion factor
		float NoiseX = pixelDisorterAmount * (time/1000) * sin(input.Tex.x * input.Tex.y+time/1000);
		NoiseX=fmod(NoiseX,8) * fmod(NoiseX,4); 

		// Use our distortion factor to compute how much it will affect each
		// texture coordinate
		float DistortX = fmod(NoiseX,5);
		float DistortY = fmod(NoiseX,5+0.002);
 
		// Create our new texture coordinate based on our distortion factor
		input.Tex = float2(DistortX,DistortY);
	}

	float4 dffMAP = t_dffmap.Sample(ss, input.Tex);

	if (hblur == 1)
	{
		// Reset the accumulator first (color is initialized to white above,
		// which would otherwise offset the blurred result).
		color = float4(0.0f, 0.0f, 0.0f, 0.0f);

		float weight0, weight1, weight2, weight3, weight4;
		float normalization;

		// Create the weights that each neighbor pixel will contribute to the blur.
		weight0 = 1.0f;
		weight1 = 0.9f;
		weight2 = 0.55f;
		weight3 = 0.18f;
		weight4 = 0.1f;

		 // Create a normalized value to average the weights out a bit.
		normalization = (weight0 + 2.0f * (weight1 + weight2 + weight3 + weight4));

		// Normalize the weights.
		weight0 = weight0 / normalization;
		weight1 = weight1 / normalization;
		weight2 = weight2 / normalization;
		weight3 = weight3 / normalization;
		weight4 = weight4 / normalization;

		// Add the nine horizontal pixels to the color by the specific weight of each.
		color += t_dffmap.Sample(ss, input.texCoord1) * weight4;
		color += t_dffmap.Sample(ss, input.texCoord2) * weight3;
		color += t_dffmap.Sample(ss, input.texCoord3) * weight2;
		color += t_dffmap.Sample(ss, input.texCoord4) * weight1;
		color += t_dffmap.Sample(ss, input.texCoord5) * weight0;
		color += t_dffmap.Sample(ss, input.texCoord6) * weight1;
		color += t_dffmap.Sample(ss, input.texCoord7) * weight2;
		color += t_dffmap.Sample(ss, input.texCoord8) * weight3;
		color += t_dffmap.Sample(ss, input.texCoord9) * weight4;
	}
	else
		color *= dffMAP;
	
	if (ssao == 1)
	{
		// Apply SSAO

		const float2 vec[4] = {float2(1,0),float2(-1,0),
				float2(0,1),float2(0,-1)};

		float3 p = getPosition(input.Tex);
		float3 n = getNormal(input.Tex);
		float2 rand = getRandom(input.Tex);

		float ao = 0.0f;
		float rad = g_sample_rad/p.z; // g_s_r

		//**SSAO Calculation**//
		int iterations = 1;
		for (int j = 0; j < iterations; ++j)
		{
		  float2 coord1 = reflect(vec[j],rand)*rad;
		  float2 coord2 = float2(coord1.x*0.707 - coord1.y*0.707,
					  coord1.x*0.707 + coord1.y*0.707);
  
		  ao += doAmbientOcclusion(input.Tex,coord1*0.25, p, n);
		  ao += doAmbientOcclusion(input.Tex,coord2*0.5, p, n);
		  ao += doAmbientOcclusion(input.Tex,coord1*0.75, p, n);
		  ao += doAmbientOcclusion(input.Tex,coord2, p, n);
		}
		ao /= (float)iterations * 4.0;

		// Note: ao is the amount of occlusion (0 = fully unoccluded), so it is
		// more usual to apply it as color.rgb *= saturate(1.0 - ao).
		color.rgb *= ao;
	}

	if(bloom == 1)
	{
		// Look up the bloom and original base image colors.
		float4 cbloom = t_blmextract.Sample(ss, input.Tex);
		float4 base = color;
    
		// Adjust color saturation and intensity.
		cbloom = AdjustSaturation(cbloom, BloomSaturation) * BloomIntensity;
		base = AdjustSaturation(base, BaseSaturation) * BaseIntensity;
    
		// Darken down the base image in areas where there is a lot of bloom,
		// to prevent things looking excessively burned-out.
		base *= (1 - saturate(cbloom));
    
		// Combine the two images.
		color = base + cbloom;
	}

	return color;
}

 

 

The variables for the SSAO are the following:

 

 

SSAOParameters.g_sample_rad = 3;
SSAOParameters.g_scale = 1;
SSAOParameters.g_intensity = 1;
SSAOParameters.g_bias = 0.001f;
 
Now, what on earth am I doing wrong, if anything?
 
Thank You

Jason Z

Do you have an image with SSAO enabled?  The only thing that looks odd is that your shadow isn't filled in at all, but that isn't really relevant to SSAO.

Migi0027

> Do you have an image with SSAO enabled?  The only thing that looks odd is that your shadow isn't filled in at all, but that isn't really relevant to SSAO.

 

Oh, and the box: its shadow is missing because "receive shadows" (a variable) has been disabled for that box. I was just unlucky. :D

Jason Z

The basics of SSAO appear to be generally working, since samples near a corner tend to come out darker.  However, you seem to have some sort of depth bias in your calculations, because things get darker further into the scene.  If you look at the floor plane, you can see the difference between the foreground and the background, even though they have exactly the same occluders in their local areas.

 

When you select the samples to take, are they chosen in screen space, or are they reprojected into world space?

Migi0027

Oh, this might be useful:

 

The shader for normal and depth rendering:

cbuffer ConstantObjectBuffer : register (b0)
{
	matrix worldMatrix;
	matrix viewMatrix;
	matrix projectionMatrix;

	float state;
	float _instance;
	float _alphamap;
	float pad;
};

struct VOut
{
    float4 position : SV_POSITION;
    float4 depthPosition : TEXTURE0;
	float4 normal : NORMAL;
	float2 texcoord : TEXCOORD;
	float Depth : DEPTH;
};

Texture2D t_alphamap;
SamplerState ss;

VOut VShader(float4 position : POSITION, float4 normal : NORMAL, float2 texcoord : TEXCOORD, float3 instancePosition : INSTANCEPOS)
{
    VOut output;

	if (_instance == 1)
	{
		position.x += instancePosition.x;
		position.y += instancePosition.y;
		position.z += instancePosition.z;
	}

    position.w = 1.0f;
	output.texcoord = texcoord;

	// Calculate the position of the vertex against the world, view, and projection matrices.
    output.position = mul(position, worldMatrix);
    output.position = mul(output.position, viewMatrix);
    output.position = mul(output.position, projectionMatrix);

	// Note: the normal is passed through untransformed (object space), even
	// though the pixel shader below treats it as view space.
	output.normal = normal;

	// Store the position value in a second input value for depth value calculations.
	output.depthPosition = output.position;
	output.Depth = output.position.z;

    return output;
}

float4 PShader(VOut input) : SV_TARGET
{
	float4 color = float4(1,1,1,1);

	if (state == 5 || state == 2) // DEPTH
	{
		// Perspective (post-projection) depth, which is non-linear in view
		// distance - this turns out to matter for the SSAO sampling.
		float depthValue = input.depthPosition.z / input.depthPosition.w;

		color = float4(depthValue, depthValue, depthValue, 1);
	}
	else if (state == 6) // NORMALS
	{
		float3 viewSpaceNormalizedNormals = 0.5 * normalize (input.normal) + 0.5;
		color = float4(viewSpaceNormalizedNormals, 1);
	}

	return color;
}

 

Don't worry about the ifs; what really matters is whether I'm writing out the depth and normal buffers correctly.

Jason Z

It looks like you are using perspective depth, which would probably explain the increasing darkness as you move into the scene.  You are using screen-space offsets from the pixel being processed, which aren't linear with z, so pixels that are far away are skewed toward being occluded.

 

What you should be doing is find the linear-space (view space) position of the current pixel, add a linear-space vector offset to it, then reproject that point back to projection space and sample there.  That would (most likely) make the darkening go away.
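To make that concrete, here is a minimal, untested sketch of recovering the view-space position from the depth target posted above. It assumes the depth target stores z/w as written by that depth pass, and it adds a hypothetical constant buffer holding the inverse projection matrix (g_invProjection is not part of the original shader):

// Hypothetical addition: inverse projection for unprojecting stored depth.
cbuffer ReprojectionBuffer : register(b3)
{
	matrix g_invProjection;
};

float3 getViewPosition(in float2 uv)
{
	// The depth pass stores z/w, replicated across rgb.
	float depth = t_depthmap.Sample(ss, uv).r;

	// Texture coordinates -> clip space (note the y flip in Direct3D).
	float2 clip = float2(uv.x * 2.0f - 1.0f, 1.0f - uv.y * 2.0f);

	// Unproject and divide by w to recover the view-space position.
	float4 viewPos = mul(float4(clip, depth, 1.0f), g_invProjection);
	return viewPos.xyz / viewPos.w;
}

The mul order follows the row-vector convention already used in the shaders above.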

Jason Z

Are you familiar with the various spaces in the rendering pipeline?  For example, your transform typically goes from object space to world space to view space, and these are all simple transformations that just change the orientation and location of the origin of the previous space.

 

The projection matrix is different, though, since it warps the geometry of the scene so that a frustum-shaped chunk of the scene fits into a cube.  This non-linear behavior is what I suspect is your issue.

 

So the steps you need to implement in order to find out whether this is the case are all in your shader:

 

  1. For the pixel currently being calculated, find its view space position.  You will need to instrument your shader for this - either by passing the view space position in your attributes, or by passing an inverse projection matrix in your constant buffers.
  2. The offsets for your depth samples are then applied to that view space position.  They will also be in your regular units (i.e. meters or whatever unit you use), so it is easier to reason about how large the radius is.
  3. However, to look up where that 3D view space offset location lands in your depth buffer, you need to re-project the point and find its location in the depth buffer.  This can either use the projection matrix, or you can do the simple math version on just the xy coordinates (since those are what is needed to find the depth buffer location).
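For reference, here is one possible sketch of those three steps inside the SSAO loop, assuming a getViewPosition() helper like the one above and a hypothetical g_projection matrix supplied in a constant buffer (sampleDir stands for one of the rotated kernel directions, coord1/coord2 in the original code):

// Step 1: view-space position of the current pixel.
float3 p = getViewPosition(input.Tex);

// Step 2: offset in view-space units, so g_sample_rad is now expressed in
// scene units (meters or whatever you use) instead of texels.
float3 samplePos = p + float3(sampleDir, 0.0f) * g_sample_rad;

// Step 3: re-project the offset point to find where it lands in the depth
// buffer, then remap clip space [-1,1] to texture space [0,1].
float4 projected = mul(float4(samplePos, 1.0f), g_projection);
projected.xy /= projected.w;
float2 sampleUV = float2(projected.x, -projected.y) * 0.5f + 0.5f;

float3 occluderPos = getViewPosition(sampleUV);

The occlusion test then compares occluderPos against p entirely in view space, which removes the dependence on the non-linear perspective depth.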

Have you tried to implement any of these steps yet?  If so, which ones are you getting hung up on?

Migi0027

So I pass the inverse projection matrix to the post-process shader (the one with SSAO)?

 

And then I'm stuck on the 3rd step. So I'm supposed to somehow edit this:

 

const float2 vec[4] = {float2(1,0),float2(-1,0),
		float2(0,1),float2(0,-1)};

float3 p = getPosition(input.Tex);
float3 n = getNormal(input.Tex);
float2 rand = getRandom(input.Tex);

float ao = 0.0f;
float rad = g_sample_rad/p.z; // g_s_r

//**SSAO Calculation**//
int iterations = 1;
for (int j = 0; j < iterations; ++j)
{
	float2 coord1 = reflect(vec[j],rand)*rad;
	float2 coord2 = float2(coord1.x*0.707 - coord1.y*0.707,
				coord1.x*0.707 + coord1.y*0.707);
  
	ao += doAmbientOcclusion(input.Tex,coord1*0.25, p, n);
	ao += doAmbientOcclusion(input.Tex,coord2*0.5, p, n);
	ao += doAmbientOcclusion(input.Tex,coord1*0.75, p, n);
	ao += doAmbientOcclusion(input.Tex,coord2, p, n);
}
ao/=(float)iterations*4.0;
color.rgb *= ao;

 

 

But exactly how?

Jason Z

> Do you have specific questions about how it works?

That's why I'm asking you whether you have any specific questions about how it works!  That is also why I listed the process in steps, so that you can direct questions at a particular portion of the process.  You need to think about each step and ask a question about it - there are many people here willing to help, but I doubt anyone is going to just write the shader for you and say, here is your solution.

 

If you have absolutely no idea what those process steps mean, then ask a question about them; don't just ask for a code example showing it.

Migi0027

Sorry for the trouble!

 

It's step 3:

 

> However, to look up where that 3D view space offset location lands in your depth buffer, you need to re-project the point and find its location in the depth buffer.  This can either use the projection matrix, or you can do the simple math version on just the xy coordinates (since those are what is needed to find the depth buffer location).

 

So how can I re-project a certain point and then find its position in my depth buffer?

Jason Z

In this case, you can either directly use a projection matrix (on its own, with no view matrix multiplied in) supplied through a constant buffer, or you can do some of the math that the projection matrix normally does in your own code.  The latter is more efficient, since you are only worried about the xy coordinates that tell you where to sample the buffer.

 

So to do the math on only the xy coordinates, take a look at the formula for the projection matrix you are using and write out the equations for just x and y.  This will show you what math is required to get back to clip space coordinates.  Once you have these clip space coordinates, you just need to remap them to texture coordinates and sample the texture.

 

If you aren't too familiar with matrix math, then check out the Wikipedia page for how a vector is multiplied by a matrix, and give it a shot.  You can always post questions here if something isn't clear to you.
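As a rough illustration, assuming a standard left-handed perspective matrix (e.g. one built with XMMatrixPerspectiveFovLH, where P._11 and P._22 are the x and y focal scales) and the row-vector mul convention used above, the xy-only version might look like this (P and viewPos are hypothetical names):

// For this projection matrix the clip-space w is just the view-space z,
// so the perspective divide reduces to a division by viewPos.z.
float2 clipXY = float2(viewPos.x * P._11, viewPos.y * P._22) / viewPos.z;

// Remap clip space [-1,1] to texture coordinates [0,1] (y flipped).
float2 sampleUV = float2(clipXY.x, -clipXY.y) * 0.5f + 0.5f;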
