DX11 - Depth Mapping - How?


Hi guys,

 

Right now I'm having a small issue with depth mapping. The issue might be small, but it's blocking my development:

 

Screenshot:

ejdbog.png

 

As you can see, there are some problems...

 

The shader (it is not optimized in any way, as that is not my current goal; the depth mapping happens in the "if (state == 5 || state == 2)" branch):

cbuffer ConstantObjectBuffer : register (b0)
{
	matrix worldMatrix;
	matrix viewMatrix;
	matrix projectionMatrix;

	float state;
	float _instance;
	float _alphamap;
	float _diffusealpha;
};

struct VOut
{
	float4 position : SV_POSITION;
	float4 normal : NORMAL;
	float2 texcoord : TEXCOORD;
    float4 depthPosition : TEXTURE0;
};

Texture2D t_alphamap;
Texture2D t_dffalpha;
SamplerState ss;

VOut VShader(float4 position : POSITION, float4 normal : NORMAL, float2 texcoord : TEXCOORD, float3 instancePosition : INSTANCEPOS)
{
	VOut output;

	if (_instance == 1)
	{
		position.x += instancePosition.x;
		position.y += instancePosition.y;
		position.z += instancePosition.z;
	}

	position.w = 1.0f;
	output.texcoord = texcoord;

	// Calculate the position of the vertex against the world, view, and projection matrices.
	output.position = mul(position, worldMatrix);
	output.position = mul(output.position, viewMatrix);
	output.position = mul(output.position, projectionMatrix);

	output.normal = normal;
	output.depthPosition = output.position;

	return output;
}

float4 PShader(VOut input) : SV_TARGET
{
	float4 color = float4(1,1,1,1);

	if (state == 5 || state == 2)
	{
		float depthValue;
		depthValue = input.depthPosition.z / 25.0f;
		color = float4(depthValue, depthValue, depthValue, 1.0f);
	}
	else if (state == 6)
	{
		float3 viewSpaceNormalizedNormals = input.normal; //0.5 * normalize (input.normal) + 0.5
		color = float4(viewSpaceNormalizedNormals, 1);
	}

	if (_alphamap == 1)
	{
		color.a *= t_alphamap.Sample(ss, input.texcoord).a;
	}

	if (_diffusealpha == 1)
	{
		color.a *= t_dffalpha.Sample(ss, input.texcoord).a;
	}

	return color;
}

 

 

Now what on earth did I do wrong?


I'm not really sure what your exact problem is or what you're trying to accomplish here. However, I can tell you that dividing post-perspective z by 25.0 is not going to give you anything meaningful. Normally you would divide by w in order to get the same [0, 1] depth value that's stored in the depth buffer. However, that value isn't typically useful for visualizing, since it's non-linear. Instead you usually want to use your view-space z value (which is the w component of mul(position, projectionMatrix), AKA depthPosition.w) and divide it by your far-clip distance. This gives you a linear [0, 1] value.
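
Applied to the shader above, a minimal sketch of that idea could look like the following. DepthIn and DepthPShader are just illustrative names, and the 1000.0f far-clip distance is an assumption; use whatever far plane your projectionMatrix was actually built with:

struct DepthIn
{
	float4 position      : SV_POSITION;
	float4 depthPosition : TEXTURE0;   // clip-space position copied in the vertex shader
};

float4 DepthPShader(DepthIn input) : SV_TARGET
{
	// depthPosition.w is the view-space z produced by the projection multiply,
	// so dividing it by the far-clip distance gives a linear [0, 1] value.
	float depthValue = input.depthPosition.w / 1000.0f; // assumed far-clip distance
	return float4(depthValue, depthValue, depthValue, 1.0f);
}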


Objective: To visualize depth in a texture

___

 

So instead I should do the following:

 

depthValue = input.depthPosition.z / (input.depthPosition.w / 1000.0f);

 

or

 

depthValue = (input.depthPosition.w / 1000.0f);

 

Is this correct?


Why not pass the depth of the vertex from the vertex shader to the pixel shader in the color?

 

vertex()
{
   out.color = posttransformvertex.z;
}

frag()
{
   return in.color;
}
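
In HLSL terms that might look roughly like the sketch below. It is only a sketch: DepthVOut is an illustrative name, the constant buffer from the first post is trimmed down to just the matrices, and the 1000.0f divisor again assumes a far-clip distance of 1000 so the interpolated value stays in [0, 1]:

cbuffer ConstantObjectBuffer : register(b0)
{
	matrix worldMatrix;
	matrix viewMatrix;
	matrix projectionMatrix;
};

struct DepthVOut
{
	float4 position : SV_POSITION;
	float  depth    : TEXCOORD1;   // per-vertex depth passed to the pixel shader
};

DepthVOut VShader(float4 position : POSITION)
{
	DepthVOut output;

	position.w = 1.0f;
	output.position = mul(position, worldMatrix);
	output.position = mul(output.position, viewMatrix);
	output.position = mul(output.position, projectionMatrix);

	// View-space depth (the post-projection w) divided by an assumed
	// far-clip distance of 1000.0f.
	output.depth = output.position.w / 1000.0f;

	return output;
}

float4 PShader(DepthVOut input) : SV_TARGET
{
	return float4(input.depth, input.depth, input.depth, 1.0f);
}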


Thank you, the depth buffer now works perfectly.

 

Sorry for bringing an old topic back up, but this is about SSAO. The depth problem initially lay in my SSAO, and I believe that the normals are being output correctly as well.

 

SSAO Extraction shader:

Texture2D t_depthmap : register(t0);
Texture2D t_normalmap : register(t1);
Texture2D t_random : register(t2);
SamplerState ss;

cbuffer SSAOBuffer : register(c0)
{
	float g_scale;
	float g_bias;
	float g_sample_rad;
	float g_intensity;
	float ssaoIterations;
	float3 pppspace;

	matrix view;
};

struct VS_Output
{  
	float4 Pos : SV_POSITION;              
	float2 Tex : TEXCOORD0;
};
 
VS_Output VShader(uint id : SV_VertexID)
{
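	// Generate a fullscreen triangle directly from SV_VertexID,
	// so this pass can be drawn with a plain 3-vertex Draw call and no vertex buffer.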
	VS_Output Output;
	Output.Tex = float2((id << 1) & 2, id & 2);
	Output.Pos = float4(Output.Tex * float2(2,-2) + float2(-1,1), 0, 1);
	return Output;
}

// Helper for modifying the saturation of a color.
float4 AdjustSaturation(float4 color, float saturation)
{
	// The constants 0.3, 0.59, and 0.11 are chosen because the
	// human eye is more sensitive to green light, and less to blue.
	float grey = dot(color, float3(0.3, 0.59, 0.11));

	return lerp(grey, color, saturation);
}

// Ambient Occlusion Stuff --------------------------------------------------

float3 getPosition(in float2 uv)
{
	return mul(t_depthmap.Sample(ss, uv).xyz, view);
}

float3 getNormal(in float2 uv)
{
	return normalize(mul(t_normalmap.Sample(ss, uv).xyz * 2.0f - 1.0f, view));
}

float2 getRandom(in float2 uv)
{
	//return normalize(t_random.Sample(ss, uv ).xy * 2.0f - 1.0f); // ~100FPS
	return normalize(t_random.Sample(ss, float2(600, 800) * uv / float2(60, 60)).xy * 2.0f - 1.0f);
}

float doAmbientOcclusion(in float2 tcoord,in float2 uv, in float3 p, in float3 cnorm)
{
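	// One sample's occlusion contribution: stronger when the sampled point lies
	// along the surface normal (dot(cnorm, v)) and close to p (the 1/(1+d) falloff).
	// g_bias trims self-occlusion; g_scale and g_intensity tune distance and strength.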
	float3 diff = getPosition(tcoord + uv) - p;
	const float3 v = normalize(diff);
	const float d = length(diff)*g_scale;
	return max(0.0,dot(cnorm,v)-g_bias)*(1.0/(1.0+d))*g_intensity;
}

// End

float4 PShader(VS_Output input) : SV_TARGET
{
	// ADD SSAO ---------------------------------------------------------------
	const float2 vec[4] = {float2(1,0),float2(-1,0),
				float2(0,1),float2(0,-1)};

	float3 p = getPosition(input.Tex);
	float3 n = getNormal(input.Tex);
	float2 rand = getRandom(input.Tex);

	float ao = 0.0f;
	float rad = g_sample_rad/p.z; // g_s_r

	//**SSAO Calculation**//
	int iterations = 4;
	for (int j = 0; j < iterations; ++j)
	{
	  float2 coord1 = reflect(vec[j], rand)*rad;
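	  // coord2 is coord1 rotated by 45 degrees (0.707 ~ cos 45 = sin 45).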
	  float2 coord2 = float2(coord1.x*0.707 - coord1.y*0.707,
				  coord1.x*0.707 + coord1.y*0.707);
	  
	  ao += doAmbientOcclusion(input.Tex, coord1*0.25, p, n);
	  ao += doAmbientOcclusion(input.Tex, coord2*0.5, p, n);
	  ao += doAmbientOcclusion(input.Tex, coord1*0.75, p, n);
	  ao += doAmbientOcclusion(input.Tex, coord2, p, n);
	}
	ao/=(float)iterations*4.0;

	return ao;
}

 

 

The SSAO looks like the following:

 

df82o9.png

 

At the bottom there is a plane being rendered, but the SSAO seems to be affected by depth somehow. Why? I also have no idea if the values I used are correct, so please say if they're not.


