
Calculating viewspace coordinates from depth and texture coordinates

13 February 2013 - 12:15 PM

Hello,

I'm trying to implement Nvidia's Screen Space Fluid Rendering in Ogre, using Direct3D9 as my render system (http://developer.download.nvidia.com/presentations/2010/gdc/Direct3D_Effects.pdf for the slides and http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.157.909&rep=rep1&type=pdf for the paper).

 

I have my billboard quad particles, and with a shader they look like spheres. I write the depth of those spheres into a texture.

The next step would be to blur that depth.

Then I put the (blurred) texture on a screen-filling quad and run the next shader on it.

At this point I have to create the normals. To get the normals I have to calculate the view-space coordinates from the depth and the texture coordinates (see slide 18).
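
For reference, here is a minimal sketch (my assumption: a standard D3D-style projection matrix, called proj below) of the mapping the depth pass applies, i.e. what the reconstruction has to undo:

// Sketch: forward mapping from eye space to texture coordinates plus stored depth.
// "proj" is assumed to be the same projection matrix used when rendering the spheres.
float3 eyeToUvDepth(float3 eyePos, float4x4 proj)
{
    float4 clip = mul(proj, float4(eyePos, 1.0));
    float3 ndc  = clip.xyz / clip.w;                            // x,y in [-1,1], z in [0,1] for D3D
    float2 uv   = float2(ndc.x * 0.5 + 0.5, 0.5 - ndc.y * 0.5); // texture space has y flipped
    return float3(uv, ndc.z);                                   // uv plus the depth that lands in the texture
}

Undoing this means: map uv back to [-1,1], build a clip-space point with the stored depth, multiply by the inverse projection, and divide by w.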

 

My problem is the mysterious uvToEye function.

 

I found some code for reconstructing the position here: http://mynameismjp.wordpress.com/2009/03/10/reconstructing-position-from-depth/ but it doesn't work for me (all I seem to get is (0,0,0) for every point).
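
In case the inverse-projection route stays stubborn, a common alternative is to avoid it altogether: store linear eye-space depth and scale a per-pixel view ray by it. A minimal sketch, assuming the depth texture holds linear eye-space depth and that tanHalfFovY and aspect describe the same projection used in the depth pass:

// Sketch: view-space position from linear eye-space depth and the frustum extents.
// tanHalfFovY = tan(vertical FOV / 2), aspect = width / height (assumed parameters).
float3 uvToEyeLinear(float2 texCoord, float eyeDepth, float tanHalfFovY, float aspect)
{
    // map [0,1] texture coordinates to [-1,1] NDC, flipping y for D3D texture space
    float2 ndc = float2(texCoord.x * 2.0 - 1.0, (1.0 - texCoord.y) * 2.0 - 1.0);
    // direction through this pixel at eye-space depth 1
    float3 ray = float3(ndc.x * tanHalfFovY * aspect, ndc.y * tanHalfFovY, 1.0);
    // push it out to the stored depth (negate if your view space looks down -z)
    return ray * eyeDepth;
}

This only works together with a depth pass that writes linear eye-space depth (see the variant further down).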

 

This is my code. I skipped the blur step, since there is no point in blurring the depth before I can even calculate normals from it.

 

My shader for writing the depth and making the billboard quads look spherical:

struct VertexIn
{
	float4 pos    	: POSITION;
	float2 texco    : TEXCOORD0;
};
 
struct VertexOut
{
	float4 	pos			: POSITION;
	float2 	texco			: TEXCOORD0;
	float3	eyeSpacePos		: TEXCOORD1;
};
 
struct PixelInput
{	
	float2	texco			: TEXCOORD0;
	float3	eyeSpacePos		: TEXCOORD1;
};

struct PixelOutput
{
	float4	color		: COLOR0;
};

 
VertexOut mainVS(	VertexIn	input,
			uniform float4x4	worldViewProj_m,
			uniform float4x4	worldView_m,
			uniform float		sphereRadius
   	    )
{
	VertexOut output = (VertexOut)0;
	input.pos.y += sphereRadius; // keep the particles from sinking into the ground
	output.pos = mul( worldViewProj_m, input.pos );
	output.texco = input.texco;
	output.eyeSpacePos = mul(worldView_m, input.pos).xyz; 
	return output;
	
}

PixelOutput mainPS(PixelInput input,
                   uniform float4x4 projMatrix,
                   uniform float sphereRadius)
{
    PixelOutput output = (PixelOutput)0;

    // calculate eye-space sphere normal from texture coordinates
    float3 N;
    N.xy = input.texco * 2.0 - 1.0;
    float r2 = dot(N.xy, N.xy);
    if (r2 > 1.0) discard;   // kill pixels outside the circle
    N.z = sqrt(1.0 - r2);

    // calculate depth
    float4 pixelPos = float4(input.eyeSpacePos + N * sphereRadius, 1.0); // approximation of a sphere
    float4 clipSpacePos = mul(projMatrix, pixelPos);
    float fragDepth = clipSpacePos.z / clipSpacePos.w;
    output.color.r = fragDepth;
    return output;
}
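
As a side note, here is a minimal variant of that pixel shader (my assumption: the render target is a float format such as Ogre's PF_FLOAT32_R) that writes linear eye-space depth instead of clipSpacePos.z / clipSpacePos.w. Linear values are in view-space units, which makes the texture much easier to inspect, and they are what the frustum-ray sketch above expects:

// Sketch: same sphere trick, but the output is linear eye-space depth.
PixelOutput mainPS_linearDepth(PixelInput input,
                               uniform float sphereRadius)
{
    PixelOutput output = (PixelOutput)0;

    // eye-space sphere normal from the quad's texture coordinates, as above
    float3 N;
    N.xy = input.texco * 2.0 - 1.0;
    float r2 = dot(N.xy, N.xy);
    if (r2 > 1.0) discard;
    N.z = sqrt(1.0 - r2);

    // depth of the displaced sphere surface in eye space
    // (if your view space looks down -z, the visible depth is -pixelPos.z)
    float3 pixelPos = input.eyeSpacePos + N * sphereRadius;
    output.color.r = pixelPos.z;
    return output;
}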



 

And this is my shader for calculating the normals:

struct VertexIn
{
	float4 pos    	: POSITION;
	float2 texco    : TEXCOORD0;
};
 
struct VertexOut
{
	float4 	pos	: POSITION;
	float2 	texco	: TEXCOORD0;
};
 
struct PixelInput
{	
	float2	texco	: TEXCOORD0;
};

struct PixelOutput
{
	float4	color	: COLOR0;
};

 
VertexOut mainVS(VertexIn input,
		uniform float4x4 worldViewProj_m)
{
	VertexOut output = (VertexOut)0;
	output.pos = mul( worldViewProj_m, input.pos );
	output.texco = input.texco;	
	return output;
}



float3 uvToEye(float2 texCoord, float depth, float4x4 iP_m){
	// Get x/w and y/w from the viewport position
	float x = texCoord.x * 2.0 - 1.0;
	float y = (1 - texCoord.y) * 2.0 - 1.0;
	//float y = texCoord.y * 2.0 - 1.0;
	float4 clipPos = float4(x , y, depth, 1.0f);
	// Transform by the inverse projection matrix
	//float4 viewPos = mul(clipPos, iP_m); 
	float4 viewPos = mul(iP_m , clipPos);
	// Divide by w to get the view-space position
	return viewPos.xyz / viewPos.w;
}
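
To narrow down where things go wrong, here is a hypothetical debug helper (not from the slides) that checks whether uvToEye really inverts the projection used in the depth pass. If the round trip fails, the matrices or the uv convention are to blame rather than the depth texture:

// Sketch: project a known eye-space point, then reconstruct it with uvToEye.
// The result should be (almost) zero if projMatrix and iP_m really are inverses
// and the uv convention matches the one used when writing the depth texture.
float3 uvToEyeRoundTripError(float3 knownEyePos, float4x4 projMatrix, float4x4 iP_m)
{
    float4 clip = mul(projMatrix, float4(knownEyePos, 1.0));
    float3 ndc  = clip.xyz / clip.w;
    float2 uv   = float2(ndc.x * 0.5 + 0.5, 0.5 - ndc.y * 0.5); // same convention as uvToEye
    return abs(uvToEye(uv, ndc.z, iP_m) - knownEyePos);
}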


PixelOutput mainPS(PixelInput input,
                   uniform sampler depthTex : register(s0),
                   uniform float4x4 invProj_m)
{
    PixelOutput output = (PixelOutput)0;

    // read depth from texture
    float depth = tex2D(depthTex, input.texco).r;

    // calculate eye-space position from depth and texture coordinates
    float3 posEye = uvToEye(input.texco, depth, invProj_m);

    // step of one texel; should be 1.0 / (depth texture width or height),
    // ideally passed in as a uniform instead of hard-coded
    float texelSize = 0.0001;

    // finite differences in x: of the two one-sided differences, keep the one
    // with the smaller depth step so normals don't smear across silhouettes
    float3 ddx1 = uvToEye(input.texco + float2(texelSize, 0), tex2D(depthTex, input.texco + float2(texelSize, 0)).r, invProj_m) - posEye;
    float3 ddx2 = posEye - uvToEye(input.texco + float2(-texelSize, 0), tex2D(depthTex, input.texco + float2(-texelSize, 0)).r, invProj_m);
    if (abs(ddx2.z) < abs(ddx1.z)) {
        ddx1 = ddx2;
    }

    // finite differences in y, same idea
    float3 ddy1 = uvToEye(input.texco + float2(0, texelSize), tex2D(depthTex, input.texco + float2(0, texelSize)).r, invProj_m) - posEye;
    float3 ddy2 = posEye - uvToEye(input.texco + float2(0, -texelSize), tex2D(depthTex, input.texco + float2(0, -texelSize)).r, invProj_m);
    if (abs(ddy2.z) < abs(ddy1.z)) {
        ddy1 = ddy2;
    }

    // normal from the cross product of the two tangents
    float3 n = normalize(cross(ddx1, ddy1));

    //output.color = float4(posEye, 1.0); // for testing, results in a black screen

    output.color = float4(n, 1);
    return output;
}
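
For comparison, if the shader is compiled for a profile with gradient instructions (ps_3_0, or a ps_2_x profile that supports them), the manual neighbour sampling can be replaced by the hardware's screen-space derivatives. This is cruder (it smears across silhouettes) but is a quick way to check that the position reconstruction itself works:

// Sketch: normal from hardware screen-space derivatives of the reconstructed position.
float3 normalFromDerivatives(float2 texco, sampler depthTex, float4x4 invProj_m)
{
    float3 posEye = uvToEye(texco, tex2D(depthTex, texco).r, invProj_m);
    // ddx/ddy give the change of posEye from one pixel to the next;
    // the sign of the cross product may need flipping depending on conventions
    return normalize(cross(ddx(posEye), ddy(posEye)));
}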


I hope someone here can tell me what I'm doing wrong and what the right way to do it is.

Regards,

Kilian
