Calculating viewspace coordinates from depth and texture coordinates


Hello,

I'm trying to implement Nvidia's Screen Space Fluid Rendering in Ogre, using Direct3D9 as my rendering system (http://developer.download.nvidia.com/presentations/2010/gdc/Direct3D_Effects.pdf for the slides and http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.157.909&rep=rep1&type=pdf for the paper).

I've got my billboard quad particles, and with a shader they look like spheres. I save the depth of those spheres in a texture.

The next step is to blur the depth.

I then put the blurred texture on a screen-filling quad and run the next shader on it.

At this point I have to create the normals. To get them I have to calculate the view-space coordinates from the depth and the texture coordinates (see slide 18).

My problem is the mysterious uvToEye function.

I found some code to reconstruct the position here: http://mynameismjp.wordpress.com/2009/03/10/reconstructing-position-from-depth/ but it doesn't work for me (all I seem to get is (0,0,0) for every point).

This is my code. I skipped the blur step, since I figured there is no point blurring the depth as long as I can't even calculate normals from it.

My shader for writing the depth and making the billboard quads spherical:


struct VertexIn
{
	float4 pos    	: POSITION;
	float2 texco    : TEXCOORD0;
};
 
struct VertexOut
{
	float4 	pos			: POSITION;
	float2 	texco			: TEXCOORD0;
	float3	eyeSpacePos		: TEXCOORD1;
};
 
struct PixelInput
{	
	float2	texco			: TEXCOORD0;
	float3	eyeSpacePos		: TEXCOORD1;
};

struct PixelOutput
{
	float4	color		: COLOR0;
};

 
VertexOut mainVS(	VertexIn	input,
			uniform float4x4	worldViewProj_m,
			uniform float4x4	worldView_m,
			uniform float		sphereRadius
   	    )
{
	VertexOut output = (VertexOut)0;
	input.pos.y += sphereRadius; // keep the particles from sinking into the ground
	output.pos = mul( worldViewProj_m, input.pos );
	output.texco = input.texco;
	output.eyeSpacePos = mul(worldView_m, input.pos).xyz; 
	return output;
	
}

PixelOutput mainPS(PixelInput input,
		   uniform float4x4 projMatrix,
		   uniform float sphereRadius
		   )
{
	PixelOutput output = (PixelOutput)0;
	// calculate eye-space sphere normal from texture coordinates
	float3 N;
	N.xy = input.texco*2.0-1.0;
	float r2 = dot(N.xy, N.xy);
	if (r2 > 1.0) discard; // kill pixels outside circle
	N.z = sqrt(1.0 - r2);

	// calculate depth
	float4 pixelPos = float4(input.eyeSpacePos + N*sphereRadius, 1.0); // approximation of a sphere
	float4 clipSpacePos = mul(projMatrix, pixelPos);
	float fragDepth = clipSpacePos.z / clipSpacePos.w;
	output.color.r = fragDepth;
	return output;
}



And this is my shader for calculating the normals:


struct VertexIn
{
	float4 pos    	: POSITION;
	float2 texco    : TEXCOORD0;
};
 
struct VertexOut
{
	float4 	pos	: POSITION;
	float2 	texco	: TEXCOORD0;
};
 
struct PixelInput
{	
	float2	texco	: TEXCOORD0;
};

struct PixelOutput
{
	float4	color	: COLOR0;
};

 
VertexOut mainVS(VertexIn input,
		uniform float4x4 worldViewProj_m)
{
	VertexOut output = (VertexOut)0;
	output.pos = mul( worldViewProj_m, input.pos );
	output.texco = input.texco;	
	return output;
}



float3 uvToEye(float2 texCoord, float depth, float4x4 iP_m){
	// Get x/w and y/w from the viewport position
	float x = texCoord.x * 2.0 - 1.0;
	float y = (1 - texCoord.y) * 2.0 - 1.0;
	//float y = texCoord.y * 2.0 - 1.0;
	float4 clipPos = float4(x , y, depth, 1.0f);
	// Transform by the inverse projection matrix
	//float4 viewPos = mul(clipPos, iP_m); 
	float4 viewPos = mul(iP_m , clipPos);
	// Divide by w to get the view-space position
	return viewPos.xyz / viewPos.w;
}


PixelOutput mainPS(PixelInput input,
			   uniform sampler	depthTex: register(s0),
			   uniform float4x4 invProj_m)
{
	PixelOutput output = (PixelOutput)0;

	// read eye-space depth from texture
	float depth = tex2D(depthTex, input.texco).r;

	// calculate eye-space position from depth
	float texelSize = 0.0001; // correct value?

	// calculate differences
	float3 ddx1 = uvToEye(input.texco + float2(texelSize, 0), tex2D(depthTex, input.texco + float2(texelSize, 0)).r, invProj_m) - posEye;
	float3 ddx2 = posEye - uvToEye(input.texco + float2(-texelSize, 0), tex2D(depthTex, input.texco + float2(-texelSize, 0)).r, invProj_m);
	ddx1 = -ddx2 + ddx1;
		
	if (abs(ddx1.z) > abs(ddx2.z)) {
		ddx1 = ddx2;
	}
	
	float3 ddy1 = uvToEye(input.texco + float2(0, texelSize), tex2D(depthTex, input.texco + float2(0, texelSize)).r, invProj_m) - posEye;
	float3 ddy2 = posEye - uvToEye(input.texco + float2(0, -texelSize), tex2D(depthTex, input.texco + float2(0, -texelSize)).r, invProj_m);
	ddy1 = -ddy2 + ddy1;
	
	if (abs(ddy2.z) < abs(ddy1.z)) {
		ddy1 = ddy2;
	}

	// calculate normal
	float3 n = cross(ddx1, ddy1);
	n = normalize(n);

	//output.color = float4(posEye, 1.0); // for testing, results in a black screen
	output.color = float4(n, 1);
	return output;
}


I hope someone here can tell me what I'm doing wrong and what is the right way to do it.

Regards,

Kilian


I've used some reconstruction code from MJP's website too. Specifically from this page. The relevant code from there is as follows:


//In the vertex shader:

void VSBoundingVolume(  in float3 in_vPositionOS       : POSITION,
                        out float4 out_vPositionCS     : POSITION,
                        out float3 out_vPositionVS     : TEXCOORD0 )
{
    out_vPositionCS = mul(float4(in_vPositionOS, 1.0f), g_matWorldViewProj);

    // Pass along the view-space vertex position to the pixel shader
    out_vPositionVS = mul(float4(in_vPositionOS, 1.0f), g_matWorldView).xyz;
}

//Then in our pixel shader, we calculate the ray and reconstruct position like this:

float3 VSPositionFromDepth(float2 vTexCoord, float3 vPositionVS)
{
    // Calculate the frustum ray using the view-space position.
    // g_fFarClip is the distance to the camera's far clipping plane.
    // Negating the Z component only necessary for right-handed coordinates
    float3 vFrustumRayVS = vPositionVS.xyz * (g_fFarClip/-vPositionVS.z);
    return tex2D(DepthSampler, vTexCoord).x * vFrustumRayVS;
}
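
If I remember MJP's article right, there's also a full-screen-quad variant of the same idea: instead of interpolating the bounding-volume position, you interpolate the view-space far-plane corner for each quad vertex and scale that ray by a linear [0..1] depth. Rough sketch only (my own names, and it assumes your depth texture stores eye-space depth divided by the far clip distance, not z/w):


// Sketch: full-screen-quad version of the reconstruction above.
// vFrustumCornerVS = interpolated view-space position of the far-plane corner
// for this pixel; DepthSampler holds linear depth = eyeZ / g_fFarClip.
float3 VSPositionFromLinearDepth(float2 vTexCoord, float3 vFrustumCornerVS)
{
    return tex2D(DepthSampler, vTexCoord).x * vFrustumCornerVS;
}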

Also,

float texelSize = 0.0001; // correct value?

I think instead you want:


float texelSize = 1.0 / textureSize; //eg: 1.0 / 1024.0

I think maybe we're overthinking the problem though :)

So you want the normals of the billboard to always face the light, I'm assuming? If so, you could do something like the following in the vertex shader:


//note : swap mul order depending on your matrices
float3 normal = mul(viewMatrix, lightWorldPosition - billboardedVertexPos);
normal = normalize(normal);

Thanks for your advice. I will definitely look into it.

Regards,

Kilian

Edit:

You recommended:

float texelSize = 1.0 / textureSize; //eg: 1.0 / 1024.0

What should I do if my texture is not square?

Sorry, in the above code I should have written the following since you'll be in view space anyway:


float3 normal = lightPosInViewSpace - billboardedVertexPosInViewSpace;
normal = normalize(normal);

What should I do if my texture is not square?

Then you would write:


float2 texelSize = float2(1.0 / texSizeX, 1.0 / texSizeY); 

Hey, I just got a little bit further using the code from my first post. It turned out I was passing a wrong inverse projection matrix. I fixed that, I guess; at least my screen isn't black anymore :-D This is what it looks like:

http://img221.imageshack.us/img221/8892/closeupr.png
http://img833.imageshack.us/img833/7654/wrongnormals.png

My guess is there is something wrong with my depth texture. When I view it, all the spheres are just white except when I get really close. That would explain why my normals are black inside the spheres: there is no difference in the depth, so my derivatives will be zero, and so will the calculated normal.
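
If that's the case, I think part of it is what I'm storing: clipSpacePos.z / clipSpacePos.w is non-linear and sits very close to 1.0 over most of the visible range, so the texture looks white until I get close. Just to sketch what I mean (my own addition, assuming a standard D3D projection with near plane zn and far plane zf), the eye-space depth could be recovered from that stored value like this:


// Sketch only: invert the z/w mapping of a standard D3D projection matrix.
// zn / zf are the near / far clip distances.
float linearEyeDepth(float zOverW, float zn, float zf)
{
	return zn * zf / (zf - zOverW * (zf - zn));
}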

I don't think that I want to let the normals face the light. I just want to have the normals on my spheres so I can use them and their position to shade the surface.

Regards,

Kilian

double post, can't find a button to delete it..

I don't think that I want to let the normals face the light. I just want to have the normals on my spheres so I can use them and their position to shade the surface.

Ah, sorry I misunderstood

Ok, just looking over your code compared to that in the slides... why are you doing the following? I don't see it in the example code from the slides:

ddx1 = -ddx2 + ddx1;

And in regards to the following and what you asked earlier:

+ float2(texelSize, 0)

What this code does is sample the neighbouring texels. So if you have a non-square texture, then replace float2(texelSize, 0) with float2(1.0 / texSizeX, 0) and replace float2(0, texelSize) with float2(0, 1.0 / texSizeY).
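
So the neighbour samples in your normal shader would end up looking roughly like this (sketch only, using your variable names and assuming texSizeX / texSizeY are passed in as uniforms):


float2 texel = float2(1.0 / texSizeX, 1.0 / texSizeY);
float3 ddx1 = uvToEye(input.texco + float2(texel.x, 0),
                      tex2D(depthTex, input.texco + float2(texel.x, 0)).r, invProj_m) - posEye;
float3 ddy1 = uvToEye(input.texco + float2(0, texel.y),
                      tex2D(depthTex, input.texco + float2(0, texel.y)).r, invProj_m) - posEye;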

I'd also check your depth format. Make sure you're using a 32-bit floating-point target, as 16-bit may not be enough.

I also assume you are calculating posEye as follows (since it's not defined in your original post):


float3 posEye = uvToEye(texCoord, depth);

Ok just looking over your code compared to that in the slides... Why are you doing the following, I don't see it in the example code from the slides?

ddx1 = -ddx2 + ddx1;

I tried my own variant of the derivative to see if it changed anything. I've now put it back to the original code from the slides.
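
If I'm reading the slides right, the difference code there is just this, with no extra combination step:


float3 ddx1 = uvToEye(input.texco + float2(texelSize, 0), tex2D(depthTex, input.texco + float2(texelSize, 0)).r, invProj_m) - posEye;
float3 ddx2 = posEye - uvToEye(input.texco - float2(texelSize, 0), tex2D(depthTex, input.texco - float2(texelSize, 0)).r, invProj_m);
if (abs(ddx1.z) > abs(ddx2.z)) {
	ddx1 = ddx2;
}
// and the same for ddy1/ddy2 with a float2(0, texelSize) offset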

And in regards to the following and what you asked earlier:

+ float2(texelSize, 0)

What this code does is sample neighbouring texels. So if you have a non-square texture then replace float2(texelSize, 0) with float2(1.0 / texSizeX, 0) and replace float2(0, texelSize) with float2(0, 1.0 / texSizeY).

Alright, I figured that out, but even if I'm not using the correct texelSize, I don't think it's the cause of my problem. I will fix it now though.

I'd also check your depth format. Check you're using a 32bit floating point target as 16bit may not be enough.

It's a 32-bit target (called Ogre::PF_FLOAT32_R)

I also assume you are calculating posEye as follows (cause it's not defined in your original post)


float3 posEye = uvToEye(texCoord, depth);

Yeah, it's float3 posEye = uvToEye(input.texco, depth, invProj_m);

Don't know why it's not in my original post, it's definitely in my program.
