[SOLVED] Pixel world position from depth data (HLSL)

Started by CppCoder · 9 comments, last by CppCoder 15 years, 3 months ago
Hi, I'm trying to implement a light pre-pass renderer, but something goes wrong with my pixel position reconstruction, which follows the approach described by fpuig in this thread: http://www.gamedev.net/community/forums/topic.asp?topic_id=506573 First I render the depth to an R32F RT and the normal data to an A8R8G8B8 RT like this:

// VS
float4x4 matWorldViewProjection;
float4x4 matWorldView;
float4x4 matWorld;

struct VS_INPUT 
{
   float3 Pos		: POSITION0;
   float3 Normal	: NORMAL;
};

struct VS_OUTPUT 
{
   float4 hPos		: POSITION0;
   float4 Position	: TEXCOORD0;
   float3 Normal	: TEXCOORD1;
};


VS_OUTPUT vs_main(VS_INPUT Input)
{
   VS_OUTPUT Output;

   Output.hPos = mul(float4(Input.Pos, 1.0), matWorldViewProjection);
   
   Output.Position = mul(float4(Input.Pos, 1.0), matWorldView);
   Output.Normal = mul(float4(Input.Normal, 1.0), matWorld);
   
   return Output;
   
}


// PS
struct VS_INPUT
{
   float4 hPos		: POSITION0;
   float4 Position	: TEXCOORD0;
   float3 Normal	: TEXCOORD1;
};

struct VS_OUTPUT
{
   float4 Depth  : COLOR0;
   float4 Normal : COLOR1;
};


VS_OUTPUT ps_main(VS_INPUT IN) : COLOR0
{  
  VS_OUTPUT Output = (VS_OUTPUT)0;
	 
   //return PackDepthNormal(IN.Position.z / IN.Position.w, IN.Normal);
   Output.Depth	 = IN.Position.z;
   Output.Normal = float4((IN.Normal), 1);
   
   return Output;
}



Then I want to light the scene with one point light like this:

// VS
struct VS_INPUT 
{
   float3 Position : POSITION0;
};


struct VS_OUTPUT 
{
   float4 Position    : POSITION;
   float3 LightPos    : TEXCOORD0;
   float4 vPos        : TEXCOORD1;
   float3 vEyeRay     : TEXCOORD2;
};


float4 ConvertToVPos(float4 p)
{
   return float4(0.5 * (float2(p.x + p.w, p.w - p.y) + p.w * InvScreenDim.xy), p.zw);
}


float3 CreateEyeRay(float4 p)
{ 
   float ViewAspect = ScreenDim.x / ScreenDim.y;
   float TanHalfFOV = tan(radians(FOV*0.5));
	
   float3 ViewSpaceRay = float3(p.x * TanHalfFOV * ViewAspect, p.y * TanHalfFOV, p.w);

   return mul(ViewSpaceRay, matWorldViewInverse);
}


VS_OUTPUT vs_main( VS_INPUT Input )
{
   VS_OUTPUT Output = (VS_OUTPUT)0;

   Output.Position = mul(float4(Input.Position, 1.0), matWorldViewProjection);
   Output.LightPos = mul(float4(LightPos0, 1.0), matWorld);
   Output.vPos = ConvertToVPos(Output.Position);
   Output.vEyeRay = CreateEyeRay(Output.Position);

   
   return Output;
}

// PS
struct PS_INPUT
{
   float4 Position    : POSITION;
   float3 LightPos    : TEXCOORD0;
   float4 vPos        : TEXCOORD1;
   float4 vEyeRay     : TEXCOORD2;
};

float4 ps_main(PS_INPUT IN) : COLOR0
{  
   float  Depth  = tex2Dproj(DepthSampler, IN.vPos).r;
   float3 Normal = tex2Dproj(NormalSampler, IN.vPos).rgb;
	
   IN.vEyeRay.xyz /= IN.vEyeRay.z;
	
   // Compute pixel position
   float3 PixelWorldPos = normalize(IN.vEyeRay).xyz * Depth;

   //Normal = normalize(Normal);
	
   // Compute light attenuation and direction 
   float3 LightDir = (IN.LightPos - PixelWorldPos) * InvSqrLightRange;
   float Attenuation = saturate(1.0 - dot(LightDir/LightAttenuation0,
      LightDir / LightAttenuation0));
   LightDir = normalize(LightDir);
	
#ifdef USE_BLINNPHONG
   float3 Half = normalize(LightDir + EyePos);
   float specular = pow(saturate(dot(Normal, Half)), LightSpec0);
#endif

   float NL = saturate(dot(Normal, LightDir));
	
#ifdef USE_PHONG
   float3 R = -reflect(LightDir, Normal);
 	
   float specular = pow(saturate(dot(normalize(R), normalize(EyePos))), LightSpec0);
#endif
	
   float4 Color = LightColor0 * NL * Attenuation;

   return float4(Color.rgb, specular * NL);
}



I thought that all data should now be in world space, so that I could do a normal Phong equation with it. But something is wrong with this piece of code?! Have I misunderstood something? [Edited by - CppCoder on January 12, 2009 2:08:48 PM]
You're transforming the normals incorrectly, and computing z incorrectly too.

To transform normals, you can't multiply the normal by the full world matrix, because this matrix contains translation as well as rotation (I hope it doesn't have scaling, or things get much more complex), and you only rotate normals, you don't translate them. Since the translation lives in the 41, 42, 43 components of the matrix and the rotation is in the top-left 3x3 block, you need to cast it to float3x3 like this:

Output.Normal = mul(Input.Normal, (float3x3)matWorld);


You can also set the w coordinate to 0 for vectors and to 1 for points. But when you cast to float3x3 you get an m3x3 instruction, which issues 3 dp3 instructions, while multiplying a float4 by a float4x4 yields an m4x4 instruction, which issues 4 dp4 instructions. So I suppose it's better to cast to float3x3; you may gain some microseconds.
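For illustration, the two variants might look like this (a minimal sketch; both assume matWorld contains no non-uniform scaling):

// Option A: cast to float3x3, so only the rotation part is applied (m3x3 / dp3)
Output.Normal = mul(Input.Normal, (float3x3)matWorld);

// Option B: extend the normal with w = 0, so the translation row contributes nothing (m4x4 / dp4)
Output.Normal = mul(float4(Input.Normal, 0.0), matWorld).xyz;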

After multiplying a vector/point by a projection matrix, you don't yet have it in projection space (the cube {[-1,1],[-1,1],[0,1]}); you still need the perspective divide to get the final result. So to get the depth z' you compute z' = z/w.
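As a quick illustration using the names from the snippets above (a sketch, not your exact code):

float4 clipPos  = mul(float4(Input.Pos, 1.0), matWorldViewProjection); // clip space, before the divide
float  depthNDC = clipPos.z / clipPos.w;                               // post-projection depth, in [0, 1] for Direct3D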
Thank you for your reply xissburg!

OK, I've corrected my shader now, but I still get weird results. :(

Depth:
// VS
float4x4 matWorldViewProjection;
float4x4 matWorldView;
float4x4 matWorld;

struct VS_INPUT 
{
   float3 Pos      : POSITION0;
   float3 Normal   : NORMAL;
};

struct VS_OUTPUT 
{
   float4 hPos       : POSITION0;
   float4 Position   : TEXCOORD0;
   float3 Normal     : TEXCOORD1;
};

VS_OUTPUT vs_main(VS_INPUT Input)
{
   VS_OUTPUT Output;

   Output.hPos = mul(float4(Input.Pos, 1.0), matWorldViewProjection);
   
   Output.Position = mul(float4(Input.Pos, 1.0), matWorldViewProjection);
   Output.Normal = mul(Input.Normal, (float3x3)matWorld);
   
   return Output; 
}

// PS
float FarZ;

struct VS_INPUT
{
   float4 hPos       : POSITION0;
   float4 Position   : TEXCOORD0;
   float3 Normal     : TEXCOORD1;
};

struct VS_OUTPUT
{
   float4 Depth  : COLOR0;
   float4 Normal : COLOR1;
};

VS_OUTPUT ps_main(VS_INPUT IN) : COLOR0
{  
   VS_OUTPUT Output = (VS_OUTPUT)0;
   
   Output.Depth  = IN.Position.z / IN.Position.w;
   Output.Normal = float4(IN.Normal, 0);
   
   return Output;
}


Light Pass:
// VS
float4x4 matWorldViewProjection;
float4x4 matWorldViewInverse;
float4x4 matWorldView;
float4x4 matWorld;
float FOV;
float2 InvScreenDim;
float2 ScreenDim;
float3 ObjPos;
float3 ObjSize;
float3 LightPos0;

struct VS_INPUT 
{
   float3 Position : POSITION0;
};

struct VS_OUTPUT 
{
   float4 Position    : POSITION;
   float3 LightPos    : TEXCOORD0;
   float4 vPos        : TEXCOORD1;
   float3 vEyeRay     : TEXCOORD2;
};

float4 ConvertToVPos(float4 p)
{
   return float4(0.5 * (float2(p.x + p.w, p.w - p.y) + p.w * InvScreenDim.xy), p.zw);
}

float3 CreateEyeRay(float4 p)
{ 
   float ViewAspect = ScreenDim.x / ScreenDim.y;
   float TanHalfFOV = tan(radians(FOV*0.5));

   float3 ViewSpaceRay = float3(p.x * TanHalfFOV * ViewAspect, p.y * TanHalfFOV, p.w);

   return mul(ViewSpaceRay, matWorldViewInverse);
}

VS_OUTPUT vs_main( VS_INPUT Input )
{
   VS_OUTPUT Output = (VS_OUTPUT)0;

   // translate the sphere to preferred position
   //Output.Position = mul(float4(Input.Position * ObjSize + ObjPos, 1), matWorldViewProjection);
   Output.Position = mul(float4(Input.Position, 1.0), matWorldViewProjection);
   Output.LightPos = mul(float4(LightPos0, 1.0), matWorld);
   Output.vPos = ConvertToVPos(Output.Position);
   Output.vEyeRay = CreateEyeRay(Output.Position);

   return Output;
}

// PS
//#define USE_PHONG
#define USE_BLINNPHONG

sampler2D DepthSampler;
sampler2D NormalSampler;
float4 EyePos;
float4 LightColor0;
float LightAttenuation0;
float LightSpec0;
float InvSqrLightRange;

struct VS_INPUT
{
   float4 Position    : POSITION;
   float3 LightPos    : TEXCOORD0;
   float4 vPos        : TEXCOORD1;
   float4 vEyeRay     : TEXCOORD2;
};

float4 ps_main(VS_INPUT IN) : COLOR0
{  
   float  Depth  = tex2Dproj(DepthSampler, IN.vPos).r;
   float3 Normal = tex2Dproj(NormalSampler, IN.vPos).rgb;

   IN.vEyeRay.xyz /= IN.vEyeRay.z;

   // Compute pixel position
   float3 PixelWorldPos = normalize(IN.vEyeRay.xyz) * Depth;

   //Normal = normalize(Normal);

   // Compute light attenuation and direction 
   float3 LightDir = (IN.LightPos - PixelWorldPos) * InvSqrLightRange;
   float Attenuation = saturate(1.0 - dot(LightDir/LightAttenuation0, LightDir / LightAttenuation0));
   LightDir = normalize(LightDir);

#ifdef USE_BLINNPHONG
   float3 Half = normalize(EyePos - LightDir);
   float specular = pow(saturate(dot(Normal, Half)), LightSpec0);
#endif

   float NL = saturate(dot(Normal, LightDir));

#ifdef USE_PHONG
   float3 R = -reflect(LightDir, Normal);

   float specular = pow(saturate(dot(normalize(R), normalize(IN.vEyeRay))), LightSpec0);
#endif

   float4 Color = LightColor0 * NL * Attenuation;

   return float4(Color.rgb, specular * NL);
}


Please help me... I think I'm totally confused now... :(
Thanks!
I think I've now taken a step in the right direction, but something is still wrong with my shader. :(

[Screenshot of the current result (ImageShack link)]

Here is my short code:

- Depth/Normal Pass
// Vertex shader
float4x4 matWorldViewProjection;
float4x4 matWorldView;
float4x4 matWorldViewInverseTranspose;

struct VS_INPUT 
{
   float3 Position : POSITION0;
   float3 Normal   : NORMAL0;
};

struct VS_OUTPUT 
{
   float4 hPosition  : POSITION0;
   float4 Position   : TEXCOORD0;
   float3 Normal     : TEXCOORD1;
};

VS_OUTPUT vs_main( VS_INPUT Input )
{
   VS_OUTPUT Output;

   Output.hPosition = mul(float4(Input.Position, 1.0), matWorldViewProjection);
   Output.Normal = mul(Input.Normal, matWorldViewInverseTranspose);
   Output.Position = mul(float4(Input.Position, 1.0), matWorldView);
    
   return( Output );
}

// Pixel Shader
float FarZ;

struct PS_INPUT 
{
   float4 hPosition  : POSITION0;
   float4 Position   : TEXCOORD0;
   float3 Normal     : TEXCOORD1;
};

struct PS_OUTPUT
{
   float4 Depth  : COLOR0;
   float4 Normal : COLOR1;
};

PS_OUTPUT ps_main(PS_INPUT IN)
{
   PS_OUTPUT Output = (PS_OUTPUT)0;
   
   Output.Depth = IN.Position.z;
   Output.Normal = float4(normalize(IN.Normal), 0);
   
   return Output;
}


- Pass for one point light
// Vertex shader
float4x4 matWorldViewProjection;
float4x4 matWorldView;
float4x4 matWorld;
float FOV;
float fTime;
float2 InvScreenDim;
float2 ScreenDim;
float3 LightPos;

struct VS_INPUT 
{
   float4 Position : POSITION0;
};

struct VS_OUTPUT 
{
   float4 Position    : POSITION;
   float3 LightPos    : TEXCOORD0;
   float4 vPos        : TEXCOORD1;
   float3 vEyeRay     : TEXCOORD2;
};

float4 ConvertToVPos(float4 p)
{
   return float4(0.5 * (float2(p.x + p.w, p.w - p.y) + p.w * InvScreenDim.xy), p.zw);
}

float3 CreateEyeRay(float4 p)
{
   float ViewAspect = ScreenDim.x / ScreenDim.y;
   float TanHalfFOV = tan(radians(FOV * 0.5));

   float3 ViewSpaceRay = float3( p.x*TanHalfFOV*ViewAspect, p.y*TanHalfFOV, p.w);

   return mul(matWorldView, ViewSpaceRay); // or multiply by the ViewInverse in the normal order
}

VS_OUTPUT vs_main( VS_INPUT Input )
{
   VS_OUTPUT Output = (VS_OUTPUT)0;

   Output.Position = mul(Input.Position, matWorldViewProjection);
   
   Output.LightPos.x = LightPos.x + 10 * sin(fTime);
   Output.LightPos.y = LightPos.y + 10 * cos(fTime);
   Output.LightPos.z = LightPos.z;
   
   Output.vPos = ConvertToVPos(Output.Position);
   Output.vEyeRay = CreateEyeRay(Output.Position);
   
   return Output;
}

// Pixel shader
sampler2D Depth_RT;
sampler2D Normal_RT;
float4 EyePos;

struct VS_OUTPUT 
{
   float4 Position    : POSITION;
   float3 LightPos    : TEXCOORD0;
   float4 vPos        : TEXCOORD1;
   float3 vEyeRay     : TEXCOORD2;
};

float4 ps_main(VS_OUTPUT Input) : COLOR0
{
   float LightIntensity = 1.0;
   float SpecularPower = 0.8;
   float SpecularIntensity = 1.0;
   
   float3 DiffColor = { 0.75, 0.75, 0.75 };
   
   float Depth = tex2Dproj(Depth_RT, Input.vPos).r;
   float3 Normal = tex2Dproj(Normal_RT, Input.vPos).rgb;
   
   //Normal = 2.0f * Normal.xyz - 1.0f;
   //Input.vEyeRay.xyz /= Input.vEyeRay.z;
   
   float3 PixelPos = Input.vEyeRay * Depth + EyePos;
   
   float3 vLight = Input.LightPos - PixelPos;
   float Attenuation = saturate(1.0f - length(vLight)/100);
   vLight = normalize(vLight);
   
   float NL = max(0, dot(Normal, vLight));
   
   float3 Diffuse = NL * DiffColor.rgb;
   
   float3 vReflection = normalize(reflect(-vLight, Normal));
   
   float3 vViewer = normalize(EyePos - PixelPos);
   
   float Specular = SpecularIntensity * pow(saturate(dot(vReflection, vViewer)), SpecularPower);
   
   return Attenuation * LightIntensity * float4(Diffuse.rgb, Specular);
   //return float4(Normal.xyz, 0);
}


Please help! I'm really stuck now...
Like xissburg said, you forgot the perspective division for your depth.

Replace

 Output.Depth = IN.Position.z;


With

 Output.Depth = IN.Position.z / IN.Position.w;


And it should work fine.
Also, if you store your normals in an ARGB8 texture, you need to replace

Output.Normal = float4(normalize(IN.Normal), 0);


with

Output.Normal = float4(normalize(IN.Normal) * 0.5f + 0.5f, 0);


Because you need to convert values from [-1..1] to [0..1].

Warning: You will need to reconvert the normals from the texture later from [0..1] to [-1..1]. To achieve this, just do

  float3 Normal = normalize(tex2D(NormalTexture, iTex).rgb * 2.0f - 1.0f);


Where NormalTexture is the texture your Output.Normal was rendered into.
Guys, he doesn't need to divide by w: he's storing depth as view-space Z (not post-projection Z). If you have view-space Z, you can obtain the view-space or world-space position by multiplying your depth with a ray that goes from the camera position to the far plane of the frustum.

Anyway, CppCoder, there are two problems I see:

1. You want to multiply your ray vector by the View matrix, not your WorldView. What you're doing brings the ray from view-space back to object space, and you want a world-space ray.

2. You need to divide your ray vector by Z in the pixel shader.
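For what it's worth, here is a minimal sketch of that reconstruction done in view space first and only then moved into world space (matViewInverse is an assumed uniform; the other names follow your latest snippet, so treat this as a sketch rather than a drop-in fix):

// Pixel shader sketch: Depth holds the view-space Z written in the geometry pass,
// and vEyeRay is the ray built by CreateEyeRay, left in VIEW space (no WorldView multiply),
// so its z component is the view-space Z of the light-volume vertex.
float  Depth   = tex2Dproj(Depth_RT, Input.vPos).r;
float3 ViewRay = Input.vEyeRay.xyz / Input.vEyeRay.z;   // rescale the ray so its z == 1
float3 PixelViewPos = ViewRay * Depth;                  // view-space position of the shaded pixel

// If the lighting is done in world space, transform once with the inverse view matrix:
float3 PixelWorldPos = mul(float4(PixelViewPos, 1.0), matViewInverse).xyz;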
Oh, that's true. Forget the perspective division.

Anyway, your normal output code is wrong, so my previous post still applies to that part.

Cheers.
First thank you guys for your help!
I've corrected my code now with your posted solutions in mind, but the strange thing is that if I don't comment out this line...
IN.vEyeRay.xyz /= IN.vEyeRay.z;

...I get a black screen?!
I also noticed that the specular highlight seems to be wrong, because it's not centered at the light position?!
I think something is wrong with the vReflection calculation, but what?!

Here is my final result:
[Screenshot of the final result (ImageShack link)]

And here is a screenshot of all passes:
[Screenshot of all passes (ImageShack link)]


My corrected shader code now looks like this:

- Depth/Normal Pass
// Vertex shader
float4x4 matWorldViewProjection;
float4x4 matWorldView;
float4x4 matWorld;

struct VS_INPUT 
{
   float3 Pos      : POSITION0;
   float3 Normal   : NORMAL;
};

struct VS_OUTPUT 
{
   float4 hPos       : POSITION0;
   float4 Position   : TEXCOORD0;
   float3 Normal     : TEXCOORD1;
};

VS_OUTPUT vs_main(VS_INPUT Input)
{
   VS_OUTPUT Output;

   Output.hPos = mul(float4(Input.Pos, 1.0), matWorldViewProjection);
   
   Output.Position = mul(float4(Input.Pos, 1.0), matWorldViewProjection);
   Output.Normal = mul(Input.Normal, (float3x3)matWorld);
   
   return Output; 
}

// Pixel shader
float FarZ;

struct VS_INPUT
{
   float4 hPos       : POSITION0;
   float4 Position   : TEXCOORD0;
   float3 Normal     : TEXCOORD1;
};

struct VS_OUTPUT
{
   float4 Depth  : COLOR0;
   float4 Normal : COLOR1;
};

VS_OUTPUT ps_main(VS_INPUT IN) : COLOR0
{  
   VS_OUTPUT Output = (VS_OUTPUT)0;
    
   Output.Depth   = IN.Position.z / IN.Position.w;
   Output.Normal  = float4(normalize(IN.Normal) * 0.5f + 0.5f, 0);
   
   return Output;
}


- One point light pass:
// Vertex shader
float4x4 matWorldViewProjection;
float4x4 matViewInverse;
float4x4 matWorldView;
float4x4 matWorld;
float FOV;
float2 InvScreenDim;
float2 ScreenDim;
float3 ObjPos;
float3 ObjSize;
float3 LightPos0;

struct VS_INPUT 
{
   float3 Position : POSITION0;
};

struct VS_OUTPUT 
{
   float4 Position    : POSITION;
   float3 LightPos    : TEXCOORD0;
   float4 vPos        : TEXCOORD1;
   float3 vEyeRay     : TEXCOORD2;
};

float4 ConvertToVPos(float4 p)
{
   return float4(0.5 * (float2(p.x + p.w, p.w - p.y) + p.w * InvScreenDim.xy), p.zw);
}

float3 CreateEyeRay(float4 p)
{ 
   float ViewAspect = ScreenDim.x / ScreenDim.y;
   float TanHalfFOV = tan(radians(FOV*0.5));
   
   float3 ViewSpaceRay = float3(p.x * TanHalfFOV * ViewAspect, p.y * TanHalfFOV, p.w);

   return mul(ViewSpaceRay, (float3x3)matViewInverse);
}

VS_OUTPUT vs_main( VS_INPUT Input )
{
   VS_OUTPUT Output = (VS_OUTPUT)0;

   Output.Position = mul(float4(Input.Position, 1.0), matWorldViewProjection);
   Output.LightPos = mul(float4(LightPos0, 1.0), matWorld);
   Output.vPos = ConvertToVPos(Output.Position);
   Output.vEyeRay = CreateEyeRay(Output.Position);

   return Output;
}

// Pixel shader
sampler2D DepthSampler;
sampler2D NormalSampler;
float4 EyePos;
float4 LightColor0;
float LightAttenuation0;
float LightSpec0;
float LightRadius0;

struct VS_INPUT
{
   float4 Position    : POSITION;
   float3 LightPos    : TEXCOORD0;
   float4 vPos        : TEXCOORD1;
   float4 vEyeRay     : TEXCOORD2;
};

float4 ps_main(VS_INPUT IN) : COLOR0
{  
   float LightIntensity = 1.0;
   float SpecularPower = 8.0;
   float SpecularIntensity = 1.0;
   
   float3 DiffColor = { 0.75, 0.75, 0.75 };
   
   float  Depth   = tex2Dproj(DepthSampler, IN.vPos).r;
   float3 Normal  = tex2Dproj(NormalSampler, IN.vPos).rgb;
   
   Normal = normalize(2.0f * Normal - 1.0f);
   
   // only works, when commented out?!
   //IN.vEyeRay.xyz /= IN.vEyeRay.z;

   // Compute pixel position
   float3 PixelWorldPos = (IN.vEyeRay.xyz) * Depth + EyePos;

   // Compute light attenuation and direction 
   float3 vLight = IN.LightPos - PixelWorldPos;
   float Attenuation = saturate(1.0f - length(vLight)/LightRadius0);
   vLight = normalize(vLight);
   
   float NL = saturate(dot(Normal, vLight));
   
   float3 Diffuse = NL * DiffColor.rgb;
   
   float3 vReflection = normalize(reflect(-vLight, Normal));
   
   float3 vViewer = normalize(EyePos - PixelWorldPos);
   
   float Specular = SpecularIntensity * pow(saturate(dot(vReflection, vViewer)), SpecularPower);
   
   return Attenuation * LightIntensity * float4(Diffuse.rgb, Specular);
}


Again... Thank you all for your great effort!

Cheers
Does no one know a correct solution? :)
I'm not sure about this; I'm only reading this post because I'm trying to implement deferred shading myself at the moment.

But isn't z a value between 0.0 and 1.0, with z / w giving you the real depth from the near plane to the pixel position? You are writing z / w to a texture color channel, like this:

Output.Depth = IN.Position.z / IN.Position.w;

But those values can only be between 0.0 and 1.0; that's the only way they can be stored in the texture color channel.

I'm not sure about this because I'm still trying to learn for myself what the IN.Position.z value really means.

Peter

This topic is closed to new replies.
