Normal mapping problems

Started by
19 comments, last by MJP 15 years, 8 months ago
Can anyone see anything obviously wrong with the following code? I am having some trouble with the point light's position being incorrect in relation to the object. I am generating the tangent frame for the mesh via D3DXComputeTangentFrame(pMesh,NULL). Is there a problem there? Any insight would be greatly appreciated.

// Scene-level parameters, set by the application each frame.
uniform extern float4x4 gWorld : WORLD       <string Source = "Scene";>;
uniform extern float4x4 gWVP : WORLDVIEWPROJ <string Source = "Scene";>;

// Point light position (set from D3DLIGHT9::Position, i.e. world space).
uniform extern float3 gLightPos : LIGHTPOS0    <string Source = "Scene";>;

// Material parameters: diffuse color and the normal map texture.
uniform extern float4 gDiffuseMtrl : DIFFUSE   <string Source = "Material";>;
uniform extern texture gNormalMap : NORMMAP0 <string Source = "Material";>; 

// Sampler for the normal map: anisotropic minification, trilinear otherwise,
// wrapping in both U and V so the map can tile across the surface.
sampler NormalMapS = sampler_state
{
	Texture = <gNormalMap>;
	MinFilter = ANISOTROPIC;
	MaxAnisotropy = 8;
	MagFilter = LINEAR;
	MipFilter = LINEAR;
	AddressU  = WRAP;
        AddressV  = WRAP;
};

// Vertex shader input: object-space position, the per-vertex tangent frame
// (tangent/binormal/normal), and the normal-map texture coordinate.
struct InputVS
{
	float3 posL : POSITION;       // object-space position
	float3 tangent : TANGENT;     // tangent-frame basis vector
	float3 binormal : BINORMAL;   // tangent-frame basis vector
	float3 normalL : NORMAL;      // object-space vertex normal
	float2 tex0 : TEXCOORD;       // normal-map UV
};

// Vertex shader output / pixel shader input.
struct OutputVS
{
    float4 posH      : POSITION0; // clip-space position
    // NOTE(review): written below via a 3x3 world transform, so this is a
    // rotated position with no translation -- not a true world-space position.
    float3 posW      : TEXCOORD0;
    // NOTE(review): the light *position* pushed through the tangent-frame
    // matrix -- a different coordinate space than posW above.
    float3 lightT    : TEXCOORD1;
    float2 tex0      : TEXCOORD2; // normal-map UV
};

// Per-pixel point-light vertex shader (original, buggy version under
// discussion in this thread). Two likely bugs are flagged inline.
OutputVS PL_PPL_N_VS(InputVS inVS)
{
	OutputVS outVS = (OutputVS)0;
	
	// Rotate the object-space tangent frame into world space. The 3x3 cast
	// is fine here because these are directions, not positions.
	float3 normalW = mul(inVS.normalL,(float3x3)gWorld);
	float3 tangentW = mul(inVS.tangent,(float3x3)gWorld);
	float3 binormalW = mul(inVS.binormal,(float3x3)gWorld);//cross(tangentW,normalW);
	
	outVS.posH   = mul(float4(inVS.posL, 1.0f), gWVP);
	// BUG: casting gWorld to float3x3 drops the translation, so posW is only
	// rotated -- it is not the true world-space position.
	outVS.posW   = mul(inVS.posL,(float3x3)gWorld);
	// BUG: this transforms the light *position* (not a direction) by the
	// tangent-frame matrix, while the pixel shader subtracts it from the
	// (sort-of) world-space posW -- the two operands are in different
	// coordinate spaces, so the resulting light vector is meaningless.
	outVS.lightT = mul(gLightPos,float3x3(tangentW,binormalW,normalW));
	outVS.tex0   = inVS.tex0;	
	
    return outVS;
}


// Per-pixel point-light pixel shader: diffuse-only normal mapping.
float4 PL_PPL_N_PS(float3 posW      : TEXCOORD0,
                    float3 lightT    : TEXCOORD1,
		    float2 tex0      : TEXCOORD2) : COLOR
{
	// Sample the normal map. (tex2D returns float4; assigning to float3
	// implicitly truncates the fourth channel.)
	float3 normalT = tex2D(NormalMapS, tex0);
	
	// Decode from the [0,1] texture range into a [-1,1] direction.
        normalT = 2.0f*normalT - 1.0f;
    
	normalT = normalize(normalT);
	
	// BUG: posW is (approximately) world space while lightT was transformed
	// by the tangent-frame matrix in the vertex shader -- subtracting them
	// mixes coordinate spaces. Both operands must be in the same space.
	float3 lightDirT = normalize(posW - lightT);

	// Lambertian term; lightDirT points toward the surface, hence the negation.
	float s = max(dot(-lightDirT, normalT), 0.0f);
	
	float3 color = s*gDiffuseMtrl.rgb;
	
	return float4(color, 1.0);
	
	//return float4(1.0, 1.0, 1.0, 1.0);
}   


// Single-pass technique binding the per-pixel point-light shaders (SM 2.0).
technique PL_PPL_N_Tech
{
    pass P0
    {		
        Sampler[0] = (NormalMapS);		

        VertexShader = compile vs_2_0 PL_PPL_N_VS();
        PixelShader  = compile ps_2_0 PL_PPL_N_PS();
    }
}

Advertisement
Yes, you have a few problems here. First off...you're not converting your vertex position to world space. By casting your world matrix to a 3x3, you're only rotating the position and not translating it. Second, in your pixel shader you're calculating your light direction by subtracting the light position in tangent space from the pixel position in sort-of world space. This doesn't work; they both need to be in the same coordinate space. Third, you don't need to calculate the light direction in the pixel shader. You can calculate it in the vertex shader and interpolate it.

So let's rework your shader here to make some changes...

// MJP's reworked shader: the surface-to-light vector is computed in world
// space in the vertex shader, transformed into tangent space, and
// interpolated to the pixel shader.
// FIX: the output struct declared its member "lightDir" but the vertex
// shader wrote "outVS.lightDirT" (a compile error); renamed consistently.
uniform extern float4x4 gWorld : WORLD       <string Source = "Scene";>;
uniform extern float4x4 gWVP : WORLDVIEWPROJ <string Source = "Scene";>;
uniform extern float3 gLightPos : LIGHTPOS0    <string Source = "Scene";>;
uniform extern float4 gDiffuseMtrl : DIFFUSE   <string Source = "Material";>;
uniform extern texture gNormalMap : NORMMAP0 <string Source = "Material";>;

sampler NormalMapS = sampler_state
{
	Texture = <gNormalMap>;
	MinFilter = ANISOTROPIC;
	MaxAnisotropy = 8;
	MagFilter = LINEAR;
	MipFilter = LINEAR;
	AddressU  = WRAP;
	AddressV  = WRAP;
};

struct InputVS
{
	float3 posL : POSITION;
	float3 tangent : TANGENT;
	float3 binormal : BINORMAL;
	float3 normalL : NORMAL;
	float2 tex0 : TEXCOORD;
};

struct OutputVS
{
    float4 posH      : POSITION0;
    float3 lightDirT : TEXCOORD0;  // light-to-surface vector in tangent space
    float2 tex0      : TEXCOORD1;
};

OutputVS PL_PPL_N_VS(InputVS inVS)
{
	OutputVS outVS = (OutputVS)0;

	// Rotate the tangent frame into world space (3x3 is fine for directions).
	float3 normalW = mul(inVS.normalL,(float3x3)gWorld);
	float3 tangentW = mul(inVS.tangent,(float3x3)gWorld);
	float3 binormalW = mul(inVS.binormal,(float3x3)gWorld);//cross(tangentW,normalW);

	outVS.posH   = mul(float4(inVS.posL, 1.0f), gWVP);
	// Full 4x4 transform so the translation is applied: true world position.
	float3 posW   = mul(float4(inVS.posL, 1.0f), gWorld).xyz;
	// World-space light-to-surface vector, taken into tangent space.
	outVS.lightDirT = mul(posW - gLightPos,float3x3(tangentW,binormalW,normalW));
	outVS.tex0   = inVS.tex0;

    return outVS;
}

float4 PL_PPL_N_PS(float3 lightDirT    : TEXCOORD0,
		           float2 tex0      : TEXCOORD1) : COLOR
{
	// Decode the tangent-space normal from [0,1] into [-1,1].
	float3 normalT = tex2D(NormalMapS, tex0);

        normalT = 2.0f*normalT - 1.0f;

	normalT = normalize(normalT);

	// lightDirT points from light toward the surface, hence the negation.
	float s = max(dot(-normalize(lightDirT), normalT), 0.0f);

	float3 color = s*gDiffuseMtrl.rgb;

	return float4(color, 1.0);
}

technique PL_PPL_N_Tech
{
    pass P0
    {
        Sampler[0] = (NormalMapS);

        VertexShader = compile vs_2_0 PL_PPL_N_VS();
        PixelShader  = compile ps_2_0 PL_PPL_N_PS();
    }
}
So I have change the code to exactly this:
And I am getting different relative light positioning problems.

// OP's updated shader after MJP's first reply.
// FIX: removed the stray "[\source]" forum-markup residue that trailed the
// code, and restored the commented-out debug return to its own line -- in
// the collapsed one-line form that "//" comment swallowed the closing brace
// and the entire technique block.
uniform extern float4x4 gWorld : WORLD       <string Source = "Scene";>;
uniform extern float4x4 gWVP : WORLDVIEWPROJ <string Source = "Scene";>;
uniform extern float3 gLightPos : LIGHTPOS0    <string Source = "Scene";>;
uniform extern float4 gDiffuseMtrl : DIFFUSE   <string Source = "Material";>;
uniform extern texture gNormalMap : NORMMAP0 <string Source = "Material";>;

sampler NormalMapS = sampler_state
{
	Texture = <gNormalMap>;
	MinFilter = ANISOTROPIC;
	MaxAnisotropy = 8;
	MagFilter = LINEAR;
	MipFilter = LINEAR;
	AddressU  = WRAP;
	AddressV  = WRAP;
};

struct InputVS
{
	float3 posL : POSITION;
	float3 tangent : TANGENT;
	float3 binormal : BINORMAL;
	float3 normalL : NORMAL;
	float2 tex0 : TEXCOORD;
};

struct OutputVS
{
    float4 posH      : POSITION0;
    float3 lightDirT : TEXCOORD0;  // light-to-surface vector in tangent space
    float2 tex0      : TEXCOORD1;
};

OutputVS PL_PPL_N_VS(InputVS inVS)
{
	OutputVS outVS = (OutputVS)0;

	float3 normalW = mul(inVS.normalL,(float3x3)gWorld);
	float3 tangentW = mul(inVS.tangent,(float3x3)gWorld);
	float3 binormalW = mul(inVS.binormal,(float3x3)gWorld);//cross(tangentW,normalW);

	outVS.posH      = mul(float4(inVS.posL, 1.0f), gWVP);
	float3 posW     = mul(float4(inVS.posL, 1.0f), gWorld).xyz;
	outVS.lightDirT = mul(posW - gLightPos,float3x3(tangentW,binormalW,normalW));
	outVS.tex0      = inVS.tex0;

    return outVS;
}

float4 PL_PPL_N_PS(float3 lightDirT : TEXCOORD0,
		           float2 tex0      : TEXCOORD1) : COLOR
{
	float3 normalT = tex2D(NormalMapS, tex0);
	normalT = 2.0f*normalT - 1.0f;
	normalT = normalize(normalT);

	float s = max(dot(-lightDirT, normalT), 0.0f);

	float3 color = s*gDiffuseMtrl.rgb;

	return float4(color, 1.0);

	//return float4(1.0, 1.0, 1.0, 1.0);
}

technique PL_PPL_N_Tech
{
    pass P0
    {
        Sampler[0] = (NormalMapS);

        VertexShader = compile vs_2_0 PL_PPL_N_VS();
        PixelShader  = compile ps_2_0 PL_PPL_N_PS();
    }
}
Another problem I missed...you need to change this
outVS.lightDirT = mul(posW - gLightPos,float3x3(tangentW,binormalW,normalW));

to this:
outVS.lightDirT = mul(float3x3(tangentW,binormalW,normalW), posW - gLightPos);

By the way this is how I am loading and rendering the effect.

Loading:
    // Load the effect and cache handles to its technique and parameters.
    m_pEffect = m_pResManager->GetXEffect(_T("PL_PPL_N.fx"));
    m_hTech = m_pEffect->GetInterface()->GetTechniqueByName("PL_PPL_N_Tech");
    m_hWorld = m_pEffect->GetInterface()->GetParameterByName(0, "gWorld");
    m_hWorldViewProj = m_pEffect->GetInterface()->GetParameterByName(0, "gWVP");
    m_hLightPos = m_pEffect->GetInterface()->GetParameterByName(0, "gLightPos");
    m_hDiffuseMtrl = m_pEffect->GetInterface()->GetParameterByName(0, "gDiffuseMtrl"); 
    m_hNormalMap = m_pEffect->GetInterface()->GetParameterByName(0, "gNormalMap");
    // Normal-map texture and the test mesh; the GEN_* flags request
    // binormal/tangent generation at load time.
    m_pNormalMap = m_pResManager->GetTexture(_T(".\\data\\relief_tile1.tga"));
    m_pTestMesh = m_pResManager->GetXMesh(CXMesh::formatResStr(_T(".\\data\\bump.x"),
                                                               1.0,FALSE,
                                                               GEN_BINORMAL|GEN_TANGENT).c_str());


Rendering:
    // Gather per-frame state: camera matrices, object transform, first light.
    CCameraNode3D* pCam = m_pSceneManager->GetCurCamera();
    MATRIX matView = pCam->GetViewMatrix();
    MATRIX matProj = pCam->GetProjMatrix();
    MATRIX matWorld = m_WorldObjectTransform;
    VEC3 eyePos = pCam->GetEyePt();  // NOTE(review): fetched but never passed to the effect
    CLightNode3D* pLight = m_pSceneManager->GetLight(0);
    D3DLIGHT9* pLightInter = pLight->GetInterface();
    MATRIX wvp;
    wvp = matWorld * matView * matProj;
    // Bind the technique and set shader constants.
    m_pEffect->GetInterface()->SetTechnique(m_hTech);
    m_pEffect->GetInterface()->SetMatrix(m_hWorld,&matWorld);
    m_pEffect->GetInterface()->SetMatrix(m_hWorldViewProj,&wvp);
    // The light position comes straight from the D3DLIGHT9, i.e. world space.
    m_pEffect->GetInterface()->SetValue(m_hLightPos,&(pLightInter->Position),sizeof(D3DXVECTOR3));    
    m_pEffect->GetInterface()->SetValue(m_hDiffuseMtrl,&VEC4(1.0,1.0,1.0,1.0),sizeof(D3DXVECTOR4));
    m_pEffect->GetInterface()->SetTexture(m_hNormalMap,m_pNormalMap->GetInterface());
    // Draw the single subset inside the effect's Begin/End bracket.
    UINT numPasses = 0;
    m_pEffect->GetInterface()->Begin(&numPasses, 0);
    m_pEffect->GetInterface()->BeginPass(0);
    m_pTestMesh->GetDeviceMesh()->DrawSubset(0);   
    m_pEffect->GetInterface()->EndPass();
    m_pEffect->GetInterface()->End();


The light still seems to be coming 90 degrees off centered. I also added a normalization to the lightDirT in the pixel shader.

Here is the latest code:

// Latest version of the shader: the tangent-space transform now uses
// mul(matrix, vector) -- i.e. each output component is the dot of the world
// vector with a tangent-frame row -- and the interpolated light vector is
// renormalized per pixel. NOTE(review): with this code internally
// consistent, a remaining suspect for the wrong lighting direction is the
// tangent frame itself (handedness / V-axis direction produced by
// D3DXComputeTangentFrame) -- TODO confirm against the mesh data.
uniform extern float4x4 gWorld : WORLD       <string Source = "Scene";>;
uniform extern float4x4 gWVP : WORLDVIEWPROJ <string Source = "Scene";>;
uniform extern float3 gLightPos : LIGHTPOS0    <string Source = "Scene";>;
uniform extern float4 gDiffuseMtrl : DIFFUSE   <string Source = "Material";>;
uniform extern texture gNormalMap : NORMMAP0 <string Source = "Material";>;

sampler NormalMapS = sampler_state
{
	Texture = <gNormalMap>;
	MinFilter = ANISOTROPIC;
	MaxAnisotropy = 8;
	MagFilter = LINEAR;
	MipFilter = LINEAR;
	AddressU  = WRAP;
	AddressV  = WRAP;
};

struct InputVS
{
	float3 posL : POSITION;
	float3 tangent : TANGENT;
	float3 binormal : BINORMAL;
	float3 normalL : NORMAL;
	float2 tex0 : TEXCOORD;
};

struct OutputVS
{
    float4 posH      : POSITION0;
    float3 lightDirT : TEXCOORD0;  // light-to-surface vector in tangent space
    float2 tex0      : TEXCOORD1;
};

OutputVS PL_PPL_N_VS(InputVS inVS)
{
	OutputVS outVS = (OutputVS)0;

	// Rotate the tangent frame into world space (3x3 is fine for directions).
	float3 normalW = mul(inVS.normalL,(float3x3)gWorld);
	float3 tangentW = mul(inVS.tangent,(float3x3)gWorld);
	float3 binormalW = mul(inVS.binormal,(float3x3)gWorld);//cross(tangentW,normalW);

	outVS.posH      = mul(float4(inVS.posL, 1.0f), gWVP);
	// Full 4x4 transform so translation is applied: true world position.
	float3 posW     = mul(float4(inVS.posL, 1.0f), gWorld).xyz;
	// World-space light-to-surface vector projected onto the T/B/N rows.
	outVS.lightDirT = mul(float3x3(tangentW,binormalW,normalW), posW - gLightPos);
	outVS.tex0      = inVS.tex0;

    return outVS;
}

float4 PL_PPL_N_PS(float3 lightDirT : TEXCOORD0,
		           float2 tex0      : TEXCOORD1) : COLOR
{
	// Sample and decode the tangent-space normal from [0,1] into [-1,1].
	// (tex2D returns float4; assigning to float3 truncates the last channel.)
	float3 normalT = tex2D(NormalMapS, tex0);
	normalT = 2.0f*normalT - 1.0f;
	normalT = normalize(normalT);

	// Renormalize the interpolated light vector; it points toward the
	// surface, hence the negation in the Lambertian term.
	lightDirT = normalize(lightDirT);
	float s = max(dot(-lightDirT, normalT), 0.0f);

	float3 color = s*gDiffuseMtrl.rgb;

	return float4(color, 1.0);

	//return float4(1.0, 1.0, 1.0, 1.0);
}

technique PL_PPL_N_Tech
{
    pass P0
    {
        Sampler[0] = (NormalMapS);

        VertexShader = compile vs_2_0 PL_PPL_N_VS();
        PixelShader  = compile ps_2_0 PL_PPL_N_PS();
    }
}
Could there be something possibly wrong with the tangent space generation that could be causing the problem? The light still seems to be coming from the wrong direction.

Does anyone have experience with HLSL debuggers? I have been trying PIX, but it doesn't seem to let me choose which variable to watch (e.g. the VS inputs). Is there any way to use VS2005 to debug into the shader code anymore?
I am trying to find a working sample of diffuse lit normal mapping, and have tried the MS parallax occlusion sample (which has a pure normal mapping mode). I have noticed that in this example dependent on what angle you set the object at you will get incorrect lighting direction. Has anyone else noticed this in the sample? Does anyone have a sample of normal mapping that is valid for all light directions and model orientations?

Here is an example of what I am talking about in the ms sample (the light is coming from the right but the object is lit as if its coming from the left):

http://img179.imageshack.us/my.php?image=msbumpprob2xa9.jpg

and here is the code they use for the normal mapping:
// Microsoft parallax-occlusion-mapping sample code (normal mapping path),
// quoted for reference. Review notes flag two suspicious spots inline.
struct VS_OUTPUT
{
    float4 position          : POSITION;
    float2 texCoord          : TEXCOORD0;
    float3 vLightTS          : TEXCOORD1;   // light vector in tangent space, denormalized
    float3 vViewTS           : TEXCOORD2;   // view vector in tangent space, denormalized
    float2 vParallaxOffsetTS : TEXCOORD3;   // Parallax offset vector in tangent space
    float3 vNormalWS         : TEXCOORD4;   // Normal vector in world space
    float3 vViewWS           : TEXCOORD5;   // View vector in world space
};

VS_OUTPUT RenderSceneVS( float4 inPositionOS  : POSITION,
                         float2 inTexCoord    : TEXCOORD0,
                         float3 vInNormalOS   : NORMAL,
                         float3 vInBinormalOS : BINORMAL,
                         float3 vInTangentOS  : TANGENT )
{
    VS_OUTPUT Out;

    // Transform and output input position
    Out.position = mul( inPositionOS, g_mWorldViewProjection );

    // Propagate texture coordinate through:
    Out.texCoord = inTexCoord * g_fBaseTextureRepeat;

    // Transform the normal, tangent and binormal vectors from object space to homogeneous projection space:
    float3 vNormalWS   = mul( vInNormalOS,   (float3x3) g_mWorld );
    float3 vTangentWS  = mul( vInTangentOS,  (float3x3) g_mWorld );
    float3 vBinormalWS = mul( vInBinormalOS, (float3x3) g_mWorld );

    // Propagate the world space vertex normal through:
    Out.vNormalWS = vNormalWS;

    vNormalWS   = normalize( vNormalWS );
    vTangentWS  = normalize( vTangentWS );
    vBinormalWS = normalize( vBinormalWS );

    // Compute position in world space:
    float4 vPositionWS = mul( inPositionOS, g_mWorld );

    // Compute and output the world view vector (unnormalized):
    float3 vViewWS = g_vEye - vPositionWS;
    Out.vViewWS = vViewWS;

    // Compute denormalized light vector in world space:
    float3 vLightWS = g_LightDir;

    // Normalize the light and view vectors and transform it to the tangent space:
    float3x3 mWorldToTangent = float3x3( vTangentWS, vBinormalWS, vNormalWS );

    // Propagate the view and the light vectors (in tangent space):
    // NOTE(review): these two lines use OPPOSITE mul argument orders with the
    // same matrix, so one of them is effectively multiplied by the transpose.
    // That inconsistency is a plausible cause of the mirrored lighting seen
    // with this sample -- confirm which convention is intended.
    Out.vLightTS = mul( vLightWS, mWorldToTangent );
    Out.vViewTS  = mul( mWorldToTangent, vViewWS  );

    // Compute the ray direction for intersecting the height field profile with 
    // current view ray. See the above paper for derivation of this computation.

    // Compute initial parallax displacement direction:
    float2 vParallaxDirection = normalize(  Out.vViewTS.xy );

    // The length of this vector determines the furthest amount of displacement:
    float fLength         = length( Out.vViewTS );
    float fParallaxLength = sqrt( fLength * fLength - Out.vViewTS.z * Out.vViewTS.z ) / Out.vViewTS.z;

    // Compute the actual reverse parallax displacement vector:
    Out.vParallaxOffsetTS = vParallaxDirection * fParallaxLength;

    // Need to scale the amount of displacement to account for different height ranges
    // in height maps. This is controlled by an artist-editable parameter:
    Out.vParallaxOffsetTS *= g_fHeightMapScale;

    return Out;
}

float4 ComputeIllumination( float2 texCoord, float3 vLightTS, float3 vViewTS, float fOcclusionShadow )
{
   // Sample the normal from the normal map for the given texture sample:
   float3 vNormalTS = normalize( tex2D( tNormalHeightMap, texCoord ) * 2 - 1 );

   // Sample base map:
   float4 cBaseColor = tex2D( tBase, texCoord );

   // Compute diffuse color component:
   // NOTE(review): the tangent-space light vector's y component is negated
   // here -- presumably to compensate for the texture-space V axis direction;
   // verify this matches the tangent frame generated for the mesh in use.
   float3 vLightTSAdj = float3( vLightTS.x, -vLightTS.y, vLightTS.z );

   float4 cDiffuse = saturate( dot( vNormalTS, vLightTSAdj )) * g_materialDiffuseColor;

   // Compute the specular component if desired:  
   float4 cSpecular = 0;
   if ( g_bAddSpecular )
   {
      float3 vReflectionTS = normalize( 2 * dot( vViewTS, vNormalTS ) * vNormalTS - vViewTS );

      float fRdotL = saturate( dot( vReflectionTS, vLightTSAdj ));
      cSpecular = saturate( pow( fRdotL, g_fSpecularExponent )) * g_materialSpecularColor;
   }

   // Composite the final color:
   float4 cFinalColor = (( g_materialAmbientColor + cDiffuse ) * cBaseColor + cSpecular ) * fOcclusionShadow; 

   return cFinalColor;
}

float4 RenderSceneBumpMapPS( PS_INPUT i ) : COLOR0
{
   //  Normalize the interpolated vectors:
   float3 vViewTS   = normalize( i.vViewTS  );
   float3 vLightTS  = normalize( i.vLightTS );

   float4 cResultColor = float4( 0, 0, 0, 1 );

   // Start the current sample located at the input texture coordinate, which would correspond
   // to computing a bump mapping result:
   float2 texSample = i.texCoord;

   // Compute resulting color for the pixel:
   cResultColor = ComputeIllumination( texSample, vLightTS, vViewTS, 1.0f );

   // If using HDR rendering, make sure to tonemap the resuld color prior to outputting it.
   // But since this example isn't doing that, we just output the computed result color here:
   return cResultColor;
}

Not sure why it's still not coming out right, at this point everything looks okay to me.

I don't know of any ready-to-build normal mapping samples for native d3d9...there's a normal mapping sample on the XNA Creator's Club website you could look at, if you want. It uses XNA of course but the HLSL is all the same.

I can post the normal mapping shader I use if you'd like. However it's a bit complicated since it's used with different instancing techniques, and it's part of a framework used by several different effects and techniques.

As far as PIX debugging, you don't even need to debug the shader to see the VS inputs and outputs. They'll be right there when you view the draw call, in the table. If you're going to be debugging shaders, make sure the shader/effect is compiled with the D3DXSHADER_DEBUG flag. You'll also probably want to compile without optimizations, so that you can step through the HLSL in the order of your code.
I am using PIX: I go into the debug-pixel option and am presented with a choice to debug the vertex shader (1 of 3) or the pixel shader. I go into one of the vertex shaders, but I don't see anything other than a steppable disassembly. How do I know where, say, the tangent coming from the app is located?

Do I need to compile the fx with the debug flag to even get that?

    // Captured vertex shader disassembly from PIX.
    // NOTE(review): only POSITION (v0) and TEXCOORD (v1) inputs are declared
    // -- no TANGENT/BINORMAL/NORMAL. If this is meant to be PL_PPL_N_VS, the
    // per-vertex tangent-frame inputs are not present (wrong shader selected,
    // or the tangent data was optimized/stripped away) -- worth confirming.
    vs_2_0
    def c8, 1, 0, 0, 0                   // constant used to build float4(pos, 1)
    dcl_position v0                      // input: vertex position
    dcl_texcoord v1                      // input: texture coordinate
    mad r0, v0.xyzx, c8.xxxy, c8.yyyx    // r0 = float4(v0.xyz, 1)
    dp4 oPos.x, r0, c0                   // clip-space position: r0 dotted with c0..c3
    dp4 oPos.y, r0, c1
    dp4 oPos.z, r0, c2
    dp4 oPos.w, r0, c3
    dp4 r1.x, r0, c4                     // r1 = r0 dotted with c4..c6 (presumably world pos)
    dp4 r1.y, r0, c5
    dp4 r1.z, r0, c6
    add r0.xyz, r1, -c7                  // r0 = r1 - c7 (presumably the light position)
    mov r1.x, c4.x                       // gather the .x column of c4..c6
    mov r1.y, c5.x
    mov r1.z, c6.x
    dp3 oT0.x, r1, r0
    mov r1.x, c4.z                       // NOTE(review): the .z column feeds oT0.y
    mov r1.y, c5.z                       // and the .y column feeds oT0.z below --
    mov r1.z, c6.z                       // check this ordering against the HLSL
    dp3 oT0.y, r1, r0
    mov r1.x, c4.y
    mov r1.y, c5.y
    mov r1.z, c6.y
    dp3 oT0.z, r1, r0
    mov oT1.xy, v1                       // pass the texcoord through

This topic is closed to new replies.

Advertisement