Ender1618

Normal mapping problems

Can anyone see anything obviously wrong with the following code? I am having some trouble with the point light's position being incorrect in relation to the object. I am generating the tangent frame for the mesh via D3DXComputeTangentFrame(pMesh,NULL). Is there a problem there? Any insight would be greatly appreciated.
uniform extern float4x4 gWorld : WORLD       <string Source = "Scene";>;
uniform extern float4x4 gWVP : WORLDVIEWPROJ <string Source = "Scene";>;

uniform extern float3 gLightPos : LIGHTPOS0    <string Source = "Scene";>;

uniform extern float4 gDiffuseMtrl : DIFFUSE   <string Source = "Material";>;
uniform extern texture gNormalMap : NORMMAP0 <string Source = "Material";>; 

sampler NormalMapS = sampler_state
{
	Texture = <gNormalMap>;
	MinFilter = ANISOTROPIC;
	MaxAnisotropy = 8;
	MagFilter = LINEAR;
	MipFilter = LINEAR;
	AddressU  = WRAP;
        AddressV  = WRAP;
};

struct InputVS
{
	float3 posL : POSITION;
	float3 tangent : TANGENT;
	float3 binormal : BINORMAL;
	float3 normalL : NORMAL;
	float2 tex0 : TEXCOORD;
};

struct OutputVS
{
    float4 posH      : POSITION0;
    float3 posW      : TEXCOORD0;
    float3 lightT    : TEXCOORD1;
    float2 tex0      : TEXCOORD2;
};

OutputVS PL_PPL_N_VS(InputVS inVS)
{
	OutputVS outVS = (OutputVS)0;
	
	float3 normalW = mul(inVS.normalL,(float3x3)gWorld);
	float3 tangentW = mul(inVS.tangent,(float3x3)gWorld);
	float3 binormalW = mul(inVS.binormal,(float3x3)gWorld);//cross(tangentW,normalW);
	
	outVS.posH   = mul(float4(inVS.posL, 1.0f), gWVP);
	outVS.posW   = mul(inVS.posL,(float3x3)gWorld);
	outVS.lightT = mul(gLightPos,float3x3(tangentW,binormalW,normalW));
	outVS.tex0   = inVS.tex0;	
	
    return outVS;
}


float4 PL_PPL_N_PS(float3 posW      : TEXCOORD0,
                    float3 lightT    : TEXCOORD1,
		    float2 tex0      : TEXCOORD2) : COLOR
{
	float3 normalT = tex2D(NormalMapS, tex0);
	
        normalT = 2.0f*normalT - 1.0f;
    
	normalT = normalize(normalT);
	
	float3 lightDirT = normalize(posW - lightT);

	float s = max(dot(-lightDirT, normalT), 0.0f);
	
	float3 color = s*gDiffuseMtrl.rgb;
	
	return float4(color, 1.0);
	
	//return float4(1.0, 1.0, 1.0, 1.0);
}   


technique PL_PPL_N_Tech
{
    pass P0
    {		
        Sampler[0] = (NormalMapS);		

        VertexShader = compile vs_2_0 PL_PPL_N_VS();
        PixelShader  = compile ps_2_0 PL_PPL_N_PS();
    }
}

Yes, you have a few problems here. First off, you're not converting your vertex position to world space: by casting your world matrix to a 3x3, you're only rotating the position and not translating it. Second, in your pixel shader you're calculating your light direction by subtracting the light position in tangent space from the pixel position in sort-of world space. This doesn't work; they both need to be in the same coordinate space. Third, you don't need to calculate the light direction in the pixel shader. You can calculate it in the vertex shader and interpolate it.
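
To make the first point concrete, here is a minimal sketch of the difference, using the same names as your shader (the float3x3 cast throws away the translation part of gWorld):

// Rotation/scale only -- the translation in gWorld is discarded by the float3x3 cast,
// so this is NOT a world-space position:
float3 posRotatedOnly = mul(inVS.posL, (float3x3)gWorld);

// Full world transform -- promote the position to a float4 with w = 1 so the
// translation is applied, then keep the xyz part:
float3 posW = mul(float4(inVS.posL, 1.0f), gWorld).xyz;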

So let's rework your shader here to make some changes...


uniform extern float4x4 gWorld : WORLD <string Source = "Scene";>;
uniform extern float4x4 gWVP : WORLDVIEWPROJ <string Source = "Scene";>;
uniform extern float3 gLightPos : LIGHTPOS0 <string Source = "Scene";>;
uniform extern float4 gDiffuseMtrl : DIFFUSE <string Source = "Material";>;
uniform extern texture gNormalMap : NORMMAP0 <string Source = "Material";>;

sampler NormalMapS = sampler_state
{
    Texture = <gNormalMap>;
    MinFilter = ANISOTROPIC;
    MaxAnisotropy = 8;
    MagFilter = LINEAR;
    MipFilter = LINEAR;
    AddressU  = WRAP;
    AddressV  = WRAP;
};

struct InputVS
{
    float3 posL : POSITION;
    float3 tangent : TANGENT;
    float3 binormal : BINORMAL;
    float3 normalL : NORMAL;
    float2 tex0 : TEXCOORD;
};

struct OutputVS
{
    float4 posH : POSITION0;
    float3 lightDirT : TEXCOORD0;
    float2 tex0 : TEXCOORD1;
};

OutputVS PL_PPL_N_VS(InputVS inVS)
{
    OutputVS outVS = (OutputVS)0;

    float3 normalW = mul(inVS.normalL, (float3x3)gWorld);
    float3 tangentW = mul(inVS.tangent, (float3x3)gWorld);
    float3 binormalW = mul(inVS.binormal, (float3x3)gWorld);//cross(tangentW,normalW);

    outVS.posH = mul(float4(inVS.posL, 1.0f), gWVP);
    float3 posW = mul(float4(inVS.posL, 1.0f), gWorld).xyz;
    outVS.lightDirT = mul(posW - gLightPos, float3x3(tangentW, binormalW, normalW));
    outVS.tex0 = inVS.tex0;

    return outVS;
}

float4 PL_PPL_N_PS(float3 lightDirT : TEXCOORD0,
                   float2 tex0 : TEXCOORD1) : COLOR
{
    float3 normalT = tex2D(NormalMapS, tex0);

    normalT = 2.0f*normalT - 1.0f;

    normalT = normalize(normalT);

    float s = max(dot(-normalize(lightDirT), normalT), 0.0f);

    float3 color = s*gDiffuseMtrl.rgb;

    return float4(color, 1.0);
}

technique PL_PPL_N_Tech
{
    pass P0
    {
        Sampler[0] = (NormalMapS);

        VertexShader = compile vs_2_0 PL_PPL_N_VS();
        PixelShader  = compile ps_2_0 PL_PPL_N_PS();
    }
}


So I have changed the code to exactly this, and I am getting different relative light positioning problems:


uniform extern float4x4 gWorld : WORLD <string Source = "Scene";>;
uniform extern float4x4 gWVP : WORLDVIEWPROJ <string Source = "Scene";>;

uniform extern float3 gLightPos : LIGHTPOS0 <string Source = "Scene";>;

uniform extern float4 gDiffuseMtrl : DIFFUSE <string Source = "Material";>;
uniform extern texture gNormalMap : NORMMAP0 <string Source = "Material";>;

sampler NormalMapS = sampler_state
{
    Texture = <gNormalMap>;
    MinFilter = ANISOTROPIC;
    MaxAnisotropy = 8;
    MagFilter = LINEAR;
    MipFilter = LINEAR;
    AddressU  = WRAP;
    AddressV  = WRAP;
};

struct InputVS
{
    float3 posL : POSITION;
    float3 tangent : TANGENT;
    float3 binormal : BINORMAL;
    float3 normalL : NORMAL;
    float2 tex0 : TEXCOORD;
};

struct OutputVS
{
    float4 posH : POSITION0;
    float3 lightDirT : TEXCOORD0;
    float2 tex0 : TEXCOORD1;
};

OutputVS PL_PPL_N_VS(InputVS inVS)
{
    OutputVS outVS = (OutputVS)0;

    float3 normalW = mul(inVS.normalL, (float3x3)gWorld);
    float3 tangentW = mul(inVS.tangent, (float3x3)gWorld);
    float3 binormalW = mul(inVS.binormal, (float3x3)gWorld);//cross(tangentW,normalW);

    outVS.posH = mul(float4(inVS.posL, 1.0f), gWVP);
    float3 posW = mul(float4(inVS.posL, 1.0f), gWorld).xyz;
    outVS.lightDirT = mul(posW - gLightPos, float3x3(tangentW, binormalW, normalW));
    outVS.tex0 = inVS.tex0;

    return outVS;
}

float4 PL_PPL_N_PS(float3 lightDirT : TEXCOORD0,
                   float2 tex0 : TEXCOORD1) : COLOR
{
    float3 normalT = tex2D(NormalMapS, tex0);

    normalT = 2.0f*normalT - 1.0f;

    normalT = normalize(normalT);

    float s = max(dot(-lightDirT, normalT), 0.0f);

    float3 color = s*gDiffuseMtrl.rgb;

    return float4(color, 1.0);

    //return float4(1.0, 1.0, 1.0, 1.0);
}

technique PL_PPL_N_Tech
{
    pass P0
    {
        Sampler[0] = (NormalMapS);

        VertexShader = compile vs_2_0 PL_PPL_N_VS();
        PixelShader  = compile ps_2_0 PL_PPL_N_PS();
    }
}

By the way, this is how I am loading and rendering the effect.

Loading:

m_pEffect = m_pResManager->GetXEffect(_T("PL_PPL_N.fx"));

m_hTech = m_pEffect->GetInterface()->GetTechniqueByName("PL_PPL_N_Tech");

m_hWorld = m_pEffect->GetInterface()->GetParameterByName(0, "gWorld");
m_hWorldViewProj = m_pEffect->GetInterface()->GetParameterByName(0, "gWVP");
m_hLightPos = m_pEffect->GetInterface()->GetParameterByName(0, "gLightPos");
m_hDiffuseMtrl = m_pEffect->GetInterface()->GetParameterByName(0, "gDiffuseMtrl");
m_hNormalMap = m_pEffect->GetInterface()->GetParameterByName(0, "gNormalMap");

m_pNormalMap = m_pResManager->GetTexture(_T(".\\data\\relief_tile1.tga"));
m_pTestMesh = m_pResManager->GetXMesh(
    CXMesh::formatResStr(_T(".\\data\\bump.x"),
                         1.0, FALSE,
                         GEN_BINORMAL | GEN_TANGENT).c_str());



Rendering:

CCameraNode3D* pCam = m_pSceneManager->GetCurCamera();
MATRIX matView = pCam->GetViewMatrix();
MATRIX matProj = pCam->GetProjMatrix();
MATRIX matWorld = m_WorldObjectTransform;
VEC3 eyePos = pCam->GetEyePt();

CLightNode3D* pLight = m_pSceneManager->GetLight(0);
D3DLIGHT9* pLightInter = pLight->GetInterface();

MATRIX wvp;

wvp = matWorld * matView * matProj;

m_pEffect->GetInterface()->SetTechnique(m_hTech);

m_pEffect->GetInterface()->SetMatrix(m_hWorld,&matWorld);
m_pEffect->GetInterface()->SetMatrix(m_hWorldViewProj,&wvp);
m_pEffect->GetInterface()->SetValue(m_hLightPos,&(pLightInter->Position),sizeof(D3DXVECTOR3));

m_pEffect->GetInterface()->SetValue(m_hDiffuseMtrl,&VEC4(1.0,1.0,1.0,1.0),sizeof(D3DXVECTOR4));
m_pEffect->GetInterface()->SetTexture(m_hNormalMap,m_pNormalMap->GetInterface());

UINT numPasses = 0;
m_pEffect->GetInterface()->Begin(&numPasses, 0);
m_pEffect->GetInterface()->BeginPass(0);

m_pTestMesh->GetDeviceMesh()->DrawSubset(0);

m_pEffect->GetInterface()->EndPass();
m_pEffect->GetInterface()->End();



The light still seems to be coming from 90 degrees off center. I also added a normalization of lightDirT in the pixel shader.

Here is the latest code:


uniform extern float4x4 gWorld : WORLD <string Source = "Scene";>;
uniform extern float4x4 gWVP : WORLDVIEWPROJ <string Source = "Scene";>;

uniform extern float3 gLightPos : LIGHTPOS0 <string Source = "Scene";>;

uniform extern float4 gDiffuseMtrl : DIFFUSE <string Source = "Material";>;
uniform extern texture gNormalMap : NORMMAP0 <string Source = "Material";>;

sampler NormalMapS = sampler_state
{
    Texture = <gNormalMap>;
    MinFilter = ANISOTROPIC;
    MaxAnisotropy = 8;
    MagFilter = LINEAR;
    MipFilter = LINEAR;
    AddressU  = WRAP;
    AddressV  = WRAP;
};

struct InputVS
{
    float3 posL : POSITION;
    float3 tangent : TANGENT;
    float3 binormal : BINORMAL;
    float3 normalL : NORMAL;
    float2 tex0 : TEXCOORD;
};

struct OutputVS
{
    float4 posH : POSITION0;
    float3 lightDirT : TEXCOORD0;
    float2 tex0 : TEXCOORD1;
};

OutputVS PL_PPL_N_VS(InputVS inVS)
{
    OutputVS outVS = (OutputVS)0;

    float3 normalW = mul(inVS.normalL, (float3x3)gWorld);
    float3 tangentW = mul(inVS.tangent, (float3x3)gWorld);
    float3 binormalW = mul(inVS.binormal, (float3x3)gWorld);//cross(tangentW,normalW);

    outVS.posH = mul(float4(inVS.posL, 1.0f), gWVP);
    float3 posW = mul(float4(inVS.posL, 1.0f), gWorld).xyz;
    outVS.lightDirT = mul(float3x3(tangentW, binormalW, normalW), posW - gLightPos);
    outVS.tex0 = inVS.tex0;

    return outVS;
}

float4 PL_PPL_N_PS(float3 lightDirT : TEXCOORD0,
                   float2 tex0 : TEXCOORD1) : COLOR
{
    float3 normalT = tex2D(NormalMapS, tex0);

    normalT = 2.0f*normalT - 1.0f;

    normalT = normalize(normalT);
    lightDirT = normalize(lightDirT);

    float s = max(dot(-lightDirT, normalT), 0.0f);

    float3 color = s*gDiffuseMtrl.rgb;

    return float4(color, 1.0);

    //return float4(1.0, 1.0, 1.0, 1.0);
}

technique PL_PPL_N_Tech
{
    pass P0
    {
        Sampler[0] = (NormalMapS);

        VertexShader = compile vs_2_0 PL_PPL_N_VS();
        PixelShader  = compile ps_2_0 PL_PPL_N_PS();
    }
}

Could something be wrong with the tangent-space generation that is causing the problem? The light still seems to be coming from the wrong direction.

Does anyone have experience with HLSL debuggers? I have been trying PIX, but it doesn't seem to let me choose which variable to watch (e.g. the VS inputs). Is there any way to use VS2005 to debug into the shader code anymore?

I am trying to find a working sample of diffuse-lit normal mapping, and have tried the MS parallax occlusion sample (which has a pure normal mapping mode). I have noticed that in this sample, depending on what angle you set the object at, you get an incorrect lighting direction. Has anyone else noticed this? Does anyone have a sample of normal mapping that is valid for all light directions and model orientations?

Here is an example of what I am talking about in the MS sample (the light is coming from the right, but the object is lit as if it's coming from the left):

http://img179.imageshack.us/my.php?image=msbumpprob2xa9.jpg

and here is the code they use for the normal mapping:

struct VS_OUTPUT
{
    float4 position          : POSITION;
    float2 texCoord          : TEXCOORD0;
    float3 vLightTS          : TEXCOORD1;   // light vector in tangent space, denormalized
    float3 vViewTS           : TEXCOORD2;   // view vector in tangent space, denormalized
    float2 vParallaxOffsetTS : TEXCOORD3;   // Parallax offset vector in tangent space
    float3 vNormalWS         : TEXCOORD4;   // Normal vector in world space
    float3 vViewWS           : TEXCOORD5;   // View vector in world space
};

VS_OUTPUT RenderSceneVS( float4 inPositionOS  : POSITION,
                         float2 inTexCoord    : TEXCOORD0,
                         float3 vInNormalOS   : NORMAL,
                         float3 vInBinormalOS : BINORMAL,
                         float3 vInTangentOS  : TANGENT )
{
    VS_OUTPUT Out;

    // Transform and output input position
    Out.position = mul( inPositionOS, g_mWorldViewProjection );

    // Propagate texture coordinate through:
    Out.texCoord = inTexCoord * g_fBaseTextureRepeat;

    // Transform the normal, tangent and binormal vectors from object space to world space:
    float3 vNormalWS   = mul( vInNormalOS,   (float3x3) g_mWorld );
    float3 vTangentWS  = mul( vInTangentOS,  (float3x3) g_mWorld );
    float3 vBinormalWS = mul( vInBinormalOS, (float3x3) g_mWorld );

    // Propagate the world space vertex normal through:
    Out.vNormalWS = vNormalWS;

    vNormalWS   = normalize( vNormalWS );
    vTangentWS  = normalize( vTangentWS );
    vBinormalWS = normalize( vBinormalWS );

    // Compute position in world space:
    float4 vPositionWS = mul( inPositionOS, g_mWorld );

    // Compute and output the world view vector (unnormalized):
    float3 vViewWS = g_vEye - vPositionWS;
    Out.vViewWS = vViewWS;

    // Compute denormalized light vector in world space:
    float3 vLightWS = g_LightDir;

    // Normalize the light and view vectors and transform it to the tangent space:
    float3x3 mWorldToTangent = float3x3( vTangentWS, vBinormalWS, vNormalWS );

    // Propagate the view and the light vectors (in tangent space):
    Out.vLightTS = mul( vLightWS, mWorldToTangent );
    Out.vViewTS  = mul( mWorldToTangent, vViewWS );

    // Compute the ray direction for intersecting the height field profile with
    // current view ray. See the above paper for derivation of this computation.

    // Compute initial parallax displacement direction:
    float2 vParallaxDirection = normalize( Out.vViewTS.xy );

    // The length of this vector determines the furthest amount of displacement:
    float fLength = length( Out.vViewTS );
    float fParallaxLength = sqrt( fLength * fLength - Out.vViewTS.z * Out.vViewTS.z ) / Out.vViewTS.z;

    // Compute the actual reverse parallax displacement vector:
    Out.vParallaxOffsetTS = vParallaxDirection * fParallaxLength;

    // Need to scale the amount of displacement to account for different height ranges
    // in height maps. This is controlled by an artist-editable parameter:
    Out.vParallaxOffsetTS *= g_fHeightMapScale;

    return Out;
}

float4 ComputeIllumination( float2 texCoord, float3 vLightTS, float3 vViewTS, float fOcclusionShadow )
{
    // Sample the normal from the normal map for the given texture sample:
    float3 vNormalTS = normalize( tex2D( tNormalHeightMap, texCoord ) * 2 - 1 );

    // Sample base map:
    float4 cBaseColor = tex2D( tBase, texCoord );

    // Compute diffuse color component:
    float3 vLightTSAdj = float3( vLightTS.x, -vLightTS.y, vLightTS.z );

    float4 cDiffuse = saturate( dot( vNormalTS, vLightTSAdj )) * g_materialDiffuseColor;

    // Compute the specular component if desired:
    float4 cSpecular = 0;
    if ( g_bAddSpecular )
    {
        float3 vReflectionTS = normalize( 2 * dot( vViewTS, vNormalTS ) * vNormalTS - vViewTS );

        float fRdotL = saturate( dot( vReflectionTS, vLightTSAdj ));
        cSpecular = saturate( pow( fRdotL, g_fSpecularExponent )) * g_materialSpecularColor;
    }

    // Composite the final color:
    float4 cFinalColor = (( g_materialAmbientColor + cDiffuse ) * cBaseColor + cSpecular ) * fOcclusionShadow;

    return cFinalColor;
}

float4 RenderSceneBumpMapPS( PS_INPUT i ) : COLOR0
{
    // Normalize the interpolated vectors:
    float3 vViewTS  = normalize( i.vViewTS );
    float3 vLightTS = normalize( i.vLightTS );

    float4 cResultColor = float4( 0, 0, 0, 1 );

    // Start the current sample located at the input texture coordinate, which would correspond
    // to computing a bump mapping result:
    float2 texSample = i.texCoord;

    // Compute resulting color for the pixel:
    cResultColor = ComputeIllumination( texSample, vLightTS, vViewTS, 1.0f );

    // If using HDR rendering, make sure to tonemap the result color prior to outputting it.
    // But since this example isn't doing that, we just output the computed result color here:
    return cResultColor;
}


Not sure why it's still not coming out right; at this point everything looks okay to me.

I don't know of any ready-to-build normal mapping samples for native D3D9... there's a normal mapping sample on the XNA Creator's Club website you could look at, if you want. It uses XNA of course, but the HLSL is all the same.

I can post the normal mapping shader I use if you'd like. However, it's a bit complicated, since it's used with different instancing techniques and it's part of a framework used by several different effects and techniques.

As far as PIX debugging goes, you don't even need to debug the shader to see the VS inputs and outputs; they'll be right there in the table when you view the draw call. If you're going to be debugging shaders, make sure the shader/effect is compiled with the D3DXSHADER_DEBUG flag. You'll also probably want to compile without optimizations, so that you can step through the HLSL in the order of your code.
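
If you load the effect yourself through D3DX, something along these lines should work (just a sketch -- your resource manager presumably wraps the actual call, and pDevice here stands in for your D3D device):

// Compile with debug info and without optimizations so PIX can map the
// assembly back to the HLSL source and step it in source order.
ID3DXBuffer* pErrors = NULL;
ID3DXEffect* pEffect = NULL;
HRESULT hr = D3DXCreateEffectFromFile(
    pDevice, _T("PL_PPL_N.fx"),
    NULL, NULL,                                      // no macros, no include handler
    D3DXSHADER_DEBUG | D3DXSHADER_SKIPOPTIMIZATION,  // debug info + keep HLSL order
    NULL,                                            // no effect pool
    &pEffect, &pErrors);
if (FAILED(hr) && pErrors)
    OutputDebugStringA((const char*)pErrors->GetBufferPointer());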

I am using PIX: I go into the debug-pixel option and am presented with a choice to debug one of the vertex shaders (1 of 3) or the pixel shader. I go into one of the vertex shaders, but I don't see anything other than a steppable disassembly. How do I know where, say, the tangent coming from the app is located?

Do I need to compile the .fx with the debug flag to even get that?


vs_2_0
def c8, 1, 0, 0, 0
dcl_position v0
dcl_texcoord v1
mad r0, v0.xyzx, c8.xxxy, c8.yyyx
dp4 oPos.x, r0, c0
dp4 oPos.y, r0, c1
dp4 oPos.z, r0, c2
dp4 oPos.w, r0, c3
dp4 r1.x, r0, c4
dp4 r1.y, r0, c5
dp4 r1.z, r0, c6
add r0.xyz, r1, -c7
mov r1.x, c4.x
mov r1.y, c5.x
mov r1.z, c6.x
dp3 oT0.x, r1, r0
mov r1.x, c4.z
mov r1.y, c5.z
mov r1.z, c6.z
dp3 oT0.y, r1, r0
mov r1.x, c4.y
mov r1.y, c5.y
mov r1.z, c6.y
dp3 oT0.z, r1, r0
mov oT1.xy, v1

PIX won't be able to link the compiled shader to the HLSL unless it's compiled with debug info. So yeah, you have to use that flag if you want to debug the HLSL code. Also, like I said earlier, if you don't compile without optimizations then your instructions will be reordered during compilation.

Your vertex inputs are in the registers that start with a v. You see them declared right at the top, where it has dcl_position and dcl_texcoord. This means v0 is your input position and v1 is your input texcoord. For your outputs, the transformed vertex position is placed in oPos, while oT0 - oT7 are your TEXCOORD outputs.

However, something seems strange with your disassembly... your normal, binormal, and tangent don't get used at all. It looks like it's just transforming the light direction (posW - gLightPos) by the world matrix, which is in c4-c6 (constant registers are where your shader constants are stored, such as the matrices and the light position). I'm at work right now so I can't check it, but when I get home I'm going to try compiling your shader to see what disassembly I get.

Well I compiled your effect in fxc, and I got quite a different result. I'm not sure why it would be compiling to something different for you...
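
(For reference, an fxc command along these lines will dump an assembly listing of the effect -- exact flags can vary between SDK releases, so treat this as a sketch. /Zi adds debug info and /Od disables optimizations, which keeps the output in roughly the same order as the HLSL.)

fxc /T fx_2_0 /Zi /Od /Fc PL_PPL_N.asm PL_PPL_N.fx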


vs_2_0
def c8, 1, 0, 0, 0
dcl_position v0
dcl_tangent v1
dcl_binormal v2
dcl_normal v3
dcl_texcoord v4
mad r0, v0.xyzx, c8.xxxy, c8.yyyx
dp4 oPos.x, r0, c0
dp4 oPos.y, r0, c1
dp4 oPos.z, r0, c2
dp4 oPos.w, r0, c3
dp4 r1.x, r0, c4
dp4 r1.y, r0, c5
dp4 r1.z, r0, c6
add r0.xyz, r1, -c7
dp3 r1.x, v1, c4
dp3 r1.y, v1, c5
dp3 r1.z, v1, c6
dp3 oT0.x, r1, r0
dp3 r1.x, v2, c4
dp3 r1.y, v2, c5
dp3 r1.z, v2, c6
dp3 oT0.y, r1, r0
dp3 r1.x, v3, c4
dp3 r1.y, v3, c5
dp3 r1.z, v3, c6
dp3 oT0.z, r1, r0
mov oT1.xy, v4

Apparently I posted the wrong assembly (it was a modded version); my actual assembly looks like the one you posted.

For simplicity I am drawing a simple quad centered at the origin on the xz-plane, with the texture-address origin at V0, and using the SDK-supplied relief_tile1.jpg image for my normal map (the sphere/torus/cone/pyramid relief). Tangents and binormals are computed with D3DXComputeTangentFrame(pMesh,NULL).

Vertices laid out like so:

V0--V1
|.....|
|.....|
V2--V3

tri1: V0 V1 V2
tri2: V1 V3 V2

with axes:

z
^
|
+--> x


Using PIX I am finding some oddities...

All the vertices have TBNs like so:

tan: 0.00, 0.00, -1.00
bin: 1.00, 0.00, 0.00
nrm: 0.00, 1.00, 0.00

Which I found odd considering that, under the DXViewer app, it shows the tangent heading in the +x direction (along +u), not -z. If I hardcode my TBN values (which only works for this simple example) to be:

tan: 1.00, 0.00, 0.00
bin: 0.00, 0.00, 1.00
nrm: 0.00, 1.00, 0.00

My lighting finally seems correct.

Why would D3DXComputeTangentFrame behave this way? Could it be somehow exchanging the tangent for the binormal?

My vertex decl looks like this before I send it to D3DXComputeTangentFrame:

Elem[0]: Stream:0, Offset:0, Type:D3DDECLTYPE_FLOAT3, Method:0, Usage:D3DDECLUSAGE_POSITION, UsageInd:0
Elem[1]: Stream:0, Offset:12, Type:D3DDECLTYPE_FLOAT3, Method:0, Usage:D3DDECLUSAGE_NORMAL, UsageInd:0
Elem[2]: Stream:0, Offset:24, Type:D3DDECLTYPE_FLOAT2, Method:0, Usage:D3DDECLUSAGE_TEXCOORD, UsageInd:0
Elem[3]: Stream:0, Offset:32, Type:D3DDECLTYPE_FLOAT3, Method:0, Usage:D3DDECLUSAGE_BINORMAL, UsageInd:0
Elem[4]: Stream:0, Offset:44, Type:D3DDECLTYPE_FLOAT3, Method:0, Usage:D3DDECLUSAGE_TANGENT, UsageInd:0

Ugh, sorry, that was a typo; it is actually:

V0: texcoord: 0,0
V1: texcoord: 1,0
V2: texcoord: 0,1
V3: texcoord: 1,1

As you said, it seems to me that the tangent (generated by D3DXComputeTangentFrame) should be in the +x direction, and it isn't.

Okay, I think I figured out your problem. For some reason, D3DXComputeTangentFrame stores the U derivative in the BINORMAL and the V derivative in the TANGENT. So if you use D3DXComputeTangentFrameEx and specify the correct semantics, it should generate the tangent frame the way you want it. For the other parameters, you can just copy what's specified in the documentation.
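
A call along these lines should do it (an untested sketch -- the threshold values are the defaults implied by the plain D3DXComputeTangentFrame wrapper in the docs, so adjust them for your content if needed):

// Regenerate the tangent frame in place, explicitly asking for the U derivative
// in TANGENT and the V derivative in BINORMAL (the opposite of what the plain
// D3DXComputeTangentFrame call gave you).
HRESULT hr = D3DXComputeTangentFrameEx(
    pMesh,
    D3DDECLUSAGE_TEXCOORD, 0,     // texture coordinates to derive the frame from
    D3DDECLUSAGE_TANGENT,  0,     // U partial derivative -> TANGENT
    D3DDECLUSAGE_BINORMAL, 0,     // V partial derivative -> BINORMAL
    D3DDECLUSAGE_NORMAL,   0,     // normal semantic
    D3DXTANGENT_GENERATE_IN_PLACE,
    NULL,                         // adjacency (the plain wrapper passes NULL here too)
    -1.01f, -0.01f, -1.01f,       // partial-edge / singular-point / normal-edge thresholds
    NULL, NULL);                  // in-place, so no cloned mesh or vertex remap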
