• Advertisement
Sign in to follow this  

Getting Started with Vertex Shader Tweening

This topic is 2994 days old which is more than the 365 day threshold we allow for new replies. Please post a new topic.

If you intended to correct an error in the post then please contact us.

Recommended Posts

Hello, There seems to be a plethora of tutorials on vertex shaders, and articles on tweening with them, but no actual tutorials with full working code for both the calling C++ and the shader assembly itself (correct me if I'm wrong). All I want to do is simple linear interpolation via tweening. The following is what I have now, that works to render still meshes without lerping. Initialization:
	// Shader setup: assembles two vertex and two pixel shaders from text files,
	// builds the single-stream vertex declaration, creates render-target
	// textures, and configures fixed-function lighting/transform state.
	_TCHAR graphVSFile[2][16]={_T("meshVS.txt"),_T("meshVS.txt")};//I want to replace one of these with animVS.txt, described below.
	_TCHAR graphPSFile[2][16]={_T("meshPS.txt"),_T("textPS.txt")};
	DWORD dw;
	LPD3DXBUFFER buf;
	D3DXMATRIX matr;
	LPDIRECT3DVERTEXDECLARATION9 graphDecl;
	LPDIRECT3DVERTEXSHADER9 graphVS[2];
	LPDIRECT3DPIXELSHADER9 graphPS[2];
	LPDIRECT3DTEXTURE9 graphTex[4];
	// Stream 0 layout: float3 position @0, float3 normal @12, float2 uv @24
	// (32 bytes per vertex, matching the dcl_ statements in meshVS.txt).
	D3DVERTEXELEMENT9 ve[4]={
		{0,0,D3DDECLTYPE_FLOAT3,D3DDECLMETHOD_DEFAULT,D3DDECLUSAGE_POSITION,0},
		{0,12,D3DDECLTYPE_FLOAT3,D3DDECLMETHOD_DEFAULT,D3DDECLUSAGE_NORMAL,0},
		{0,24,D3DDECLTYPE_FLOAT2,D3DDECLMETHOD_DEFAULT,D3DDECLUSAGE_TEXCOORD,0},
		D3DDECL_END()
	};
	// Grey directional light shining down -Z, plus a plain white material.
	D3DLIGHT9 light={D3DLIGHT_DIRECTIONAL,{0.5f,0.5f,0.5f,0},{0,0,0,0},{0,0,0,0},{0,0,0},{0,0,-1},0,0,0,0,0,0,0};
	D3DMATERIAL9 mtrl={{1,1,1,1},{1,1,1,1},{0,0,0,0},{0,0,0,0},0};
	// NOTE(review): graphDecl is declared locally just above and never
	// initialized, so this null test reads an indeterminate pointer —
	// presumably the real code uses a zero-initialized global; confirm.
	if(!graphDecl){//initialize shader resources
		graphDevice->CreateVertexDeclaration(ve,&graphDecl);
		for(dw=0;dw<2;++dw){
			// NOTE(review): the HRESULT and the error-message buffer (last
			// parameter, here NULL) are discarded, so assembly failures are
			// silent — see later in the thread for the diagnosis.
			D3DXAssembleShaderFromFile(graphVSFile[dw],NULL,NULL,D3DXSHADER_DEBUG,&buf,NULL); //this FAILS with animVS.txt
			graphDevice->CreateVertexShader((DWORD*)buf->GetBufferPointer(),&graphVS[dw]);
			buf->Release();
		}
		for(dw=0;dw<2;++dw){
			D3DXAssembleShaderFromFile(graphPSFile[dw],NULL,NULL,D3DXSHADER_DEBUG,&buf,NULL);
			graphDevice->CreatePixelShader((DWORD*)buf->GetBufferPointer(),&graphPS[dw]);
			buf->Release();
		}
		D3DXCreateSphere(graphDevice,131072,8,8,&graphSph,NULL);
	}
	//create shader textures (render targets: two 512x512, two 32x32)
	D3DXCreateTexture(graphDevice,512,512,1,D3DUSAGE_RENDERTARGET,D3DFMT_X8R8G8B8,D3DPOOL_DEFAULT,&graphTex[0]);
	D3DXCreateTexture(graphDevice,512,512,1,D3DUSAGE_RENDERTARGET,D3DFMT_X8R8G8B8,D3DPOOL_DEFAULT,&graphTex[1]);
	D3DXCreateTexture(graphDevice,32,32,1,D3DUSAGE_RENDERTARGET,D3DFMT_X8R8G8B8,D3DPOOL_DEFAULT,&graphTex[2]);
	D3DXCreateTexture(graphDevice,32,32,1,D3DUSAGE_RENDERTARGET,D3DFMT_X8R8G8B8,D3DPOOL_DEFAULT,&graphTex[3]);
	//initialize fixed function pipeline
	D3DXMatrixOrthoLH(&matr,262144,262144,0,131072);
	graphDevice->SetTransform(D3DTS_PROJECTION,&matr);
	D3DXMatrixIdentity(&matr);
	graphDevice->SetTransform(D3DTS_VIEW,&matr);
	graphDevice->SetTransform(D3DTS_WORLD,&matr);
	graphDevice->SetRenderState(D3DRS_AMBIENT,0x7f7f7f);
	graphDevice->SetLight(0,&light);
	graphDevice->LightEnable(0,TRUE);
	graphDevice->SetMaterial(&mtrl);
	//prerender shader textures





And here is how I draw it:
	// Per-frame draw: uploads the model's world matrix as shader constants,
	// binds textures per sub-mesh, and issues one indexed draw per sub-mesh.
	int dw;
	FLOAT tween;
	INT m,m2;
	D3DXMATRIX *matr;
	//set view transformations of matr here
	//set m and m2 to the 1st and 2nd frames of animation here
	setMatrix(modelNum,matr);//this sets the transformations of the model to matr.
	// m4x4 in the shader expects row-register layout, hence the transpose
	// before uploading to constants c8-c11.
	D3DXMatrixTranspose(matr,matr);
	graphDevice->SetVertexShaderConstantF(8,(float*)matr,4);
	//My hypothesis would be to change the above to the following:
	//graphDevice->SetVertexShaderConstantF(0,D3DXVECTOR4(tween,1-tween,0,0),4);
	//graphDevice->SetVertexShaderConstantF(12,(float*)matr,4);
	// NOTE(review): in the commented hypothesis above, SetVertexShaderConstantF
	// takes a const float* (so the vector needs an address or a named local),
	// and a single D3DXVECTOR4 occupies 1 register, not 4 — confirm before use.
	// NOTE(review): taking the address of a temporary D3DXVECTOR4 below relies
	// on an MSVC extension; a named local would be portable C++.
	graphDevice->SetPixelShaderConstantF(2,(float*)&D3DXVECTOR4(0,0,-1,1),1);
	for(dw=0;dw<16;++dw)if(modelNumI[m][dw]){
		graphDevice->SetTexture(0,modelTex[mesh[m][dw].texture&0xf]);
		graphDevice->SetTexture(1,((DWORD)mesh[m][dw].ref>>1)?modelTex[31]:textrTex[0]);
		graphDevice->SetTexture(3,graphTex[(((DWORD)mesh[m][dw].ref&0x1)?0:2)+graphPass]);
		graphDevice->SetTexture(4,modelTex[(DWORD)mesh[m][dw].spec]);
		graphDevice->SetTexture(5,modelTex[(DWORD)mesh[m][dw].bump]);
		graphDevice->SetStreamSource(0,modelVB[m][dw],0,sizeof(MVERTEX));
		graphDevice->SetStreamSource(1,modelVB[m2][dw],0,sizeof(MVERTEX));//this is added now as the 2nd frame:
		graphDevice->SetIndices(modelIB[m][dw]);
		graphDevice->DrawIndexedPrimitive(D3DPT_TRIANGLELIST,0,0,modelNumV[m][dw],0,modelNumI[m][dw]);
	}
	}





My current vertex shader simply applies the matrix it's given and shoots the vertex straight out into oT0 and oT1:
//meshVS.txt
// Transforms position through world (c8-c11), view (c4-c7) and projection
// (c0-c3) matrices, passes uv through on oT0, and packs the world-view-space
// normal into [0,1] on oT1 for the pixel shader.
vs.2.0
dcl_position0 v0
dcl_normal0 v1
dcl_texcoord0 v2
def c14, -1.0f,0.0f,0.0f,0.5f

m4x4 r0,v0,c8				//multiply position (v0) by world matrix
m4x4 r1,r0,c4				//multiply position (r0) by view matrix
m4x4 oPos,r1,c0				//multiply position (r1) by projection matrix

mov oT0,v2					//copy texture coordinates (v2)

m3x4 r0,v1,c8				//multiply normal (v1) by world matrix
m3x4 r1,r0,c4				//multiply normal (r0) by view matrix
nrm r0,r1					//normalize normal (r1)

mad oT1,r0,c14.w,c14.w		//scale/bias normal (r0) from [-1,1] to [0,1] and output






So now I need it to do lerping. Here is the code from a PDF on the subject:
// animVS.txt
// NOTE(review): as quoted from the PDF this fragment is incomplete for
// D3DXAssembleShaderFromFile — the assembler requires a leading version
// statement (e.g. vs.2.0) and dcl_ declarations for every input register
// used, which is why assembly fails (diagnosed later in the thread).
// position frame 0 in v0
// normal frame 0 in v3
// position frame 1 in v14
// normal frame 1 in v15
// tween in c0.x
// 1 – tween in c0.y
// View/Projection matrix in c12-c15
// Compute the tweened position
mul r1,v0,c0.yyyy		// r1 = pos0 * (1 - tween)
mad r1,v14,c0.xxxx,r1		// r1 += pos1 * tween  ->  lerp(pos0, pos1, tween)
// Multiply by the view/projection and pass it along
m4x4 oPos,r1,c12





However, when I replace "meshVS.txt" with this in my code above, D3DXAssembleShaderFromFile() fails, and DirectX in debug mode doesn't say why. These 3 lines of code seem a little too simple to be true, even for simple tweening.
  1. Any ideas why it's failing to assemble? Does VC++ have a more thorough debugger that'll tell you?
  2. Does the rest of my code look like it'll work with the commented changes made?
I'm using Visual C++ Studio 2008 Express in Windows XP Professional on a Dell Inspiron e1505 laptop with 512MB RAM and its built-in i945g graphics card (which fails to do hardware vertex processing, btw, so this is with software emulation). Thanks.

Share this post


Link to post
Share on other sites
Advertisement
D3DXAssembleShaderFromFile has a parameter that lets you pass a pointer to an ID3DXBuffer interface. If there is an error, this pointer will be set to a buffer. You can then call GetBufferPointer to get a pointer to a string containing information on why it failed to assemble.

However I have to ask...why are you coding in shader assembly? Especially for something as simple as vertex tweening?

Share this post


Link to post
Share on other sites
Quote:
Original post by MJP
D3DXAssembleShaderFromFile has a parameter that lets you pass a pointer to an ID3DXBuffer interface. If there is an error, this pointer will be set to a buffer. You can then call GetBufferPointer to get a pointer to a string containing information on why it failed to assemble.

Aha. I checked for FAILED(D3DXAssembleShaderFromFile(bla bla)) and dumped the contents of the ppErrorMsg into a debug file. For the record, the assembler requires you to state the version at the top, and also to use dcl_ statements for each v# register. It now assembles fine like this:


vs.2.0
dcl_position0 v0 // position frame 0 in v0
dcl_normal0 v3 // normal frame 0 in v3
dcl_position1 v14 // position frame 1 in v14
dcl_normal1 v15 // normal frame 1 in v15
// tween in c0.x
// 1 – tween in c0.y
// View/Projection matrix in c12-c15
// Compute the tweened position
mul r1,v0,c0.yyyy // r1 = pos0 * (1 - tween)
mad r1,v14,c0.xxxx,r1 // r1 = pos0*(1-tween) + pos1*tween
// Multiply by the view/projection and pass it along
// NOTE(review): this shader writes only oPos — it never outputs oT0/oT1,
// so a pixel shader reading those (as described below) gets undefined input.
m4x4 oPos,r1,c12


Although the program runs, it now does not draw anything at all. My pixel shader uses oT0 and oT1 to do further manipulations, and my working VS above writes to those, so I'll do some more reading on this until I have everything down cold and can figure out what my old one was doing that this isn't.

Quote:
Original post by MJP
However I have to ask...why are you coding in shader assembly? Especially for something as simple as vertex tweening?

There was a thread here asking about it from several years back, and from what I remember, a shader is the most efficient way to do so. Is this not true? I suppose one could create a temporary vertex buffer, and fill it with the tweened vertices from each of the keyframes' buffers every frame for every object, but for some reason I had the impression that shaders are the new standard of efficiency for tweening.

Share this post


Link to post
Share on other sites
He means that you should make use of the effects framework, or at least HLSL shaders, instead of using shader ASM, which isn't even supported in more recent shader models.

Share this post


Link to post
Share on other sites
Quote:
Original post by bababooey
Quote:
Original post by MJP
However I have to ask...why are you coding in shader assembly? Especially for something as simple as vertex tweening?

There was a thread here asking about it from several years back, and from what I remember, a shader is the most efficient way to do so. Is this not true? I suppose one could create a temporary vertex buffer, and fill it with the tweened vertices from each of the keyframes' buffers every frame for every object, but for some reason I had the impression that shaders are the new standard of efficiency for tweening.


You can use vsa.exe and psa.exe in the DXSDK to compile your asm shaders on a command-line. Although what MJP is saying is that you shouldn't use ASM directly [ I personally stopped writing asm shaders after SM2.0 became the minimum compile spec ]...so you would write HLSL shaders now, and use fxc.exe to compile them from the command line. There are also programs like FX Composer, Rendermonkey, or Visual Studio w/plugins to make editing the HLSL shaders easier.

Share this post


Link to post
Share on other sites
Aha, I didn't realize HLSL was different from VSA. Do you know of any tutorials on simple tweening with it, or could you provide an example that would fit in my program above?

Share this post


Link to post
Share on other sites
Sure I'll take a stab ....

*Actually I was about to write the whole shader as an exercise - when it's already been typed up online by somebody else xD

Just ignore the wobble calculations ... and focus on the face tween lines.

http://steelskies.com/HLSL1.htm


///////////////////////////////////////////////////////////
// Wobbly HLSL:
// This is a pretty trivial shader. Diffuse lighting is
// calculated, and the model surface is offset by a sine
// wave travelling through it.
// It also tweens between two streams - in this case,
// morphing the head model from one expression to another.
///////////////////////////////////////////////////////////

//The annotation following the texture declaration allows
// my application code to automatically load & bind the texture.
texture diffuseTexture : Texture < string name = "face.tga"; >;
float4 tweenAmount;     // x = weight of stream 0, y = weight of stream 1
float4 light1;
float4 light2;
float time;
float4 eyePos;
float4x4 world : World;
float4x4 worldViewProj : WorldViewProjection;

sampler Sampler = sampler_state
{
Texture = (diffuseTexture);
MipFilter = LINEAR;
MinFilter = LINEAR;
MagFilter = LINEAR;
};


//Passed to the vertex shader from the pipeline
struct VS_INPUT
{
float4 pos1 : POSITION0;
float4 normal1 : NORMAL0;
float2 texCoord1 : TEXCOORD0;

float4 pos2 : POSITION1;
float4 normal2 : NORMAL1;
float2 texCoord2 : TEXCOORD1;
};

//VS output / PS input:
struct VS_OUTPUT
{
float4 pos : POSITION;
float4 color : COLOR0;
float2 texCoord : TEXCOORD0;
};

//PS output:
struct pixel
{
float4 color : COLOR;
};

//////////////////////////
// Handy functions:
//////////////////////////

// Tween the vertex position from the two streams,
float4 tweenPosition(float4 pos1, float4 pos2)
{
return tweenAmount.x*pos1 + tweenAmount.y*pos2;
}

// Tween the normal from the two streams.
float4 tweenNormal(float4 n1, float4 n2)
{
return (tweenAmount.x*n1 + tweenAmount.y*n2);
}

//Generate diffuse lighting from normal & the global light directions
float4 lightDiffuse(float4 normal)
{
return dot(normal, light1) + dot(normal, light2);
}

//////////////////////////
// Vertex Shaders:
//////////////////////////
VS_OUTPUT vertexShader(VS_INPUT IN)
{
VS_OUTPUT OUT;
float4 tweenedPos = tweenPosition(IN.pos1, IN.pos2);
OUT.pos = mul(tweenedPos, worldViewProj);

float4 normal = tweenNormal(IN.normal1, IN.normal2);
// FIX: the intrinsic is 'normalize' (not 'normalise') and the original
// line was missing a closing parenthesis.
float4 normalScreen = normalize(mul(normal, world));

//Wobble calculations:
// The surface is offset in the direction of the surface normal,
// using some arbitrary sine wave perturbations.
OUT.pos += normalScreen * 0.05f*sin(IN.pos1.y*20 + IN.pos1.z*15 + 4*time);
// This then affects the surface normal.
// FIX: VS_OUTPUT has no 'normal' member (the original wrote to OUT.normal,
// which does not compile); perturb the local normal instead so the
// diffuse-lighting calculation below sees the wobble.
// I don't think this calculation is technically
// correct, but it looks ok. *shrug*
normal += normalScreen * 0.05f*cos(IN.pos1.y*20 + IN.pos1.z*15 + 4*time);

// The diffuse color will get clamped from 0..1.
// This then gives some unpleasant saturation effects. To minimise this,
// we divide the color by 2 here, then multiply by 2 in the pixel shader.
OUT.color = lightDiffuse(normal) * 0.5f;

OUT.texCoord = IN.texCoord1;

return OUT;

}

//////////////////////////
// Pixel Shaders:
//////////////////////////
pixel pixelShader(VS_OUTPUT IN)
{
pixel OUT;
float4 texSample = tex2D( Sampler, IN.texCoord ) + 0.05f;
// Color is multiplied by two, since we halved it to
// avoid saturation in the vertex shader.
OUT.color.rgb = IN.color.rgb*texSample.rgb * 2.0f;
OUT.color.a = 1.0f;

return OUT;
}

//////////////////////////
// Techniques:
//////////////////////////
technique T0
{
// The annotation following the pass allows my application
// to separate the rendering process into stages, one for rendering to texture,
// one for overlaying on top of that texture, and so on.
pass P0 <string renderStage="pre";>
{
vertexshader = compile vs_1_1 vertexShader();
pixelshader = compile ps_1_1 pixelShader();

Lighting = False;
CullMode = None;
}
}




Share this post


Link to post
Share on other sites
Danny, thanks for the sample. I'm curious what SetVertexDeclaration() and SetShaderConstant*() preparations would be needed to use that.

Speaking of which, I found that my example from above isn't drawing because of a declaration issue. I have added lines for the 2nd position and normal in my declaration, but it complains when I add lines for them in the shader:


//these 3 remain from before
dcl_position0 v0 // stream 0: keyframe-A position
dcl_normal0 v1 // stream 0: keyframe-A normal
dcl_texcoord0 v2 // stream 0: texture coordinates
//I added these two, not changing anything else in the shader's code:
dcl_position1 v3 // stream 1: keyframe-B position (usage index 1)
dcl_normal1 v4 // stream 1: keyframe-B normal (usage index 1)



This is my new declaration:


LPDIRECT3DVERTEXDECLARATION9 graphDecl; //global
// Two-stream declaration for tweening: stream 0 carries keyframe A
// (position/normal/uv, usage index 0), stream 1 carries keyframe B
// (position/normal only, usage index 1) to match dcl_position1/dcl_normal1.
D3DVERTEXELEMENT9 ve[6]={
{0,0,D3DDECLTYPE_FLOAT3,D3DDECLMETHOD_DEFAULT,D3DDECLUSAGE_POSITION,0},
{0,12,D3DDECLTYPE_FLOAT3,D3DDECLMETHOD_DEFAULT,D3DDECLUSAGE_NORMAL,0},
{0,24,D3DDECLTYPE_FLOAT2,D3DDECLMETHOD_DEFAULT,D3DDECLUSAGE_TEXCOORD,0},
{1,0, D3DDECLTYPE_FLOAT3,D3DDECLMETHOD_DEFAULT,D3DDECLUSAGE_POSITION,1},//added
{1,12,D3DDECLTYPE_FLOAT3,D3DDECLMETHOD_DEFAULT,D3DDECLUSAGE_NORMAL,1},//added
D3DDECL_END()
};
// NOTE(review): creating the declaration is not enough — it must also be
// made current with SetVertexDeclaration() before drawing (this turned out
// to be the actual fix, as noted at the end of the thread).
graphDevice->CreateVertexDeclaration(ve,&graphDecl);




This is the debug output:


//after DLL loading lines
Direct3D9: :====> ENTER: DLLMAIN(0526e6e0): Process Attach: 00000a24, tid=00000fc0
Direct3D9: :====> EXIT: DLLMAIN(0526e6e0): Process Attach: 00000a24
Direct3D9: (INFO) :Direct3D9 Debug Runtime selected.
Direct3D9: (WARN) :driver set D3DDEVCAPS_TEXTURENONLOCALVIDMEM w/o DDCAPS2_NONLOCALVIDMEM:turning off D3DDEVCAPS_TEXTURENONLOCALVIDMEM
Direct3D9: (WARN) :driver set D3DDEVCAPS_TEXTURENONLOCALVIDMEM w/o DDCAPS2_NONLOCALVIDMEM:turning off D3DDEVCAPS_TEXTURENONLOCALVIDMEM
Direct3D9: (WARN) :driver set D3DDEVCAPS_TEXTURENONLOCALVIDMEM w/o DDCAPS2_NONLOCALVIDMEM:turning off D3DDEVCAPS_TEXTURENONLOCALVIDMEM
Direct3D9: (INFO) :======================= Hal SWVP device selected

Direct3D9: (INFO) :HalDevice Driver Style 8

Direct3D9: :DoneExclusiveMode
D3D9 Helper: Warning: Default value for D3DRS_POINTSIZE_MAX is 2.19902e+012f, not 1.44115e+017f. This is ok.
"cs.exe": "C:\WINDOWS\system32\D3DCompiler_42.dll" wurde geladen
"cs.exe": "C:\WINDOWS\system32\D3DCompiler_42.dll" entladen.
"cs.exe": "C:\WINDOWS\system32\D3DCompiler_42.dll" wurde geladen
"cs.exe": "C:\WINDOWS\system32\D3DCompiler_42.dll" entladen.
"cs.exe": "C:\WINDOWS\system32\D3DCompiler_42.dll" wurde geladen
"cs.exe": "C:\WINDOWS\system32\D3DCompiler_42.dll" entladen.
"cs.exe": "C:\WINDOWS\system32\D3DCompiler_42.dll" wurde geladen
"cs.exe": "C:\WINDOWS\system32\D3DCompiler_42.dll" entladen.
Direct3D9: (ERROR) :Vertex shader function usage (D3DDECLUSAGE_POSITION, 1) does not have corresponding usage in the current vertex declaration

Direct3D9: (INFO) :The vertex declaration is (Stream, Offset, Type, Method, Usage, UsageIndex):

Direct3D9: (INFO) :0, 0, D3DDECLTYPE_FLOAT3, D3DDECLMETHOD_DEFAULT, D3DDECLUSAGE_POSITION, 0

Direct3D9: (INFO) :0, 12, D3DDECLTYPE_FLOAT3, D3DDECLMETHOD_DEFAULT, D3DDECLUSAGE_NORMAL, 0

Direct3D9: (INFO) :0, 24, D3DDECLTYPE_FLOAT2, D3DDECLMETHOD_DEFAULT, D3DDECLUSAGE_TEXCOORD, 0

Eine Ausnahme (erste Chance) bei 0x7c812a6b in cs.exe: Microsoft C++-Ausnahme: long an Speicherposition 0x0012e5dc..
Direct3D9: (ERROR) :DrawIndexedPrimitive failed.

//the above is repeated ad infinitum.




I thought my declaration lines in ve[] above match the dcl_ statements pretty well, no?

Share this post


Link to post
Share on other sites
Solved this last issue: I wasn't explicitly calling SetVertexDeclaration() every frame.

Thanks, everyone, for your inputs.

Share this post


Link to post
Share on other sites
Sign in to follow this  

  • Advertisement