Khaos Dragon

creating a shadowing cubemap


There is not much documentation out there, but from what I've found I have derived the following code for creating a cube map texture and rendering to it around a point light position. Just wondering if it's close to being right.

creating the cubemap
device->CreateCubeTexture( 1024, 1, D3DUSAGE_RENDERTARGET, D3DFMT_R32F, 
                           D3DPOOL_DEFAULT, &cubeTexture, 0 );
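One check worth doing before the CreateCubeTexture call above: not every card can render to an R32F cube texture. A minimal sketch of verifying the format and falling back to R16F, assuming d3d is the IDirect3D9 interface and the adapter runs an X8R8G8B8 display mode:

// Verify R32F cube render-target support; fall back to R16F otherwise.
// Assumes 'd3d' (IDirect3D9*) and an X8R8G8B8 adapter format.
D3DFORMAT distanceFormat = D3DFMT_R32F;
if( FAILED( d3d->CheckDeviceFormat( D3DADAPTER_DEFAULT, D3DDEVTYPE_HAL,
                                    D3DFMT_X8R8G8B8, D3DUSAGE_RENDERTARGET,
                                    D3DRTYPE_CUBETEXTURE, D3DFMT_R32F ) ) )
{
    distanceFormat = D3DFMT_R16F;
}

The resulting format would then be passed to CreateCubeTexture in place of D3DFMT_R32F.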








rendering the depth environment around the light position to a cubemap
LPDIRECT3DSURFACE9 BackBuffer, ZBuffer;
LPDIRECT3DSURFACE9 cubeFace;
D3DXMATRIX  matProj;
D3DXMATRIX  matView;
D3DXVECTOR3 lightPos(  light.x, light.y, light.z );
D3DXVECTOR3 up      (  0.0f, 1.0f,  0.0f );
D3DXVECTOR3 upForTop(  0.0f, 0.0f,  1.0f );
D3DXVECTOR3 upForBot(  0.0f, 0.0f, -1.0f );

D3DXVECTOR3 negX    (  light.x-1.0f,  light.y,      light.z );
D3DXVECTOR3 posX    (  light.x+1.0f,  light.y,      light.z );
D3DXVECTOR3 negY    (  light.x,       light.y-1.0f, light.z);
D3DXVECTOR3 posY    (  light.x,       light.y+1.0f, light.z );
D3DXVECTOR3 negZ    (  light.x,       light.y,      light.z-1.0f );
D3DXVECTOR3 posZ    (  light.x,       light.y,      light.z+1.0f );

//saving old back and depth buffers
device->GetRenderTarget(0, &BackBuffer);
device->GetDepthStencilSurface(&ZBuffer);

//setup 90 degree field of view projection
D3DXMatrixPerspectiveFovRH(&matProj, D3DX_PI/2, 1.0f, 0.5f, 1000.0f);
device->SetTransform(D3DTS_PROJECTION, &matProj);

//render to all sides of cubemap
cubeTexture->GetCubeMapSurface( D3DCUBEMAP_FACE_NEGATIVE_X, 0, &cubeFace);
device->SetRenderTarget ( 0, cubeFace );
D3DXMatrixLookAtRH(&matView, lightPos, negX, up);
device->SetTransform( D3DTS_VIEW, &matView );
device->Clear(0,NULL,D3DCLEAR_ZBUFFER,D3DCOLOR_XRGB(0,0,0),1.0f,0);
drawScene();
cubeFace->Release();   //GetCubeMapSurface adds a reference, so release it

cubeTexture->GetCubeMapSurface( D3DCUBEMAP_FACE_POSITIVE_X, 0, &cubeFace);
device->SetRenderTarget ( 0, cubeFace );
D3DXMatrixLookAtRH(&matView, lightPos, posX, up);
device->SetTransform( D3DTS_VIEW, &matView );
device->Clear(0,NULL,D3DCLEAR_ZBUFFER,D3DCOLOR_XRGB(0,0,0),1.0f,0);
drawScene();
cubeFace->Release();

cubeTexture->GetCubeMapSurface( D3DCUBEMAP_FACE_NEGATIVE_Y, 0, &cubeFace);
device->SetRenderTarget ( 0, cubeFace );
D3DXMatrixLookAtRH(&matView, lightPos, negY, upForBot);
device->SetTransform( D3DTS_VIEW, &matView );
device->Clear(0,NULL,D3DCLEAR_ZBUFFER,D3DCOLOR_XRGB(0,0,0),1.0f,0);
drawScene();
cubeFace->Release();

cubeTexture->GetCubeMapSurface( D3DCUBEMAP_FACE_POSITIVE_Y, 0, &cubeFace);
device->SetRenderTarget ( 0, cubeFace );
D3DXMatrixLookAtRH(&matView, lightPos, posY, upForTop);
device->SetTransform( D3DTS_VIEW, &matView );
device->Clear(0,NULL,D3DCLEAR_ZBUFFER,D3DCOLOR_XRGB(0,0,0),1.0f,0);
drawScene();
cubeFace->Release();

cubeTexture->GetCubeMapSurface( D3DCUBEMAP_FACE_NEGATIVE_Z, 0, &cubeFace);
device->SetRenderTarget ( 0, cubeFace  );
D3DXMatrixLookAtRH(&matView, lightPos, negZ, up);
device->SetTransform( D3DTS_VIEW, &matView );
device->Clear(0,NULL,D3DCLEAR_ZBUFFER,D3DCOLOR_XRGB(0,0,0),1.0f,0);
drawScene();
cubeFace->Release();

cubeTexture->GetCubeMapSurface( D3DCUBEMAP_FACE_POSITIVE_Z, 0, &cubeFace);
device->SetRenderTarget ( 0, cubeFace );
D3DXMatrixLookAtRH(&matView, lightPos, posZ, up);
device->SetTransform( D3DTS_VIEW, &matView );
device->Clear(0,NULL,D3DCLEAR_ZBUFFER,D3DCOLOR_XRGB(0,0,0),1.0f,0);
drawScene();
cubeFace->Release();

//restore old buffers and release the saved references
device->SetRenderTarget( 0, BackBuffer );
device->SetDepthStencilSurface( ZBuffer );
BackBuffer->Release();
ZBuffer->Release();


shaders for filling the distance cubemap
////////VERTEX SHADER
float4x4 ModelViewProj : WORLDVIEWPROJ;
float4x4 ModelWorld      : WORLD;
float4   lightPos;

struct VS_INPUT
{
	float4 position  : POSITION;
};

struct VS_OUTPUT
{
	float4 hposition   : POSITION;
      float3 lightVec    : TEXCOORD0;	
};


VS_OUTPUT main( VS_INPUT IN )
{
    VS_OUTPUT OUT;

    OUT.hposition = mul( IN.position, ModelViewProj );

    //getting the position of the vertex in the world
    float4 posWorld = mul(IN.position, ModelWorld);

    //getting the vertex -> light vector
    OUT.lightVec = lightPos.xyz - posWorld.xyz;
       
    return OUT;
}

////////PIXEL SHADER
struct VS_OUTPUT
{
      float4 hposition   : POSITION;
      float3 lightVec    : TEXCOORD0;	
};

struct PS_OUTPUT
{
	float4 color : COLOR;
};



PS_OUTPUT main( VS_OUTPUT IN )
{
	PS_OUTPUT OUT;

      OUT.color = length( IN.lightVec );

	return OUT;
}
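Before drawing the main scene, the filled cubemap has to be bound as a texture for the scene pixel shader. A rough sketch of that step, assuming distanceCubemap ends up in sampler stage 2 (it is the third sampler declared below, but the actual register depends on how the shader is compiled), with point filtering since hardware filtering of the float distance values isn't useful here:

// Bind the distance cubemap for the scene pass (sampler stage 2 is an assumption).
device->SetTexture( 2, cubeTexture );
device->SetSamplerState( 2, D3DSAMP_MINFILTER, D3DTEXF_POINT );
device->SetSamplerState( 2, D3DSAMP_MAGFILTER, D3DTEXF_POINT );
device->SetSamplerState( 2, D3DSAMP_ADDRESSU,  D3DTADDRESS_CLAMP );
device->SetSamplerState( 2, D3DSAMP_ADDRESSV,  D3DTADDRESS_CLAMP );
device->SetSamplerState( 2, D3DSAMP_ADDRESSW,  D3DTADDRESS_CLAMP );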


For the scene shaders I will simply do something like:

lookedupDistance = texCUBE( cubemap, worldPosition - lightPosition );
if( lookedupDistance < actualDistanceFromVertexToLight )
{
    //shadowed
    color = black;
}
else
{
    color = diffuse + specular;
}

scene shaders

/////PIXEL SHADER
float4 diffuseColor;
float4 specularColor;

struct VS_OUTPUT
{
	float4 hposition   : POSITION;
	float2 texcoord0   : TEXCOORD0;
	float3 worldPos    : TEXCOORD1;
	float3 lightVec    : TEXCOORD2;
	float  att         : TEXCOORD3;
	float3 View        : TEXCOORD4;
	float3 distanceVec : TEXCOORD5;
};

struct PS_OUTPUT
{
	float4 color : COLOR;
};

sampler colorTexture;
sampler normalTexture;
samplerCUBE distanceCubemap;

PS_OUTPUT main( VS_OUTPUT IN )
{
	PS_OUTPUT OUT;

	float shadowTerm = 1.0f;
	float len = length( IN.distanceVec );
	//look up the stored distance along the light -> pixel direction
	if( texCUBE( distanceCubemap, IN.distanceVec ).r < len )
		shadowTerm = 0.0f;

	//fetch the surface color
	float4 color = tex2D(colorTexture, IN.texcoord0);

	//uncompress normal map
	float3 normal = 2.0f * tex2D(normalTexture, IN.texcoord0).rgb - 1.0f;

	float3 ViewDir = normalize(IN.View);

	//normalize the light vector
	float3 light = normalize(IN.lightVec);

	//diffuse term
	float diffuse = saturate(dot(normal, light));

	//calculate specular component
	float3 Reflect = normalize(2 * diffuse * normal - light);
	float4 specular = pow(diffuse * saturate(dot(Reflect, ViewDir)), 8) * specularColor;

	//multiply the attenuation and shadow term with the lighting
	OUT.color = IN.att * shadowTerm * (color * diffuse * diffuseColor + specular);
	return OUT;
}
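The post only shows the scene pixel shader; for reference, here is a hedged sketch of a matching vertex shader that fills in those interpolants. The eyePos and lightRange uniforms and the linear attenuation are assumptions, not from the original code, and distanceVec is taken to be the light-to-pixel vector used for the cubemap lookup. A real normal-mapped setup would also transform lightVec and View into tangent space, which is omitted here.

////////SCENE VERTEX SHADER (sketch)
float4x4 ModelViewProj : WORLDVIEWPROJ;
float4x4 ModelWorld    : WORLD;
float4   lightPos;
float4   eyePos;       // assumed camera position uniform
float    lightRange;   // assumed light range used for attenuation

struct VS_INPUT
{
    float4 position  : POSITION;
    float2 texcoord0 : TEXCOORD0;
};

struct VS_OUTPUT
{
    float4 hposition   : POSITION;
    float2 texcoord0   : TEXCOORD0;
    float3 worldPos    : TEXCOORD1;
    float3 lightVec    : TEXCOORD2;
    float  att         : TEXCOORD3;
    float3 View        : TEXCOORD4;
    float3 distanceVec : TEXCOORD5;
};

VS_OUTPUT main( VS_INPUT IN )
{
    VS_OUTPUT OUT;

    OUT.hposition = mul( IN.position, ModelViewProj );
    OUT.texcoord0 = IN.texcoord0;

    //position of the vertex in the world
    float4 posWorld = mul( IN.position, ModelWorld );
    OUT.worldPos    = posWorld.xyz;

    //vertex -> light and vertex -> eye vectors
    OUT.lightVec = lightPos.xyz - posWorld.xyz;
    OUT.View     = eyePos.xyz - posWorld.xyz;

    //light -> pixel vector, used to look up the distance cubemap
    OUT.distanceVec = posWorld.xyz - lightPos.xyz;

    //simple linear attenuation (assumption)
    OUT.att = saturate( 1.0f - length( OUT.lightVec ) / lightRange );

    return OUT;
}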

[Edited by - Khaos Dragon on July 24, 2005 11:21:55 PM]

Two things about that:

1) IIRC, you can't use the (0, 1, 0) up vector for the Y faces, because D3DXMatrixLookAt will produce a degenerate matrix. I forget what the proper up vector is supposed to be, sorry.

2) In regards to the depth buffer question you had on IRC (I can only read the logs at work, so I couldn't reply), I can't really help you there. My solution was done with shaders from start to finish: I rendered into the cubemap with an R32F texture and used a shader to save the distance from the light. The benefit of this was that I could later use another shader for the shadowing, meaning I could do the far better-looking PCF instead of the comparatively ugly hardware filtering. Also, skewbe mapping is extremely dependent on shaders, so if you want to upgrade to that later, it'd be better to start with a fully shader-driven system for the shadow mapping.
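For reference, the shader-side filtering described above could look something like the sketch below. The four offset directions and their scale are arbitrary assumptions; distanceVec is the light-to-pixel vector used to look up the cubemap, and a small depth bias would normally be subtracted from the comparison distance to avoid self-shadowing artifacts.

// Hedged sketch of percentage-closer filtering against a distance cubemap.
float shadowPCF( samplerCUBE distanceCubemap, float3 distanceVec )
{
    float len = length( distanceVec );

    //arbitrary offset directions used to take neighbouring samples
    float3 offsets[4] =
    {
        float3(  0.01f,  0.01f,  0.01f ),
        float3( -0.01f,  0.01f, -0.01f ),
        float3(  0.01f, -0.01f, -0.01f ),
        float3( -0.01f, -0.01f,  0.01f )
    };

    float lit = 0.0f;
    for( int i = 0; i < 4; ++i )
    {
        float stored = texCUBE( distanceCubemap, distanceVec + offsets[i] * len ).r;
        lit += ( stored < len ) ? 0.0f : 1.0f;
    }

    //0 = fully shadowed, 1 = fully lit, fractions give soft edges
    return lit / 4.0f;
}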

Edit: Oh, and yes, 1024 is a good resolution to start at for higher-end cards, but not for your FX5200. For that one, I'd start much lower, like 512.

I guess you should use another up-vector when rendering the negY and posY views, just as Cypher19 stated. Your up-vector is parallel to negY and posY, so this won't work... you should be able to use the z-axis (0.0, 0.0, 1.0) as the up-vector for those two views, though. Just my two cents, don't know anything more about that :)

Quote:
Original post by matches81
I guess you should use another up-vector when rendering the negY and posY views, just as Cypher19 stated. Your up-vector is parallel to negY and posY, so this won't work... you should be able to use the z-axis (0.0, 0.0, 1.0) as the up-vector for those two views, though. Just my two cents, don't know anything more about that :)


From rotating with my hands, it seems that in my right-handed coordinate system the up vector should be (0, 0, -1) for the bottom face and (0, 0, 1) for the top face.

Edit: source omitted because I fixed it in the topmost post.

[Edited by - Khaos Dragon on July 22, 2005 2:59:56 PM]

Oh, I just noticed something. You need to add the light's location to every lookat vector (negX, posX, etc.)

Quote:
Original post by Cypher19
Oh, I just noticed something. You need to add the light's location to every lookat vector (negX, posX, etc.)


Good call, I'll fix that in the topmost post. Seems like all the trivial logical errors have been cleaned up, but I'm still not sure if this is the correct way of creating and rendering to a depth cube map.

From a logical standpoint, it is. But as I said earlier, I would strongly recommend using an R32F colour texture to store each shadow-map texel's distance from the light, because you can then use those values in another shader for far better filtering than the hardware can do in fixed function.

Edit: Actually, looking back on it, an R16F texture would be just as good. I just automatically go for R32F out of habit :-P

OK, I changed the format to the one you described. So if I understand correctly, I should be creating and rendering to the cubemap the same way you are, Cypher (if we forget about skewbe mapping)?

I want to get it to the point where all I have to worry about is the actual depth comparison, which it seems should mostly just be some shader math.

Yeah, it looks good except for one detail I just forgot: you'll still want a depth buffer target so that proper z-order is maintained. You only need one, which you can clear on each pass.
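In D3D9 terms that would mean creating one depth-stencil surface matching the cube face size and setting it alongside each face render target. A minimal sketch, where the D24S8 format and the no-multisampling settings are assumptions:

// One depth-stencil surface shared by all six cube face passes.
LPDIRECT3DSURFACE9 cubeDepth = NULL;
device->CreateDepthStencilSurface( 1024, 1024, D3DFMT_D24S8,
                                   D3DMULTISAMPLE_NONE, 0, TRUE,
                                   &cubeDepth, NULL );

// ...then for each face:
device->SetRenderTarget( 0, cubeFace );
device->SetDepthStencilSurface( cubeDepth );
device->Clear( 0, NULL, D3DCLEAR_ZBUFFER, 0, 1.0f, 0 );
drawScene();

// ...and afterwards restore the saved surfaces:
device->SetRenderTarget( 0, BackBuffer );
device->SetDepthStencilSurface( ZBuffer );
cubeDepth->Release();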

Quote:
Original post by Cypher19
Yeah, it looks good except for one detail I just forgot: you'll still want a depth buffer target so that proper z-order is maintained. You only need one, which you can clear on each pass.


Hmm, am I not already setting a depth buffer target for each face when I call device->SetRenderTarget( cubeFace, ZBuffer )?
