CSM help (problem with cascades/shadowmap) [SOLVED]

Started by
7 comments, last by lipsryme 11 years, 3 months ago

Well so I've finally managed to get the correct projection working.
My problem now is that either the shadow map is not correct or the shader code is not sampling the cascades like it should.
From my observation it looks like it's only sampling from the first cascade and the rest is just black I guess.

I've captured a 30sec video showcasing it:

[embedded video removed]



Here's my most recent code:

CPU side calculation:

// Renders the cascaded shadow map (CSM) for the dominant directional light
// into a 2x2 shadow atlas, then runs a fullscreen pass that accumulates the
// screen-space shadow visibility term into the shadow accumulation target.
//
// fsq           - fullscreen quad drawn for the accumulation pass
// primitiveList - shadow-casting primitives
// modelList     - shadow-casting models
// pointSampler  - point sampler used to read the scene depth buffer
// lightManager  - supplies the dominant directional light and its shadow resources
//
// NOTE(review): the D3D11 state calls below are order-dependent; do not reorder.
void Shadows::RenderShadows(FSQ* &fsq,
							std::vector<Primitive*> primitiveList,
							std::vector<ModelObject*> modelList,
							ID3D11SamplerState* &pointSampler,
							LightManager* &lightManager)
{
	ID3D11DeviceContext* context = this->renderer->GetDeviceContext();
	ID3D11DepthStencilView* shadowMapDSV = lightManager->GetDominantDirectionalLight()->GetShadowMapDSV();
	// Clear the whole atlas to the far plane (depth = 1.0)
	context->ClearDepthStencilView(shadowMapDSV, D3D11_CLEAR_DEPTH | D3D11_CLEAR_STENCIL, 1.0f, 0);


	// Set RenderTarget: depth-only pass — unbind every color target and bind
	// only the shadow map's depth-stencil view
	ID3D11RenderTargetView* nullRenderTargets[D3D11_SIMULTANEOUS_RENDER_TARGET_COUNT] = { NULL };
	context->OMSetRenderTargets(D3D11_SIMULTANEOUS_RENDER_TARGET_COUNT, nullRenderTargets, shadowMapDSV);

	// Set DepthStencil to write enabled
	context->OMSetDepthStencilState(this->writeDepthStencilState, 0);


	// Set Shader: vertex shader only — a NULL pixel shader makes this a
	// depth-only draw
	context->VSSetShader(this->generateShadowMapShader->GetVS(), 0, 0);
	context->PSSetShader(NULL, 0, 0);


	// Set ColorWriteDisabled Blendstate (ensures no color writes even if a
	// target were bound)
	float blendFactor[4] = {1, 1, 1, 1};
	context->OMSetBlendState(this->ColorWriteDisabledBlendState, blendFactor, 0xFFFFFFFF);


	//*****************************************************
	// DIRECTIONAL LIGHT SOURCE (CASCADED SHADOW MAPPING)
	//*****************************************************
	DirectionalLight* dominantLight = lightManager->GetDominantDirectionalLight();

	// Cascade offsets: origin of each cascade's quadrant in [0,1] atlas UV
	// space (2x2 layout). Must stay in sync with the Offsets table used in
	// DirectionalLight::CalculateLightProjection.
	const XMFLOAT2 Offsets[4] = {
		XMFLOAT2(0.0f, 0.0f),
		XMFLOAT2(0.5f, 0.0f),
		XMFLOAT2(0.5f, 0.5f),
		XMFLOAT2(0.0f, 0.5f)
	};

	const float sMapSize = static_cast<float>(dominantLight->GetShadowMapSize());
	const unsigned int NumCascades = dominantLight->GetNumberCascades();

	// Render meshes to each cascade
	for(UINT cascadeIdx = 0; cascadeIdx < NumCascades; ++cascadeIdx)
	{
		// Set viewport: each cascade occupies an sMapSize x sMapSize quadrant
		// of the (2 * sMapSize)^2 atlas
		D3D11_VIEWPORT viewport;
		viewport.TopLeftX = Offsets[cascadeIdx].x * sMapSize * 2;
		viewport.TopLeftY = Offsets[cascadeIdx].y * sMapSize * 2;
		viewport.Width = static_cast<float>(sMapSize);
		viewport.Height = viewport.Width;
		viewport.MinDepth = 0.0f;
		viewport.MaxDepth = 1.0f;
		context->RSSetViewports(1, &viewport);

		// Compute the cascaded shadow transformations (light view-projection,
		// texel snapping, atlas-quadrant offset) for this cascade
		dominantLight->CalculateLightProjection(renderer->GetEngine()->GetCamera(), cascadeIdx);


		// Render Shadow Map
		dominantLight->RenderShadowMap(context, primitiveList, modelList);
	}




	// ---- Shadow accumulation (fullscreen) pass ----

	// Set input and output off
	ID3D11ShaderResourceView* NullResource = NULL;
	ID3D11RenderTargetView* NullRT = NULL;
	context->OMSetRenderTargets(1, &NullRT, NULL);

	// Set the blend state for opaque objects
	context->OMSetBlendState(0, 0, 0xFFFFFFFF);

	// Set DepthStencil to read only
	context->OMSetDepthStencilState(this->readOnlyDepthStencilState, 0);

	// Set back to the original full-screen viewport
	context->RSSetViewports(1, &this->renderer->graphicsDesc.viewport);

	// Clear Shadow Accumulation target with white (== fully lit)
	ID3D11RenderTargetView* shadowAccRTV = this->shadowAccTarget->GetRenderTargetView();
	context->ClearRenderTargetView(shadowAccRTV, D3DXCOLOR(1.0f, 1.0f, 1.0f, 1.0f));

	// Set Render Target
	context->OMSetRenderTargets(1, &shadowAccRTV, NULL);

	// Set Shader
	context->VSSetShader(this->accumulateShadowsShader->GetVS(), 0, 0);
	context->PSSetShader(this->accumulateShadowsShader->GetPS(), 0, 0);



	//*************************************************
	// DIRECTIONAL LIGHT SOURCE
	//*************************************************


	// Set Sampler & SRV: t0/s0 = shadow atlas + comparison sampler,
	// t1/s1 = scene depth + point sampler (matches the HLSL register bindings)
	ID3D11ShaderResourceView* shadowMapSRV = dominantLight->GetShadowMapSRV();
	ID3D11SamplerState* shadowMapSampler = dominantLight->GetShadowMapSampler();
	context->PSSetShaderResources(0, 1, &shadowMapSRV);
	context->PSSetSamplers(0, 1, &shadowMapSampler);
	
	ID3D11ShaderResourceView* depthTargetSRV = this->renderer->GetDepthBufferTarget();
	context->PSSetShaderResources(1, 1, &depthTargetSRV);
	context->PSSetSamplers(1, 1, &pointSampler);


	// Set constant buffer
	D3D11_MAPPED_SUBRESOURCE mappedResource;

	// Lock the constant buffer so it can be written to
	context->Map(cbShadowProjection, 0, D3D11_MAP_WRITE_DISCARD, 0, &mappedResource);

	// Get a pointer to the data in the constant buffer.
	cbShadowProjectionProperties* pTransformData = (cbShadowProjectionProperties*)mappedResource.pData;


	// Copy the matrices into the constant buffer
	XMVECTOR det;
	XMMATRIX view = this->renderer->GetEngine()->GetCamera()->GetViewMatrixNonTransposed();
	view = XMMatrixInverse(&det, view);
	XMMATRIX shadowMatrices[4];
	// NOTE(review): this loop assumes exactly 4 cascades even though
	// NumCascades is queried above — confirm GetNumberCascades() == 4
	for(int i = 0; i < 4; i++)
	{
		// Set Shadow matrices
		//-----------------------------
		shadowMatrices[i] = XMLoadFloat4x4(&dominantLight->GetShadowMatrix(i));

		// Premultiply inverse view with the light's view projection so the
		// shader can go straight from camera view space to shadow-map space
		// (transposed here for HLSL consumption)
		shadowMatrices[i] = XMMatrixMultiplyTranspose(view, shadowMatrices[i]);

		XMStoreFloat4x4(&pTransformData->ShadowMatrix[i], shadowMatrices[i]);

		// Set Cascade Splits (view-space far depth of each cascade)
		//----------------------
		pTransformData->cascadeSplits[i] = dominantLight->GetShadowCascadeSplit(i);
	}

	// Inverse camera projection, used by the VS to build the view-space ray
	XMMATRIX Projection = this->renderer->GetEngine()->GetCamera()->GetProjectionMatrixNonTransposed();
	Projection = XMMatrixInverse(&det, Projection);
	Projection = XMMatrixTranspose(Projection);
	XMStoreFloat4x4(&pTransformData->InverseProjection, Projection);

	pTransformData->shadowMapSize = XMFLOAT2(sMapSize, sMapSize);

	float nearClip = this->renderer->GetEngine()->GetCamera()->GetNearClip();
	float farClip = this->renderer->GetEngine()->GetCamera()->GetFarClip();

	// Terms for reconstructing linear view-space depth in the shader:
	// linearZ = ProjectionB / (depth - ProjectionA)
	pTransformData->ProjectionA = farClip / (farClip - nearClip);
	pTransformData->ProjectionB = (-farClip * nearClip) / (farClip - nearClip);


	// Unlock the constant buffer
	context->Unmap(this->cbShadowProjection, 0);


	// Set Constant buffer
	context->VSSetConstantBuffers(0, 1, &this->cbShadowProjection);
	context->PSSetConstantBuffers(0, 1, &this->cbShadowProjection);

	// Draw Fullscreen-Quad
	fsq->Draw(context);


	// Set input and output off (unbind SRVs so the depth/shadow targets can
	// later be bound as outputs again)
	context->PSSetShaderResources(0, 1, &NullResource);
	context->PSSetShaderResources(1, 1, &NullResource);
	context->OMSetRenderTargets(1, &NullRT, NULL);

}

// Builds the shadow view-projection and sampling matrices for one cascade of
// this directional light. A bounding sphere around the cascade's frustum
// slice keeps the orthographic projection's size rotation-invariant, and the
// projection origin is snapped to shadow-map texels to avoid edge shimmering.
//
// camera     - the scene camera whose view frustum is being partitioned
// cascadeIdx - index of the cascade (0..3) to compute
//
// Outputs (stored on the light):
//   this->ViewProjection              - matrix used to render the shadow map
//   this->ShadowMatrices[cascadeIdx]  - sampling matrix (includes UV
//                                       scale/bias, depth bias and the 2x2
//                                       atlas-quadrant offset)
//   this->cascadeSplits[cascadeIdx]   - view-space far depth of the cascade
//
// NOTE(review): the statement order below (snap offset added to the
// projection's translation row before rebuilding the view-projection) is
// essential for stable texel snapping; do not reorder.
void DirectionalLight::CalculateLightProjection(BaseCamera* camera, UINT &cascadeIdx)
{
	// Fraction of the camera's far range that receives shadows — tuning
	// constant, TODO confirm intended shadow distance
	static const float ShadowDist = 0.4f;
	// Extra adjustment to the shadow camera's pull-back distance (negative
	// shortens it) — tuning constant, TODO confirm
	static const float Backup = -1.0f;
	// Normalized split positions within the shadowed range
	static const float CascadeSplits[4] = { 0.125f, 0.25f, 0.5f, 1.0f };
	// Depth bias baked into the sampling matrix below
	static const float Bias = 0.005f;
	// Near clip of the orthographic shadow camera
	static const float nearClip = 1.0f;


	// Get the 8 points of the view frustum in world space
	// (start as NDC cube corners; unprojected by invViewProj below)
	XMVECTOR frustumCornerWS[8] = 
	{
		XMVectorSet(-1.0f,  1.0f,  0.0f,  1.0f),
		XMVectorSet( 1.0f,  1.0f,  0.0f,  1.0f),
		XMVectorSet( 1.0f, -1.0f,  0.0f,  1.0f),
		XMVectorSet(-1.0f, -1.0f,  0.0f,  1.0f),
		XMVectorSet(-1.0f,  1.0f,  1.0f,  1.0f),
		XMVectorSet( 1.0f,  1.0f,  1.0f,  1.0f),
		XMVectorSet( 1.0f, -1.0f,  1.0f,  1.0f),
		XMVectorSet(-1.0f, -1.0f,  1.0f,  1.0f),
	};

	// Normalized near/far distances of this cascade's frustum slice
	float prevSplitDist = cascadeIdx == 0 ? 0.0f : CascadeSplits[cascadeIdx - 1] * ShadowDist;
	float splitDist = CascadeSplits[cascadeIdx] * ShadowDist;

	XMVECTOR det;
	XMMATRIX CamViewProj = camera->GetViewProjectionNonTransposed();
	XMMATRIX invViewProj = XMMatrixInverse(&det, CamViewProj);
	for(UINT i = 0; i < 8; ++i)
	{
		frustumCornerWS[i] = XMVector3TransformCoord(frustumCornerWS[i], invViewProj);
	}

	// Scale by the shadow view distance: slide each near/far corner pair
	// along its near->far ray so the corners enclose only this slice
	for(UINT i = 0; i < 4; ++i)
	{
		XMVECTOR cornerRay = XMVectorSubtract(frustumCornerWS[i + 4], frustumCornerWS[i]);
		XMVECTOR nearCornerRay = XMVectorScale(cornerRay, prevSplitDist);
		XMVECTOR farCornerRay = XMVectorScale(cornerRay, splitDist);
		frustumCornerWS[i + 4] = XMVectorAdd(frustumCornerWS[i], farCornerRay);
		frustumCornerWS[i] = XMVectorAdd(frustumCornerWS[i], nearCornerRay);
	}

	// Calculate the centroid of the view frustum (slice)
	XMVECTOR sphereCenterVec = XMVectorZero();
	for(UINT i = 0; i < 8; ++i)
	{
		sphereCenterVec = XMVectorAdd(sphereCenterVec, frustumCornerWS[i]);
	}
	sphereCenterVec = XMVectorScale(sphereCenterVec, 1.0f / 8.0f);

	// Calculate the radius of a bounding sphere (max corner distance)
	XMVECTOR sphereRadiusVec = XMVectorZero();
	for(UINT i = 0; i < 8; ++i)
	{
		XMVECTOR dist = XMVector3Length(XMVectorSubtract(frustumCornerWS[i], sphereCenterVec));
		sphereRadiusVec = XMVectorMax(sphereRadiusVec, dist);
	}		

	// NOTE(review): XMVectorRound rounds to nearest, so the radius can shrink
	// slightly below the true bound — confirm geometry at the slice edge is
	// never clipped
	sphereRadiusVec = XMVectorRound(sphereRadiusVec);
	const float sphereRadius = XMVectorGetX(sphereRadiusVec);
	const float backupDist = sphereRadius + nearClip + Backup;

	// Get Position of the shadow camera: back up from the sphere center
	// against the light direction
	XMVECTOR shadowCameraPosVec = sphereCenterVec;
	XMVECTOR backupDirVec = XMVectorNegate(XMLoadFloat3(&this->direction));
	backupDirVec = XMVectorScale(backupDirVec, backupDist);
	shadowCameraPosVec = XMVectorAdd(shadowCameraPosVec, backupDirVec);
	XMVECTOR upVec = XMVectorSet(0.0f, 1.0f, 0.0f, 1.0f);

	// Come up with a new orthographic camera for the shadow caster, sized by
	// the bounding sphere
	XMMATRIX shadowProjection = XMMatrixOrthographicOffCenterLH(-sphereRadius, sphereRadius,
														        -sphereRadius, sphereRadius,
														         nearClip, backupDist + sphereRadius);

	XMMATRIX shadowView = XMMatrixLookAtLH(shadowCameraPosVec, sphereCenterVec, upVec);
	XMMATRIX shadowViewProjection = XMMatrixMultiply(shadowView, shadowProjection);
	
	// Create the rounding matrix, by projecting the world-space origin and determining
	// the fractional offset in texel space
	XMMATRIX shadowMatrix = shadowViewProjection;
	XMVECTOR shadowOrigin = XMVectorSet(0.0f, 0.0f, 0.0f, 1.0f);
	shadowOrigin = XMVector4Transform(shadowOrigin, shadowMatrix);
	shadowOrigin = XMVectorScale(shadowOrigin, this->shadowMapSize / 2.0f);

	XMVECTOR roundedOrigin = XMVectorRound(shadowOrigin);
	XMVECTOR roundOffset = XMVectorSubtract(roundedOrigin, shadowOrigin);
	roundOffset = XMVectorScale(roundOffset, 2.0f / this->shadowMapSize);
	// Only snap in x/y; depth and w are untouched
	roundOffset = XMVectorSetZ(roundOffset, 0.0f);
	roundOffset = XMVectorSetW(roundOffset, 0.0f);

	// Fold the snap offset into the projection's translation row, then
	// rebuild the final view-projection used for rendering the shadow map
	shadowProjection.r[3] = XMVectorAdd(shadowProjection.r[3], roundOffset);
	shadowViewProjection = XMMatrixMultiply(shadowView, shadowProjection);
	XMStoreFloat4x4(&this->ViewProjection, shadowViewProjection);
	shadowMatrix = shadowViewProjection;


	// Apply the scale / offset / bias matrix, which transforms from [-1,1]
	// post-projection space to [0,1] UV space (y flipped for texture space;
	// -bias subtracts the depth bias from the stored depth)
	const float bias = Bias;
	XMMATRIX texScaleBias;
	texScaleBias.r[0] = XMVectorSet(0.5f,  0.0f,  0.0f,  0.0f);
	texScaleBias.r[1] = XMVectorSet(0.0f, -0.5f,  0.0f,  0.0f);
	texScaleBias.r[2] = XMVectorSet(0.0f,  0.0f,  1.0f,  0.0f);
	texScaleBias.r[3] = XMVectorSet(0.5f,  0.5f, -bias,  1.0f);
	shadowMatrix = XMMatrixMultiply(shadowMatrix, texScaleBias);

	// Apply the cascade offset / scale matrix, which applies the offset and scale needed to
	// convert the UV coordinate into the proper coordinate for the cascade being sampled in
	// the atlas

	// Cascade offsets: must stay in sync with the viewport offsets used in
	// Shadows::RenderShadows
	const XMFLOAT2 Offsets[4] = {
		XMFLOAT2(0.0f, 0.0f),
		XMFLOAT2(0.5f, 0.0f),
		XMFLOAT2(0.5f, 0.5f),
		XMFLOAT2(0.0f, 0.5f)
	};

	XMFLOAT4 offset(Offsets[cascadeIdx].x, Offsets[cascadeIdx].y, 0.0f, 1.0f);
	XMMATRIX cascadeOffsetMatrix = XMMatrixScaling(0.5f, 0.5f, 1.0f);
	cascadeOffsetMatrix.r[3] = XMLoadFloat4(&offset);
	shadowMatrix = XMMatrixMultiply(shadowMatrix, cascadeOffsetMatrix);
	
	// Store the shadow matrix
	XMStoreFloat4x4(&this->ShadowMatrices[cascadeIdx], shadowMatrix);

	// Store the split distance in terms of view space depth
	const float clipDist = camera->GetFarClip() - camera->GetNearClip();
	this->cascadeSplits[cascadeIdx] = camera->GetNearClip() + splitDist * clipDist;
} 

GPU side computation:


// Number of shadow cascades packed into the 2x2 shadow atlas
static const uint NumCascades = 4;


// Vertex-shader input: fullscreen-quad vertex (clip-space position + UV)
struct VSI
{
	float4 Position : POSITION0;
	float2 UV		: TEXCOORD0;
};


// Vertex-shader output / pixel-shader input
struct VSO
{
	float4 Position			: SV_POSITION;
	float2 UV				: TEXCOORD0;
	float3 ViewRay			: TEXCOORD1;	// view-space ray (z == 1); PS scales it by linear depth
};




// Shadow projection constants — must match the CPU-side
// cbShadowProjectionProperties layout in Shadows::RenderShadows
cbuffer cbShadowProjection : register(b0)
{
	float4x4 InverseProjection;		// inverse camera projection
	float4x4 ShadowMatrices[4];		// camera-view-space -> cascade shadow-map UV/depth, per cascade
	float4 CascadeSplits;			// view-space far depth of each cascade

	float ProjectionA;				// farClip / (farClip - nearClip)
	float ProjectionB;				// (-farClip * nearClip) / (farClip - nearClip)
	float2 shadowMapSize;			// size of a single cascade (the 2x2 atlas is twice this)
};







// Shadow atlas (2x2 cascades) and its hardware comparison sampler
Texture2D ShadowMap : register(t0);
SamplerComparisonState ShadowMapSampler : register(s0);

// Scene hardware depth buffer and point sampler
Texture2D DepthTarget : register(t1);
SamplerState PointSampler : register(s1);








// Fullscreen-quad vertex shader: passes position/UV through and builds a
// view-space ray (normalized to z == 1) that the pixel shader scales by
// linear depth to reconstruct the view-space position.
VSO VS(VSI input)
{
	VSO output = (VSO)0;

	// Quad vertices are already in clip space — no transform needed
	output.Position = input.Position;
	output.UV = input.UV;

	// Unproject the clip-space vertex into view space, then divide so the
	// ray has unit view-space depth
	float3 unprojected = mul(input.Position, InverseProjection);
	output.ViewRay = float3(unprojected.x / unprojected.z,
	                        unprojected.y / unprojected.z,
	                        1.0f);

	return output;
}





//--------------------------------------------------------------------------------------
// Samples the shadow map cascades based on the view-space position, using edge-tap
// smoothing PCF for filtering.
//
// positionVS - view-space position (the shadow matrices are pre-multiplied with
//              the inverse camera view on the CPU)
// cascadeIdx - cascade to sample (0..NumCascades-1)
// returns    - shadow visibility (0 = fully shadowed)
//--------------------------------------------------------------------------------------
float SampleShadowCascade(in float3 positionVS, in uint cascadeIdx)
{
	float4x4 shadowMatrix = ShadowMatrices[cascadeIdx];
	float3 shadowPosition = mul(float4(positionVS, 1.0f), shadowMatrix).xyz;
	float2 shadowTexCoord = shadowPosition.xy;
	float shadowDepth = shadowPosition.z;

	// Edge tap smoothing
	const int Radius = 2;
	// BUG FIX: this was hard-coded to 2048.0f * 2, silently breaking for any
	// other shadow-map size. The atlas is a 2x2 grid of cascades, so the full
	// atlas dimension is twice the per-cascade size from the constant buffer.
	const float AtlasSize = shadowMapSize.x * 2.0f;
	const int NumSamples = (Radius * 2 + 1) * (Radius * 2 + 1);

	// Sub-texel position, used to weight the border taps of the kernel
	float2 fracs = frac(shadowTexCoord.xy * AtlasSize);
	float leftEdge = 1.0f - fracs.x;
	float rightEdge = fracs.x;
	float topEdge = 1.0f - fracs.y;
	float bottomEdge = fracs.y;

	float shadowVisibility = 0.0f;

	[unroll(NumSamples)]
	for (int y = -Radius; y <= Radius; y++)
	{
		[unroll(NumSamples)]
		for (int x = -Radius; x <= Radius; x++)
		{
			float2 offset = float2(x, y) * (1.0f / AtlasSize);
			float2 sampleCoord = shadowTexCoord + offset;
			// Renamed from 'sample': that identifier is a reserved HLSL
			// interpolation-modifier keyword
			float shadowSample = ShadowMap.SampleCmp(ShadowMapSampler, sampleCoord, shadowDepth).x;

			float xWeight = 1;
			float yWeight = 1;

			if(x == -Radius)
				xWeight = leftEdge;
			else if(x == Radius)
				xWeight = rightEdge;

			if(y == -Radius)
				yWeight = topEdge;
			else if(y == Radius)
				yWeight = bottomEdge;

			shadowVisibility += shadowSample * xWeight * yWeight;
		}
	}

	// Average over all taps; the 1.5x factor brightens the filtered result
	// (presumably an artistic tweak — TODO confirm it is intentional)
	shadowVisibility  /= NumSamples;
	shadowVisibility *= 1.5f;

	return shadowVisibility;
}




//--------------------------------------------------------------------------------------
// Computes the visibility term by performing the shadow test.
// Selects the cascade from the view-space depth, blends with the next cascade
// near the split boundary, and tints the result per cascade for debugging.
//--------------------------------------------------------------------------------------
float3 ShadowVisibility(in float3 positionVS)
{
	float3 shadowVisibility = 1.0f;
	uint cascadeIdx = 0;
	float depthVS = positionVS.z;

	// Figure out which cascade to sample from
	[unroll]
	for(uint i = 0; i < NumCascades - 1; ++i)
	{
		[flatten]
		if(depthVS > CascadeSplits[i])
			cascadeIdx = i + 1;
	}

	shadowVisibility = SampleShadowCascade(positionVS, cascadeIdx);

	// Sample the next cascade, and blend between the two results to
	// smooth the transition
	const float BlendThreshold = 0.1f;
	float nextSplit = CascadeSplits[cascadeIdx];
	// BUG FIX: the original tested 'i == 0' here, but 'i' is the loop
	// variable above and is out of scope (or stuck at its final value) at
	// this point — the split size is relative to the *current cascade*.
	float splitSize = cascadeIdx == 0 ? nextSplit : nextSplit - CascadeSplits[cascadeIdx - 1];
	float splitDist = (nextSplit - depthVS) / splitSize;

	// BUG FIX: clamp so the last cascade never indexes
	// ShadowMatrices[NumCascades] out of bounds
	uint nextCascadeIdx = min(cascadeIdx + 1, NumCascades - 1);
	float nextSplitVisibility = SampleShadowCascade(positionVS, nextCascadeIdx);
	float lerpAmt = smoothstep(0.0f, BlendThreshold, splitDist);
	shadowVisibility = lerp(nextSplitVisibility, shadowVisibility, lerpAmt);

	// Debug tint: color each cascade so the selection is visible on screen
	if(cascadeIdx == 0)
	{
		shadowVisibility *= float3(1.0f, 0.0f, 0.0f);
	}
	else if(cascadeIdx == 1)
	{
		shadowVisibility *= float3(0.0f, 1.0f, 0.0f);
	}
	else if(cascadeIdx == 2)
	{
		shadowVisibility *= float3(0.0f, 0.0f, 1.0f);
	}
	else if(cascadeIdx == 3)
	{
		shadowVisibility *= float3(1.0f, 1.0f, 0.0f);
	}

	return shadowVisibility;
}





// Shadow accumulation pixel shader: reconstructs the view-space position of
// the pixel from the hardware depth buffer and the interpolated view ray,
// then evaluates the cascaded shadow visibility term.
float4 PS(VSO input) : SV_TARGET0
{
	// Convert hardware depth to linear view-space Z, then scale the ray
	// (which has z == 1) to land on the surface point
	float hwDepth = DepthTarget.Sample(PointSampler, input.UV).r;
	float linearZ = ProjectionB / (hwDepth - ProjectionA);
	float3 positionVS = input.ViewRay.xyz * linearZ;

	// Visibility (with per-cascade debug tint), opaque alpha
	return float4(ShadowVisibility(positionVS), 1.0f);
}

I'm so close to having it working now — any idea what I'm doing wrong?
I still don't quite understand how the parameters "ShadowDist" and "Backup" have to be set.

Advertisement
Why do you want to transfer it back to world space? That's two extra matrix multiplications.

Keep both samples in view space — since I assume you store the shadow in a depth texture, and the deferred position in a depth buffer as well?
Then you just transform the deferred position into the shadow's view space, and the shadow into view space, then compare the depths.

This might not help you solve the issue, but it could simplify your algorithm.
"There will be major features. none to be thought of yet"
So say you have a shadow matrix, such that WorldPosition * ShadowMatrix gives you the position that you want in shadow map space. If you have a view-space position, then you could get the same result by doing ViewPosition * InvViewMatrix * ShadowMatrix. The nice thing about matrices is that you can combine two matrix transformations into a single matrix, which means that if you can pre-multiply InvViewMatrix * ShadowMatrix on the CPU and pass that to the GPU. Then in your shader you just do ViewPosition * ViewShadowMatrix and you'll get the result that you want.

EDIT: I made a rough diagram to illustrate what I'm talking about:

[attachment=12523:Cascades.PNG]

The picture shows a top-down view of the view frustum (the green trapezoid), with two shadow cascades (the red rectangles) whose sizes are marked (which are not to scale). Ignoring the Z dimension for a minute, in terms of X and Y the second cascade is 2x the width and height of the first cascade and is located 1 unit to the right and 0.75 units downwards. Using that, you could project using the first cascade matrix and then apply the appropriate offset and scale to get coordinates in terms of the second cascade. I left out the Z coordinate, but it works the same way.
I've now pre multiplied the shadow matrices with the inverse view(my camera's view) and multiply that with my position in view space.
I still don't quite understand this part:

"in terms of X and Y the second cascade is 2x the width and height of the first cascade and is located 1 unit to the right and 2.25 units upwards. Using that, you could project using the first cascade matrix and then apply the appropriate offset and scale to get coordinates in terms of the second cascade."

I'm having a hard time understanding where those numbers come from and how I would obtain my sample coordinates from that.

Let me go through this process from the beginning.
I calculate the view projection of my light source which is the shadow matrix.
Then I transform these from post-projection space to uv space like this:

// Apply the scale/offset/bias matrix, which transform from [-1, 1]
// post-projection space to [0, 1] UV space
const float bias = Bias;
XMMATRIX texScaleBias;
texScaleBias.r[0] = XMVectorSet(0.5f, 0.0f, 0.0f, 0.0f);
texScaleBias.r[1] = XMVectorSet(0.0f, -0.5f, 0.0f, 0.0f);
texScaleBias.r[2] = XMVectorSet(0.0f, 0.0f, 1.0f, 0.0f);
texScaleBias.r[3] = XMVectorSet(0.5f, 0.5f, -bias, 1.0f);
shadowMatrix = XMMatrixMultiply(shadowMatrix, texScaleBias);


Now inside the shader function I obtain this shadow matrix(which I pre-multiplied by the inverseView) and multiply it by the view space position of my geometry to basically get my geometry position in the light's space, correct ?
Then I use the xy position of the projected position in shadow space as texture coordinate to sample the shadow map and comparing this with the projected z position. If I'm wrong somewhere what's the mistake I'm making here ?

// Samples one cascade of the shadow atlas with edge-tap smoothed PCF.
//
// BUG FIX: the original paste had a stray '{' after the inner [unroll]
// attribute, which wrapped the x-loop in a bare nested block and pulled the
// "/= NumSamples" averaging inside the y-loop — the visibility was divided
// once per row instead of once at the end. Braces and indentation restored.
float SampleShadowCascade(in float3 positionVS, in uint cascadeIdx)
{
	float4x4 shadowMatrix = ShadowMatrices[cascadeIdx];
	float3 shadowPosition = mul(float4(positionVS, 1.0f), shadowMatrix).xyz;
	float2 shadowTexCoord = shadowPosition.xy;
	float shadowDepth = shadowPosition.z;

	// Edge tap smoothing
	const int Radius = 2;
	const int NumSamples = (Radius * 2 + 1) * (Radius * 2 + 1);

	// Sub-texel position, used to weight the kernel's border taps
	float2 fracs = frac(shadowTexCoord.xy * ShadowMapSize);
	float leftEdge = 1.0f - fracs.x;
	float rightEdge = fracs.x;
	float topEdge = 1.0f - fracs.y;
	float bottomEdge = fracs.y;

	float shadowVisibility = 0.0f;

	[unroll(NumSamples)]
	for(int y = -Radius; y <= Radius; y++)
	{
		[unroll(NumSamples)]
		for(int x = -Radius; x <= Radius; x++)
		{
			float2 offset = float2(x, y) * (1.0f / ShadowMapSize);
			float2 sampleCoord = shadowTexCoord + offset;
			float currentSample = ShadowMap.SampleCmp(ShadowMapSampler, sampleCoord, shadowDepth).x;

			float xWeight = 1;
			float yWeight = 1;

			if(x == -Radius)
				xWeight = leftEdge;
			else if(x == Radius)
				xWeight = rightEdge;

			if(y == -Radius)
				yWeight = topEdge;
			else if(y == Radius)
				yWeight = bottomEdge;

			shadowVisibility += currentSample * xWeight * yWeight;
		}
	}

	// Average over all taps; 1.5x brightens the filtered result
	shadowVisibility /= NumSamples;
	shadowVisibility *= 1.5f;

	return shadowVisibility;
}

"in terms of X and Y the second cascade is 2x the width and height of the first cascade and is located 1 unit to the right and 2.25 units upwards. Using that, you could project using the first cascade matrix and then apply the appropriate offset and scale to get coordinates in terms of the second cascade."

I'm having a hard time understanding where those numbers come from and how I would obtain my sample coordinates from that.


I think he's just using those numbers from the example he drew. Not sure about the 2.25 units upward (seems like 0.75 units upward from the illustration). I *BELIEVE* the point of this example was to show that for subsequent cascades beyond the first, you could obtain the correct values by transforming with cascade 0's matrix and then applying a transform, rather than requiring N cascade matrix transforms.
Yeah that should have been "0.75 units downward", sorry about that.
Alright I understand the concept now. So in code would I calculate the view position for the first shadow matrix and then do something like:

float3 offset = float3(shadowPositionCascade[0].x + 1, shadowPositionCascade[0] + 0.75, somethingHere);
shadowPositionCascade[1] = shadowPositionCascade[0] + offset;


?

Still it sounds like a good optimization but my problem is rather in getting a correct result from all this.
It's probably easier to show this in a video:


It looks like it's still in the wrong space, seeing as it moves around with my camera.
Still not getting anywhere : / any ideas ?

Updated first post (4th Jan 2013)

This topic is closed to new replies.

Advertisement