jonathantompson

Member Since 11 Aug 2009
Offline Last Active Apr 08 2011 02:01 PM

Topics I've Started

SSAO problems! Please help!

24 March 2011 - 09:43 AM

I've been tearing my hair out for days trying to get my DX9 SSAO code working. I know there have been a million posts on this, but despite reading them a thousand times I'm still having trouble!

I'm using the algorithm described in "A Simple and Practical Approach to SSAO".

I think the issue stems from my calculation of view-space normals and positions. Is the view-space position supposed to be normalized?

Here is the code for storing view space normals and positions to two D3DFMT_A32B32G32R32F render targets:

Position and Normal Map Generation:
struct PosNormMap_PSIn
{
	float4 PosWVP : POSITION0; // Homogeneous clip-space position
	float4 PosWV  : TEXCOORD0; // View space position
	float4 NormWV : TEXCOORD1; // View space normal
};

struct PosNormMap_PSOut // 128-bit buffers
{
	float4 Pos  : COLOR0; // View space position
	float4 Norm : COLOR1; // View space normal
};


// Vertex Shader
PosNormMap_PSIn BuildPosNormMapVS(float3 Position : POSITION, 
						  		float3 Normal  : NORMAL0 )
{
    PosNormMap_PSIn Output;
    
    Output.PosWVP = mul(float4(Position, 1.0f), gWVP);
    Output.PosWV  = mul(float4(Position, 1.0f), gWV);
    Output.NormWV = mul(float4(Normal, 0.0f), gWV); // Assumes no non-uniform scaling (otherwise use the inverse-transpose)
    
    return Output;
}


// Normalize into [0,1]...  Not really necessary for floating point textures, but required for integer textures
float4 NormalToTexture(float4 norm)
{
	return norm * 0.5f + 0.5f; 
}

// Pixel Shader
PosNormMap_PSOut BuildPosNormMapPS(PosNormMap_PSIn Input)
{
	PosNormMap_PSOut Output;
	
	Output.Pos = Input.PosWV;
	//Output.Pos.xy = Output.Pos.xy / Output.Pos.w;
	//Output.Pos.z = linstep(gCameraNearFar.x, gCameraNearFar.y, Output.Pos.z); // Rescale from 0 to 1
	Output.Norm = NormalToTexture(normalize(Input.NormWV)); // Interpolated normals can denormalize, so renormalize, then pack into [0,1]
	
    return Output;
}

technique BuildPosNormMap_Tech
{
	pass P0
	{
		vertexShader = compile vs_3_0 BuildPosNormMapVS();
		pixelShader  = compile ps_3_0 BuildPosNormMapPS();
	}
}


Here is the code for populating the occlusion buffer (D3DFMT_R32F):

uniform extern float2   gVectorNoiseSize; // default: 64x64
uniform extern float2   gScreenSize;
uniform extern float	gSSAOSampleRadius; // default: 0.5-->2.0
uniform extern float    gSSAOBias; // default: 0.05
uniform extern float    gSSAOIntensity; // default: 3.0
uniform extern float    gSSAOScale; // default: 1.0-->2.0


sampler PPSampPosition = sampler_state
{
	Texture = <gPPPosition>;
	MipFilter = POINT;
    MinFilter = POINT;
	MagFilter = POINT;
	MaxAnisotropy = 1;
	AddressU = CLAMP;
	AddressV = CLAMP;
};

sampler PPSampNormal = sampler_state
{
	Texture = <gPPNormal>;
	MipFilter = POINT;
    MinFilter = POINT;
	MagFilter = POINT;
	MaxAnisotropy = 1;
	AddressU = CLAMP;
	AddressV = CLAMP;
};

sampler PPSampVectorNoise = sampler_state
{
	Texture = <gPPVectorNoise>;
	MipFilter = LINEAR;
    MinFilter = LINEAR;
	MagFilter = LINEAR;
	MaxAnisotropy = 1;
	AddressU = WRAP;
	AddressV = WRAP;
};


// Vertex Shader
void OcclusionMap_VS(float3 pos0 		: POSITION,
		             float2 tex0 		: TEXCOORD0,
			         out float4 oPos0    : POSITION0,
			         out float2 oTex0    : TEXCOORD1)
{
	// Pass on texture and position coords to PS
	oTex0 = tex0;
	oPos0 = float4(pos0,1.0f);
}

float3 getPosition(in float2 tex0)
{
    return tex2D(PPSampPosition,tex0).xyz;
}

float3 getNormal(in float2 tex0)
{
    return normalize(tex2D(PPSampNormal, tex0).xyz * 2.0f - 1.0f);
	//return tex2D(PPSampNormal, tex0).xyz * 2.0f - 1.0f;
}

float2 getRandom(in float2 tex0)
{
    return normalize(tex2D(PPSampVectorNoise, gScreenSize * tex0 / gVectorNoiseSize).xy * 2.0f - 1.0f);
}

float doAmbientOcclusion(in float2 tcoord, in float2 offset, in float3 p, in float3 cnorm)
{
    float3 diff = getPosition(tcoord + offset) - p;
    const float3 v = normalize(diff);
    const float d = length(diff) * gSSAOScale;
    // Occlusion grows as the sample sits above the surface (positive dot) and falls off with distance
    return max(0.0, dot(cnorm, v) - gSSAOBias) * (1.0 / (1.0 + d)) * gSSAOIntensity;
}


// Pixel Shader
float4 OcclusionMap_PS(float2 tex0 : TEXCOORD1) : COLOR
{
	const float2 vec[4] = { float2(1,0),float2(-1,0),float2(0,1),float2(0,-1) };

    float3 p = getPosition(tex0);
    float3 n = getNormal(tex0);
    float2 rand = getRandom(tex0);
    float ao = 0.0f;
    float rad = gSSAOSampleRadius/p.z;
    //SSAO Calculation
	const int iterations = 4;
    for (int j = 0; j < iterations; ++j)
	{
        float2 coord1 = reflect(vec[j],rand)*rad;
        // float2 coord2 = float2(coord1.x*0.707 - coord1.y*0.707, coord1.x*0.707 + coord1.y*0.707);
        float2 coord2 = float2(coord1.x - coord1.y, coord1.x + coord1.y) * 0.707f;
        ao += doAmbientOcclusion(tex0,coord1*0.25, p, n);
        ao += doAmbientOcclusion(tex0,coord2*0.5, p, n);
        ao += doAmbientOcclusion(tex0,coord1*0.75, p, n);
        ao += doAmbientOcclusion(tex0,coord2, p, n);
    }
 
	ao /= (float)iterations * 4.0f; // Average over all 16 samples
    //END

	return float4(ao, 1.0f, 1.0f, 1.0f); // Only the red channel survives in the R32F target
}


// Technique
technique OcclusionMap_Tech
{
    pass P0
    {
        // Specify the vertex and pixel shader associated with this pass.
        vertexShader = compile vs_3_0 OcclusionMap_VS();
        pixelShader  = compile ps_3_0 OcclusionMap_PS();
    }
}


The normal buffer output looks OK, but the position buffer output looks off. I think each object sits close to the camera near plane, so z << 1; I suspect that's why everything looks so dark.

Also, which filters should be used for the position and normal buffers? When calculating the occlusion buffer I render a full-screen quad the same size as the position and normal buffers, so there's 1:1 texel mapping. I'm guessing POINT filtering with CLAMP addressing is correct?
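
(Related: exact 1:1 mapping in D3D9 also requires accounting for the half-texel offset between pixel centers and texel centers. A minimal sketch of the UV fix, reusing the gScreenSize uniform declared above:)

// D3D9 addresses pixel centers half a texel away from texel centers, so a
// full-screen quad needs a half-texel UV shift for exact 1:1 sampling.
oTex0 = tex0 + 0.5f / gScreenSize;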

[Image: Viewspace Position Buffer (62.5% of original size)]

[Image: Viewspace Normal Buffer (62.5% of original size)]

[Image: Occlusion Buffer (62.5% of original size)]

Thanks in advance for the help!

DX9 Variance SM - Texture Hardware Filter Issues!

15 March 2011 - 02:00 PM

I am putting together a DX9 renderer with cascaded variance shadow maps. The shadows render correctly, but I am having trouble getting any sort of texture filtering to work. The aim, of course (and the whole point of using variance shadow maps), is to soften and anti-alias the projected shadows with hardware sampling. I understand the theory: storing two moments and using the Chebyshev inequality to approximate the shadow percentage lets you use linear sampling (which can be hardware accelerated).
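
For reference, the ChebyshevUpperBound and LBR functions called from the shadow pixel shader below follow the standard VSM formulation (Donnelly & Lauritzen); a minimal sketch of the standard implementation, with gVSMLBRAmount as an illustrative light-bleeding tuning uniform:

float ChebyshevUpperBound(float2 Moments, float ReceiverDepth, float MinVariance)
{
	// Fully lit if the receiver is in front of the mean occluder depth
	float p = (ReceiverDepth <= Moments.x);

	// Variance = E[depth^2] - E[depth]^2, clamped to avoid degenerate values
	float Variance = max(Moments.y - Moments.x * Moments.x, MinVariance);

	// One-tailed Chebyshev inequality: upper bound on the unoccluded fraction
	float d = ReceiverDepth - Moments.x;
	float pMax = Variance / (Variance + d * d);

	return max(p, pMax);
}

// Light-bleeding reduction: remap pMax so values below gVSMLBRAmount are fully shadowed
float LBR(float pMax)
{
	return linstep(gVSMLBRAmount, 1.0f, pMax);
}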

The shadow map texture format is D3DFMT_G32R32F, though I have tried others. However, no matter what sampler_state I include in the effect, I can't seem to get any visible hardware filtering.

I've included all the relevant HLSL code below, as well as a few screenshots. I'm using a single low-resolution shadow map to accentuate the aliasing.

Basically, I'm unsure why "Min/MagFilter = xxx;" (POINT, LINEAR, ANISOTROPIC) has no effect on the texture sampling.
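
For completeness, the shadow map samplers are declared along these lines (a sketch; the gShadowMap0 binding name is illustrative, and the filter lines are the ones I've been changing):

sampler ShadowMapS0 = sampler_state
{
	Texture = <gShadowMap0>; // Illustrative binding name
	MinFilter = LINEAR;      // Swapping POINT / LINEAR / ANISOTROPIC here has no visible effect
	MagFilter = LINEAR;
	MipFilter = NONE;
	AddressU = CLAMP;
	AddressV = CLAMP;
};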

Any help would be greatly appreciated:

Here is the vertex shader code TO BUILD THE SHADOW MAP:
Depth_PSIn BuildShadowMapVS(float3 Position : POSITION) // Object space position
{
    Depth_PSIn Output;
    
    Output.Position = mul(float4(Position, 1), gWVP);
    Output.PosView  = mul(float4(Position, 1), gWV).xyz;
    
    return Output;
}



Here is the pixel shader code TO BUILD THE SHADOW MAP:
float linstep(float min, float max, float v)
{
    return clamp((v - min) / (max - min), 0, 1);
}

// Rescale into [0, 1]
float RescaleDistToLight(float Distance)
{
    return linstep(gLight.nearFar.x, gLight.nearFar.y, Distance);
}

float2 GetFPBias()
{
    //return float2(0.5, 0);
    return float2(0, 0);
}

float2 ComputeMoments(float Depth)
{
	float dx = ddx(Depth);  
	float dy = ddy(Depth);  
	
    // Compute first few moments of depth
    float2 Moments;
    Moments.x = Depth;
    Moments.y = Depth * Depth + 0.25f * (dx * dx + dy * dy);

    return Moments;
}



float4 BuildShadowMapPS(Depth_PSIn Input) : COLOR
{
	float Depth = RescaleDistToLight(length(Input.PosView)) + gVSMDepthEpsilon;
	float2 Moments = ComputeMoments(Depth) + GetFPBias();
    return float4(Moments.x, Moments.y, 0.0f, 0.0f);
}


Here is the vertex shader code WHEN RENDERING A TEXTURED MESH:
void MeshTextured_SpotLight_VS(float3 posL 		: POSITION,
					           float3 normalL      : NORMAL0,
					           float2 tex0 		: TEXCOORD0,
					           out float4 oPosH    : POSITION0,
					           out float3 oPosW    : TEXCOORD0,
					           out float3 oNormalW : TEXCOORD1,
					           out float3 oToEyeW  : TEXCOORD2,
					           out float2 oTex0    : TEXCOORD3,
					           out float oSliceDepth : TEXCOORD4)
{
	// Transform to homogeneous clip space.
	oPosH = mul(float4(posL, 1.0f), gWVP);
	
	// Transform vertex position to world space.
	oPosW = mul(float4(posL, 1.0f), gW).xyz;
	
	// Transform normal to world space (assume no non-uniform scaling).
	oNormalW = mul(float4(normalL, 0.0f), gW).xyz;
	
	// Compute the unit vector from the vertex to the eye.
	oToEyeW = gEyePosW - oPosW;
	
	// Pass on texture coords to PS
	oTex0 = tex0;
	
	// Calculate the slice depth
	oSliceDepth = oPosH.z;
}


Here is the pixel shader code WHEN RENDERING A TEXTURED MESH:
// Per-pixel shading.  Diffuse, ambient and specular
void PerPixelShading_SpotLight(	float3 posW, 
								float3 normalW, 
								float3 toEyeW, 
								float3 lightVecW, 
								float4 color,
								out float3 spec, 
								out float3 diffuse, 
								out float3 ambient,
								out float spot)
{
	// Compute the reflection vector.
	float3 r = reflect(-lightVecW, normalW);
	
	// Determine how much (if any) specular light makes it into the eye.
	float t  = pow(max(dot(r, toEyeW), 0.0f), gMtrl.specPower);
	
	// Determine the diffuse light intensity that strikes the vertex.
	float s = max(dot(lightVecW, normalW), 0.0f);
	
	// Compute the ambient, diffuse and specular terms separately. 
	spec = t*(gMtrl.spec*gLight.spec).rgb;
	diffuse = s*(gMtrl.diffuse.rgb*gLight.diffuse.rgb);
	ambient = gMtrl.ambient.rgb*gLight.ambient.rgb;
	
	// Compute spotlight coefficient.
	spot = pow(max( dot(-lightVecW, gLight.dirW), 0.0000001f), gLight.spotPower);
}

// Per-pixel Shadow.
void PerPixelShadowing_SpotLight( float3 posW, 
						  		float sliceDepth, 
						  		float DistToLight,
						  		out int Split,
						  		out float shadowCoeff )
{
	// Compute which split we're in:
	// (sliceDepth > dist_0) + (sliceDepth > dist_1) + (sliceDepth > dist_2) + (sliceDepth > dist_3)
    Split = dot(1, sliceDepth > gSplitDistances);
    
    // Project using the associated matrix
    float4 PosInLight = mul(float4(posW, 1), gSplitVPMatrices[Split]);
    float2 LightTexCoord = (PosInLight.xy / PosInLight.w) * float2(0.5, -0.5) + 0.5;

	// SHADOW CODE
	if(gDoShadowing)
	{
		// Sample the correct shadow map
		float2 Moments = 0; // Initialize so the compiler doesn't flag partial initialization
		if(Split == 0)
			Moments = tex2D(ShadowMapS0, LightTexCoord).xy; 
		if(Split == 1)
			Moments = tex2D(ShadowMapS1, LightTexCoord).xy; 
		if(Split == 2)
			Moments = tex2D(ShadowMapS2, LightTexCoord).xy; 
		if(Split == 3)
			Moments = tex2D(ShadowMapS3, LightTexCoord).xy; 
		if(Split == 4)
			Moments = tex2D(ShadowMapS4, LightTexCoord).xy; 
		Moments = Moments + GetFPBias();
		float RescaledDist = RescaleDistToLight(DistToLight);
		
		// VARIANCE SHADOW MAPS
		shadowCoeff = ChebyshevUpperBound(Moments, RescaledDist, gVSMMinVariance);
		shadowCoeff = LBR(shadowCoeff);
	}
	else
	{
		shadowCoeff = 1.0f;
	}
}

float4 MeshTextured_SpotLight_PS(float3 posW    : TEXCOORD0,
                         		float3 normalW : TEXCOORD1,
                         		float3 toEyeW  : TEXCOORD2,
                         		float2 tex0    : TEXCOORD3,
                         		float sliceDepth : TEXCOORD4) : COLOR
{
	// Interpolated normals can denormalize, so renormalize.
	normalW = normalize(normalW);
	toEyeW  = normalize(toEyeW);
	
	// Calculate normalized light vector and distance to light
	float3 lightVecW = gLight.posW - posW;
	float DistToLight = length(lightVecW);
	lightVecW /= DistToLight;
	
	// Sample Texture map.
	float4 texColor = tex2D(TexS, tex0); 
	
	// Calculate per-pixel shading for spot light
	float3 spec;
	float3 diffuse;
	float3 ambient;
	float spot;
	PerPixelShading_SpotLight(posW, normalW, toEyeW, lightVecW, texColor, spec, diffuse, ambient, spot);
	
	// Calculate Shadow
	int Split;
	float shadowCoeff;
	PerPixelShadowing_SpotLight(posW, sliceDepth, DistToLight, Split, shadowCoeff);
	
	// Light/Texture pixel.  Note that shadow coefficient only affects diffuse/spec.
	float3 litColor = spot*(ambient*texColor.rgb + shadowCoeff*(diffuse*texColor.rgb + spec));
	
	// Visualize the splits by adding a linearly interpolated color offset
	if (gVisualizeSplits) {
        litColor = lerp(litColor, SplitColors[Split], 0.5);
    }
	
	return float4(litColor, gMtrl.diffuse.a*texColor.a);
}

[Image: POINT filter, 1 cascade, 512x512 textures, no texture blur]

[Image: LINEAR filter, 1 cascade, 512x512 textures, no texture blur]

[Image: ANISOTROPIC filter, 1 cascade, 512x512 textures, no texture blur]

[Image: POINT filter, 4 cascades, 512x512 textures, 5x5 box blur]

Is "Real-Time Rendering, A.-Moller et. al." a good reference?

06 January 2011 - 03:49 AM

Hey guys,

Does anyone have this book, and can you offer some insight into whether it is a good purchase? I'm looking for a good reference book for real-time graphics algorithms.

http://www.amazon.com/Real-Time-Rendering-Third-Tomas-Akenine-Moller/dp/1568814240

Is there a better resource?

Thanks,
Jonathan

player controls

09 September 2010 - 11:30 AM

MY QUESTION: WHAT IS THE BEST WAY TO IMPLEMENT PLAYER CONTROLS IN A PHYSICS ENGINE?

BACKGROUND: I'm putting together a quick XNA game.

The physics engine is simply an RK4 integrator + bounding spheres with impulse based collision response.
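
(For reference, RK4Integrate is the standard fourth-order Runge-Kutta step: k1 = f(t, y), k2 = f(t + h/2, y + (h/2) k1), k3 = f(t + h/2, y + (h/2) k2), k4 = f(t + h, y + h k3), then y += (h/6)(k1 + 2 k2 + 2 k3 + k4).)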

I'm currently implementing the player controls and I'm not entirely sure how to do it. As I see it, there are two options:

1. Add as impulse before integration:
a) object.V += someImpulse
b) RK4Integrate(F,V,etc)

2. Add as a force
a) Attach force to object.
b) RK4Integrate(F,V,etc)

When the player stops moving the object:

a) Find the force that stops the object in Tstop time (see the expression below).
b) Attach the force.
c) RK4Integrate(F,V,etc)
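
For (a), a constant force that stops the object over Tstop is just F = -m * object.V / Tstop, since integrating a = F/m over Tstop exactly cancels the current velocity.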

I can see issues with both approaches.

1. Looks less natural; player motion is discontinuous. BUT it's easier for the player to control.

2. If the physics time step becomes longer than Tstop, there are potential stability issues. You also need to deal with the fact that the player may never stop (i.e., it could oscillate around a point).

WHAT IS THE USUAL APPROACH?

Help with OBB Collision Detection

03 April 2010 - 03:46 AM

I am trying to implement an OBB bounding volume hierarchy as in S. Gottschalk et al., "OBBTree: A Hierarchical Structure for Rapid Interference Detection". I have successfully generated the OBB tree from a polygon soup, but I am having trouble testing OBB nodes for collision.

My first question:

1. My collision code is reporting incorrect collisions. I think the issue stems from the fact that in my code each OBB node has a rotation matrix and translation vector describing its orientation and position within the model frame, and the model frame itself has a rotation and translation within the world frame. In Gottschalk's code, there is only one rotation and one translation for each OBB. Please let me know if any of the following is incorrect.

My OBB tree is represented by something like this:

class obbox
{
	vector3   boxCenter;    // Box center in model frame
	vector3   boxDimension; // Box half lengths
	matrix3x3 orientMat;    // 3 columns are eigenvectors of covariance matrix
};

But the OBB tree is part of a rigid body object that has its own rotation and translation:

class rbobject
{
	vector3   Trans; // Translation (model -> world)
	matrix3x3 Rot;   // Rotation (model -> world); a matrix, since it multiplies orientMat below
};

I have been using the detection code in Christer Ericson's book, Real-Time Collision Detection (pages 103-105):

http://books.google.com/books?id=WGpL6Sk9qNAC&lpg=PA101&ots=Pl4MjF4ciO&dq=Real-Time%20Collision%20Detection%20oriented%20bounding%20boxes&pg=PA101#v=onepage&q=Real-Time%20Collision%20Detection%20oriented%20bounding%20boxes&f=false

In this, he has one rotation matrix and one translation vector describing each OBB. My confusion stems from the fact that I have two rotations and two translations for each OBB node. So I use the code below to derive the R and t values that Ericson's code expects:

matrix3x3 R_a = rbobject_a.Rot * obbox_a.orientMat;
matrix3x3 R_b = rbobject_b.Rot * obbox_b.orientMat;

// Matrix describing B in A's coordinate frame.
// A^-1 * B = A' * B for orthonormal A.
matrix3x3 R = Transpose(R_a) * R_b;

vector3 t_a_world = (rbobject_a.Rot * obbox_a.boxCenter) + rbobject_a.Trans;
vector3 t_b_world = (rbobject_b.Rot * obbox_b.boxCenter) + rbobject_b.Trans;
vector3 t_a_b_world = t_b_world - t_a_world;

// Bring the translation vector into A's coordinate frame.
vector3 t = Transpose(R_a) * t_a_b_world;

Then the rest of the code is the same as in the textbook. Is this correct? I can't see anything wrong with the matrix/vector manipulations, but I am getting incorrect collisions reported.

My second question:

2. Can you test for leaf-node collisions using an OBB of close to zero thickness that lies in the plane of the leaf triangle? I can't see any issue with this approach. Maybe the OBB collision test can be simplified when one of the half lengths is known to be zero (as described in Gottschalk's thesis), but the detection code should still work. Right?

Thanks,
Jonathan
