Raising the Deferred Depth Buffer Reconstruction Bar


Hi All,

I am a seasoned DX developer and, having scoured the web for decent deferred shading approaches, I am trying to adapt samples from the web to the forsaken tongue :)

Deferred Rendering Tutorials I have seen:

In my attempt to port the last sample (based on the Second-Depth Anti-Alias sample) I get world space wrapped inside the light. Clearly the primary fault is that the inverse view projection is invalid (this was the worst set of artifacts I could generate by rotating around a cube).

glitch.jpg

I intentionally have a very well-rounded light sphere to make the distortions clear.

Provided I can get the basics like a world-space position, I can write beautiful shaders. I am struggling to get this point of reference, and without it I feel about 2cm tall.

Because I want this to be available for everyone (I hate the lack of GL sample code!) here is what I have so far (apologies that there is no sample app):

GeomVertShader:


#version 330

in vec3 inPos;
in vec2 inUV;
in vec3 inNormal;
in vec3 inBinormal;
in vec3 inTangent;

uniform mat4 wvpMatrix;

out vec4 mWVPPosition;
out vec3 pNormal;
out vec3 pBinormal;
out vec3 pTangent;
out vec2 texCoord;

void main(void) {
	mWVPPosition = wvpMatrix * vec4(inPos, 1.0f);
	gl_Position = mWVPPosition;

	pNormal = inNormal;
	pBinormal = inBinormal;
	pTangent = inTangent;

	texCoord = inUV;
}

GeomFragShader


#version 330

in vec4 mWVPPosition;
in vec3 pNormal;
in vec3 pBinormal;
in vec3 pTangent;
in vec2 texCoord;

uniform mat4 wvpMatrix;

uniform sampler2D diffuseTexture;
uniform sampler2D normalTexture;
uniform sampler2D heightTexture;
uniform sampler2D specularTexture;

layout (location = 0) out vec4 colourOut;   
layout (location = 1) out vec4 normalOut;


void main(void) {
	vec3 bump = 2.0 * texture(normalTexture, texCoord).xyz - 1.0;

	vec3 normal = pTangent * bump.x + pBinormal * bump.y + pNormal * bump.z;
	normal = normalize(normal);

	colourOut = texture(diffuseTexture, texCoord);
	// Specular intensity
	vec3 specularSample = texture(specularTexture, texCoord).xyz;
	colourOut.w = (specularSample.x + specularSample.y + specularSample.z) / 3.0;

	normalOut.xyz = normal;
	normalOut.w = 1.0;
}

PointLightVertShader


#version 330

in vec3 inPos;
in vec2 inUV;

uniform mat4 wvpMatrix;
uniform mat4 ivpMatrix;
uniform vec2 zBounds;
uniform vec3 camPos;
uniform float invRadius;

uniform vec3 lightPos;
uniform vec3 lightColour;
uniform float lightRadius;
uniform float lightFalloff;

out vec4 mWVPPosition;

void main(void) {

	vec3 position = inPos;

	position *= lightRadius;
	position += lightPos;

	mWVPPosition = wvpMatrix * vec4(position, 1.0f);
	gl_Position = mWVPPosition;
}

PointLightFragShader


#version 330

in vec4 mWVPPosition;

uniform mat4 wvpMatrix;
uniform mat4 ivpMatrix;
uniform vec2 zBounds;
uniform vec3 camPos;
uniform float invRadius;

uniform vec3 lightPos;
uniform vec3 lightColour;
uniform float lightRadius;
uniform float lightFalloff;

uniform sampler2D diffuseTexture;
uniform sampler2D normalTexture;
uniform sampler2D depthTexture;

layout (location = 0) out vec4 colourOut;

void main(void) {

	vec2 UV = mWVPPosition.xy;
	
	float depth = texture(diffuseTexture, UV).x;

	vec3 addedLight = vec3(0,0,0);

	//if (depth >= zBounds.x && depth <= zBounds.y) 
	{
		
		vec4 diffuseTex = texture(diffuseTexture, UV);
		vec4 normal = texture(normalTexture, UV);

		// Screen-space position
		vec4 cPos = vec4(UV, depth, 1);
		
		// World-space position
		vec4 wPos = ivpMatrix * cPos;
		vec3 pos = wPos.xyz / wPos.w;

		// Lighting vectors
		vec3 lVec = (lightPos - pos) * invRadius;
		vec3 lightVec = normalize(lVec);
		vec3 viewVec = normalize(camPos - pos);

		// Attenuation that falls off to zero at light radius
		float atten = clamp(1.0f - dot(lVec, lVec), 0.0, 1.0);
		atten *= atten;

		// Lighting
		float colDiffuse = clamp(dot(lightVec, normal.xyz), 0, 1);
		float specular_intensity = diffuseTex.w * 0.4f;
		float specular = specular_intensity * pow(clamp(dot(reflect(-viewVec, normal.xyz), lightVec), 0.0, 1.0), 10.0f);

		addedLight = atten * (colDiffuse * diffuseTex.xyz + specular);
	}

	colourOut = vec4(addedLight.xyz, 1);
}

Note that for the moment I am totally ignoring the optimisation "if (depth >= zBounds.x && depth <= zBounds.y)", because I want to crack the basic reconstruction before experimenting with it.

Shader Binding:


		Matrix4f inverseViewProjection = new Matrix4f();

		Vector3f camPos = cameraController.getActiveCameraPos();
		
		GL20.glUniform3f(shader.getLocCamPos(), camPos.x, camPos.y, camPos.z);
		
		inverseViewProjection = cameraController.getActiveVPMatrixInverse();
		
		//inverseViewProjection = inverseViewProjection.translate(new Vector3f(-1f, 1f, 0));
		//inverseViewProjection = inverseViewProjection.scale(new Vector3f(2, -2, 1));
		
		inverseViewProjection = inverseViewProjection.scale(new Vector3f(1f/engineParams.getDisplayWidth(), 1f/engineParams.getDisplayHeight(), 1));
		
		GL20.glUniformMatrix4(shader.getLocmIVPMatrix(), false, OpenGLHelper.getMatrix4ScratchBuffer(inverseViewProjection));
		
		float nearTest = 0, farTest = 0;
		
		Matrix4f projection = new Matrix4f(cameraController.getCoreCameraProjection());
		
		GL20.glUniformMatrix4(shader.getLocmWVP(), false, OpenGLHelper.getMatrix4ScratchBuffer(cameraController.getActiveViewProjectionMatrix()));
		
		Vector2f zw = new Vector2f(projection.m22, projection.m23);
		
		//Vector4f testLightViewSpace = new Vector4f(lightPos.getX(), lightPos.getY(), lightPos.getZ(), 1);
		
		//testLightViewSpace = OpenGLHelper.columnVectorMultiplyMatrixVector((Matrix4f)cameraController.getActiveCameraView(), testLightViewSpace);
		
		// Compute z-bounds
		Vector4f lPos = OpenGLHelper.columnVectorMultiplyMatrixVector(cameraController.getActiveCameraView(), new Vector4f(lightPos.x, lightPos.y, lightPos.z, 1.0f));
		float z1 = lPos.z + lightRadius;

		//if (z1 > NEAR_DEPTH)
		{
			float z0 = Math.max(lPos.z - lightRadius, NEAR_DEPTH);

			nearTest = (zw.x + zw.y / z0);
			farTest = (zw.x + zw.y / z1);

			if (nearTest > 1) {
				nearTest = 1;
			} else if (nearTest < 0) {
				nearTest = 0;
			}
			
			if (farTest > 1) {
				farTest = 1;
			} else if (farTest < 0) {
				farTest = 0;
			}
			
			GL20.glUniform3f(shader.getLocLightPos(), lightPos.getX(), lightPos.getY(), lightPos.getZ());
			GL20.glUniform3f(shader.getLocLightColour(), lightColour.getX(), lightColour.getY(), lightColour.getZ());
			GL20.glUniform1f(shader.getLocLightRadius(), lightRadius);
			GL20.glUniform1f(shader.getLocInvRadius(), 1f/lightRadius);
			GL20.glUniform1f(shader.getLocLightFalloff(), lightFalloff);
			
			GL20.glUniform2f(shader.getLocZBounds(), nearTest, farTest);
		}
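
For reference (assuming the usual GL convention of a camera looking down -z in view space), the z-bounds above are meant to bracket the light sphere in post-projection depth. With the projection defined further down, a view-space depth z maps to an NDC depth of

d(z) = (f + n) / (f - n) + 2fn / ((f - n) * z)

so d(-n) = -1 and d(-f) = 1. Note that this lands in [-1..1], while a depth texture is sampled in [0..1], so one side of any comparison needs the usual * 0.5 + 0.5 remapping.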

The line "inverseViewProjection = cameraController.getActiveVPMatrixInverse();" depends on the multiplied result of the inverse of these two:

View Matrix


	public void updateViewMatrix(Matrix4f coreViewMatrix) {
		
 		Matrix4f.setIdentity(coreViewMatrix);
		
 		if (lookAtVector.length() != 0) {
 			lookAtVector.normalise();
 		}
		Vector3f.cross(up, lookAtVector, right);
		if (right.length() != 0) {
			right.normalise();
		}
		Vector3f.cross(lookAtVector, right, up);
		if (up.length() != 0) {
			up.normalise();
		}
		
		coreViewMatrix.m00 = right.x;
		coreViewMatrix.m01 = up.x;
		coreViewMatrix.m02 = lookAtVector.x;
		coreViewMatrix.m03 = 0;
		
		coreViewMatrix.m10 = right.y;
		coreViewMatrix.m11 = up.y;
		coreViewMatrix.m12 = lookAtVector.y;
		coreViewMatrix.m13 = 0;
		
		coreViewMatrix.m20 = right.z;
		coreViewMatrix.m21 = up.z;
		coreViewMatrix.m22 = lookAtVector.z;
		coreViewMatrix.m23 = 0;
		
		//Inverse dot from eye position
		coreViewMatrix.m30 = -Vector3f.dot(eyePosition, right);
		coreViewMatrix.m31 = -Vector3f.dot(eyePosition, up);
		coreViewMatrix.m32 = -Vector3f.dot(eyePosition, lookAtVector);
		coreViewMatrix.m33 = 1;
	}
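
For reference, this builds the look-at matrix directly in its final form: with column vectors the view matrix is

V = R * T(-eyePosition)

where the rows of R are the camera basis vectors (right, up, lookAtVector). Multiplying that out yields a translation of (-dot(eye, right), -dot(eye, up), -dot(eye, lookAtVector)), which is exactly what the last block of assignments writes.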

Projection Matrix:


	public static void createProjection(Matrix4f projectionMatrix, float fov, float aspect, float znear, float zfar) {

		float scale = (float) Math.tan((Math.toRadians(fov)) * 0.5f) * znear;
	    float r = aspect * scale;
	    float l = -r;
	    float t = scale;
	    float b = -t;
		
		projectionMatrix.m00 = 2 * znear / (r-l);
		projectionMatrix.m01 = 0;
		projectionMatrix.m02 = 0;
		projectionMatrix.m03 = 0;

		projectionMatrix.m10 = 0;
		projectionMatrix.m11 = 2 * znear / (t-b);
		projectionMatrix.m12 = 0;
		projectionMatrix.m13 = 0;

		projectionMatrix.m20 = (r + l) / (r-l);
		projectionMatrix.m21 = (t+b)/(t-b);
		projectionMatrix.m22 = -(zfar + znear) / (zfar-znear);
		projectionMatrix.m23 = -1;

		projectionMatrix.m30 = 0;
		projectionMatrix.m31 = 0;
		projectionMatrix.m32 = -2 * zfar * znear / (zfar - znear);
		projectionMatrix.m33 = 0;
	}
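
For reference, read with column vectors, these entries are aiming at the standard OpenGL perspective matrix:

[ 2n/(r-l)     0      (r+l)/(r-l)       0      ]
[    0     2n/(t-b)   (t+b)/(t-b)       0      ]
[    0         0     -(f+n)/(f-n)  -2fn/(f-n)  ]
[    0         0          -1            0      ]

Whether the code fills in this matrix or its transpose hinges on whether the framework treats mAB as row A, column B or as column A, row B.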

TLDR:

Please help me diagnose what is wrong with the lighting in the picture/code above. My holy grail is a working sample of true depth reconstruction in OpenGL, preferably to world space.


Ok, so after a quick glance over your code, I found the following. Maybe some of it helps:

1. createProjection

I don't know what LinAlg framework you are using, but when specifying elements of a matrix it is customary to denote the row with the first index and the column with the second index. It is also customary in OpenGL to use column vectors. If you are following both customs, then the projection matrix you create is transposed.
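
As a concrete illustration of the column-major convention (a sketch with made-up values, not your framework's API): GLSL's own mat4 constructor consumes columns, and the matrix multiplies the vector from the left:

// Column-major: the translation lives in the last COLUMN.
vec3 t = vec3(1.0, 2.0, 3.0); // example translation
mat4 T = mat4(
	1.0, 0.0, 0.0, 0.0,  // column 0
	0.0, 1.0, 0.0, 0.0,  // column 1
	0.0, 0.0, 1.0, 0.0,  // column 2
	t.x, t.y, t.z, 1.0); // column 3
vec4 p = T * vec4(5.0, 6.0, 7.0, 1.0); // yields (6, 8, 10, 1)

If your CPU-side storage is row-major instead, the transpose flag of glUniformMatrix4 (the boolean your binding code passes as false) can fix it up at upload time.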

2. mWVPPosition

In the fragment shader, mWVPPosition is the projective clip space position of the sample on the light's sphere. You get the euclidean clip space position by dividing by w:


vec3 euclClipSpacePosition = mWVPPosition.xyz / mWVPPosition.w;

This means that using mWVPPosition.xy as the UV coordinate doesn't make any sense. The correct UV coordinate would be


    vec2 UV = euclClipSpacePosition.xy * 0.5 + vec2(0.5);

3. world space reconstruction

This is the gist of your world space reconstruction:


    // vec2 UV = mWVPPosition.xy; << this is wrong!
    vec2 UV = euclClipSpacePosition.xy * 0.5 + vec2(0.5);

    float depth = texture(diffuseTexture, UV).x;

    // Screen-space position
    vec4 cPos = vec4(UV, depth, 1);

    // World-space position
    vec4 wPos = ivpMatrix * cPos;

Now, I assume that cPos should contain the clip space position (even though you wrote screen space in the comment). Based on how you create your projection matrix, your clip space ranges from -1 to 1 along the x, y and z axes. However, your UV coordinate only ranges from 0 to 1, and the same goes for your depth (I think the depth from a depth texture is normalized to 0-1, not 100% sure though). So, in order to get the "true" clip space position, you need to do


cPos = vec4(UV * 2 - vec2(1, 1), depth*2 - 1, 1);

or, even better


cPos = vec4(euclClipSpacePosition.xy, depth*2 - 1, 1);

If you do that, and if ivpMatrix truly contains vpMatrix^-1 (which it doesn't in your case), then you should get the correct world space position.
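
Tying points 2 and 3 together, a minimal sketch of the whole reconstruction (assuming the GL default depth range [0..1] and that ivpMatrix is the plain, unscaled inverse of the view projection matrix; note it samples depthTexture, whereas the snippets above inherited your diffuseTexture lookup):

vec3 euclClipSpacePosition = mWVPPosition.xyz / mWVPPosition.w; // perspective divide
vec2 UV = euclClipSpacePosition.xy * 0.5 + vec2(0.5);           // [-1..1] -> [0..1]

float depth = texture(depthTexture, UV).x * 2.0 - 1.0;          // [0..1] -> [-1..1]

vec4 cPos = vec4(euclClipSpacePosition.xy, depth, 1.0);         // NDC position
vec4 wPos = ivpMatrix * cPos;                                   // back through VP^-1
vec3 pos = wPos.xyz / wPos.w;                                   // homogeneous divide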

4. computing ivpMatrix

You compute the inverse projection matrix as


inverseViewProjection = cameraController.getActiveVPMatrixInverse();
inverseViewProjection = inverseViewProjection.scale(new Vector3f(1f/engineParams.getDisplayWidth(), 1f/engineParams.getDisplayHeight(), 1));

I don't really know, why you would be scaling with the reciprocal display size here. Just skip that part for now.

Now, using the inverse projection view matrix is probably not the fastest way, but it is the easiest, so until you get things running with that method, you should probably not try to do any fancy optimizations. Also, didn't somebody on this forum create a tutorial on world space reconstruction using the inverse VP matrix?

That was very concise and informative, thank you so much!

Also, didn't somebody on this forum create a tutorial on world space reconstruction using the inverse VP matrix?

I took a look but did not find anything in a very quick search; I will search again based on the profile. I have seen roughly four threads on this forum specifically relating to reconstruction.

Point 1: it is [row][column]; I am sitting atop the LWJGL maths framework. I experimented with transposing the projection matrix post-construction, but I noted no visual change in behaviour.

Point 2: excellent correction, a mistake in my understanding.

Point 3: you were absolutely right, and you also cleared up my concern about depth normalisation; I knew OpenGL was -1 to 1, I just didn't know when to modify elements to best utilise the depth buffer.

Point 4: removing the scalar has got me something more consistent.

I am still trying to identify what I am looking at now:

The point light is creating a cone or event horizon aligned to the angle between the camera and the centre of the point light (when viewed from approximately x -1, y 0.5, z -1):

pointLightConeToCameraA.jpg

Note that the rotation of the camera appears to have offset the light in a way I had not anticipated.

cameraRotationInfluencingLightB.jpg

I will keep experimenting; here are the corrections based on your analysis:

PointLightFragShader:


#version 330

in vec4 mWVPPosition;

uniform mat4 wvpMatrix;
uniform mat4 ivpMatrix;
uniform vec2 zBounds;
uniform vec3 camPos;
uniform float invRadius;

uniform vec3 lightPos;
uniform vec3 lightColour;
uniform float lightRadius;
uniform float lightFalloff;

uniform sampler2D diffuseTexture;
uniform sampler2D normalTexture;
uniform sampler2D depthTexture;

layout (location = 0) out vec4 colourOut;

void main(void) {

	vec3 euclClipSpacePosition = mWVPPosition.xyz / mWVPPosition.w;

	vec2 UV = euclClipSpacePosition.xy * 0.5 + vec2(0.5);

	float depth = texture(diffuseTexture, UV).x;

	vec3 addedLight = vec3(0,0,0);

	//if (depth >= zBounds.x && depth <= zBounds.y) 
	{
		
		vec4 diffuseTex = texture(diffuseTexture, UV);
		vec4 normal = texture(normalTexture, UV);

		// Clip-space position
		vec4 cPos = vec4(euclClipSpacePosition.xy, depth*2 -1, 1);
		
		// World-space position
		vec4 wPos = ivpMatrix * cPos;
		vec3 pos = wPos.xyz / wPos.w;

		// Lighting vectors
		vec3 lVec = (lightPos - pos) * invRadius;
		vec3 lightVec = normalize(lVec);
		vec3 viewVec = normalize(camPos - pos);

		// Attenuation that falls off to zero at light radius
		float atten = clamp(1.0f - dot(lVec, lVec), 0.0, 1.0);
		atten *= atten;

		// Lighting
		float colDiffuse = clamp(dot(lightVec, normal.xyz), 0, 1);
		float specular_intensity = diffuseTex.w * 0.4f;
		float specular = specular_intensity * pow(clamp(dot(reflect(-viewVec, normal.xyz), lightVec), 0.0, 1.0), 10.0f);

		addedLight = atten * (colDiffuse * diffuseTex.xyz + specular);
	}

	colourOut = vec4(addedLight.xyz, 1);
}

Shader Binding:


		Matrix4f inverseViewProjection = new Matrix4f();

		Vector3f camPos = cameraController.getActiveCameraPos();
		
		GL20.glUniform3f(shader.getLocCamPos(), camPos.x, camPos.y, camPos.z);
		
		inverseViewProjection = cameraController.getActiveVPMatrixInverse();
		
		//inverseViewProjection = inverseViewProjection.translate(new Vector3f(-1f, 1f, 0));
		//inverseViewProjection = inverseViewProjection.scale(new Vector3f(2, -2, 1));
		
		//inverseViewProjection = inverseViewProjection.scale(new Vector3f(1f/engineParams.getDisplayWidth(), 1f/engineParams.getDisplayHeight(), 1));
		
		GL20.glUniformMatrix4(shader.getLocmIVPMatrix(), false, OpenGLHelper.getMatrix4ScratchBuffer(inverseViewProjection));
		
		float nearTest = 0, farTest = 0;
		
		Matrix4f projection = new Matrix4f(cameraController.getCoreCameraProjection());
		
		GL20.glUniformMatrix4(shader.getLocmWVP(), false, OpenGLHelper.getMatrix4ScratchBuffer(cameraController.getActiveViewProjectionMatrix()));
		
		Vector2f zw = new Vector2f(projection.m22, projection.m23);
		
		//Vector4f testLightViewSpace = new Vector4f(lightPos.getX(), lightPos.getY(), lightPos.getZ(), 1);
		
		//testLightViewSpace = OpenGLHelper.columnVectorMultiplyMatrixVector((Matrix4f)cameraController.getActiveCameraView(), testLightViewSpace);
		
		// Compute z-bounds
		Vector4f lPos = OpenGLHelper.columnVectorMultiplyMatrixVector(cameraController.getActiveCameraView(), new Vector4f(lightPos.x, lightPos.y, lightPos.z, 1.0f));
		float z1 = lPos.z + lightRadius;

		//if (z1 > NEAR_DEPTH)
		{
			float z0 = Math.max(lPos.z - lightRadius, NEAR_DEPTH);

			nearTest = (zw.x + zw.y / z0);
			farTest = (zw.x + zw.y / z1);

			if (nearTest > 1) {
				nearTest = 1;
			} else if (nearTest < 0) {
				nearTest = 0;
			}
			
			if (farTest > 1) {
				farTest = 1;
			} else if (farTest < 0) {
				farTest = 0;
			}
			
			GL20.glUniform3f(shader.getLocLightPos(), lightPos.getX(), lightPos.getY(), lightPos.getZ());
			GL20.glUniform3f(shader.getLocLightColour(), lightColour.getX(), lightColour.getY(), lightColour.getZ());
			GL20.glUniform1f(shader.getLocLightRadius(), lightRadius);
			GL20.glUniform1f(shader.getLocInvRadius(), 1f/lightRadius);
			GL20.glUniform1f(shader.getLocLightFalloff(), lightFalloff);
			
			GL20.glUniform2f(shader.getLocZBounds(), nearTest, farTest);
		}

Now, using the inverse projection view matrix is probably not the fastest way, but it is the easiest, so until you get things running with that method, you should probably not try to do any fancy optimizations

- Absolutely. I have seen some of the optimisation tricks, but until I have a firm handle on the basics I will stick to getting them going.

It always helps for debugging to output individual variables as colors and check if they behave as expected. For example, after

vec3 pos = wPos.xyz / wPos.w;

you could do a

colourOut = vec4(pos * 0.01, 1); // << play around with that factor until you see something
return;

Looking at your code I see that you are still scaling the inverse projection view matrix. Is that a copy&paste mistake?


A quick note about "experimenting" with this:
One of the incredibly useful properties, if not THE most useful property, of anything math related is that you do not need to "guess"; you can "reason" about it. I don't know how good your understanding of projective spaces is, but if you feel that you don't understand certain aspects of it, take the time to read up on it. Later on it will save you the time you invested many times over.

I have identified that the point of failure still centers around the camera or camera position in the reconstruction.

I did some diagnostics by visualising the viewVec around the camera in both the DX sample and the GL port. The flipped UV seems fine to me; the issue I can see is that when my camera moves, the viewVec does not move with it:

DX:

viewSpaceDX.jpg

GL (before camera movement):

viewSpaceGL.jpg

GL (after moving):

viewSpaceGLMoved.jpg

I will keep experimenting; keep me posted if you spot the bug :)

Looking at your code I see that you are still scaling the inverse projection view matrix. Is that a copy&paste mistake?

- Yes sorry about that! Corrected it now.

A quick note about "experimenting" with this:
One of the incredibly useful properties, if not THE most useful property, of anything math related is that you do not need to "guess"; you can "reason" about it. I don't know how good your understanding of projective spaces is, but if you feel that you don't understand certain aspects of it, take the time to read up on it. Later on it will save you the time you invested many times over.

- It is clear my understanding is shaky. I can reason my way out of most scenarios, and I know to analyse things in colour as you suggested :) I think the number of potential points of failure for each visual error made me conscious that I might miss some subtle nuance and have it cost me more time than I can imagine, but I appreciate the frank fact that I need a firm handle on these processes before I move on from this topic. I want to get precomputed atmospheric scattering working, and this reconstruction is the sticking point; all my attempts to get it practically going have failed. I wanted a foundation I could refer to as an iron-clad solution, so I can break it and understand how it comes together from there. It is not my usual approach, but it is (in my opinion, with my background) faster than a ground-up approach with something this complex.

A good example of my rationale for approaching this top-down is that there are so many unknowns; consider this from the original sample:


// Pre-scale-bias the matrix so we can use the screen position directly
float4x4 viewProjInv = (!viewProj) * (translate(-1.0f, 1.0f, 0.0f) * scale(2.0f, -2.0f, 1.0f));

renderer->setShaderConstant4x4f("ViewProjInv", viewProjInv * scale(1.0f / width, 1.0f / height, 1.0f));

Why does he have to scale his inverse view projection into what I believe would be defined as NDC before applying it?

Analysis has not gone well; my light is still dependent on the viewer position. I have noticed some old unpleasantries from my original delve into reconstruction. Here are some pictures I am curious to understand; shader code is at the end, and all the light values are just attenuation into RGB:

straightDown.jpg

Above is the light at 0,0,0 viewed from 0,0.5,0:

closeUp.jpg

Above is the light from close up (-1,0.5,-1 ish).

clouseUpSide.jpg

Above is the box from roughly 0,0.5,-1. Note that the intensity looks relatively accurate; I am not sure quite why the clipping for the light is not much more closely in line with the clipping for the camera (I want to write it off as a precision issue, but it is much too early to do so):

Here is a snapshot of the box from various distances:

distanceBehaviour.jpg

  • The corner marked as A is always the same.
  • Top Left - The box at a distance where no lighting is visible far outside the light sphere.
  • Top Right - The box from near the edge of the light sphere, note that the far corners and corner A are becoming lit at the same time.
  • Bottom Left - The box closer up (the intensity is much lower than at 0,0,0!); the unpleasant flaw I want to focus on is point x, the centre of the light.
  • Bottom Right - The box from the same position as the Bottom Left image with the camera turned right. Note that the light's projected position has moved opposite to the camera tilt, mirrored about the screen's centre; this behaviour shows up in both pitch and yaw via the x and y axes.

Shader code for reference:


	vec3 euclClipSpacePosition = mWVPPosition.xyz / mWVPPosition.w;

	vec2 UV = euclClipSpacePosition.xy * 0.5 + vec2(0.5);

	float depth = texture(diffuseTexture, UV).x * 2 -1;

	vec3 addedLight = vec3(0,0,0);

	vec4 diffuseTex = texture(diffuseTexture, UV);
	vec4 normal = texture(normalTexture, UV);

	// Clip-space position
	vec4 cPos = vec4(euclClipSpacePosition.xy, depth, 1);
	
	// World-space position
	vec4 wPos = ivpMatrix * cPos;
	vec3 pos = wPos.xyz / wPos.w;

	// Lighting vectors, lightPos = 0,0,0, invRadius = 1 for the moment.
	vec3 lVec = (lightPos - pos.xyz) * invRadius;
	vec3 lightVec = normalize(lVec);
	vec3 viewVec = normalize(camPos - pos.xyz);

	// Attenuation that falls off to zero at light radius
	float atten = clamp(1.0f - dot(lVec, lVec), 0.0, 1.0);
	atten *= atten;

Any hints welcome, still chugging along but as you can see above I am not having a good time of it!

Not sure if this was for debugging or something, but I think you are sampling the wrong texture:
float depth = texture(diffuseTexture, UV).x * 2 -1; // << Did you mean depthTexture?

A good example of my rationale for approaching this top-down is that there are so many unknowns; consider this from the original sample:

// Pre-scale-bias the matrix so we can use the screen position directly
float4x4 viewProjInv = (!viewProj) * (translate(-1.0f, 1.0f, 0.0f) * scale(2.0f, -2.0f, 1.0f));

renderer->setShaderConstant4x4f("ViewProjInv", viewProjInv * scale(1.0f / width, 1.0f / height, 1.0f));
Why does he have to scale his inverse view projection into what I believe would be defined as NDC before applying it?


Well, let's try it with the power of math:

vec4 projectiveWorldSpacePosition = viewProjInv * vec4(someVector, depth*2-1, 1);

Plugging in the above, we get:

projectiveWorldSpacePosition = viewProj^-1 * translate(-1.0f, 1.0f, 0.0f) * scale(2.0f, -2.0f, 1.0f) * scale(1.0f / width, 1.0f / height, 1.0f) * vec4(someVector, depth*2-1, 1);

Splitting that up gives us:

A = scale(1.0f / width, 1.0f / height, 1.0f) * vec4(someVector, depth*2-1, 1);
B = translate(-1.0f, 1.0f, 0.0f) * scale(2.0f, -2.0f, 1.0f) * A;
projectiveWorldSpacePosition = viewProj^-1 * B;

Since he is dividing the initial vector by the width/height, that indicates that he starts out with the pixel location, and not some normalized UV or screen space location. After dividing by width and height you get something similar to a UV coordinate. So A is, in a sense, vec4(UV, depth*2-1, 1). Next he does B = A * vec4(2, -2, 1, 1) + vec4(-1, 1, 0, 0). That is essentially the same operation as cPos = vec4(UV * 2 - vec2(1, 1), depth*2 - 1, 1), only that his y-axis needs mirroring.

So, to round things up: the matrix that gets computed in the code snippet is, contrary to its name, NOT the inverse view projection matrix. It does contain the inverse view projection matrix, but some of the scaling I told you to perform in the pixel shader has been "baked" into it, and it starts from a pixel location in the range [0..width-1] and [0..height-1], not a normalized location in the range [-1..1]. Without looking at his shader code, I would predict that he is using gl_FragCoord instead of a custom interpolant/varying.
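
For completeness, a minimal GLSL sketch of that gl_FragCoord style (screenSize is a hypothetical uniform holding the viewport size in pixels; GL's gl_FragCoord already has y pointing up, so no mirroring is needed):

uniform vec2 screenSize; // hypothetical: viewport width/height in pixels

vec2 UV = gl_FragCoord.xy / screenSize;                // pixel -> [0..1]
float depth = texture(depthTexture, UV).x * 2.0 - 1.0; // [0..1] -> [-1..1]
vec4 cPos = vec4(UV * 2.0 - 1.0, depth, 1.0);          // NDC position
vec4 wPos = ivpMatrix * cPos;                          // plain VP^-1, nothing pre-baked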


I think the key problem with sample code is that many tweaks and variations can be made, people tend to choose bad names for their variables, and if you try to combine code from multiple samples without actually knowing what is happening, things become inconsistent and break.


The images of the lit environment sadly don't give me any usable pointers. I haven't looked at your lighting code, only at the world space position reconstruction, so there may be problems in the lighting as well. I would stick with the colored view of the world space position for now. Make sure that the scene and the lights are rendered with the same view projection matrix, and that the inverse view projection matrix used in the lighting pass is actually the inverse of that matrix (does multiplying them give the identity matrix?).
Do the values you bind to the uniforms actually arrive in the shader? Are you binding and sampling the correct textures?
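
One way to run that inverse check entirely inside the shader (a debug sketch using the names from the corrected fragment shader above): re-project the reconstructed world position with the same wvpMatrix; if ivpMatrix really is the inverse, the difference is zero and the screen stays black:

// Debug: round-trip the reconstructed position through the forward VP.
vec4 reproj = wvpMatrix * vec4(pos, 1.0);
vec3 ndc = reproj.xyz / reproj.w;
colourOut = vec4(abs(ndc - cPos.xyz) * 100.0, 1.0); // black => ivpMatrix is truly VP^-1
return;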

I uncovered a couple of mistakes thanks to your explanation, and discovered a key bug whereby an optimisation to my ambient lighting phase got the normal texture plumbed into my point lighting phase where I had expected my depth texture to be. When the depth reconstruction was corrected I could see the texture in a corner, and I realised immediately why so many things had gone so wrong and made me doubt some core understanding which, whilst shaky, was valid.

float depth = texture(diffuseTexture, UV).x * 2 -1; // << Did you mean depthTexture?

- Yes, this was intentional experimentation, although it echoed the core mistake I had made.

I also just realised that one big caveat is that I have been plumbing a normal map in as a bump map, which is another reason the behaviour was so peculiar.

I am not out of the woods yet. Here is what I have so far:

normalWorking.jpg

+

normalWorkingB.jpg

The remaining hurdle involves the negative normals of my cube being lit when the light is hitting the back of the surface (as if the normal is the inverse of the way I would like it to be). I have tried many permutations of NBT on the cube face I have left empty, but even with no NBT data the flat lighting still behaves as though the face was facing the other way, so I am led to believe it has to be a bug in my lighting.

normalsInverse.jpg

The faces toward the camera become unlit as the light approaches what should be their normal. The behaviour is as if both faces on each axis had normals facing toward the positive direction. I am not sure if my home-brew cube is at fault or my shader code:

Here are both for those with interest:

VertGeom:


#version 330

in vec3 inPos;
in vec2 inUV;
in vec3 inNormal;
in vec3 inBinormal;
in vec3 inTangent;

uniform mat4 mWorld; //World matrix of object with all transformations (excluding view and projection)
uniform vec3 v3EyePos; //The location of the camera untransformed
uniform mat4 wvpMatrix;
uniform mat4 mWVMatrix;
uniform mat4 mWUnscaled; //World matrix of object with rotation and movement but no scale
uniform int materialId;

uniform float texDepthScale; //Depth of the parallax map (height-map effect in layman's terms)
uniform float texBias; //Bias of the parallax map

out vec4 mWVPPosition;
out vec2 texCoord;
out vec3 v3ViewDir; // was missing; the fragment shader reads this interpolant
out mat3 tbnMatrix;

void main(void) {
	mWVPPosition = wvpMatrix * vec4(inPos, 1.0f);
	gl_Position = mWVPPosition;

	texCoord = inUV;

	vec4 v4WorldPos = mWorld * vec4(inPos, 1.0f);

	mat3 mTBNMatrix = mat3(mWUnscaled) * mat3(inTangent, inBinormal, inNormal);

	v3ViewDir = mTBNMatrix * (v3EyePos - v4WorldPos.xyz);

	tbnMatrix = mTBNMatrix;
}

FragGeom:


#version 330

in vec4 mWVPPosition;
in vec2 texCoord;
in vec3 v3ViewDir;
in mat3 tbnMatrix;
in vec3 vNormal;
in vec3 vBinormal;
in vec3 vTangent;

uniform mat4 mWorld; //World matrix of object with all transformations (excluding view and projection)
uniform vec3 v3EyePos; //The location of the camera untransformed
uniform mat4 wvpMatrix;
uniform mat4 mWVMatrix;
uniform mat4 mWUnscaled; //World matrix of object with rotation and movement but no scale
uniform int materialId;

uniform float texDepthScale; //Depth of the parallax map (height-map effect in layman's terms)
uniform float texBias; //Bias of the parallax map

uniform sampler2D diffuseTexture;
uniform sampler2D normalTexture;
uniform sampler2D heightTexture;
uniform sampler2D specularTexture;

layout (location = 0) out vec4 colourOut;   
layout (location = 1) out vec4 normalOut;

void main(void) {

	//Determine the height of this pixel
	float fltHeight = texDepthScale * texture(heightTexture, texCoord).r - texBias;

	//Compute the new texture coordinate to use based on the parallax
	vec2 v2UVHeightDisplacement = (fltHeight * v3ViewDir).xy;

	//Create true offset
	v2UVHeightDisplacement += texCoord;

	colourOut = texture(diffuseTexture, v2UVHeightDisplacement);

	//Specular exponent
	vec3 specExp = texture(specularTexture, v2UVHeightDisplacement).rgb;
	colourOut.w = (specExp.x + specExp.y + specExp.z) / 3;

	vec3 normalMap = texture(normalTexture, v2UVHeightDisplacement).xyz * 2.0f - 1.0f;
	normalMap = tbnMatrix * normalMap;
	normalMap = normalize(normalMap.xyz);
	normalOut.xyz = 0.5f * (normalMap + 1.0f);

	normalOut.w = 1;
}

VertPointLight:


#version 330

in vec3 inPos;
in vec2 inUV;

uniform mat4 wvpMatrix;
uniform mat4 ivpMatrix;
uniform mat4 mWVMatrix;
uniform vec2 zBounds;
uniform vec3 camPos;
uniform float invRadius;

uniform vec3 lightPos;
uniform vec3 lightColour;
uniform float lightRadius;
uniform float lightFalloff;

uniform float projNFLinearScalarA;
uniform float projNFLinearScalarB;

out vec4 mWVPPosition;
out vec3 viewRay;

void main(void) {

	vec3 position = inPos;

	position *= lightRadius;
	position += lightPos;

	mWVPPosition = wvpMatrix * vec4(position, 1.0f);
	gl_Position = mWVPPosition;
}

GeomPointLight:


#version 330

in vec4 mWVPPosition;
in vec3 viewRay;

uniform mat4 wvpMatrix;
uniform mat4 ivpMatrix;
uniform mat4 mWVMatrix;
uniform vec2 zBounds;
uniform vec3 camPos;
uniform float invRadius;

uniform vec3 lightPos;
uniform vec3 lightColour;
uniform float lightRadius;
uniform float lightFalloff;

uniform float projNFLinearScalarA;
uniform float projNFLinearScalarB;

uniform sampler2D diffuseTexture;
uniform sampler2D normalTexture;
uniform sampler2D depthTexture;

layout (location = 0) out vec4 colourOut;

void main(void) {

	vec3 euclClipSpacePosition = mWVPPosition.xyz / mWVPPosition.w;

	vec2 UV = euclClipSpacePosition.xy * 0.5 + vec2(0.5);

	float depth = texture(depthTexture, UV).x * 2 -1;

	vec3 addedLight = vec3(0,0,0);

	//if (depth >= zBounds.x && depth <= zBounds.y)  
	{
		vec4 diffuseTex = texture(diffuseTexture, UV);
		vec4 normal = texture(normalTexture, UV);//2.0f * texture(normalTexture, UV) - 1.0f;

		// Clip-space position
		vec4 cPos = vec4(euclClipSpacePosition.xy, depth, 1);
		
		// World-space position
		vec4 wPos = ivpMatrix * cPos;
		vec3 pos = wPos.xyz / wPos.w;
	
		// Lighting vectors
		vec3 lVec = (lightPos - pos) * invRadius;
		vec3 lightVec = normalize(lVec);
		vec3 viewVec = normalize(camPos - pos);

		// Attenuation that falls off to zero at light radius
		float atten = clamp(1.0f - dot(lVec, lVec), 0.0f, 1.0f);
		atten *= atten;
		
		// Lighting
		float diffuse = clamp(dot(normal.xyz, lightVec), 0.0f, 1.0f);
		float specular_intensity = diffuseTex.w * 0.4f;
		float specular = specular_intensity * pow(clamp(dot(reflect(-viewVec, normal.xyz), lightVec),0.0f, 1.0f), 10.0f);

		addedLight = (diffuseTex.xyz + specular) * vec3(diffuse,diffuse,diffuse) * atten;// * (diffuse * diffuseTex + specular);

	}

	colourOut = vec4(addedLight.xyz, 1);
}
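
One thing worth flagging while comparing the two shaders above (an observation, not a confirmed fix): the geometry pass packs the normal into the G-buffer with normalOut.xyz = 0.5f * (normalMap + 1.0f), but the matching decode in the lighting pass is currently commented out. If the encode stays, the lighting pass needs the inverse mapping before the normal is used:

vec4 normal = texture(normalTexture, UV);
normal.xyz = normal.xyz * 2.0 - 1.0; // undo the 0.5 * (n + 1) packing from the geometry pass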

The cube's vertex data being provided to the shader is an iron-clad guarantee. The uncertainty is how badly I have set up the NBT data; I have tried many permutations to flip the normal with no success:


To keep this simple I have just provided indices and a vertex definition.

		indices = new byte[] {
				0, 1, 2,//Front (this is the negative z face with no normal that I am struggling with)
				2, 3, 0,
				6, 5, 4,//Back
				4, 7, 6,
				8, 9, 10,//Top
				10, 11, 8,
				14, 13, 12,//Bottom
				12, 15, 14,
				16, 17, 18,//Right
				18, 19, 16,
				22, 21, 20,//Left
				20, 23, 22
		};


Vertices for the curious
0 Vert
VertPos: 0.0x, 0.0y, 0.0z
VertUV: 0.0u, 1.0v
VertNormal: 0.0x, 0.0y, 0.0z
VertBinormal: 0.0x, 0.0y, 0.0z
VertTangent: 0.0x, 0.0y, 0.0z

1 Vert
VertPos: 0.0x, 20.0y, 0.0z
VertUV: 0.0u, 0.0v
VertNormal: 0.0x, 0.0y, 0.0z
VertBinormal: 0.0x, 0.0y, 0.0z
VertTangent: 0.0x, 0.0y, 0.0z

2 Vert
VertPos: 20.0x, 20.0y, 0.0z
VertUV: -1.0u, 0.0v
VertNormal: 0.0x, 0.0y, 0.0z
VertBinormal: 0.0x, 0.0y, 0.0z
VertTangent: 0.0x, 0.0y, 0.0z

3 Vert
VertPos: 20.0x, 0.0y, 0.0z
VertUV: -1.0u, 1.0v
VertNormal: 0.0x, 0.0y, 0.0z
VertBinormal: 0.0x, 0.0y, 0.0z
VertTangent: 0.0x, 0.0y, 0.0z

4 Vert
VertPos: 0.0x, 0.0y, 20.0z
VertUV: 0.0u, 1.0v
VertNormal: 0.0x, 0.0y, 1.0z
VertBinormal: 0.0x, 1.0y, 0.0z
VertTangent: 1.0x, 0.0y, 0.0z

5 Vert
VertPos: 0.0x, 20.0y, 20.0z
VertUV: 0.0u, 0.0v
VertNormal: 0.0x, 0.0y, 1.0z
VertBinormal: 0.0x, 1.0y, 0.0z
VertTangent: 1.0x, 0.0y, 0.0z

6 Vert
VertPos: 20.0x, 20.0y, 20.0z
VertUV: -1.0u, 0.0v
VertNormal: 0.0x, 0.0y, 1.0z
VertBinormal: 0.0x, 1.0y, 0.0z
VertTangent: 1.0x, 0.0y, 0.0z

7 Vert
VertPos: 20.0x, 0.0y, 20.0z
VertUV: -1.0u, 1.0v
VertNormal: 0.0x, 0.0y, 1.0z
VertBinormal: 0.0x, 1.0y, 0.0z
VertTangent: 1.0x, 0.0y, 0.0z

8 Vert
VertPos: 0.0x, 20.0y, 0.0z
VertUV: 0.0u, 1.0v
VertNormal: 0.0x, 1.0y, 0.0z
VertBinormal: -1.0x, 0.0y, 0.0z
VertTangent: 0.0x, 0.0y, -1.0z

9 Vert
VertPos: 0.0x, 20.0y, 20.0z
VertUV: 0.0u, 0.0v
VertNormal: 0.0x, 1.0y, 0.0z
VertBinormal: -1.0x, 0.0y, 0.0z
VertTangent: 0.0x, 0.0y, -1.0z

10 Vert
VertPos: 20.0x, 20.0y, 20.0z
VertUV: -1.0u, 0.0v
VertNormal: 0.0x, 1.0y, 0.0z
VertBinormal: -1.0x, 0.0y, 0.0z
VertTangent: 0.0x, 0.0y, -1.0z

11 Vert
VertPos: 20.0x, 20.0y, 0.0z
VertUV: -1.0u, 1.0v
VertNormal: 0.0x, 1.0y, 0.0z
VertBinormal: -1.0x, 0.0y, 0.0z
VertTangent: 0.0x, 0.0y, -1.0z

12 Vert
VertPos: 0.0x, 0.0y, 0.0z
VertUV: 0.0u, 1.0v
VertNormal: 0.0x, 1.0y, 0.0z
VertBinormal: -1.0x, 0.0y, 0.0z
VertTangent: 0.0x, 0.0y, -1.0z

13 Vert
VertPos: 0.0x, 0.0y, 20.0z
VertUV: 0.0u, 0.0v
VertNormal: 0.0x, 1.0y, 0.0z
VertBinormal: -1.0x, 0.0y, 0.0z
VertTangent: 0.0x, 0.0y, -1.0z

14 Vert
VertPos: 20.0x, 0.0y, 20.0z
VertUV: -1.0u, 0.0v
VertNormal: 0.0x, 1.0y, 0.0z
VertBinormal: -1.0x, 0.0y, 0.0z
VertTangent: 0.0x, 0.0y, -1.0z

15 Vert
VertPos: 20.0x, 0.0y, 0.0z
VertUV: -1.0u, 1.0v
VertNormal: 0.0x, 1.0y, 0.0z
VertBinormal: -1.0x, 0.0y, 0.0z
VertTangent: 0.0x, 0.0y, -1.0z

16 Vert
VertPos: 0.0x, 20.0y, 0.0z
VertUV: 0.0u, 0.0v
VertNormal: 1.0x, 0.0y, 0.0z
VertBinormal: 0.0x, 1.0y, 0.0z
VertTangent: 0.0x, 0.0y, 1.0z

17 Vert
VertPos: 0.0x, 0.0y, 0.0z
VertUV: 0.0u, 1.0v
VertNormal: 1.0x, 0.0y, 0.0z
VertBinormal: 0.0x, 1.0y, 0.0z
VertTangent: 0.0x, 0.0y, 1.0z

18 Vert
VertPos: 0.0x, 0.0y, 20.0z
VertUV: -1.0u, 1.0v
VertNormal: 1.0x, 0.0y, 0.0z
VertBinormal: 0.0x, 1.0y, 0.0z
VertTangent: 0.0x, 0.0y, 1.0z

19 Vert
VertPos: 0.0x, 20.0y, 20.0z
VertUV: -1.0u, 0.0v
VertNormal: 1.0x, 0.0y, 0.0z
VertBinormal: 0.0x, 1.0y, 0.0z
VertTangent: 0.0x, 0.0y, 1.0z

20 Vert
VertPos: 20.0x, 20.0y, 0.0z
VertUV: 0.0u, 0.0v
VertNormal: 1.0x, 0.0y, 0.0z
VertBinormal: 0.0x, 0.0y, 1.0z
VertTangent: 0.0x, 1.0y, 0.0z

21 Vert
VertPos: 20.0x, 0.0y, 0.0z
VertUV: 0.0u, 1.0v
VertNormal: 1.0x, 0.0y, 0.0z
VertBinormal: 0.0x, 0.0y, 1.0z
VertTangent: 0.0x, 1.0y, 0.0z

22 Vert
VertPos: 20.0x, 0.0y, 20.0z
VertUV: -1.0u, 1.0v
VertNormal: 1.0x, 0.0y, 0.0z
VertBinormal: 0.0x, 0.0y, 1.0z
VertTangent: 0.0x, 1.0y, 0.0z

23 Vert
VertPos: 20.0x, 20.0y, 20.0z
VertUV: -1.0u, 0.0v
VertNormal: 1.0x, 0.0y, 0.0z
VertBinormal: 0.0x, 0.0y, 1.0z
VertTangent: 0.0x, 1.0y, 0.0z

This is effectively world-space depth reconstruction feeding normal mapping and parallax relief mapping via a deferred approach. Everything appears to be working bar these normals. I am not yet importing any model data from external formats.
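
Echoing the earlier debugging advice, a quick way to isolate this (a debug sketch for the top of GeomPointLight's lighting block): dump the sampled G-buffer normal straight to the output; if it is already inverted there, the problem is in the cube data or the G-buffer encode rather than the lighting math:

colourOut = vec4(normal.xyz, 1.0); // raw G-buffer normal as colour
return;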

