Hi All,
A bit of a curiosity, I am fascinated by this subject and paper:
http://hal.inria.fr/docs/00/28/87/58/PDF/article.pdf
I have a copy of the shaders for the technique and they all say this:
EnlightenedOne hasn't added any contacts yet.
27 August 2014 - 04:58 PM
Hi All,
A bit of a curiosity, I am fascinated by this subject and paper:
http://hal.inria.fr/docs/00/28/87/58/PDF/article.pdf
I have a copy of the shaders for the technique and they all say this:
01 August 2014 - 07:18 PM
Hi All,
I am a seasoned DX developer and, having scoured the web for decent deferred shading approaches, I am trying to adapt samples from the web to that forsaken tongue, GLSL.
Deferred Rendering Tutorials I have seen:
In my attempt to port the last sample (based on the Second-Depth Anti-Alias sample) I get world space wrapped inside the light. Clearly the primary fault is that the inverse view projection is invalid (this was the worst number of artifacts I could generate rotating around a cube).
I intentionally have a very well rounded light sphere to make the distortions clear.
Provided I can get the basics like a world space position I can write beautiful shaders, I am struggling to get this point of reference and without it I feel about 2cm tall.
Because I want this to be available for everyone (I hate the lack of GL sample code!) here is what I have so far (apologies that there is no sample app):
GeomVertShader:
#version 330

// Geometry-pass vertex shader: projects the mesh into clip space and
// forwards the per-vertex tangent frame and texture coordinates so the
// fragment stage can do normal mapping.

in vec3 inPos;
in vec2 inUV;
in vec3 inNormal;
in vec3 inBinormal;
in vec3 inTangent;

uniform mat4 wvpMatrix;   // combined world * view * projection

out vec4 mWVPPosition;    // un-divided clip-space position, also used downstream
out vec3 pNormal;
out vec3 pBinormal;
out vec3 pTangent;
out vec2 texCoord;

void main(void)
{
    // Written twice: once for rasterisation, once as a varying so the
    // fragment shader can see the raw clip-space value.
    mWVPPosition = wvpMatrix * vec4(inPos, 1.0f);
    gl_Position  = mWVPPosition;

    // Tangent frame and UVs pass through untouched; perturbation happens
    // per-fragment.
    texCoord  = inUV;
    pTangent  = inTangent;
    pBinormal = inBinormal;
    pNormal   = inNormal;
}
GeomFragShader
#version 330

// Geometry-pass fragment shader: fills the G-buffer.
//   target 0: rgb = albedo, a = averaged specular intensity
//   target 1: xyz = perturbed normal, w = 1

in vec4 mWVPPosition;
in vec3 pNormal;
in vec3 pBinormal;
in vec3 pTangent;
in vec2 texCoord;

uniform mat4 wvpMatrix;
uniform sampler2D diffuseTexture;
uniform sampler2D normalTexture;
uniform sampler2D heightTexture;
uniform sampler2D specularTexture;

layout (location = 0) out vec4 colourOut;
layout (location = 1) out vec4 normalOut;

void main(void)
{
    // Unpack the tangent-space normal from [0,1] to [-1,1], then rotate it
    // into the interpolated tangent frame and renormalise.
    vec3 tangentNormal = 2 * texture(normalTexture, texCoord).xyz - 1;
    vec3 surfaceNormal = normalize(pTangent  * tangentNormal.x +
                                   pBinormal * tangentNormal.y +
                                   pNormal   * tangentNormal.z);

    colourOut = texture(diffuseTexture, texCoord);

    // Collapse the specular map to a single intensity carried in the colour
    // target's alpha channel.
    vec3 specularSample = texture(specularTexture, texCoord).xyz;
    colourOut.w = (specularSample.x + specularSample.y + specularSample.z) / 3;

    // NOTE(review): the normal is written in [-1,1]; if this attachment is a
    // plain unsigned RGBA8 buffer the negative components clamp to zero —
    // confirm the target is a float/signed format, or encode as n * 0.5 + 0.5.
    normalOut.xyz = surfaceNormal;
    normalOut.w   = 1;
}
PointLightVertShader
#version 330

// Point-light vertex shader: fits a unit light-volume sphere to the light's
// radius and position, then projects it to clip space.

in vec3 inPos;
in vec2 inUV;

uniform mat4 wvpMatrix;
uniform mat4 ivpMatrix;
uniform vec2 zBounds;
uniform vec3 camPos;
uniform float invRadius;
uniform vec3 lightPos;
uniform vec3 lightColour;
uniform float lightRadius;
uniform float lightFalloff;

out vec4 mWVPPosition;    // un-divided clip-space position for the fragment stage

void main(void)
{
    // Scale the unit sphere by the light radius and centre it on the light.
    vec3 worldPos = inPos * lightRadius + lightPos;

    mWVPPosition = wvpMatrix * vec4(worldPos, 1.0f);
    gl_Position  = mWVPPosition;
}
PointLightFragShader
#version 330

// Point-light fragment shader: reconstructs the world-space position of the
// G-buffer sample under this fragment from the depth buffer, then applies a
// radius-attenuated Lambert + Phong point light.
//
// Expects ivpMatrix to be the raw inverse(projection * view) — no viewport
// or pixel scaling baked in; all screen mapping is handled here.

in vec4 mWVPPosition;   // un-divided clip-space position from the vertex stage

uniform mat4 wvpMatrix;
uniform mat4 ivpMatrix;
uniform vec2 zBounds;
uniform vec3 camPos;
uniform float invRadius;      // 1 / lightRadius
uniform vec3 lightPos;
uniform vec3 lightColour;
uniform float lightRadius;
uniform float lightFalloff;
uniform sampler2D diffuseTexture;
uniform sampler2D normalTexture;
uniform sampler2D depthTexture;

layout (location = 0) out vec4 colourOut;

void main(void)
{
    // FIX: the interpolated clip-space position must be perspective-divided
    // to reach NDC ([-1,1]) and then remapped to [0,1] before it can be used
    // as a texture coordinate. The original used mWVPPosition.xy directly,
    // which is why world space appeared "wrapped" inside the light volume.
    vec2 ndcXY = mWVPPosition.xy / mWVPPosition.w;
    vec2 UV = ndcXY * 0.5 + 0.5;

    // FIX: depth comes from the depth attachment, not the diffuse target.
    float depth = texture(depthTexture, UV).x;

    vec3 addedLight = vec3(0, 0, 0);

    //if (depth >= zBounds.x && depth <= zBounds.y)
    {
        vec4 diffuseTex = texture(diffuseTexture, UV);
        vec4 normal = texture(normalTexture, UV);

        // FIX: rebuild the full NDC position — xy from this fragment, z from
        // the stored depth remapped from [0,1] to [-1,1] — then un-project
        // with the inverse view-projection and divide by w.
        vec4 cPos = vec4(ndcXY, depth * 2.0 - 1.0, 1);
        vec4 wPos = ivpMatrix * cPos;
        vec3 pos = wPos.xyz / wPos.w;

        // Lighting vectors; lVec is pre-scaled so |lVec| == 1 at the radius.
        vec3 lVec = (lightPos - pos) * invRadius;
        vec3 lightVec = normalize(lVec);
        vec3 viewVec = normalize(camPos - pos);

        // Attenuation that falls off to zero at the light radius.
        float atten = clamp(1.0f - dot(lVec, lVec), 0.0, 1.0);
        atten *= atten;

        // Lambert diffuse + Phong specular; the colour target's alpha carries
        // the averaged specular intensity written by the geometry pass.
        float colDiffuse = clamp(dot(lightVec, normal.xyz), 0, 1);
        float specular_intensity = diffuseTex.w * 0.4f;
        float specular = specular_intensity *
            pow(clamp(dot(reflect(-viewVec, normal.xyz), lightVec), 0.0, 1.0), 10.0f);

        addedLight = atten * (colDiffuse * diffuseTex.xyz + specular);
    }

    colourOut = vec4(addedLight.xyz, 1);
}
Note that for the moment I am totally ignoring the optimisation of "if (depth >= zBounds.x && depth <= zBounds.y)" because I want to crack the basic reconstruction before experimenting with this.
Shader Binding:
// Per-light uniform upload for the point-light pass: camera position, the
// inverse view-projection used for depth reconstruction, the VP matrix for
// the light volume, and the light's view-space z bounds for culling.
// (Fragment of a larger method — the opening signature is above this paste.)
Matrix4f inverseViewProjection = new Matrix4f();
Vector3f camPos = cameraController.getActiveCameraPos();
GL20.glUniform3f(shader.getLocCamPos(), camPos.x, camPos.y, camPos.z);
inverseViewProjection = cameraController.getActiveVPMatrixInverse();
//inverseViewProjection = inverseViewProjection.translate(new Vector3f(-1f, 1f, 0));
//inverseViewProjection = inverseViewProjection.scale(new Vector3f(2, -2, 1));
// NOTE(review): scaling the inverse VP by 1/width and 1/height implies the
// shader feeds it raw pixel coordinates. If the shader maps its own UV/NDC
// (perspective divide + [0,1]->[-1,1]), this scale must be removed and the
// raw inverse uploaded instead — confirm against the fragment shader.
inverseViewProjection = inverseViewProjection.scale(new Vector3f(1f/engineParams.getDisplayWidth(), 1f/engineParams.getDisplayHeight(), 1));
GL20.glUniformMatrix4(shader.getLocmIVPMatrix(), false, OpenGLHelper.getMatrix4ScratchBuffer(inverseViewProjection));
float nearTest = 0, farTest = 0;
Matrix4f projection = new Matrix4f(cameraController.getCoreCameraProjection());
GL20.glUniformMatrix4(shader.getLocmWVP(), false, OpenGLHelper.getMatrix4ScratchBuffer(cameraController.getActiveViewProjectionMatrix()));
// m22/m23 of the projection are used below to turn view-space z into the
// non-linear depth-buffer value: depth = m22 + m23 / z.
Vector2f zw = new Vector2f(projection.m22, projection.m23);
//Vector4f testLightViewSpace = new Vector4f(lightPos.getX(), lightPos.getY(), lightPos.getZ(), 1);
//testLightViewSpace = OpenGLHelper.columnVectorMultiplyMatrixVector((Matrix4f)cameraController.getActiveCameraView(), testLightViewSpace);
// Compute z-bounds: transform the light centre into view space and offset
// by the radius to get the depth interval the light volume can touch.
Vector4f lPos = OpenGLHelper.columnVectorMultiplyMatrixVector(cameraController.getActiveCameraView(), new Vector4f(lightPos.x, lightPos.y, lightPos.z, 1.0f));
float z1 = lPos.z + lightRadius;
//if (z1 > NEAR_DEPTH) {
float z0 = Math.max(lPos.z - lightRadius, NEAR_DEPTH);
// NOTE(review): with a right-handed view, visible view-space z is negative;
// confirm the sign convention of lPos.z matches NEAR_DEPTH here.
nearTest = (zw.x + zw.y / z0);
farTest = (zw.x + zw.y / z1);
// Clamp both bounds into the valid depth range [0, 1].
if (nearTest > 1) { nearTest = 1; } else if (nearTest < 0) { nearTest = 0; }
if (farTest > 1) { farTest = 1; } else if (farTest < 0) { farTest = 0; }
GL20.glUniform3f(shader.getLocLightPos(), lightPos.getX(), lightPos.getY(), lightPos.getZ());
GL20.glUniform3f(shader.getLocLightColour(), lightColour.getX(), lightColour.getY(), lightColour.getZ());
GL20.glUniform1f(shader.getLocLightRadius(), lightRadius);
GL20.glUniform1f(shader.getLocInvRadius(), 1f/lightRadius);
GL20.glUniform1f(shader.getLocLightFalloff(), lightFalloff);
GL20.glUniform2f(shader.getLocZBounds(), nearTest, farTest);
}
The line "inverseViewProjection = cameraController.getActiveVPMatrixInverse();" depends on the multiplied result of the inverse of these two:
View Matrix
// Rebuilds the camera's view matrix in place from the eye position and the
// lookAt/up/right basis vectors. Side effect: lookAtVector, right and up are
// re-orthonormalised in place (Gram-Schmidt via two cross products).
public void updateViewMatrix(Matrix4f coreViewMatrix) {
    Matrix4f.setIdentity(coreViewMatrix);
    // Normalise the forward vector, then derive an orthonormal right and up.
    if (lookAtVector.length() != 0) { lookAtVector.normalise(); }
    Vector3f.cross(up, lookAtVector, right);
    if (right.length() != 0) { right.normalise(); }
    Vector3f.cross(lookAtVector, right, up);
    if (up.length() != 0) { up.normalise(); }
    // Rotation part: rows are the world axes, columns are right/up/forward.
    // NOTE(review): the third column stores +lookAtVector; the usual OpenGL
    // right-handed convention stores -forward there (camera looks down -Z in
    // view space) — confirm this matches the projection's handedness.
    coreViewMatrix.m00 = right.x; coreViewMatrix.m01 = up.x; coreViewMatrix.m02 = lookAtVector.x; coreViewMatrix.m03 = 0;
    coreViewMatrix.m10 = right.y; coreViewMatrix.m11 = up.y; coreViewMatrix.m12 = lookAtVector.y; coreViewMatrix.m13 = 0;
    coreViewMatrix.m20 = right.z; coreViewMatrix.m21 = up.z; coreViewMatrix.m22 = lookAtVector.z; coreViewMatrix.m23 = 0;
    //Inverse dot from eye position
    coreViewMatrix.m30 = -Vector3f.dot(eyePosition, right);
    coreViewMatrix.m31 = -Vector3f.dot(eyePosition, up);
    coreViewMatrix.m32 = -Vector3f.dot(eyePosition, lookAtVector);
    coreViewMatrix.m33 = 1;
}
Projection Matrix:
/**
 * Fills projectionMatrix in place with a standard OpenGL perspective
 * projection (depth mapped to [-1, 1], the fourth column carrying -z).
 *
 * @param projectionMatrix destination, overwritten entirely
 * @param fov    vertical field of view in degrees
 * @param aspect width / height ratio
 * @param znear  near-plane distance
 * @param zfar   far-plane distance
 */
public static void createProjection(Matrix4f projectionMatrix, float fov, float aspect, float znear, float zfar) {
    // Half-extents of the near plane, derived from the field of view.
    float nearHalfHeight = (float) Math.tan((Math.toRadians(fov)) * 0.5f) * znear;
    float right = aspect * nearHalfHeight;
    float left = -right;
    float top = nearHalfHeight;
    float bottom = -top;

    // Column 0: x scale.
    projectionMatrix.m00 = 2 * znear / (right - left);
    projectionMatrix.m01 = 0;
    projectionMatrix.m02 = 0;
    projectionMatrix.m03 = 0;

    // Column 1: y scale.
    projectionMatrix.m10 = 0;
    projectionMatrix.m11 = 2 * znear / (top - bottom);
    projectionMatrix.m12 = 0;
    projectionMatrix.m13 = 0;

    // Column 2: frustum skew (zero for this symmetric frustum), depth
    // mapping, and the -1 that copies -z_view into clip w.
    projectionMatrix.m20 = (right + left) / (right - left);
    projectionMatrix.m21 = (top + bottom) / (top - bottom);
    projectionMatrix.m22 = -(zfar + znear) / (zfar - znear);
    projectionMatrix.m23 = -1;

    // Column 3: depth translation term.
    projectionMatrix.m30 = 0;
    projectionMatrix.m31 = 0;
    projectionMatrix.m32 = -2 * zfar * znear / (zfar - znear);
    projectionMatrix.m33 = 0;
}
TLDR:
Please help me diagnose what is wrong with the lighting from the picture/code above, my holy grail is a working sample of true depth reconstruction in OpenGL preferably to world space.
19 April 2014 - 04:25 PM
Hi All,
I am porting/migrating my pipeline from forward to deferred rendering because I want precomputed atmospheric scattering. If I do not learn how to achieve this technique I will never die a happy man (and vice versa). The goal is:
The paper with it is interesting reading as is prolands source code example, however prolands demo does not actually use a GBuffer so I need to blend their high level behaviour from this DX port to achieve the effect in GLSL 330.
I am binding my render buffer as a texture (24-bit depth with an 8-bit stencil). I can read it and draw it to a quad on screen, I know that the non-linear depth values it writes are valid, and my shader/model binding process is tried and true (right-handed). My current goal is to establish all the fiddly, unpleasant transforms required to get from the GBuffer to post-processing, and to better understand the most practical way to handle transforms around depth. My first stop was the realisation that if I can get the post-process surfacePos below I will be home free, as all the lighting is similar — it is just the sources used to look up values that have changed.
Here is the code I am attempting to port, the transforms through various coordinate space I have a loose grasp of, but the part I do not get is how the SV_POSITION translates in GLSL. Changing the gl_FragDepth to try and mimic screen space changes ends badly.
GBUFFER GEOMETRY___________________________________________________
// Geometry-pass vertex shader output for the DX sample being ported.
struct VS_OUT {
float4 posH : SV_POSITION; // clip-space position (z rescaled by the VS snippet below so the depth buffer ends up linear-ish)
float3 posW : POSITION;    // world-space position
float3 tangent : TANGENT0;
float3 bitangent : TANGENT1;
float2 texC : TEXCOORD0;
};
...
Vertex shader snippet of interest to position:
// Project the world-space position into clip space.
output.posH = mul(float4(posWorld, 1.0f), g_viewProj);
// Linear-depth trick: pre-multiplying z by w cancels the hardware divide by
// w, so the depth buffer receives z_clip * g_invFarPlane — i.e. depth scaled
// into [0,1] by the far plane rather than the usual non-linear hyperbolic
// mapping. The post-process relies on this when it lerps along frustum rays.
output.posH.z = output.posH.z * output.posH.w * g_invFarPlane;
POST PROCESSING_____________________________________________________
Vertex Shader
// Thresholds used by the atmospheric-scattering lookups.
static const float EPSILON_ATMOSPHERE = 0.002f;
static const float EPSILON_INSCATTER = 0.004f;
// G-buffer inputs (depth holds the linearised value written by the geometry VS).
Texture2D g_depth;
Texture2D g_color;
Texture2D g_normal;
// Precomputed scattering lookup tables (Bruneton-style irradiance/inscatter).
Texture2D g_texIrradiance;
Texture3D g_texInscatter;
float3 g_cameraPos;  // world-space camera position
float3 g_sunVector;  // direction toward the sun
// Camera-to-world transform; presumably the inverse of the view matrix
// (the poster believes it is the camera's world rotation) — TODO confirm.
float4x4 g_cameraWorld;
// Frustum corner points on the far and near planes, one per quad vertex,
// selected by VS_IN.index.
float4 g_frustumFar[4];
float4 g_frustumNear[4];
// Full-screen quad vertex input.
struct VS_IN {
float3 posL : POSITION;  // quad corner, used directly as clip-space position
float2 texC : TEXCOORD0;
uint index : TEXCOORD1;  // selects which of the 4 frustum corner rays this vertex carries
};
// Full-screen quad vertex output; the two ray vectors are interpolated
// across the quad so each pixel gets its own frustum ray.
struct VS_OUT {
float4 posH : SV_POSITION;
float2 texC : TEXCOORD0;
float3 nearToFar : TEXCOORD2;    // world-space vector from near-plane point to far-plane point
float3 cameraToNear : TEXCOORD3; // world-space vector from the camera to the near-plane point
};
// Full-screen quad vertex shader: passes the quad through untransformed and
// builds, per corner, the world-space frustum ray the pixel shader uses to
// reconstruct position as cam + cameraToNear + depth * nearToFar.
VS_OUT VS(VS_IN input) {
VS_OUT output;

// The quad vertices are already in clip space.
output.posH = float4(input.posL, 1.0f);
output.texC = input.texC;

// Move this corner's near- and far-plane points into world space.
float3 nearWorld = mul(float4(g_frustumNear[input.index].xyz, 1.0f), g_cameraWorld).xyz;
float3 farWorld = mul(float4(g_frustumFar[input.index].xyz, 1.0f), g_cameraWorld).xyz;

// Split the ray into camera->near and near->far segments; both interpolate
// linearly across the quad.
output.cameraToNear = nearWorld - g_cameraPos;
output.nearToFar = farWorld - nearWorld;

return output;
}
Pixel Shader:
float4 PS_PLANET_DEFERRED(VS_OUT input) : SV_TARGET0 {
// reconstructing world space postion by interpolation
float depthVal = g_depth.SampleLevel( PointSamplerClamp, input.texC, 0 ).r;
float3 surfacePos = g_cameraPos + input.cameraToNear + depthVal * input.nearToFar;
// obtaining the view direction vector
float3 viewDir = normalize(input.nearToFar);
I believe g_cameraWorld is the world rotation of the camera.
http://mynameismjp.wordpress.com/2009/03/10/reconstructing-position-from-depth/
http://web.archive.org/web/20130416194336/http://olivers.posterous.com/linear-depth-in-glsl-for-real
http://stackoverflow.com/questions/6652253/getting-the-true-z-value-from-the-depth-buffer
http://www.opengl.org/discussion_boards/showthread.php/164734-Deferred-shading/page5
http://www.geeks3d.com/20091216/geexlab-how-to-visualize-the-depth-buffer-in-glsl/
So far I have had no success reconstructing raw depth by blending input from these snippets or by testing them in relative isolation. I have a feeling that everyone is tampering with the depth output in the geometry buffer, but in many of the snippets I have found it is not very clear exactly what parameters they are using to do this, or why. I am going to try to focus on filling in the gaps in the model above, because the resources above suggest it is still an efficient mechanism for reconstructing the desirable spaces in most post-processes.
Does anyone have a tutorial where this reconstruction process is applied as a holistic piece of functioning code? I would love to see the implementation for these frustum shapes on the near and far plane. I just need to see a proper GBuffer pipeline using depth to reconstruct position and linear depth so I can reverse engineer and inspect its properties to understand the bugs in my own code and move on.
I really cannot wait to play with that effect. If I get my shaders to reconstruct from depth I will post them up and describe the parts that have as of yet confounded me.
I welcome any input.
Many Thanks,
Enlightened One
18 October 2011 - 04:02 PM
// One-time mesh upload: create a VAO, then one interleaved vertex VBO (and
// an optional index buffer) per mesh in the POD scene.
glGenVertexArraysOES(1, &puiVAO);
glBindVertexArrayOES(puiVAO);
// NOTE(review): raw new[] with no visible delete[] — verify these arrays are
// released (or switch to std::vector) in the owning class's teardown.
if (!puiVbo) puiVbo = new GLuint[modScene.nNumMesh];
if (!puiIndexVbo) puiIndexVbo = new GLuint[modScene.nNumMesh];
glGenBuffers(modScene.nNumMesh, puiVbo);
for (unsigned int i = 0; i < modScene.nNumMesh; ++i)
{
    // Load vertex data into buffer object
    SPODMesh& Mesh = modScene.pMesh[i];
    unsigned int uiSize = Mesh.nNumVertex * Mesh.sVertex.nStride;
    glBindBuffer(GL_ARRAY_BUFFER, puiVbo[i]);
    glBufferData(GL_ARRAY_BUFFER, uiSize, Mesh.pInterleaved, GL_STATIC_DRAW);

    // Load index data into buffer object if available
    puiIndexVbo[i] = 0;
    if (Mesh.sFaces.pData)
    {
        glGenBuffers(1, &puiIndexVbo[i]);
        // NOTE(review): sizeof(GLshort) assumes 16-bit indices — confirm the
        // POD exporter was not configured for 32-bit indices.
        uiSize = PVRTModelPODCountIndices(Mesh) * sizeof(GLshort);
        glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, puiIndexVbo[i]);
        glBufferData(GL_ELEMENT_ARRAY_BUFFER, uiSize, Mesh.sFaces.pData, GL_STATIC_DRAW);
    }
}
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindVertexArrayOES(0);
glBindVertexArrayOES(puiVAO); THROWS ERROR! for(unsigned int i = 0; i < modScene.nNumMeshNode; ++i) { SPODMesh& Mesh = modScene.pMesh[i32MeshIndex]; // bind the VBO for the mesh glBindBuffer(GL_ARRAY_BUFFER, puiVbo[i32MeshIndex]); // bind the index buffer, won't hurt if the handle is 0 glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, puiIndexVbo[i32MeshIndex]); // Set the vertex attribute offsets glVertexAttribPointer(SNAVERTEX_ARRAY, 3, GL_FLOAT, GL_FALSE, Mesh.sVertex.nStride, Mesh.sVertex.pData); glVertexAttribPointer(SNANORMAL_ARRAY, 3, GL_FLOAT, GL_FALSE, Mesh.sNormals.nStride, Mesh.sNormals.pData); if(Mesh.nNumUVW) // Do we have texture co-ordinates? { glVertexAttribPointer(SNATEXCOORD_ARRAY, 2, GL_FLOAT, GL_FALSE, Mesh.psUVW[0].nStride, Mesh.psUVW[0].pData); } // Draw the Indexed Triangle list glDrawElements(GL_TRIANGLES, Mesh.nNumFaces*3, GL_UNSIGNED_SHORT, 0); } glBindVertexArrayOES(0);
18 October 2011 - 06:15 AM