I'm resuscitating this thread as I seem to be getting a lot closer, but still not close enough for the cigar. Since the last post, I've switched to a viewspace approach and am trying to adapt
this paper by Fabio Policarpo and Francisco Fonseca. I'm fairly certain that my MRT's are correct, but the post-processing is generating some unexpected results. The problem
seems to be that there are some strange directional artifacts, probably in the way I'm handling the post-processing filter (lighting does change when I move the light around, but it also seems to change, along with the black streak artifact, when I move the camera around). All in all, combine my rather daft mathematical skills with a certain case of braindeadness and the result is a fine case of confusion.
So, the attached image shows my buffers as they are right now (diffuse, depth encoded as color and view-space normal) in the MRT windows and the combined result in the main window. Notice the strange lighting artifact. I've no idea where it's from - there's only one light source in the scene and it's on a vertical line at (0, 2.5, 0), while (0, 0, 0) is at the same level as the ground quad, at the back edge of the cube (the middle top corner in the image).
As the MRT's seem to be right about what one would expect, I'll skip the GBuffer code and only post relevant code for the full screen filter (it computes and includes normals, but those are ignored for clarity in the actual lighting code - eg all the code should do is simple per-fragment attenuation-based lighting). I've also tested the depth encoding and it seems to be just fine.
VERTEX SHADER:
varying vec3 vViewPosition; //this is the unprojected position of the corner of the fullscreen quad passed in as TU1 texture coordinates (see below for the code)
// Fullscreen-pass vertex shader: passes through the quad position, the screen
// UVs (TU0) and the precomputed view-space corner ray (TU1).
void main( void )
{
gl_Position = ftransform();
gl_TexCoord[0] = gl_MultiTexCoord0;
gl_TexCoord[1] = gl_MultiTexCoord1;
// gl_MultiTexCoord1 is a vec4; take .xyz explicitly - assigning a vec4 to a
// vec3 varying is a type error in GLSL (no implicit vector truncation).
vViewPosition = gl_MultiTexCoord1.xyz;
gl_FrontColor = vec4(1.0, 1.0, 1.0, 1.0);
}
FRAGMENT SHADER:
uniform sampler2D tImage0;
uniform sampler2D tImage1;
uniform sampler2D tImage2;
uniform vec3 vEyePosition;
uniform vec3 vEyeDirection;
uniform vec3 lightpos;
uniform vec2 planes;
varying vec3 vViewPosition;
// Point-light contribution for one fragment; all positions are in view space.
//   Scolor / Spos / Sradius : light color, view-space position, falloff radius
//   p / n                   : reconstructed view-space position and decoded normal
//   Mdiff / Mspec / Mshi    : material diffuse color, specular color, shininess
// While debugging, only distance attenuation * diffuse albedo is returned; the
// normal-dependent diffuse/specular terms below are deliberately commented out.
vec3 lighting(vec3 Scolor, vec3 Spos, float Sradius, vec3 p, vec3 n, vec3 Mdiff, vec3 Mspec, float Mshi)
{
// un-normalized vector from the fragment to the light (length used for attenuation)
vec3 vLightDir = Spos - p;
// vec3 vViewVec = normalize(p);
// vec3 vHalfVec = normalize(vViewVec + vLightDir);
// if(dot(n, normalize(vLightDir)) < 0.0)
// return vec3(1, 0, 0);
// attenuation (equation 2): linear falloff to zero at Sradius
float att = clamp(1.0 - length(vLightDir) / Sradius, 0.0, 1.0);
// vLightDir = normalize(vLightDir);
// diffuse and specular terms (equations 3 and 4)
// vec3 Idiff = clamp(dot(vLightDir, n), 0.0, 1.0) * Mdiff * Scolor;
// vec3 Ispec = pow(clamp(dot(vHalfVec, n), 0.0, 1.0), Mshi) * Mspec * Scolor;
// final color (part of equation 1)
//vec3 d = float_to_color((planes.x * p.z + planes.y) / -p.z);
//<<<<SKIP LIGHT COLOR, SPECULAR COMP., NORMALS AND STUFF FOR NOW - USE ONLY INTENSITY>>>
return att * Mdiff;//(Idiff/* + Ispec*/);
}
// Decode a depth value that was packed into 8-bit RGB channels by the GBuffer
// pass: R holds the most significant byte, B the least significant one.
float color_to_float(vec3 color)
{
return color.x + color.y / 256.0 + color.z / (256.0 * 256.0);
}
// Deferred lighting pass: reconstructs the view-space position from the packed
// depth buffer and the interpolated corner ray, then evaluates the light.
void main( void )
{
vec3 lightcolor = vec3(1.0, 1.0, 1.0);
// texture2D returns vec4 - take .xyz explicitly; implicit vec4->vec3
// conversions are type errors in GLSL and some drivers reject them.
float depth = color_to_float(texture2D(tImage1, gl_TexCoord[0].xy).xyz);
// view ray through this fragment (interpolated far-plane corner direction)
vec3 view = normalize(vViewPosition);
// reconstruct the view-space position from the encoded depth (see the paper)
vec3 pos;
pos.z = -planes.y / (planes.x + depth);
pos.xy = view.xy / view.z * pos.z;
// view-space normal was stored biased into [0,1]; undo the 0.5 bias
vec3 normal = texture2D(tImage2, gl_TexCoord[0].xy).xyz - 0.5;
float len = length(normal);
if(len > 0.1)
normal /= len;
else
normal = vec3(0.0); // 'normal = 0;' is invalid: no scalar->vec3 assignment
// material information
vec3 diffuse = texture2D(tImage0, gl_TexCoord[0].xy).xyz;//f4tex2D(diffuse_map,IN.texcoord);
vec4 specular = vec4(0.0, 0.0, 0.0, 1.0);//f4tex2D(specular_map,IN.texcoord);
// lighting equation (see listing 2); shininess must be a float literal -
// GLSL has no implicit int->float conversion for function arguments
float fLightRadius = 20.0;
vec3 final_color = lighting(lightcolor, lightpos.xyz, fLightRadius, pos, normal, diffuse, specular.xyz, 0.0/*specular.w*/);
// return the final color
gl_FragColor = vec4(final_color.xyz, 1.0);
}
There's a bunch of extraneous code in the fragment shader, although it shouldn't be too much.
And finally, the relevant bits from the application side:
//FROM THE FILTER CODE
...
//set the light's position; lightpos is in world space
TVector3D l = lightpos;
IMatrix4f modelview;
glGetFloatv(GL_MODELVIEW_MATRIX, modelview);
l = modelview * l;
shader->SetUniform3f(uniLightPosition, (float)l.x, (float)l.y, (float)l.z);
DrawFullscreenQuad(drv);
...
//AND THE RELEVANT DrawFullscreenQuad() FUNCTION
DrawFullscreenQuad(IVideoDriver * drv)
{
int sx = drv->window->GetWidth();
int sy = drv->window->GetHeight();
int pixels[4][2] = { { 0, 0 }, { 0, sy }, { sx, sy }, { sx, 0} };
int viewport[4] = { 0, 0, sx, sy };
IMatrix4f view_rotation;
//in D3D:
// 1 2 3 4
//1 right.x up.x look.x 0
//2 right.y up.y look.y 0
//3 right.z up.z look.z 0
//4 pos.x pos.y pos.z 1
//in GL:
// 1 2 3 4
//1 right.x right.y right.z pos.x
//2 up.x up.y up.z pos.y
//3 look.x look.y look.z pos.z
//4 0 0 0 1
TVector3D r = drv->camera->Right;
TVector3D l = drv->camera->Orientation;
TVector3D u = drv->camera->UpVector;
TVector3D p = drv->camera->Position;
view_rotation[0 ] = l.x;
view_rotation[1 ] = l.y;
view_rotation[2 ] = l.z;
view_rotation[3 ] = 0;
view_rotation[4 ] = u.x;
view_rotation[5 ] = u.y;
view_rotation[6 ] = u.z;
view_rotation[7 ] = 0;
view_rotation[8 ] = r.x;
view_rotation[9 ] = r.y;
view_rotation[10] = r.z;
view_rotation[11] = 0;
view_rotation[12] = 0;
view_rotation[13] = 0;
view_rotation[14] = 0;
view_rotation[15] = 1;
//setting the translation row to identity
// view_rotation[12] = p.x;
// view_rotation[13] = p.y;
// view_rotation[14] = p.z;
// view_rotation[15] = 1;
double model_matrix[16];
double proj_matrix[16];
glGetDoublev(GL_MODELVIEW_MATRIX, model_matrix);
glGetDoublev(GL_PROJECTION_MATRIX, proj_matrix);
TVector3D v[4];
TVector3D distance;
for(int i = 0; i < 4; i++)
{
gluUnProject(
pixels[0],pixels[1],1,
model_matrix, proj_matrix, viewport,
&distance[0], &distance[1], &distance[2]);
v = distance;
v -= p;
v = VecNormalize(v);
v = view_rotation * v;
}
TVector3D v0 = TVector3D(0, 0, 0);
TVector3D v1 = TVector3D(sx, sy, 0);
drv->Begin(GD_QUADS);
//all calls here are essentially GL equivalents; the first argument of TexCoord3D() is the texture unit
drv->TexCoord3D(0, 0, 1, 0);
//TU1 texcoords are forwarded to the vViewPosition variable in the shader
drv->TexCoord3D(1, v[1]);
drv->Vertex3D(v0);
drv->TexCoord3D(0, 0, 0, 0);
drv->TexCoord3D(1, v[0]);
drv->Vertex3D(v0.x, v1.y, v1.z);
drv->TexCoord3D(0, 1, 0, 0);
drv->TexCoord3D(1, v[3]);
drv->Vertex3D(v1);
drv->TexCoord3D(0, 1, 1, 0);
drv->TexCoord3D(1, v[2]);
drv->Vertex3D(v1.x, v0.y, v0.z);
drv->End();
}
I think that's about it. I'm really not too sure about the code inside DrawFullscreenQuad(). The relevant snippet in the paper looks like this:
00 int pixels[4][2]={ { 0,0 },{0,sy},{sx,sy},{sx,0} };
01 int viewport[4]={ 0,0,sx,sy };
02
03 pMatrix view_rotation = view_matrix;
04 view_rotation.set_translate(0);
05
06 pVector v[4];
07 double d[3];
08 for( int i=0;i<4;i++ )
09 {
10 gluUnProject(
11 pixels[i][0],pixels[i][1],10,
12 model_matrix, proj_matrix, viewport,
13 &d[0],&d[1],&d[2]);
14 v[i].vec((float)d[0],(float)d[1],(float)d[2]);
15 v[i] -= camera.pos;
16 v[i].normalize();
17 v[i] = v[i]*view_rotation;
18 }
Since I'm not maintaining my own matrices, I'm constructing one at runtime (I'll probably port my code when I get this up and running). Ugh - so, if someone had the patience to read through this and has a clue as to what the problem might be, then I could sure use some help