
### Anfractu0us

Posted 06 January 2013 - 01:50 PM

Still haven't managed to fix it, but that is definitely one of the better explanations I've come across for the technique! I understand the idea a LOT better.

So, here's a giant wall of code!

While I'm thinking about it, I'm using SDL, GLEW, AssImp, and Bullet. I currently have Bullet disabled while I work on the shiny graphics junk.

static double modelView[16];
static double projection[16];
static float tex[16];

// This matrix transforms every coordinate (x, y, z):
//   x = x * 0.5 + 0.5
//   y = y * 0.5 + 0.5
//   z = z * 0.5 + 0.5
// i.e. it moves the unit cube [-1,1] to [0,1].
const GLdouble bias[16] = {
    0.5, 0.0, 0.0, 0.0,
    0.0, 0.5, 0.0, 0.0,
    0.0, 0.0, 0.5, 0.0,
    0.5, 0.5, 0.5, 1.0};

// Grab the modelview and projection matrices
glGetDoublev(GL_MODELVIEW_MATRIX, modelView);
glGetDoublev(GL_PROJECTION_MATRIX, projection);

glActiveTexture(GL_TEXTURE7);
glMatrixMode(GL_TEXTURE);

// Concatenate all the matrices into one: bias * projection * modelview
glLoadMatrixd(bias);
glMultMatrixd(projection);
glMultMatrixd(modelView);
// Go back to the normal matrix mode
glMatrixMode(GL_MODELVIEW);

I moved the glActiveTexture() call to before the glMatrixMode() call so that the data is actually written to the appropriate texture unit's matrix. Otherwise, the FBO and depth-map generation are entirely identical to what's in the tutorial. After a whole bunch of goofing around, I suspect my issue lies in the values getting pushed to the texture matrix.
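
To make sure I have the order of operations straight, here's the whole setup as a self-contained sketch of how I understand it is supposed to work (my paraphrase of the tutorial, not code lifted from it; lightProjection/lightModelView stand in for whatever matrices are on the stacks while rendering from the light):

// Sketch: build texture unit 7's matrix as bias * lightProjection * lightModelView.
void buildShadowTextureMatrix(const GLdouble lightProjection[16],
                              const GLdouble lightModelView[16])
{
    static const GLdouble bias[16] = {
        0.5, 0.0, 0.0, 0.0,
        0.0, 0.5, 0.0, 0.0,
        0.0, 0.0, 0.5, 0.0,
        0.5, 0.5, 0.5, 1.0};

    glActiveTexture(GL_TEXTURE7);  // select the unit BEFORE touching its matrix
    glMatrixMode(GL_TEXTURE);
    glLoadMatrixd(bias);           // glLoadMatrixd REPLACES the current matrix,
                                   // so nothing accumulates across frames
    glMultMatrixd(lightProjection);
    glMultMatrixd(lightModelView);
    glMatrixMode(GL_MODELVIEW);    // back to the normal matrix mode
    glActiveTexture(GL_TEXTURE0);
}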

if(mode == R_NORM || mode == R_DEBUG)
{
    glEnable(GL_COLOR_MATERIAL);
    glEnable(GL_TEXTURE_2D);
    glEnableClientState(GL_TEXTURE_COORD_ARRAY);
    glEnableClientState(GL_NORMAL_ARRAY);
    glEnableClientState(GL_VERTEX_ARRAY);
    glBindBuffer(GL_ARRAY_BUFFER, model.t_buff);
    glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, model.i_buff);

    // Interleaved vertex layout: 2 texcoord floats, 3 normal floats, 3 position floats
    glTexCoordPointer(2, GL_FLOAT, sizeof(vert), BUFFER_OFFSET(0));
    glNormalPointer(GL_FLOAT, sizeof(vert), BUFFER_OFFSET(sizeof(GLfloat)*2));
    glVertexPointer(3, GL_FLOAT, sizeof(vert), BUFFER_OFFSET(sizeof(GLfloat)*5));
    glClientActiveTexture(GL_TEXTURE0 + model.mats.at(0).texid);

    glUniform1i(sdw_loc, c_render.depth_tex);

    glActiveTexture(GL_TEXTURE0 + model.mats.at(0).texid);
    glBindTexture(GL_TEXTURE_2D, model.mats.at(0).texid);

    // The shadow map lives on unit 7
    glActiveTexture(GL_TEXTURE7);
    glBindTexture(GL_TEXTURE_2D, c_render.depth_tex);

    glDrawElements(GL_TRIANGLES, (model.f_index.size()*3), GL_UNSIGNED_INT, 0);

    // Restore state
    glBindBuffer(GL_ARRAY_BUFFER, 0);
    glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
    glActiveTexture(GL_TEXTURE0);
    glClientActiveTexture(GL_TEXTURE0);
    glDisableClientState(GL_VERTEX_ARRAY);
    glDisableClientState(GL_TEXTURE_COORD_ARRAY);
    glDisableClientState(GL_NORMAL_ARRAY);
    c_render.use_program(0);
    glBindTexture(GL_TEXTURE_2D, 0);
    glDisable(GL_TEXTURE_2D);
}
else if(mode == R_DEPTH)
{
    // Depth-only pass from the light's point of view: no texturing, no shader
    glEnableClientState(GL_NORMAL_ARRAY);
    glEnableClientState(GL_VERTEX_ARRAY);
    glBindBuffer(GL_ARRAY_BUFFER, model.t_buff);
    glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, model.i_buff);

    glNormalPointer(GL_FLOAT, sizeof(vert), BUFFER_OFFSET(sizeof(GLfloat)*2));
    glVertexPointer(3, GL_FLOAT, sizeof(vert), BUFFER_OFFSET(sizeof(GLfloat)*5));
    glDrawElements(GL_TRIANGLES, (model.f_index.size())*3, GL_UNSIGNED_INT, 0);
    glBindBuffer(GL_ARRAY_BUFFER, 0);
    glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
    glDisableClientState(GL_VERTEX_ARRAY);
    glDisableClientState(GL_NORMAL_ARRAY);
}

That's my rendering code (I cut some of the extra cruft out for readability). I use the R_DEPTH branch to generate the depth map, which is valid from what I can tell, and R_NORM/R_DEBUG is what renders the scene.
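
One thing I'm second-guessing while posting this: as far as I know, a sampler uniform is supposed to be set to the texture *unit* index rather than the texture object id, so I'd expect the shadow-map binding to look roughly like this (a minimal sketch with my own placeholder names — `program` and `depth_tex` stand in for my shader program and depth texture; not code from the tutorial):

// Sketch: sampler uniforms take the texture UNIT index, not the texture object id.
const GLint SHADOW_UNIT = 7;                 // the unit the shadow map lives on

GLint sdw_loc = glGetUniformLocation(program, "ShadowMap");
glUniform1i(sdw_loc, SHADOW_UNIT);           // unit index (7), not the GLuint texture id

glActiveTexture(GL_TEXTURE0 + SHADOW_UNIT);  // make that unit active...
glBindTexture(GL_TEXTURE_2D, depth_tex);     // ...and bind the depth texture to it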

What leads me to think it might be a texture matrix issue is that gDebugger spits out values that aren't even remotely clamped to [0,1], which is what I assume they are supposed to be. He didn't mention it in the tutorial, but I suppose that if your scene is of a decent scale, or the shadow map is of a larger resolution, you have to normalize the light projection/view matrix values?
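
Working through a concrete number to convince myself (my own arithmetic, not something from the tutorial): the texture matrix outputs homogeneous (s, t, r, q) coordinates, and they only land in [0,1] after the divide by q, which shadow2DProj() (or a manual division in the shader) performs:

// A point the light projects to homogeneous clip coords (x, y, z, w):
double x = 8.0, y = -4.0, z = 6.0, w = 10.0;   // NDC would be (0.8, -0.4, 0.6)

// What the bias matrix produces: the *0.5+0.5 remap folded into homogeneous
// form, with the 0.5 offset multiplied by w:
double s = 0.5 * x + 0.5 * w;   //  9.0  -- nowhere near [0,1]
double t = 0.5 * y + 0.5 * w;   //  3.0
double r = 0.5 * z + 0.5 * w;   //  8.0
double q = w;                   // 10.0

// The projective divide is what lands the coords in [0,1]:
// (s/q, t/q, r/q) = (0.9, 0.3, 0.8)

So if gDebugger is showing pre-divide values, numbers way outside [0,1] might actually be expected rather than a bug, as long as the lookup in the shader is projective.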

Also, for clarification: the depth map looks like it should from the light's point of view, and my texture is indeed created with both MIN/MAG filter settings. The one thing that raised a concern for me is that the tutorial code doesn't actually compile and run properly for me; the prebuilt binary works fine, but when I compiled his code and ran it, it didn't even create an FBO.
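
In case it matters, this is roughly the shape of my depth-FBO setup (a minimal sketch of the usual depth-only recipe with placeholder size constants, not the tutorial's exact code):

// Minimal depth-only FBO (GL 2.x era, EXT_framebuffer_object via GLEW).
const int SHADOW_W = 1024, SHADOW_H = 1024;   // placeholder resolution
GLuint depth_tex = 0, fbo = 0;

glGenTextures(1, &depth_tex);
glBindTexture(GL_TEXTURE_2D, depth_tex);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);  // MIN/MAG must both be set
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT24, SHADOW_W, SHADOW_H,
             0, GL_DEPTH_COMPONENT, GL_UNSIGNED_INT, NULL);

glGenFramebuffersEXT(1, &fbo);
glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, fbo);
glDrawBuffer(GL_NONE);   // depth only: no color buffer is drawn or read
glReadBuffer(GL_NONE);
glFramebufferTexture2DEXT(GL_FRAMEBUFFER_EXT, GL_DEPTH_ATTACHMENT_EXT,
                          GL_TEXTURE_2D, depth_tex, 0);

if (glCheckFramebufferStatusEXT(GL_FRAMEBUFFER_EXT) != GL_FRAMEBUFFER_COMPLETE_EXT)
    fprintf(stderr, "depth FBO incomplete\n");
glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, 0);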
