I've been trying to get interleaved arrays working with core OpenGL for almost a day now, and nothing I try seems to work. I dunno if it has to do with the offsets for my vertex attributes, the way I'm representing my data, or what.
I hate to dump code on you all, but I have to show it to you before I can explain what's going wrong:
/*
* Vertex declaration
*/
ke_vertexattribute_t va[] =
{
{ 0, 3, GL_FLOAT, No, 6*sizeof(float), 0 },
{ 1, 3, GL_FLOAT, No, 6*sizeof(float), 3*sizeof(float) },
{ -1, 0, 0, 0, 0, 0 },
};
/*
* Default vertex/fragment programs
*/
char default_vp[] =
"#version 150\n"
"in vec3 in_pos;\n"
"in vec3 in_colour;\n"
"out vec3 out_colour;\n"
"void main(void)\n"
"{\n"
" gl_Position = vec4(in_pos, 1.0);\n"
" out_colour = in_colour;\n"
"}\n";
char default_fp[] =
"#version 150\n"
"in vec3 out_colour;\n"
"out vec4 colour;\n"
"void main(void)\n"
"{\n"
" colour = vec4(out_colour, 1.0);\n"
"}\n";
/*
* Working example
*/
GLfloat vertices[] =
{ -1.0f, -1.0f, 0.0f, 1.0f, 0.0f, 0.0f,
1.0f, -1.0f, 0.0f, 1.0f, 1.0f, 0.0f,
0.0f, 1.0f, 0.0f, 0.0f, 1.0f, 0.0f };
device = ke_create_renderdevice(&desc);
vb = new ke_ogl_vertexbuffer_t();
gp = new ke_ogl_gpu_program_t( default_vp, default_fp, NULL, NULL );
vb->set_buffer_data( sizeof( float ) * 18, vertices, No );
device->set_clear_colour_fv( colour );
while( index++ < 60000 )
{
device->clear_colour_buffer();
device->clear_depth_buffer();
device->set_program( gp );
device->set_vertex_buffer( vb );
device->set_vertex_attributes( va );
device->draw_vertices( GL_TRIANGLES, 0, 3 );
device->swap();
}
/*
* Name: ke_ogl_gpu_program_t::ke_ogl_gpu_program_t
* Desc: Initializes a GPU program with the shader sources of a vertex and fragment
* program. Tesselation and geometry programs are optional.
* TODO: Geometry and tesselation programs, allow user defined attribute locations.
*/
ke_ogl_gpu_program_t::ke_ogl_gpu_program_t( char* vertex_program, char* fragment_program, char* tesselation_program, char* geometry_program )
{
/* Set to uninitialized */
initialized = No;
GLuint p, f, v, t, g;
v = glCreateShader(GL_VERTEX_SHADER);
f = glCreateShader(GL_FRAGMENT_SHADER);
g = glCreateShader(GL_GEOMETRY_SHADER);
const char * vv = vertex_program;
const char * ff = fragment_program;
glShaderSource(v, 1, &vv, NULL);
glShaderSource(f, 1, &ff, NULL);
GLint compiled;
glCompileShader(v);
glGetShaderiv(v, GL_COMPILE_STATUS, &compiled);
if (!compiled)
{
printf("Vertex shader not compiled.\n");
}
glCompileShader(f);
glGetShaderiv(f, GL_COMPILE_STATUS, &compiled);
if (!compiled)
{
printf("Fragment shader not compiled.\n");
}
p = glCreateProgram();
glBindAttribLocation(p, 0, "in_pos");
glBindAttribLocation(p, 2, "in_normal");
glBindAttribLocation(p, 1, "in_colour");
glBindAttribLocation(p, 3, "in_tex1");
glBindAttribLocation(p, 4, "in_tex2");
glBindAttribLocation(p, 5, "in_tex3");
glBindAttribLocation(p, 6, "in_tex4");
glAttachShader(p,v);
glAttachShader(p,f);
glLinkProgram(p);
glUseProgram(p);
glDeleteShader(v);
glDeleteShader(f);
glDeleteShader(g);
program[0] = p;
initialized = Yes;
}
/*
* Name: ke_ogl_vertexbuffer::
* Desc:
*/
ke_ogl_vertexbuffer_t::ke_ogl_vertexbuffer_t()
{
/* Mark as uninitialized until the end */
initialized = No;
buffers[0] = 0;
buffers[1] = 0;
GLenum error = glGetError();
/* Generate vertex buffer object and array */
glGenVertexArrays( 1, &buffers[0] );
error = glGetError();
glGenBuffers( 1, &buffers[1] );
error = glGetError();
initialized = Yes;
}
void ke_ogl_vertexbuffer_t::set_buffer_data( uint32_t buffer_size, void* vertex_data, bool dynamic )
{
GLenum error = glGetError();
glBindVertexArray( buffers[0] );
error = glGetError();
glBindBuffer( GL_ARRAY_BUFFER, buffers[1] );
error = glGetError();
glBufferData( GL_ARRAY_BUFFER, buffer_size, vertex_data, dynamic ? GL_DYNAMIC_DRAW : GL_STATIC_DRAW );
error = glGetError();
glBindVertexArray(0);
}
/*
* Name: ke_ogl_renderdevice::set_vertex_buffer
* Desc: Sets the current vertex buffer to be used when rendering geometry.
*/
void ke_ogl_renderdevice_t::set_vertex_buffer( ke_vertexbuffer_t* vertexbuffer )
{
current_vertexbuffer = vertexbuffer;
}
void ke_ogl_renderdevice_t::set_program( ke_gpu_program_t* gpu_program )
{
/* Save a copy of this program */
current_gpu_program = gpu_program;
glUseProgram( ((uint32_t*)current_gpu_program->get_private_data())[0] );
}
void ke_ogl_renderdevice_t::set_vertex_attributes( ke_vertexattribute_t* vertex_attributes )
{
int i = 0;
/* Copy the vertex attributes */
while( vertex_attributes[i].index != -1 )
{
memmove( ¤t_vertexattribute[i], &vertex_attributes[i], sizeof( ke_vertexattribute_t ) );
i++;
}
}
/*
* Name: ke_ogl_renderdevice::draw_vertices
* Desc: Draws vertices from the current vertex buffer
*/
void ke_ogl_renderdevice_t::draw_vertices( uint32_t primtype, int first, int count )
{
GLenum error = glGetError();
/* Get a handle to the vertex array object */
uint32_t* buffers = (uint32_t*) current_vertexbuffer->get_private_data();
/* Bind the vertex array object and draw the vertices */
glBindVertexArray( buffers[0] );
error = glGetError();
glBindBuffer( GL_ARRAY_BUFFER, buffers[1] );
error = glGetError();
/* Set the vertex attributes before rendering */
for( int i = 0; current_vertexattribute[i].index != -1; i++ )
{
glVertexAttribPointer( current_vertexattribute[i].index,
current_vertexattribute[i].size,
current_vertexattribute[i].type,
current_vertexattribute[i].normalize,
current_vertexattribute[i].stride,
BUFFER_OFFSET( current_vertexattribute[i].offset ) );
glEnableVertexAttribArray(i);
}
glDrawArrays( primtype, first, count );
error = glGetError();
glBindVertexArray(0);
}
Okay, this is what goes on when I run this. If I run this code the way it is, nothing shows up at all. But if I comment out the 2nd line in my vertex declaration, like so:
/*
* Vertex declaration
*/
ke_vertexattribute_t va[] =
{
{ 0, 3, GL_FLOAT, No, 6*sizeof(float), 0 },
// { 1, 3, GL_FLOAT, No, 6*sizeof(float), 3*sizeof(float) },
{ -1, 0, 0, 0, 0, 0 },
};
Then the triangle will render, but solid black. I've checked the vertex attribute thing many times, and I can't see what's wrong with it.
And lastly, I don't want to limit my engine's rendering capabilities by only allowing multiple vertex buffers per attribute (especially since this is meant to be cross platform compatible with Direct3D), so I'm getting interleaved vertex attributes out of the way early. Some say that there's likely to be a performance hit, but after doing a little reasearch, it's clear to me that interleaved vertex data works best when data is aligned properly (or is that only for OpenGL ES?). Any ideas? Thanks.
Shogun