I really hope somebody can help me here.
But I'm not sure if I'm on the right "debugging track" ...
I load Tga files like in the Nehe tutorial #33.
But I use Streams instead of the FILE * stuff.
And I don't want to use malloc any more.
In the current version I use it and I guess that's where the problem is.
Another difference from the NeHe tutorial is that the header fields (width, height, type, etc.) are stored in class members —
everything except the image data itself.
// Dispatch on the first 12 header bytes: uCompare matches an uncompressed
// TGA (type 2), cCompare an RLE-compressed one (type 10).
if(memcmp( uCompare, &header, 12) == 0 )
{
if( LoadUncompressed( rstrFileName.c_str() ) )
{
// Upload the decoded pixels to OpenGL. texture[0].type is passed as BOTH
// internalformat and format — that only works if it is GL_RGB or GL_RGBA;
// NOTE(review): confirm type is set to one of those, and that width/height
// meet the GL version's size constraints (pre-2.0 requires powers of two).
glTexImage2D(
GL_TEXTURE_2D, 0, texture[0].type,
texture[0].width, texture[0].height, 0, texture[0].type,
GL_UNSIGNED_BYTE, texture[0].imageData );
// imageData was malloc'd inside the loader; GL has copied the pixels,
// so release the CPU-side buffer. (texture->imageData and
// texture[0].imageData are the same object — mixed style only.)
free( texture->imageData );
return true;
}
else
{
// NOTE(review): if the loader malloc'd imageData and then failed,
// nothing frees it on this path — potential leak (and a stale pointer
// that a later free() would double-free). Consider freeing + nulling
// inside the loader's failure path.
return false;
}
}
else if(memcmp( cCompare, &header, 12) == 0 )
{
if( LoadCompressed( rstrFileName.c_str() ) )
{
// Same upload as the uncompressed path — see notes above.
glTexImage2D(
GL_TEXTURE_2D, 0, texture[0].type,
texture[0].width, texture[0].height, 0, texture[0].type,
GL_UNSIGNED_BYTE, texture[0].imageData );
free( texture->imageData );
return true;
}
else
{
return false;
}
}
else
{
// Header matched neither supported TGA type (e.g. color-mapped or
// grayscale TGA) — treated as a fatal error.
Engine->QuitWithError( "TGA Type not supported" );
}
return false;
Then in LoadCompressed() (the chunk-header loop below is the RLE path; LoadUncompressed() just reads the pixels straight through) ...
.
.
.
texture->imageData=(byte*)malloc(tga.imageSize);
.
.
.
// Decode the RLE-compressed TGA pixel stream into texture->imageData.
// TGA RLE: each packet begins with a 1-byte header. High bit clear =>
// raw packet of (header + 1) literal pixels; high bit set => run-length
// packet of (header - 127) copies of the single pixel that follows.
do
{
    GLubyte chunkheader = 0;
    StreamIn.read( (char*)&chunkheader, sizeof(GLubyte) );
    if( !StreamIn )
    {
        // Truncated or unreadable file: without this check a short file
        // would spin forever re-reading a failed stream.
        return false;
    }
    if( chunkheader < 128 )
    {
        // Raw packet: read (chunkheader + 1) individual pixels.
        chunkheader++;
        for( short counter = 0; counter < chunkheader; counter++ )
        {
            for( int i = 0; i < tga.bytesPerPixel; i++ )
            {
                // BUG FIX: read each byte into its own slot. The original
                // read every byte into &colorbuffer (the buffer start /
                // the pointer variable itself), so colorbuffer[1..3] were
                // never filled from the stream — stale/garbage channels.
                StreamIn.read( (char*)&colorbuffer[i], sizeof(GLubyte) );
            }
            // TGA stores pixels as BGR(A); swap to RGB(A) while copying.
            texture->imageData[currentbyte]     = colorbuffer[2];
            texture->imageData[currentbyte + 1] = colorbuffer[1];
            texture->imageData[currentbyte + 2] = colorbuffer[0];
            if( tga.bytesPerPixel == 4 )
            {
                texture->imageData[currentbyte + 3] = colorbuffer[3];
            }
            currentbyte += tga.bytesPerPixel;
            currentpixel++;
            if( currentpixel > pixelcount )
            {
                // The file contains more pixel data than the header
                // promised — refuse rather than overrun imageData.
                return false;
            }
        }
    }
    else
    {
        // Run-length packet: one pixel follows, repeated (chunkheader - 127) times.
        chunkheader -= 127;
        for( int i = 0; i < tga.bytesPerPixel; i++ )
        {
            // Same fix as above: index into the buffer per byte.
            StreamIn.read( (char*)&colorbuffer[i], sizeof(GLubyte) );
        }
        for( short counter = 0; counter < chunkheader; counter++ )
        {
            // BGR(A) -> RGB(A) swap, repeated for the whole run.
            texture->imageData[currentbyte]     = colorbuffer[2];
            texture->imageData[currentbyte + 1] = colorbuffer[1];
            texture->imageData[currentbyte + 2] = colorbuffer[0];
            if( tga.bytesPerPixel == 4 )
            {
                texture->imageData[currentbyte + 3] = colorbuffer[3];
            }
            currentbyte += tga.bytesPerPixel;
            currentpixel++;
            if( currentpixel > pixelcount )
            {
                // Run overflows the image — corrupt file.
                return false;
            }
        }
    }
}
while( currentpixel < pixelcount );
StreamIn.close();
return true;
Here is what happens:
When I load one image at the right time it works.
When I load it a few lines later (after other resources have been loaded) it crashes.
It still finishes the do-while loop, but for some reason LoadUncompressed() returns false.
Each image has its own instance.
I guess it would work if I could save the image data in a C++ way.
Maybe it is just a pointer / reference problem ...
Are the arguments for glTexImage2D ok?
Or does anybody know what I could do to narrow down where the problem is?
[Edited by - Clueless on July 8, 2004 7:57:26 AM]