• Advertisement
Sign in to follow this  

OpenGL Texture loading

This topic is 1713 days old which is more than the 365 day threshold we allow for new replies. Please post a new topic.

If you intended to correct an error in the post then please contact us.

Recommended Posts

Hi there,

First of all, sorry for the noobish question. I just want to render a model with a texture. Without the use of a texture everything works fine; if I use a texture, nothing shows up.

 

Here's the relevant code:

 

In initBuffers

 

	// Create and fill the per-vertex texture-coordinate buffer (2 floats per vertex).
	glGenBuffers(1, &vertexTextureCoordBuffer);
	glBindBuffer(GL_ARRAY_BUFFER, vertexTextureCoordBuffer);

	// BUG FIX: the original uploaded loadedModel.positions into this buffer
	// while sizing it by textures.size() — the source array must be the
	// texture coordinates themselves.
	glBufferData(GL_ARRAY_BUFFER, sizeof(GLfloat)*loadedModel.textures.size(), &(loadedModel.textures[0]), GL_STATIC_DRAW);
	vertexTextField.itemSize=2;                               // (u, v) per vertex
	vertexTextField.numItems=loadedModel.textures.size() / 2; // float count / 2 = vertex count

 

Then somewhere 

 

	GLuint tex=loadBMP_custom("mypath\green.bmp");

 

 

Then in drawScene

 

		// Bind the texture to unit 0 and point the sampler uniform at it.
		glActiveTexture( GL_TEXTURE0 );
		glBindTexture(GL_TEXTURE_2D, tex);
		glUniform1i(program.samplerUniform, 0);

		glBindBuffer(GL_ARRAY_BUFFER, vertexTextureCoordBuffer);
		// FIX: the attribute array must be enabled or the coordinates are
		// never fed to the shader.  (NOTE(review): harmless if it is already
		// enabled elsewhere — confirm against the rest of the setup code.)
		glEnableVertexAttribArray(program.textureCoordAttribute);
		glVertexAttribPointer(
			program.textureCoordAttribute,
			vertexTextField.itemSize,   // 2 components (u, v)
			GL_FLOAT,
			GL_FALSE,                   // not normalized
			0,                          // tightly packed
			(void*)0
			);

 

 

 

Whereas loadBMP_custom is something found on opengl-tutorial.org, and here it is:

 


// Loads an uncompressed 24-bpp BMP file from 'imagepath' and uploads it as a
// GL_TEXTURE_2D with GL_REPEAT wrapping and trilinear (mipmapped) filtering.
// Returns the texture object id, or 0 on any failure.  The new texture is
// left bound on the currently active texture unit.
GLuint loadBMP_custom(const char * imagepath){

	printf("Reading image %s\n", imagepath);

	// Data read from the header of the BMP file
	unsigned char header[54];
	unsigned int dataPos;    // byte offset at which the pixel data starts
	unsigned int imageSize;  // size of the pixel data in bytes
	unsigned int width, height;
	// Actual BGR pixel data
	unsigned char * data;

	// Open the file
	FILE * file = fopen(imagepath,"rb");
	if (!file)							    {printf("%s could not be opened. Are you in the right directory ? Don't forget to read the FAQ !\n", imagepath); return 0;}

	// Read the header, i.e. the 54 first bytes.
	// If fewer than 54 bytes are read, problem.
	if ( fread(header, 1, 54, file)!=54 ){ 
		printf("Not a correct BMP file\n");
		fclose(file); // FIX: the original leaked the FILE* on this path
		return 0;
	}
	// A BMP file always begins with "BM"
	if ( header[0]!='B' || header[1]!='M' ){
		printf("Not a correct BMP file\n");
		fclose(file); // FIX: leaked FILE*
		return 0;
	}
	// Make sure this is an uncompressed (compression field 0) 24-bpp file
	if ( *(int*)&(header[0x1E])!=0  )         {printf("Not a correct BMP file\n");    fclose(file); return 0;}
	if ( *(int*)&(header[0x1C])!=24 )         {printf("Not a correct BMP file\n");    fclose(file); return 0;}

	// Read the information about the image
	dataPos    = *(int*)&(header[0x0A]);
	imageSize  = *(int*)&(header[0x22]);
	width      = *(int*)&(header[0x12]);
	height     = *(int*)&(header[0x16]);

	// Some BMP files are misformatted, guess missing information.
	// NOTE(review): width*height*3 ignores BMP's 4-byte row padding; this is
	// only exact when width*3 is a multiple of 4 — confirm your assets.
	if (imageSize==0)    imageSize=width*height*3; // 3 : one byte for each Red, Green and Blue component
	if (dataPos==0)      dataPos=54; // The BMP header is done that way

	// Create a buffer
	data = new unsigned char [imageSize];

	// FIX: honour dataPos — the pixel data does not always start right after
	// the 54-byte header.  (No-op when dataPos == 54.)
	fseek(file, dataPos, SEEK_SET);

	// Read the actual data from the file into the buffer.
	// FIX: check the read instead of silently texturing from garbage.
	if ( fread(data,1,imageSize,file)!=imageSize ){
		printf("Could not read the pixel data of %s\n", imagepath);
		delete [] data;
		fclose(file);
		return 0;
	}

	// Everything is in memory now, the file can be closed
	fclose (file);

	// Create one OpenGL texture
	GLuint textureID;
	glGenTextures(1, &textureID);
	
	// "Bind" the newly created texture : all future texture functions will modify this texture
	glBindTexture(GL_TEXTURE_2D, textureID);

	// Give the image to OpenGL (BMP stores pixels bottom-up as BGR)
	glTexImage2D(GL_TEXTURE_2D, 0,GL_RGB, width, height, 0, GL_BGR, GL_UNSIGNED_BYTE, data);

	// OpenGL has now copied the data. Free our own version
	delete [] data;

	// Repeat wrapping plus nice trilinear filtering from a full mipmap chain.
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR); 
	glGenerateMipmap(GL_TEXTURE_2D);

	// Return the ID of the texture we just created
	return textureID;
}

 

 

 

 

The shaders are: (I know that something is deprecated, but I need those things)

 

 



#version 330 core 

precision mediump float;

// Fragment shader: samples the diffuse texture and modulates it by a simple
// ambient + point-light diffuse term.  Legacy varying/gl_FragColor are kept
// deliberately (author states they are required).
varying vec3 vTransformedNormal; // eye-space normal, interpolated
    varying vec2 vTextureCoord;

varying vec4 vPosition;          // eye-space position of the fragment

uniform vec3 uPointLightingLocation;
uniform vec3 uPointLightingDiffuseColor;
uniform vec3 uAmbientColor;
// NOTE(review): uColor is currently unused — the texture fetch below fully
// determines the base color.  Kept declared for interface compatibility.
uniform vec3 uColor;

uniform sampler2D uSampler;

void main(void){
  
  vec3 lightWeighting;
  vec3 lightDirection = normalize(uPointLightingLocation - vPosition.xyz);
  vec3 normal = normalize(vTransformedNormal);
  float diffuseLightWeighting=max(dot(normal, lightDirection),0.0);
  lightWeighting=uAmbientColor+uPointLightingDiffuseColor*diffuseLightWeighting;

  // FIX: removed the dead store vec4(uColor.rgb, 1.0) that was immediately
  // overwritten by the texture fetch on the next line.
  vec4 fragmentColor = texture(uSampler, vTextureCoord);
  gl_FragColor=vec4(fragmentColor.rgb*lightWeighting, fragmentColor.a);
}

#version 330 core 
 
// Vertex shader: transforms position and normal into eye space and passes
// the texture coordinate through unchanged.  Legacy attribute/varying
// keywords are kept deliberately (author states they are required).
attribute vec2 aTextureCoord;
attribute vec3 aVertexPosition;
attribute vec3 aVertexNormal;

uniform mat3 uNMatrix;  // normal matrix — presumably inverse-transpose of the MV 3x3; confirm
uniform mat4 uPMatrix;  // projection matrix
uniform mat4 uMVMatrix; // model-view matrix

varying vec4 vPosition;          // eye-space position, consumed by the lighting code
varying vec3 vTransformedNormal; // eye-space normal
varying vec2 vTextureCoord;      // passed straight to the fragment sampler


void main(void){
	vPosition=uMVMatrix*vec4(aVertexPosition, 1.0);
	vTransformedNormal=uNMatrix*aVertexNormal;
    vTextureCoord = aTextureCoord;

	gl_Position=uPMatrix*vPosition;
}

 

 

Thank you so much :D 

Edited by cifa

Share this post


Link to post
Share on other sites
Advertisement

i just wonder where is glEnable(GL_TEXTURE_2D);     ?

 

tell me if that works ;]

 

Nope, not working :( 

Thank you anyway

Share this post


Link to post
Share on other sites

What do you mean with "nothing shows up"? Does stuff render on the screen if you replace the output color with red for example? I'd suggest to run your program with gDebugger, you can check with it that your texture is loaded correctly and shaders linked. And does that "mypath\green.bmp" really work? \ is used for escape sequences, so I'd suggest you to change it to /

Share this post


Link to post
Share on other sites

i just wonder where is glEnable(GL_TEXUTRE_2D);     ?

 

tell me if that works ;]

 

This is not used with shaders.

 

Some things you can try:

 

As Sponji said, this line is bad:

 

	GLuint tex=loadBMP_custom("mypath\green.bmp");

 

Instead of "mypath\green.bmp" use "mypath\\green.bmp" or "mypath/green.bmp" - the compiler will interpret "\g" as an escape sequence.  The fact that you don't appear to be error checking this is not a good sign; at the very least you should be checking that you are actually getting a non-zero return value from your loadBMP_custom function.  I wonder what compiler you're using that you're not getting at least a warning from trying to use "\g"?

 

Reduce your fragment shader to an absolute minimum; i.e just a "gl_FragColor = texture(uSampler, vTextureCoord);" line.  If this works then your texture loading is OK and the problem is in your lighting code.  Note that I'm not recommending this as a solution - it's a testing/debugging step.

 

Check that your shaders have compiled and linked successfully.

 

Double-check your source bitmap image.  Is it a power of two?  Is its width * bpp a multiple of 4?  These are all things that can cause problems.

 

Sprinkle some "assert (glGetError () == GL_NO_ERROR);" calls at key points in your code - after texture loading, shader compilation, vertex setup, etc.  Run in the debugger and ensure that everything works without internal GL errors.

Share this post


Link to post
Share on other sites

What do you mean with "nothing shows up"? Does stuff render on the screen if you replace the output color with red for example? I'd suggest to run your program with gDebugger, you can check with it that your texture is loaded correctly and shaders linked. And does that "mypath\green.bmp" really work? \ is used for escape sequences, so I'd suggest you to change it to /

 
 
Yup, if I switch program to one with a fragment shader that output a color, it works.
The path string is a not the real one, it is correct in code biggrin.png
 


i just wonder where is glEnable(GL_TEXUTRE_2D);     ?
 
tell me if that works ;]

 
This is not used with shaders.
 
Some things you can try:
 
As Sponji said, this line is bad:
 
	GLuint tex=loadBMP_custom("mypath\green.bmp");
 
Instead of "mypath\green.bmp" use "mypath\\green.bmp" or "mypath/green.bmp" - the compiler will interpret "\g" as an escape sequence.  The fact that you don't appear ro be error checking this is not a good sign; at the very least you should be checking that you are actually getting a non-zero return value from your loadBMP_custom function.  I wonder what compiler you're using that you're not getting at least a warning from trying to use "\g"?
 
Reduce your fragment shader to an absolute minimum; i.e just a "gl_FragColor = texture(uSampler, vTextureCoord);" line.  If this works then your texture loading is OK and the problem is in your lighting code.  Note that I'm not recommending this as a solution - it's a testing/debugging step.
 
Check that your shaders have compiled and linked successfully.
 
Double-check your source bitmap image.  Is it a power of two?  Is it's width * bpp a multiple of 4?  These are all things that can cause problems.
 
Sprinkle some "assert (glGetError () == GL_NO_ERROR);" calls at key points in your code - after texture loading, shader compilation, vertex setup, etc.  Run in the debugger and ensure that everything works without internal GL errors.


 
As I told to Sponji, the path is not written that way in code. 
The same shader without the texture stuff, works fine. I've a specular code with everything that's the same, except for the texture stuff, so I'm pretty sure that my problem is there.
Oh and the .bmp is fine! 
I'll play around with assert now biggrin.png
 
 
Thank you both for your answer.
 
 
 
EDIT: 
 
It appears that I was a little too drowsy when I was writing my initShader function; I was searching for the texture coord attribute in the wrong program (I have 3 of them, one of them without texture).
Thank you for your support and I'm sorry for the loss of time biggrin.png Edited by cifa

Share this post


Link to post
Share on other sites
Sign in to follow this  

  • Advertisement
  • Advertisement
  • Popular Now

  • Advertisement
  • Similar Content

    • By Balma Alparisi
      i got error 1282 in my code.
      sf::ContextSettings settings; settings.majorVersion = 4; settings.minorVersion = 5; settings.attributeFlags = settings.Core; sf::Window window; window.create(sf::VideoMode(1600, 900), "Texture Unit Rectangle", sf::Style::Close, settings); window.setActive(true); window.setVerticalSyncEnabled(true); glewInit(); GLuint shaderProgram = createShaderProgram("FX/Rectangle.vss", "FX/Rectangle.fss"); float vertex[] = { -0.5f,0.5f,0.0f, 0.0f,0.0f, -0.5f,-0.5f,0.0f, 0.0f,1.0f, 0.5f,0.5f,0.0f, 1.0f,0.0f, 0.5,-0.5f,0.0f, 1.0f,1.0f, }; GLuint indices[] = { 0,1,2, 1,2,3, }; GLuint vao; glGenVertexArrays(1, &vao); glBindVertexArray(vao); GLuint vbo; glGenBuffers(1, &vbo); glBindBuffer(GL_ARRAY_BUFFER, vbo); glBufferData(GL_ARRAY_BUFFER, sizeof(vertex), vertex, GL_STATIC_DRAW); GLuint ebo; glGenBuffers(1, &ebo); glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, ebo); glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(indices), indices,GL_STATIC_DRAW); glVertexAttribPointer(0, 3, GL_FLOAT, false, sizeof(float) * 5, (void*)0); glEnableVertexAttribArray(0); glVertexAttribPointer(1, 2, GL_FLOAT, false, sizeof(float) * 5, (void*)(sizeof(float) * 3)); glEnableVertexAttribArray(1); GLuint texture[2]; glGenTextures(2, texture); glActiveTexture(GL_TEXTURE0); glBindTexture(GL_TEXTURE_2D, texture[0]); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); sf::Image* imageOne = new sf::Image; bool isImageOneLoaded = imageOne->loadFromFile("Texture/container.jpg"); if (isImageOneLoaded) { glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, imageOne->getSize().x, imageOne->getSize().y, 0, GL_RGBA, GL_UNSIGNED_BYTE, imageOne->getPixelsPtr()); glGenerateMipmap(GL_TEXTURE_2D); } delete imageOne; glActiveTexture(GL_TEXTURE1); glBindTexture(GL_TEXTURE_2D, texture[1]); glTexParameteri(GL_TEXTURE_2D, 
GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); sf::Image* imageTwo = new sf::Image; bool isImageTwoLoaded = imageTwo->loadFromFile("Texture/awesomeface.png"); if (isImageTwoLoaded) { glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, imageTwo->getSize().x, imageTwo->getSize().y, 0, GL_RGBA, GL_UNSIGNED_BYTE, imageTwo->getPixelsPtr()); glGenerateMipmap(GL_TEXTURE_2D); } delete imageTwo; glUniform1i(glGetUniformLocation(shaderProgram, "inTextureOne"), 0); glUniform1i(glGetUniformLocation(shaderProgram, "inTextureTwo"), 1); GLenum error = glGetError(); std::cout << error << std::endl; sf::Event event; bool isRunning = true; while (isRunning) { while (window.pollEvent(event)) { if (event.type == event.Closed) { isRunning = false; } } glClear(GL_COLOR_BUFFER_BIT); if (isImageOneLoaded && isImageTwoLoaded) { glActiveTexture(GL_TEXTURE0); glBindTexture(GL_TEXTURE_2D, texture[0]); glActiveTexture(GL_TEXTURE1); glBindTexture(GL_TEXTURE_2D, texture[1]); glUseProgram(shaderProgram); } glBindVertexArray(vao); glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_INT, nullptr); glBindVertexArray(0); window.display(); } glDeleteVertexArrays(1, &vao); glDeleteBuffers(1, &vbo); glDeleteBuffers(1, &ebo); glDeleteProgram(shaderProgram); glDeleteTextures(2,texture); return 0; } and this is the vertex shader
      #version 450 core layout(location=0) in vec3 inPos; layout(location=1) in vec2 inTexCoord; out vec2 TexCoord; void main() { gl_Position=vec4(inPos,1.0); TexCoord=inTexCoord; } and the fragment shader
      #version 450 core in vec2 TexCoord; uniform sampler2D inTextureOne; uniform sampler2D inTextureTwo; out vec4 FragmentColor; void main() { FragmentColor=mix(texture(inTextureOne,TexCoord),texture(inTextureTwo,TexCoord),0.2); } I was expecting awesomeface.png on top of container.jpg

    • By khawk
      We've just released all of the source code for the NeHe OpenGL lessons on our Github page at https://github.com/gamedev-net/nehe-opengl. code - 43 total platforms, configurations, and languages are included.
      Now operated by GameDev.net, NeHe is located at http://nehe.gamedev.net where it has been a valuable resource for developers wanting to learn OpenGL and graphics programming.

      View full story
    • By TheChubu
      The Khronos™ Group, an open consortium of leading hardware and software companies, announces from the SIGGRAPH 2017 Conference the immediate public availability of the OpenGL® 4.6 specification. OpenGL 4.6 integrates the functionality of numerous ARB and EXT extensions created by Khronos members AMD, Intel, and NVIDIA into core, including the capability to ingest SPIR-V™ shaders.
      SPIR-V is a Khronos-defined standard intermediate language for parallel compute and graphics, which enables content creators to simplify their shader authoring and management pipelines while providing significant source shading language flexibility. OpenGL 4.6 adds support for ingesting SPIR-V shaders to the core specification, guaranteeing that SPIR-V shaders will be widely supported by OpenGL implementations.
      OpenGL 4.6 adds the functionality of these ARB extensions to OpenGL’s core specification:
      GL_ARB_gl_spirv and GL_ARB_spirv_extensions to standardize SPIR-V support for OpenGL GL_ARB_indirect_parameters and GL_ARB_shader_draw_parameters for reducing the CPU overhead associated with rendering batches of geometry GL_ARB_pipeline_statistics_query and GL_ARB_transform_feedback_overflow_querystandardize OpenGL support for features available in Direct3D GL_ARB_texture_filter_anisotropic (based on GL_EXT_texture_filter_anisotropic) brings previously IP encumbered functionality into OpenGL to improve the visual quality of textured scenes GL_ARB_polygon_offset_clamp (based on GL_EXT_polygon_offset_clamp) suppresses a common visual artifact known as a “light leak” associated with rendering shadows GL_ARB_shader_atomic_counter_ops and GL_ARB_shader_group_vote add shader intrinsics supported by all desktop vendors to improve functionality and performance GL_KHR_no_error reduces driver overhead by allowing the application to indicate that it expects error-free operation so errors need not be generated In addition to the above features being added to OpenGL 4.6, the following are being released as extensions:
      GL_KHR_parallel_shader_compile allows applications to launch multiple shader compile threads to improve shader compile throughput WGL_ARB_create_context_no_error and GXL_ARB_create_context_no_error allow no error contexts to be created with WGL or GLX that support the GL_KHR_no_error extension “I’m proud to announce OpenGL 4.6 as the most feature-rich version of OpenGL yet. We've brought together the most popular, widely-supported extensions into a new core specification to give OpenGL developers and end users an improved baseline feature set. This includes resolving previous intellectual property roadblocks to bringing anisotropic texture filtering and polygon offset clamping into the core specification to enable widespread implementation and usage,” said Piers Daniell, chair of the OpenGL Working Group at Khronos. “The OpenGL working group will continue to respond to market needs and work with GPU vendors to ensure OpenGL remains a viable and evolving graphics API for all its customers and users across many vital industries.“
      The OpenGL 4.6 specification can be found at https://khronos.org/registry/OpenGL/index_gl.php. The GLSL to SPIR-V compiler glslang has been updated with GLSL 4.60 support, and can be found at https://github.com/KhronosGroup/glslang.
      Sophisticated graphics applications will also benefit from a set of newly released extensions for both OpenGL and OpenGL ES to enable interoperability with Vulkan and Direct3D. These extensions are named:
      GL_EXT_memory_object GL_EXT_memory_object_fd GL_EXT_memory_object_win32 GL_EXT_semaphore GL_EXT_semaphore_fd GL_EXT_semaphore_win32 GL_EXT_win32_keyed_mutex They can be found at: https://khronos.org/registry/OpenGL/index_gl.php
      Industry Support for OpenGL 4.6
      “With OpenGL 4.6 our customers have an improved set of core features available on our full range of OpenGL 4.x capable GPUs. These features provide improved rendering quality, performance and functionality. As the graphics industry’s most popular API, we fully support OpenGL and will continue to work closely with the Khronos Group on the development of new OpenGL specifications and extensions for our customers. NVIDIA has released beta OpenGL 4.6 drivers today at https://developer.nvidia.com/opengl-driver so developers can use these new features right away,” said Bob Pette, vice president, Professional Graphics at NVIDIA.
      "OpenGL 4.6 will be the first OpenGL release where conformant open source implementations based on the Mesa project will be deliverable in a reasonable timeframe after release. The open sourcing of the OpenGL conformance test suite and ongoing work between Khronos and X.org will also allow for non-vendor led open source implementations to achieve conformance in the near future," said David Airlie, senior principal engineer at Red Hat, and developer on Mesa/X.org projects.

      View full story
    • By _OskaR
      Hi,
      I have an OpenGL application but without possibility to wite own shaders.
      I need to perform small VS modification - is possible to do it in an alternative way? Do we have apps or driver modifictions which will catch the shader sent to GPU and override it?
    • By xhcao
      Does sync be needed to read texture content after access texture image in compute shader?
      My simple code is as below,
      glUseProgram(program.get());
      glBindImageTexture(0, texture[0], 0, GL_FALSE, 3, GL_READ_ONLY, GL_R32UI);
      glBindImageTexture(1, texture[1], 0, GL_FALSE, 4, GL_WRITE_ONLY, GL_R32UI);
      glDispatchCompute(1, 1, 1);
      // Does sync be needed here?
      glUseProgram(0);
      glBindFramebuffer(GL_READ_FRAMEBUFFER, framebuffer);
      glFramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
                                     GL_TEXTURE_CUBE_MAP_POSITIVE_X + face, texture[1], 0);
      glReadPixels(0, 0, kWidth, kHeight, GL_RED_INTEGER, GL_UNSIGNED_INT, outputValues);
       
      Compute shader is very simple, imageLoad content from texture[0], and imageStore content to texture[1]. Does need to sync after dispatchCompute?
  • Advertisement