• Advertisement
Sign in to follow this  

OpenGL GLSL normal mapping strange behaviour

This topic is 1396 days old which is more than the 365 day threshold we allow for new replies. Please post a new topic.

If you intended to correct an error in the post then please contact us.

Recommended Posts

Hi,

 

I was trying to add a normal map effect to a shader tutorial I have found here but it's strangely flickering O.O

The vertex shader code:

 

UPDATE 2:

http://www.gamedev.net/topic/654758-glsl-normal-mapping-strange-behaviour/?view=findpost&p=5141727

 

UPDATE 1:

http://www.gamedev.net/topic/654758-glsl-normal-mapping-strange-behaviour/?view=findpost&p=5141553

// Vertex shader (original version from the post): passes attributes through
// and computes the clip-space position.
// NOTE(review): despite its name, modelViewProjectionMatrix appears to hold
// only the model transform here, since `camera` is multiplied in separately
// below — confirm against the C++ side that uploads these uniforms.
#version 330


in vec3 inPosition;
in vec3 vertNormal;
in vec2 vertTexCoord;
in vec4 vertNormalMapping;

out vec3 surfacePos;
out vec3 fragNormal;
out vec2 fragTexCoord;
out vec4 fragNormalMapping;

uniform mat4 modelViewProjectionMatrix;
uniform mat4 camera;

void main(){	
    // Forward texture coordinate, normal and the "has normal map" flag
    // unchanged; they are interpolated for the fragment shader.
    fragTexCoord = vertTexCoord;
    fragNormal = vertNormal;
    // Surface position used for lighting in the fragment shader.
    // NOTE(review): if this matrix really contains projection, surfacePos is
    // in clip space, but the fragment lighting math expects world (or view)
    // space — this mismatch matches the flickering described in the thread.
    surfacePos = vec3(modelViewProjectionMatrix * vec4(inPosition, 1));
    fragNormalMapping = vertNormalMapping;



    gl_Position  = camera * modelViewProjectionMatrix * vec4(inPosition, 1.0);

	
	
}

The fragment shader

// Fragment shader (original version from the post): Phong-style point-light
// shading with an optional tangent-space normal map and gamma correction.
#version 330

precision highp float;


uniform vec3 cameraPosition;
uniform mat4 modelViewProjectionMatrix;
uniform sampler2D tex;
uniform sampler2D normalMap; 



uniform float materialShininess;
uniform vec3 materialSpecularColor;

// Single point light with quadratic attenuation.
uniform struct Light {
   vec3 position;
   vec3 intensities; //a.k.a the color of the light
   float attenuation;
   float ambientCoefficient;
} light;


in vec3 fragNormal;
in vec3 surfacePos;
in vec2 fragTexCoord;
in vec4 fragNormalMapping;

out vec4 finalColor;

void main() {

	
	
	 // NOTE(review): this local shadows the sampler uniform of the same name,
	 // so texture(normalMap, ...) below resolves to this vec3 on strict GLSL
	 // compilers (a type error) — rename one of the two.
	 vec3 normalMap;
	 
	if(fragNormalMapping[0] == 1.0f){	//has a normal map?			
		// Unpack the RGB-encoded normal from [0,1] to [-1,1].
		normalMap =  texture(normalMap, fragTexCoord).xyz * 2.0 - 1.0;
		// Flip the green channel (DirectX- vs OpenGL-style normal maps).
		normalMap.g = 1.0-normalMap.g;

		normalMap = normalize(normalMap);
		// NOTE(review): the sampled normal is in tangent space but is used
		// below against surfaceToLight/surfaceToCamera, which are in the
		// space of surfacePos — a TBN transform is missing here.
	}else{ //No normal map texture so just normal calculation
		// Transform the vertex normal with the inverse-transpose normal matrix.
		normalMap = normalize(transpose(inverse(mat3(modelViewProjectionMatrix))) * (fragNormal));
	}
	


   
   
    vec4 surfaceColor = texture(tex, fragTexCoord);
    vec3 surfaceToLight = normalize(light.position - surfacePos);
    vec3 surfaceToCamera = normalize(cameraPosition - surfacePos);
    
    //ambient
    vec3 ambient = light.ambientCoefficient * surfaceColor.rgb * light.intensities;

    //diffuse0.0
    float diffuseCoefficient =max( dot(normalMap, surfaceToLight),0.0);
    vec3 diffuse = diffuseCoefficient * surfaceColor.rgb * light.intensities;
    
    //specular
    float specularCoefficient = 0.0;

    // Specular only on surfaces that face the light.
    if(diffuseCoefficient > 0.0){		
		specularCoefficient = pow(max(0.0, dot(surfaceToCamera, reflect(-surfaceToLight, normalMap))), materialShininess);       
	}
    vec3 specular = specularCoefficient * materialSpecularColor * light.intensities;
    
    //attenuation
    float distanceToLight = length(light.position - surfacePos);
    float attenuation = 1.0 / (1.0 + light.attenuation * pow(distanceToLight, 2));

    //linear color (color before gamma correction)	 
	vec3 linearColor = ambient + attenuation*(diffuse + specular);
   
    
    //final color (after gamma correction)
    vec3 gamma = vec3(1.0/2.2);

    finalColor = vec4(pow(linearColor, gamma), surfaceColor.a);
}

For better testing I'm using a plane with a one color texture and a bump texture

this is the result

ibahVfbbiKuHRO.jpg

The light is following the camera and I can see that the normal map is calculated correctly. But as you can see there is something wrong with the "shadows" and I really don't know what it is.

 

 

 

If I load objects without a bump map the result with the same shader is perfect.

ib01goUaO1QMeA.jpg

 

thank you everyone for the help =D

Edited by DaviDeMo

Share this post


Link to post
Share on other sites
Advertisement

Don't calculate surfacePos using the (wv-)projection matrix; transform only by the world-transform part.

Share this post


Link to post
Share on other sites

Don't calculate surfacePos using the (wv-)projection matrix; transform only by the world-transform part.

seems like nothing changed.. oh, yes, the normal map changed but the lines are still there :|

Later I will post the other code. Thank you

Share this post


Link to post
Share on other sites
if(fragNormalMapping[0] == 1.0f){	//has a normal map?			
		normalMap =  texture(normalMap, fragTexCoord).xyz * 2.0 - 1.0;
		normalMap.g = 1.0-normalMap.g;

		normalMap = normalize(normalMap);
	}else{ //No normal map texture so just normal calculation
		normalMap = normalize(transpose(inverse(mat3(modelViewProjectionMatrix))) * (fragNormal));
	}

This seems suspicious to me. In the case of no normal map, you transform the object-space normal into projection space, but in the case of a texture normal you just sample the texture and do not transform it into projection space. You need to build a tangent-space matrix and apply the tangent*world*view*projection transformation to the normal sampled from the texture, unless your normal map stores object-space normals.

 

 

Share this post


Link to post
Share on other sites

Thanks everyone. I edited the code, here is the new version

 

 

 

Vertex shader

// Vertex shader (first update): builds a per-vertex TBN matrix and passes
// lighting inputs to the fragment stage.
#version 330


in vec3 inPosition;
in vec3 vertNormal;
in vec2 vertTexCoord;
in vec4 vertNormalMapping;

out vec3 fragVert;
out vec3 fragNormal;
out vec2 fragTexCoord;
out vec4 fragNormalMapping;
out mat3 TBNMatrix;

uniform mat4 modelViewProjectionMatrix;
uniform mat4 camera;

void main(){	

	vec3 tangent; 
	vec3 binormal; 

	// Pick a tangent by crossing the normal with a fixed axis, choosing the
	// axis least parallel to the normal.
	// NOTE(review): this tangent is arbitrary and not aligned with the normal
	// map's UV directions, so tangent-space lighting will be wrong for most
	// meshes — proper tangents come from the triangle/UV data.
	vec3 c1 = cross( vertNormal, vec3(0.0, 0.0, 1.0) ); 
	vec3 c2 = cross( vertNormal, vec3(0.0, 1.0, 0.0) ); 

	if( length(c1)>length(c2) )
	{
		tangent = c1;	
	}
	else
	{
		tangent = c2;	
	}

	tangent = normalize(tangent);

	binormal = cross(vertNormal, tangent); 
	binormal = normalize(binormal);

	// NOTE(review): this normal matrix is built from a product that appears to
	// include the projection (camera * modelViewProjectionMatrix); normals
	// should use the inverse-transpose of the model(-view) matrix only —
	// verify what the application uploads in these uniforms.
	mat3 normalMatrix = transpose(inverse(mat3(camera * modelViewProjectionMatrix )));
	vec3 n = normalize(normalMatrix * vertNormal);
	vec3 t = normalize(normalMatrix * tangent.xyz);
	vec3 b = normalize(normalMatrix * binormal.xyz);
	TBNMatrix = mat3(t, b, n);

	

	fragTexCoord = vertTexCoord;
    fragNormal = vertNormal;
	fragVert = inPosition;
	fragNormalMapping = vertNormalMapping;


	gl_Position  = camera * modelViewProjectionMatrix * vec4(inPosition, 1.0);

	
	
}

Fragment shader

// Fragment shader (first update): lighting with a TBN matrix supplied by the
// vertex shader.
#version 330

precision highp float;


uniform vec3 cameraPosition;
uniform mat4 modelViewProjectionMatrix;
uniform mat4 camera;
uniform sampler2D tex;
uniform sampler2D normalMap; 



uniform float materialShininess;
uniform vec3 materialSpecularColor;

// Single point light with quadratic attenuation.
uniform struct Light {
   vec3 position;
   vec3 intensities; //a.k.a the color of the light
   float attenuation;
   float ambientCoefficient;
} light;


in vec3 fragNormal;
in vec3 fragVert;
in vec2 fragTexCoord;
in vec4 fragNormalMapping;
in mat3 TBNMatrix;

out vec4 finalColor;

void main() {

	// NOTE(review): the position is transformed by a matrix named
	// modelViewProjectionMatrix — if it truly contains projection, lighting is
	// computed in clip space. Better to compute a world-space position in the
	// vertex shader and pass it in.
	vec3 surfacePos = vec3(modelViewProjectionMatrix * vec4(fragVert, 1));
    vec4 surfaceColor = texture(tex, fragTexCoord);
    // Bring the light/view directions into the TBN's space.
    // NOTE(review): these vectors are not normalized before the dot products
    // below, which skews both the diffuse and specular terms.
    vec3 surfaceToLight = TBNMatrix * (light.position - surfacePos) ;
    vec3 surfaceToCamera = TBNMatrix * (cameraPosition - surfacePos);



	// Unpack the tangent-space normal from the normal map ([0,1] -> [-1,1]).
	vec3 normal = normalize(texture(normalMap, fragTexCoord).xyz * 2.0 - 1.0);
	

	//ambient
	vec3 ambient = light.ambientCoefficient * surfaceColor.rgb * light.intensities;
	
	//diffuse
    float diffuseCoefficient = max(0.0, dot(normal, surfaceToLight));
    vec3 diffuse = diffuseCoefficient * surfaceColor.rgb * light.intensities;

	//specular
    float specularCoefficient = 0.0;
    if(diffuseCoefficient > 0.0)
        specularCoefficient = pow(max(0.0, dot(surfaceToCamera, reflect(-surfaceToLight, normal))), materialShininess);
    vec3 specular = specularCoefficient * materialSpecularColor * light.intensities;
    
    //attenuation
    float distanceToLight = length(light.position - surfacePos);
    float attenuation = 1.0 / (1.0 + light.attenuation * pow(distanceToLight, 2));

    //linear color (color before gamma correction)
    vec3 linearColor = ambient + attenuation*(diffuse + specular);
    
    //final color (after gamma correction)
    vec3 gamma = vec3(1.0/2.2);
    finalColor = vec4(pow(linearColor, gamma), surfaceColor.a);
	
}

There is no more flickering and I see something that someone can say "mhm... it seems like this should be a normal map" but the light is... seems random O.O

When I rotate the light rotate with me in another direction creating some very strange darker sides somewhere O.O

 

You see this line? yeah. Maybe tomorrow I see where I'm wrong

i5J3tdtjFF4gU.jpg

Edited by DaviDeMo

Share this post


Link to post
Share on other sites
if(fragNormalMapping[0] == 1.0f){
        normal = normalize(texture(heightMap, fragTexCoord).xyz * 2.0 - 1.0);
    }else{
        normal = normalize(texture(heightMap, fragTexCoord).xyz * 2.0 - 1.0);
    }

Errr I think you have a typo  

 

You are sampling the height map in both cases, shouldn't one be the normal map?

Share this post


Link to post
Share on other sites
if(fragNormalMapping[0] == 1.0f){
        normal = normalize(texture(heightMap, fragTexCoord).xyz * 2.0 - 1.0);
    }else{
        normal = normalize(texture(heightMap, fragTexCoord).xyz * 2.0 - 1.0);
    }

Errr I think you have a typo  

 

You are sampling the height map in both cases, shouldn't one be the normal map?

 

Whoops, sorry, I edited it. I did this to get the shader to compile without changing the C++ code xD — I had to use the fragNormalMapping variable somewhere for it to compile =D

Share this post


Link to post
Share on other sites

Update of the code
 
I have added a correct TBN matrix... or I hope.
 
 
vert shader

// Vertex shader (second update): separate transform/camera/proj uniforms and
// a transposed TBN for tangent-space lighting.
#version 330


in vec3 inPosition;
in vec3 vertNormal;
in vec2 vertTexCoord;
in vec4 vertNormalMapping;

out vec3 fragVert;
out vec3 fragNormal;
out vec2 fragTexCoord;
out vec4 fragNormalMapping;
out mat3 TBNMatrix;
out mat3 normalMatrix;

uniform mat4 transform;
uniform mat4 camera;
uniform mat4 proj;

void main(){	

	vec3 tangent; 
	
	// Arbitrary axis-cross tangent — see earlier note: not aligned with the
	// normal map's UV directions, so the resulting TBN is unreliable.
	vec3 c1 = cross( vertNormal, vec3(0.0, 0.0, 1.0) ); 
	vec3 c2 = cross( vertNormal, vec3(0.0, 1.0, 0.0) ); 
	vec3 b;
	if( length(c1)>length(c2) )
	{
		tangent = c1;	
	}
	else
	{
		tangent = c2;	
	}

	tangent = normalize(tangent);
	
	

	// NOTE(review): the multiplication order is inconsistent here — mv is
	// built as transform * camera while mvp uses proj * camera * transform.
	// One of the two is reversed; mv should presumably be camera * transform.
	mat4 mv = transform  * camera;//modelview
	mat4 mvp = proj * camera * transform;//modelviewprojection
	mat4 mvi = transpose(inverse(mv));//modelview inverse (=gl_NormalMatrix)

	// NOTE(review): this local declaration shadows the `out mat3 normalMatrix`
	// above, so that out variable is never written and the fragment shader
	// receives an undefined value for it.
	mat3 normalMatrix = mat3(mvi);
	vec3 n = normalize( ( normalMatrix *  vertNormal )  );
	vec3 t = normalize( ( normalMatrix *  tangent.xyz ) );
	b = cross(vertNormal, tangent); 
	b = normalize(normalMatrix * b);

	// Transposing the (orthonormal) TBN inverts it: the matrix now maps from
	// eye space into tangent space.
	TBNMatrix = mat3(t, b, n);
	TBNMatrix = transpose(TBNMatrix);
	

	fragTexCoord = vertTexCoord;
    fragNormal = vertNormal;
	fragVert = inPosition;
	fragNormalMapping = vertNormalMapping;



	gl_Position  = mvp * vec4(inPosition, 1.0);

	
	
}

 
fragment shader
 

// Fragment shader (second update): world-space lighting with a normal map.
#version 330

precision highp float;


uniform vec3 cameraPosition;
uniform mat4 transform;
uniform mat4 camera;
uniform sampler2D tex;
uniform sampler2D normalMap; 



uniform float materialShininess;
uniform vec3 materialSpecularColor;

// Single point light with quadratic attenuation.
uniform struct Light {
   vec3 position;
   vec3 intensities; //a.k.a the color of the light
   float attenuation;
   float ambientCoefficient;
} light;


in vec3 fragNormal;
in vec3 fragVert;
in vec2 fragTexCoord;
in vec4 fragNormalMapping;
in mat3 TBNMatrix;
in mat3 normalMatrix;

out vec4 finalColor;

void main() {

	// World-space surface position (transform is the model matrix here).
	vec3 surfacePos = vec3(transform  * vec4(fragVert,1.0));
    vec4 surfaceColor = texture(tex, fragTexCoord);
    vec3 surfaceToLight = normalize( light.position - surfacePos)  ;
    vec3 surfaceToCamera = normalize( cameraPosition - surfacePos) ;



	// NOTE(review): both branches are identical, and the sampled normal is in
	// tangent space while surfaceToLight/surfaceToCamera are in world space —
	// the TBNMatrix input is received but never applied, so the spaces used
	// in the lighting below do not match.
	vec3 normal;
	if(fragNormalMapping[0] == 1.0f){
		normal = normalize(texture(normalMap, fragTexCoord).xyz * 2.0 - 1.0);
	}else{
		normal = normalize(texture(normalMap, fragTexCoord).xyz * 2.0 - 1.0);
	}

	//ambient
	vec3 ambient = light.ambientCoefficient * surfaceColor.rgb * light.intensities;
	
	//diffuse
    float diffuseCoefficient = max(0.0, dot(normal, surfaceToLight));
    vec3 diffuse = diffuseCoefficient * surfaceColor.rgb * light.intensities;

	//specular
    float specularCoefficient = 0.0;
    if(diffuseCoefficient > 0.0)
        specularCoefficient = pow(max(0.0, dot(surfaceToCamera, reflect(-surfaceToLight, normal))), materialShininess);
    vec3 specular = specularCoefficient * materialSpecularColor * light.intensities;
    
    //attenuation
    float distanceToLight = length(light.position - surfacePos);
    float attenuation = 1.0 / (1.0 + light.attenuation * pow(distanceToLight, 2));

    //linear color (color before gamma correction)
    vec3 linearColor = ambient + attenuation*(diffuse + specular);
    
    //final color (after gamma correction)
    vec3 gamma = vec3(1.0/2.2);
    finalColor = vec4(pow(linearColor, gamma), surfaceColor.a);
	
} 

The result is still wrong :-(

 

ior1DN7SXURAI.jpg

 

mhm... O.O

Share this post


Link to post
Share on other sites

First things first: you're not even using the TBN! :D You need to take the TBN and multiply it by your normal map, otherwise what's the point? tongue.png

 

A side note: I don't trust your TBN calculations as they don't actually use the triangle data, but some arbitrary directions. This isn't a trustworthy approach and even if you use it, I don't expect proper results from it.

 

Instead, try this in your fragment shader (pulled from my engine, you're welcome happy.png):

// Builds a tangent-space ("cotangent") frame per fragment from screen-space
// derivatives, avoiding precomputed per-vertex tangents.
// (Technique: Christian Schüler, "Normal Mapping Without Precomputed Tangents".)
// vNormal:   interpolated surface normal (assumed unit length — confirm)
// vPosition: world-space position of the fragment
// vTexCoord: texture coordinates used by the normal map
// Returns a mat3 whose columns are (tangent, binormal, normal).
mat3 ComputeCotangentFrame( vec3 vNormal, vec3 vPosition, vec2 vTexCoord )
{
	// Screen-space derivatives of position and UV span the local surface plane.
	vec3 ddxPos = dFdx(vPosition);
	vec3 ddyPos = dFdy(vPosition);
	vec2 ddxUV = dFdx(vTexCoord);
	vec2 ddyUV = dFdy(vTexCoord);

	// Solve the linear system relating UV derivatives to position derivatives.
	vec3 vCrossVec1 = cross( ddyPos, vNormal );
	vec3 vCrossVec2 = cross( vNormal, ddxPos );
	vec3 vTangent = vCrossVec1 * ddxUV.x + vCrossVec2 * ddyUV.x;
	vec3 vBinormal = vCrossVec1 * ddxUV.y + vCrossVec2 * ddyUV.y;

	// Scale-invariant normalization: divide both axes by the larger magnitude.
	float fDotT = dot( vTangent, vTangent );
	float fDotB = dot( vBinormal, vBinormal );
	float fInvMax = 1.0 / sqrt(max(fDotT, fDotB));

	return mat3( vTangent * fInvMax, vBinormal * fInvMax, vNormal );
}

Inputs are vertex normal, world position and texture coordinates.

 

 

Next, why do you apply a transform matrix to the vertex position in the fragment shader? This is most definitely better done in the vertex shader and passed in.

 

And finally, what's the point of this:

vec3 normal;
	if(fragNormalMapping[0] == 1.0f){
		normal = normalize(texture(normalMap, fragTexCoord).xyz * 2.0 - 1.0);
	}else{
		normal = normalize(texture(normalMap, fragTexCoord).xyz * 2.0 - 1.0);
	}

You're doing the same thing twice regardless of the branch, so why bother?

 

 

Once you get everything working, you can look into replacing the Cotangent stuff I supplied with standard tangent/binormal calculations on the CPU and pass them in as vertex attributes. Unless you don't mind the performance overhead of computing it all on the fly per-pixel (you'll gain some memory savings by not storing them ;D), or you're doing all shading/materials on the GPU in a procedural deferred pass like I am... which I highly doubt anyone is doing. tongue.png

Edited by Styves

Share this post


Link to post
Share on other sites

Whoa, thank you.

Sorry about "fragNormalMapping": it is initialized and I have to use it somewhere for the shader to build D= I need to change the C++ code, I know :-( I will do it once everything is working =D

 

Your code is very useful; now I can do some testing. I have written the code (with the help of Google and this page: www.terathon.com/code/tangent.html ) to do the calculation on the CPU, but for now let's see if it works with your code.

 

I have only a question that maybe... maybe it's a little stupid. The sampler2d normalMap variable is just a RGB as it is from the normal map texture. Do I have to normalize it to -1 and +1 via CPU or the code I have should work?

Share this post


Link to post
Share on other sites

:D

 

Leave your normal map as is. Scaling it to -1,1 on CPU would require storage that supports signed data (so more than your standard RGB texture).

Share this post


Link to post
Share on other sites
Sign in to follow this  

  • Advertisement
  • Advertisement
  • Popular Now

  • Advertisement
  • Similar Content

    • By Balma Alparisi
      i got error 1282 in my code.
      sf::ContextSettings settings; settings.majorVersion = 4; settings.minorVersion = 5; settings.attributeFlags = settings.Core; sf::Window window; window.create(sf::VideoMode(1600, 900), "Texture Unit Rectangle", sf::Style::Close, settings); window.setActive(true); window.setVerticalSyncEnabled(true); glewInit(); GLuint shaderProgram = createShaderProgram("FX/Rectangle.vss", "FX/Rectangle.fss"); float vertex[] = { -0.5f,0.5f,0.0f, 0.0f,0.0f, -0.5f,-0.5f,0.0f, 0.0f,1.0f, 0.5f,0.5f,0.0f, 1.0f,0.0f, 0.5,-0.5f,0.0f, 1.0f,1.0f, }; GLuint indices[] = { 0,1,2, 1,2,3, }; GLuint vao; glGenVertexArrays(1, &vao); glBindVertexArray(vao); GLuint vbo; glGenBuffers(1, &vbo); glBindBuffer(GL_ARRAY_BUFFER, vbo); glBufferData(GL_ARRAY_BUFFER, sizeof(vertex), vertex, GL_STATIC_DRAW); GLuint ebo; glGenBuffers(1, &ebo); glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, ebo); glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(indices), indices,GL_STATIC_DRAW); glVertexAttribPointer(0, 3, GL_FLOAT, false, sizeof(float) * 5, (void*)0); glEnableVertexAttribArray(0); glVertexAttribPointer(1, 2, GL_FLOAT, false, sizeof(float) * 5, (void*)(sizeof(float) * 3)); glEnableVertexAttribArray(1); GLuint texture[2]; glGenTextures(2, texture); glActiveTexture(GL_TEXTURE0); glBindTexture(GL_TEXTURE_2D, texture[0]); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); sf::Image* imageOne = new sf::Image; bool isImageOneLoaded = imageOne->loadFromFile("Texture/container.jpg"); if (isImageOneLoaded) { glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, imageOne->getSize().x, imageOne->getSize().y, 0, GL_RGBA, GL_UNSIGNED_BYTE, imageOne->getPixelsPtr()); glGenerateMipmap(GL_TEXTURE_2D); } delete imageOne; glActiveTexture(GL_TEXTURE1); glBindTexture(GL_TEXTURE_2D, texture[1]); glTexParameteri(GL_TEXTURE_2D, 
GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); sf::Image* imageTwo = new sf::Image; bool isImageTwoLoaded = imageTwo->loadFromFile("Texture/awesomeface.png"); if (isImageTwoLoaded) { glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, imageTwo->getSize().x, imageTwo->getSize().y, 0, GL_RGBA, GL_UNSIGNED_BYTE, imageTwo->getPixelsPtr()); glGenerateMipmap(GL_TEXTURE_2D); } delete imageTwo; glUniform1i(glGetUniformLocation(shaderProgram, "inTextureOne"), 0); glUniform1i(glGetUniformLocation(shaderProgram, "inTextureTwo"), 1); GLenum error = glGetError(); std::cout << error << std::endl; sf::Event event; bool isRunning = true; while (isRunning) { while (window.pollEvent(event)) { if (event.type == event.Closed) { isRunning = false; } } glClear(GL_COLOR_BUFFER_BIT); if (isImageOneLoaded && isImageTwoLoaded) { glActiveTexture(GL_TEXTURE0); glBindTexture(GL_TEXTURE_2D, texture[0]); glActiveTexture(GL_TEXTURE1); glBindTexture(GL_TEXTURE_2D, texture[1]); glUseProgram(shaderProgram); } glBindVertexArray(vao); glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_INT, nullptr); glBindVertexArray(0); window.display(); } glDeleteVertexArrays(1, &vao); glDeleteBuffers(1, &vbo); glDeleteBuffers(1, &ebo); glDeleteProgram(shaderProgram); glDeleteTextures(2,texture); return 0; } and this is the vertex shader
      #version 450 core layout(location=0) in vec3 inPos; layout(location=1) in vec2 inTexCoord; out vec2 TexCoord; void main() { gl_Position=vec4(inPos,1.0); TexCoord=inTexCoord; } and the fragment shader
      #version 450 core in vec2 TexCoord; uniform sampler2D inTextureOne; uniform sampler2D inTextureTwo; out vec4 FragmentColor; void main() { FragmentColor=mix(texture(inTextureOne,TexCoord),texture(inTextureTwo,TexCoord),0.2); } I was expecting awesomeface.png on top of container.jpg

    • By khawk
      We've just released all of the source code for the NeHe OpenGL lessons on our Github page at https://github.com/gamedev-net/nehe-opengl. code - 43 total platforms, configurations, and languages are included.
      Now operated by GameDev.net, NeHe is located at http://nehe.gamedev.net where it has been a valuable resource for developers wanting to learn OpenGL and graphics programming.

      View full story
    • By TheChubu
      The Khronos™ Group, an open consortium of leading hardware and software companies, announces from the SIGGRAPH 2017 Conference the immediate public availability of the OpenGL® 4.6 specification. OpenGL 4.6 integrates the functionality of numerous ARB and EXT extensions created by Khronos members AMD, Intel, and NVIDIA into core, including the capability to ingest SPIR-V™ shaders.
      SPIR-V is a Khronos-defined standard intermediate language for parallel compute and graphics, which enables content creators to simplify their shader authoring and management pipelines while providing significant source shading language flexibility. OpenGL 4.6 adds support for ingesting SPIR-V shaders to the core specification, guaranteeing that SPIR-V shaders will be widely supported by OpenGL implementations.
      OpenGL 4.6 adds the functionality of these ARB extensions to OpenGL’s core specification:
      GL_ARB_gl_spirv and GL_ARB_spirv_extensions to standardize SPIR-V support for OpenGL GL_ARB_indirect_parameters and GL_ARB_shader_draw_parameters for reducing the CPU overhead associated with rendering batches of geometry GL_ARB_pipeline_statistics_query and GL_ARB_transform_feedback_overflow_querystandardize OpenGL support for features available in Direct3D GL_ARB_texture_filter_anisotropic (based on GL_EXT_texture_filter_anisotropic) brings previously IP encumbered functionality into OpenGL to improve the visual quality of textured scenes GL_ARB_polygon_offset_clamp (based on GL_EXT_polygon_offset_clamp) suppresses a common visual artifact known as a “light leak” associated with rendering shadows GL_ARB_shader_atomic_counter_ops and GL_ARB_shader_group_vote add shader intrinsics supported by all desktop vendors to improve functionality and performance GL_KHR_no_error reduces driver overhead by allowing the application to indicate that it expects error-free operation so errors need not be generated In addition to the above features being added to OpenGL 4.6, the following are being released as extensions:
      GL_KHR_parallel_shader_compile allows applications to launch multiple shader compile threads to improve shader compile throughput WGL_ARB_create_context_no_error and GXL_ARB_create_context_no_error allow no error contexts to be created with WGL or GLX that support the GL_KHR_no_error extension “I’m proud to announce OpenGL 4.6 as the most feature-rich version of OpenGL yet. We've brought together the most popular, widely-supported extensions into a new core specification to give OpenGL developers and end users an improved baseline feature set. This includes resolving previous intellectual property roadblocks to bringing anisotropic texture filtering and polygon offset clamping into the core specification to enable widespread implementation and usage,” said Piers Daniell, chair of the OpenGL Working Group at Khronos. “The OpenGL working group will continue to respond to market needs and work with GPU vendors to ensure OpenGL remains a viable and evolving graphics API for all its customers and users across many vital industries.“
      The OpenGL 4.6 specification can be found at https://khronos.org/registry/OpenGL/index_gl.php. The GLSL to SPIR-V compiler glslang has been updated with GLSL 4.60 support, and can be found at https://github.com/KhronosGroup/glslang.
      Sophisticated graphics applications will also benefit from a set of newly released extensions for both OpenGL and OpenGL ES to enable interoperability with Vulkan and Direct3D. These extensions are named:
      GL_EXT_memory_object GL_EXT_memory_object_fd GL_EXT_memory_object_win32 GL_EXT_semaphore GL_EXT_semaphore_fd GL_EXT_semaphore_win32 GL_EXT_win32_keyed_mutex They can be found at: https://khronos.org/registry/OpenGL/index_gl.php
      Industry Support for OpenGL 4.6
      “With OpenGL 4.6 our customers have an improved set of core features available on our full range of OpenGL 4.x capable GPUs. These features provide improved rendering quality, performance and functionality. As the graphics industry’s most popular API, we fully support OpenGL and will continue to work closely with the Khronos Group on the development of new OpenGL specifications and extensions for our customers. NVIDIA has released beta OpenGL 4.6 drivers today at https://developer.nvidia.com/opengl-driver so developers can use these new features right away,” said Bob Pette, vice president, Professional Graphics at NVIDIA.
      "OpenGL 4.6 will be the first OpenGL release where conformant open source implementations based on the Mesa project will be deliverable in a reasonable timeframe after release. The open sourcing of the OpenGL conformance test suite and ongoing work between Khronos and X.org will also allow for non-vendor led open source implementations to achieve conformance in the near future," said David Airlie, senior principal engineer at Red Hat, and developer on Mesa/X.org projects.

      View full story
    • By _OskaR
      Hi,
      I have an OpenGL application but without possibility to wite own shaders.
      I need to perform small VS modification - is possible to do it in an alternative way? Do we have apps or driver modifictions which will catch the shader sent to GPU and override it?
    • By xhcao
      Does sync be needed to read texture content after access texture image in compute shader?
      My simple code is as below,
      glUseProgram(program.get());
      glBindImageTexture(0, texture[0], 0, GL_FALSE, 3, GL_READ_ONLY, GL_R32UI);
      glBindImageTexture(1, texture[1], 0, GL_FALSE, 4, GL_WRITE_ONLY, GL_R32UI);
      glDispatchCompute(1, 1, 1);
      // Does sync be needed here?
      glUseProgram(0);
      glBindFramebuffer(GL_READ_FRAMEBUFFER, framebuffer);
      glFramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
                                     GL_TEXTURE_CUBE_MAP_POSITIVE_X + face, texture[1], 0);
      glReadPixels(0, 0, kWidth, kHeight, GL_RED_INTEGER, GL_UNSIGNED_INT, outputValues);
       
      Compute shader is very simple, imageLoad content from texture[0], and imageStore content to texture[1]. Does need to sync after dispatchCompute?
  • Advertisement