OpenGL Screen space reflections: Issues


Recommended Posts

Hey there gamedev people,

 

this is my first post on the forums, even though I've been reading and searching here for some years now.

I'm studying computer visualistics in Germany and do a lot of OpenGL programming at the moment.

 

I'm currently trying to implement screen space reflections, but got some issues.

I first gather all the information in a G-buffer (first pass, rendering to a framebuffer object) and do the lighting in a second pass.

Then I have a third pass for calculating the reflections.
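
A minimal sketch of what such a G-buffer pass might write is shown below; the output names are assumptions chosen to match the deferredPositionTex / deferredNormalTex / deferredDiffuseTex / deferredDepthTex samplers used in the SSR shader further down, not code from the actual project:

// Hypothetical G-buffer fragment shader sketch (not the author's actual code).
#version 330 core

in vec3 vsPosition;   // view-space position from the vertex shader
in vec3 vsNormal;     // view-space normal from the vertex shader
in vec2 vertUV;

uniform sampler2D diffuseTex;

layout(location = 0) out vec4 outPosition;  // -> deferredPositionTex
layout(location = 1) out vec4 outNormal;    // -> deferredNormalTex
layout(location = 2) out vec4 outDiffuse;   // -> deferredDiffuseTex

void main()
{
    outPosition = vec4(vsPosition, 1.0);
    outNormal   = vec4(normalize(vsNormal), 0.0);
    outDiffuse  = texture(diffuseTex, vertUV);
    // Scene depth ends up in the FBO's depth attachment, read later as deferredDepthTex.
}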

 

Unfortunately, my reflections seem to depend on the viewing direction: they appear or disappear depending on the incident angle.

After implementing the approach several times and working through the geometry on paper, this seems to be one of the last issues before the reflections look correct. I would be very happy if you'd take a look at my code, and hopefully someone can point out my mistakes; I'd really appreciate that :)

 

Here's a short video to better show the problem: [video embed not preserved]

 

And here's my shader code:

 

/******************************************************************************/
// LATEST (working, with some perspective errors)
/* SSR (screen space reflections)
 ******************************************************************************/
vec4 SSR()
{
   // Variables
   vec4 fragmentColor = vec4(0.0f, 0.0f, 0.0f, 0.0f);
   float initialStep = 0.001f;
   float stepSize = 0.01f;
   float blurSize = 1.0f;

   // Current fragment
   vec2 fragment = gl_FragCoord.xy/vec2(Screen.Width, Screen.Height);
   vec2 ssfragment = 0.5f * fragment + 0.5f;
   vec3 ssPosition = vec3(ssfragment, 0.0f);
   ssPosition.z = linearizeDepth( texture(deferredDepthTex, vert_UV) ); 

   // Normal & position
   vec3 vsNormal = normalize(texture(deferredNormalTex, fragment).xyz);
   vec3 vsPosition = texture(deferredPositionTex, fragment).xyz;
	
   // View vector
   vec3 vsViewVec = normalize( -ssPosition );
   vsViewVec.y -= Camera.Position.y;

   // Reflection vector
   vec3 vsReflectVec = reflect(vsViewVec, vsNormal);
   vsReflectVec = normalize(vsReflectVec);

   // Initialize traced ray
   vec3 initialRay = vsReflectVec * initialStep;

   vec3 tracedRay = initialRay;
   // Get depth information
   float fragmentDepth = linearizeDepth(texture(deferredDepthTex, fragment)); 
   vec3 samplingPosition = ssPosition + tracedRay;
   float sampledDepth = linearizeDepth(texture(deferredDepthTex, samplingPosition.xy));
   float rayDepth =  ssPosition.z + tracedRay.z * fragmentDepth;

   // Ray tracing while in screen space
   int count = 0;
   while(
	samplingPosition.x > 0.0f && samplingPosition.x < 1.0f &&
        samplingPosition.y > 0.0f && samplingPosition.y < 1.0f)
	{
		// Update sampling position and depth values
		samplingPosition.x = (2.0f * ssPosition.x - 1.0f) + tracedRay.x;
		samplingPosition.y = (2.0f * ssPosition.y - 1.0f) + tracedRay.y;
		sampledDepth = linearizeDepth( texture(deferredDepthTex, samplingPosition.xy) );
		rayDepth = ssPosition.z + tracedRay.z * fragmentDepth;
		
		// intersection found
		if(rayDepth > sampledDepth)
		{
			if(abs(rayDepth - sampledDepth) < 0.005f)
			{
				fragmentColor = vec4(
                                    texture(deferredDiffuseTex, samplingPosition.xy).rgb, 1.0f );
				break;
			}
			// Ray tracing termination
			break;
		}
		else
		    fragmentColor = vec4(texture(deferredDiffuseTex, fragment).rgb, 1.0f);

		tracedRay += tracedRay * stepSize;
		count++;
	}

	// Return color from sampled fragment
	return fragmentColor;
}

 

 


You're definitely doing something wrong.

 

Basically your reflection rays are terribly wrong, because reflections shouldn't transform when you rotate the viewport (but they do). I'd say your screen-space ray origins are wrong; shouldn't the initial ray be viewVector * depth (although I'm not entirely sure, I'd probably have to test and go through your code at some better time than 4 a.m.)?
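
As a rough illustration of that suggestion, the ray origin could be the view-space position reconstructed from the depth buffer, something along these lines; InverseProjectionMatrix is an assumed uniform and the deferredDepthTex / deferredNormalTex samplers are reused from the shader above, so treat this as a sketch rather than a drop-in fix:

// Sketch only: reconstruct the view-space surface position and reflect the view vector there.
uniform mat4 InverseProjectionMatrix;   // assumed uniform, not present in the original shader

vec3 reconstructViewPos(vec2 uv)
{
    float depth = texture(deferredDepthTex, uv).r;               // hardware depth in [0,1]
    vec4 clip   = vec4(uv * 2.0 - 1.0, depth * 2.0 - 1.0, 1.0);  // back to clip space
    vec4 view   = InverseProjectionMatrix * clip;                // unproject
    return view.xyz / view.w;                                    // view-space position
}

vec3 reflectionRay(vec2 uv)
{
    vec3 vsPos     = reconstructViewPos(uv);   // ray origin = the shaded surface point
    vec3 vsViewVec = normalize(vsPos);         // camera sits at the origin in view space
    vec3 vsNormal  = normalize(texture(deferredNormalTex, uv).xyz);
    return reflect(vsViewVec, vsNormal);       // direction to march along in view space
}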


I recently changed my code. I now calculate the reflected ray in the fragment shader of the G-buffer pass using:

 

vec4 ReflectVec = normalize( reflect(vert_EyePosition, Normal) );

 

Where vert_EyePosition is the negated vertex position (multiplied by the model-view matrix beforehand in the vertex shader), and

Normal is the interpolated normal from the vertex shader.
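
For context, the vertex-shader side that produces those two varyings isn't shown in the thread; a plausible sketch (all attribute, uniform and output names are assumptions) might be:

// Hypothetical vertex shader sketch producing vert_EyePosition and Normal.
#version 330 core

layout(location = 0) in vec3 in_Position;
layout(location = 1) in vec3 in_Normal;

uniform mat4 ModelViewMatrix;
uniform mat4 ProjectionMatrix;
uniform mat3 NormalMatrix;        // inverse-transpose of the upper 3x3 of the model-view matrix

out vec3 vert_EyePosition;        // negated view-space position, i.e. a vector towards the eye
out vec3 Normal;                  // view-space normal, interpolated per fragment

void main()
{
    vec4 vsPos       = ModelViewMatrix * vec4(in_Position, 1.0);
    vert_EyePosition = -vsPos.xyz;               // camera is at the origin in view space
    Normal           = NormalMatrix * in_Normal;
    gl_Position      = ProjectionMatrix * vsPos;
}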

My resulting image of the reflected vector looks like this:

 

bsc22-reflectvec.jpg

 

But actually my tracing doesn't work now, so obviously my reflection vector wasn't correct before. Now I just have to get the tracing right...

 

 

 


Okay, I got it working :). I had some space-conversion problems, and adding the camera's near-plane value to the view vector helped a lot:

 

 

bsc24.jpg

Edited by gs2912


Okay, I got it working :). I had some space-conversion problems, and adding the camera's near-plane value to the view vector helped a lot:
 
 
bsc24.jpg

Could you post the final working code as reference?


Sorry for the late response, but here you are.

I split the algorithm into two functions and removed some debugging stuff I had in there (e.g. a counter in the raytracing while loop for manually breaking out of the loop):

vec4 SSR()
{
	vec3 reflectedColor = vec3(0.0f);

	vec3 normal = normalize( texture(deferredNormalTex, vert_UV) ).xyz;

	// Depth at current fragment
	float currDepth = linearizeDepth( texture(deferredDepthTex, vert_UV).z );

	// Eye position, camera is at (0, 0, 0), we look along negative z, add near plane to correct parallax
	vec3 eyePosition = normalize( vec3(0, 0, Camera.NearPlane) );
	vec4 reflectionVector = ProjectionMatrix * reflect( vec4(-eyePosition, 0), vec4(normal, 0) ) ;

        // Call raytrace to get reflected color
	reflectedColor = raytrace(reflectionVector.xyz, currDepth);	


	return vec4(reflectedColor, 1.0f);
}
/*
 * Raytracing to get reflected color
 */
vec3 raytrace(in vec3 reflectionVector, in float startDepth)
{
	vec3 color = vec3(0.0f);
	float stepSize = rayStepSize; 

	float size = length(reflectionVector.xy);
	reflectionVector = normalize(reflectionVector/size);
	reflectionVector = reflectionVector * stepSize;
        
        // Current sampling position is at current fragment
	vec2 sampledPosition = vert_UV;
        // Current depth at current fragment
	float currentDepth = startDepth;
        // The sampled depth at the current sampling position
	float sampledDepth = linearizeDepth( texture(deferredDepthTex, sampledPosition).z );

        // Raytrace as long as in texture space of depth buffer (between 0 and 1)
	while(sampledPosition.x <= 1.0 && sampledPosition.x >= 0.0 &&
	      sampledPosition.y <= 1.0 && sampledPosition.y >= 0.0)
	{
                // Update sampling position by adding the reflection vector's x and y components
		sampledPosition = sampledPosition + reflectionVector.xy;
                // Updating depth values
		currentDepth = currentDepth + reflectionVector.z * startDepth;
		float sampledDepth = linearizeDepth( texture(deferredDepthTex, sampledPosition).z );
                
                // If current depth is greater than sampled depth of depth buffer, intersection is found
		if(currentDepth > sampledDepth)
		{
                        // Delta is for stopping the raytracing after the first intersection is found
                        // Not using delta will create "repeating artifacts"
			float delta = (currentDepth - sampledDepth);
			if(delta < 0.003f )
			{
				color = texture(deferredDiffuseTex, sampledPosition).rgb;
				break;
			}
		}
	}

	return color;
}
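
One helper not included in the post is linearizeDepth(). A typical implementation for a standard perspective projection looks something like the sketch below; Camera.FarPlane is a guessed uniform name alongside the Camera.NearPlane used above:

// Sketch of a typical linearizeDepth(); not taken from the original post.
float linearizeDepth(float depth)
{
    float n = Camera.NearPlane;
    float f = Camera.FarPlane;   // assumed to exist next to Camera.NearPlane
    // Map a non-linear depth-buffer value in [0,1] to an (approximately) linear range.
    return (2.0 * n) / (f + n - depth * (f - n));
}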

 

If you have any questions or comments, let me know :). I'd like to get further into this whole screen-space stuff and understand it better.

 
