
OpenGL GLSL screen-space shadows: a few issues

Recommended Posts

So, the algorithm looks like this:

Bind the FBO

Clear the depth and color buffers

Write depth

Stretch the FBO depth texture to screen size and compare it against the final scene

 

The GLSL algorithm looks like this:

Project the light position and vertex position to screen-space coordinates, then remap them to 0..1 space

Compute the projected vertex-to-light vector

Then, for a defined number of samples, march from the projected vertex position towards the projected light position:

- Get the depth from the depth texture

- Unproject that texture coordinate and depth value (* 2.0 - 1.0) using the inverse of the (model*view)*projection matrix

Find the closest point on the line (world vertex pos, world light pos) to the unprojected point; if the distance is less than 0.0001, I say the ray hit something and the original fragment is in shadow (see the condensed sketch below)
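Condensed into GLSL, one iteration of that test looks roughly like this (using the same names as the shaders further down; this is just a sketch of the idea, not a drop-in replacement):

// One iteration of the march, in the 0..1 screen space used for the depth lookup.
vec3  act_tex_pos = fcoord + projected_ldir * sample_step * float(i); // walk towards the light
float z           = getDepth(act_tex_pos.xy);                         // stored depth at that texel
vec3  occluder    = unproject(vec3(act_tex_pos.xy, z) * 2.0 - 1.0);   // back to world space
vec3  on_ray      = ClosestPointOnLine(LPOS, vertex_pos, occluder);   // nearest point on the vertex-to-light line
bool  hit         = n3ddistance(on_ray, occluder) <= 0.0001;          // occluder lies on the ray => fragment shadowed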

 

Now I forgot a few things, so I'll have to ask:

In the vertex shader I do something like this:


vertexClip.x = dp43(MVP1, Vpos);
vertexClip.y = dp43(MVP2, Vpos);
vertexClip.z = dp43(MVP3, Vpos);
vertexClip.w = dp43(MVP4, Vpos);

scrcoord = vec3(vertexClip.x, vertexClip.y, vertexClip.z);


Where
float dp43(vec4 matrow, vec3 p)
{
return ( (matrow.x*p.x) + (matrow.y*p.y) + (matrow.z*p.z) + matrow.w );
}

It looks like I don't have to divide scrcoord by the vertexClip.w component when I do something like this at the end of the shader:

gl_Position = vertexClip; and the fragments are located where they should be...

I pass scrcoord to the fragment shader, then do * 0.5 + 0.5 to know from which position of the depth texture I start to unproject values.

So scrcoord should be in -1..1 space, right?
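For what it's worth: clip-space coordinates only land in -1..1 after the perspective divide. gl_Position gets that divide automatically during rasterization, but a varying does not, so scrcoord as written is only NDC when w happens to be 1. A minimal sketch of one way to handle it, assuming you pass the full clip-space position down and divide per fragment (the varying name is made up):

// Vertex shader: pass the whole clip-space position.
varying vec4 clipPos;                   // hypothetical varying
clipPos = vertexClip;
gl_Position = vertexClip;

// Fragment shader: perspective divide per fragment, then remap to 0..1.
vec3 ndc    = clipPos.xyz / clipPos.w;  // now genuinely in -1..1 inside the frustum
vec3 fcoord = ndc * 0.5 + 0.5;          // matches the depth texture lookup space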

Another thing is with unprojecting a screen coordinate to a 3D position.

So far I use this formula:

Get the texel depth, then do * 2.0 - 1.0 for all xyz components

Then multiply it by the inverse of the (model*view)*projection matrix like this (not quite sure if this is even correct):

vec3 unproject(vec3 op)
{
    vec3 outpos;
    outpos.x = dp43(imvp1, op);
    outpos.y = dp43(imvp2, op);
    outpos.z = dp43(imvp3, op);

    return outpos;
}
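One thing that stands out (can't be sure without testing): the inverse of a perspective (model*view)*projection matrix produces a homogeneous result, so the fourth row and a divide by the resulting w are needed when unprojecting. A sketch under that assumption, using the imvp4 uniform you already declare in the fragment shader:

vec3 unproject(vec3 ndc)
{
    // Full 4-component transform by the inverse MVP (input point assumed to have w = 1)...
    vec4 p;
    p.x = dp43(imvp1, ndc);
    p.y = dp43(imvp2, ndc);
    p.z = dp43(imvp3, ndc);
    p.w = dp43(imvp4, ndc);

    // ...then the homogeneous divide to get back to world space.
    return p.xyz / p.w;
}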

 

And the last question is about ray sampling: I'm pretty sure it will skip some pixels, making shadowed fragments unshadowed... I need to fix that somehow too, but for now I have no clue...


vec3 act_tex_pos = fcoord + projected_ldir * sample_step * float ( i );
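One common way to avoid skipping texels is to derive the step from the ray's length measured in texels, so consecutive samples are at most one texel apart. A rough sketch under that assumption; depthTexSize is a hypothetical uniform holding the depth texture resolution, and the loop keeps its constant bound for GLES2:

// Hypothetical uniform: resolution of the depth texture, e.g. vec2(1024.0, 1024.0).
uniform vec2 depthTexSize;

// Inside main(), replacing the fixed sample_step:
vec2  texel_dist  = abs(lightpos_projected.xy - fcoord.xy) * depthTexSize;
float texel_steps = clamp(max(texel_dist.x, texel_dist.y), 1.0, float(num_samples));
float sample_step = vldst / texel_steps;

for (int i = 1; i < num_samples; i++)      // constant bound, required by many GLES2 drivers
{
    if (float(i) >= texel_steps) break;    // stop once the projected light position is reached
    vec3 act_tex_pos = fcoord + projected_ldir * sample_step * float(i);
    // ... same depth fetch / unproject / closest-point test as before
}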

 

 

I checked the depth texture for values and they're right.


Vertex shader:

precision highp float;
attribute vec3 Vpos;
attribute vec3 Vnormal;

uniform vec4 MVP1;
uniform vec4 MVP2;
uniform vec4 MVP3;
uniform vec4 MVP4;

uniform vec4 WM1;
uniform vec4 WM2;
uniform vec4 WM3;
uniform vec4 WM4;

uniform vec3 LPOS;
uniform vec3 LDIFF;
uniform vec3 LAMB;

float dp43(vec4 matrow, vec3 p)
{
return ( (matrow.x*p.x) + (matrow.y*p.y) + (matrow.z*p.z) + matrow.w );
}


float dp33(vec4 matrow, vec3 p)
{
return matrow.x*p.x + matrow.y*p.y + matrow.z*p.z;
}


vec3 vectorAB( vec3 A, vec3 B)
{
return B - A;
}

varying vec4 fragColor;
varying vec3 vertex_pos;
varying vec3 scrcoord;
void main()
{
    vec4 vertexClip;

    vertexClip.x = dp43(MVP1, Vpos);
    vertexClip.y = dp43(MVP2, Vpos);
    vertexClip.z = dp43(MVP3, Vpos);
    vertexClip.w = dp43(MVP4, Vpos);

    scrcoord = vec3(vertexClip.x, vertexClip.y, vertexClip.z);
    //scrcoord = scrcoord / vertexClip.w;

    vec3 normal;
    normal.x = dp33(WM1, Vnormal);
    normal.y = dp33(WM2, Vnormal);
    normal.z = dp33(WM3, Vnormal);
    normal = normalize(normal);

    vertex_pos.x = dp43(WM1, Vpos);
    vertex_pos.y = dp43(WM2, Vpos);
    vertex_pos.z = dp43(WM3, Vpos);

    vec3 light_vert = normalize( vectorAB( LPOS, vertex_pos ) );
    float intensity = clamp(-dot(light_vert, normal), 0.0, 1.0);

    vec3 res_col = clamp( LAMB + LDIFF*intensity, 0.0, 1.0);
    fragColor = vec4(res_col, 1.0);
    gl_Position = vertexClip;
}

 

 

 

 

And the fragment shader:

 

precision highp float;
varying vec4 fragColor;

varying vec3 vertex_pos;
varying vec3 scrcoord;
uniform sampler2D depthtex;


highp vec3 vectorAB(highp vec3 A, highp vec3 B)
{
return B-A;
}


highp float getDepth(highp vec2 pos)
{

    highp vec4 packedZValue = texture2D(depthtex, pos);

    const highp vec4 bitShifts = vec4(1.0 / (256.0 * 256.0 * 256.0),
                        1.0 / (256.0 * 256.0),
                        1.0 / 256.0,
                        1);
    highp float shadow = dot(packedZValue , bitShifts);
 
    return shadow;
}

highp float dp43(highp vec4 matrow, highp vec3 p)
{
return ( (matrow.x*p.x) + (matrow.y*p.y) + (matrow.z*p.z) + matrow.w );
}




uniform vec3 LPOS;
//inverse (model*view)*projection matrix
uniform vec4 imvp1;
uniform vec4 imvp2;
uniform vec4 imvp3;
uniform vec4 imvp4;


uniform vec4 MVP1;
uniform vec4 MVP2;
uniform vec4 MVP3;
uniform vec4 MVP4;


highp float n3ddistance(highp vec3 first_point, highp vec3 second_point)
{
highp float x = first_point.x-second_point.x;
highp float y = first_point.y-second_point.y;
highp float z = first_point.z-second_point.z;
highp float val = x*x + y*y + z*z;
return sqrt(val);
}

vec3 ClosestPointOnLine (vec3 vA,vec3 vB,vec3 vPoint)
{
	vec3 vVector1 = vPoint - vA;
	vec3 vVector2 = normalize(vB - vA);

float d = n3ddistance(vA, vB);
float t = dot(vVector2, vVector1);


    if (t <= 0.0) return vA;
    if (t >= d) return vB;


vec3 vVector3 		= vVector2 * t;
vec3 vClosestPoint 	= vA + vVector3;


	return vClosestPoint;
}

vec3 unproject(vec3 op)
{
    vec3 outpos;
    outpos.x = dp43(imvp1, op);
    outpos.y = dp43(imvp2, op);
    outpos.z = dp43(imvp3, op);

    return outpos;
}

const int num_samples = 100;

void main()
{
vec3 ray_dir = normalize( vectorAB(vertex_pos, LPOS) );


vec4 lightpos_projected2;
/*
project the light world position to clip space, then remap to 0..1 screen coords
(the vertex position was already projected in the vertex shader)
*/
lightpos_projected2.x = dp43(MVP1, LPOS);
lightpos_projected2.y = dp43(MVP2, LPOS);
lightpos_projected2.z = dp43(MVP3, LPOS);
lightpos_projected2.w = dp43(MVP4, LPOS);
vec3 lightpos_projected;
lightpos_projected = lightpos_projected2.xyz / lightpos_projected2.w;
lightpos_projected = lightpos_projected * 0.5 + 0.5;

vec3 fcoord = scrcoord * 0.5 + 0.5;

//get the projected vector
vec3 projected_ldir = normalize( vectorAB( fcoord, lightpos_projected) );
float vldst = n3ddistance( fcoord, lightpos_projected );


float sample_step = vldst / float ( num_samples );


bool shadowed = false;
for (int i = 1; i < num_samples; i++) // start from 1 because we don't want to check the start vertex
{
vec3 act_tex_pos = fcoord + projected_ldir * sample_step * float ( i );

if (act_tex_pos.x < 0.0) continue;
if (act_tex_pos.x > 1.0) continue;
if (act_tex_pos.y < 0.0) continue;
if (act_tex_pos.y > 1.0) continue;

float zcoord =  getDepth(act_tex_pos.xy);
if ( zcoord >=0.98 ) continue;
if ( zcoord <= 0.0 ) continue;


zcoord = zcoord * 2.0 - 1.0;
vec3 uppos = act_tex_pos * 2.0 - 1.0;
uppos.z = zcoord;

vec3 unprojected_position = unproject(uppos);
vec3 cpol = ClosestPointOnLine( LPOS, vertex_pos, unprojected_position);

if (n3ddistance(cpol, unprojected_position) <= 0.0001)
{
	shadowed = true;
	break;
}


}
vec4 ahue = vec4(1.0, 1.0, 1.0, 1.0);
if (shadowed)
    gl_FragColor = vec4(1.0, 0.0, 0.0, 1.0); //ahue = vec4(0.2, 0.2, 0.2, 1.0);
else
    gl_FragColor = vec4(fragColor.x*ahue.x, fragColor.y*ahue.y, fragColor.z*ahue.z, 1.0);

}

 

 

As you can see, the shadow should be colored red. But I don't see any red.

https://i.imgur.com/U39BYF0.png

15 hours ago, KKTHXBYE said:

And the last question is about ray sampling: I'm pretty sure it will skip some pixels, making shadowed fragments unshadowed... I need to fix that somehow too, but for now I have no clue...

Is the basic thrust of this that you are trying to do a shadow test in the vertex shader? Is that based on someone's suggestion? You can do the shadow test entirely in the pixel shader. The principle is simple: get the depth of a point from the camera and from the (probably single) light source. If they are different, something is in the way. The rest is details for performance and aesthetics, like blending different-scale shadow maps, dealing with blockiness or 'acne', etc. Between the http://www.rastertek.com/dx11tut40.html and http://www.opengl-tutorial.org/intermediate-tutorials/tutorial-16-shadow-mapping/ examples it should be followable (assuming you aren't a complete beginner).
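The core of that test, as a rough GLSL sketch (hypothetical names; the linked tutorials cover the full setup of rendering the depth map from the light first):

// lightSpacePos = fragment position transformed by the light's view-projection matrix,
// passed down from the vertex shader as a varying; shadowMap holds the light's depth render.
vec3  proj        = lightSpacePos.xyz / lightSpacePos.w; // light-space NDC
proj              = proj * 0.5 + 0.5;                    // into 0..1 texture/depth space
float storedDepth = texture2D(shadowMap, proj.xy).r;     // depth the light actually saw there
float bias        = 0.005;                               // small offset against self-shadowing acne
bool  inShadow    = proj.z - bias > storedDepth;         // something nearer the light blocks this fragment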

 

The only thing I'd add is that there is sometimes confusion around homogeneous coordinates and the need to divide by w to undo the projection; have a look at https://www.tomdalling.com/blog/modern-opengl/explaining-homogenous-coordinates-and-projective-geometry/.


Well, the main problem is with unprojecting the fragment; I'm not sure if I do that correctly. The second thing is that I do the shadow test entirely in the fragment shader; I only pass the world vertex position and screen coordinate to the fragment shader.


