Search the Community

Showing results for tags 'OpenGL' in content posted in Graphics and GPU Programming.
Found 17436 results

  1. I am using immediate-mode OpenGL to create a 2D top-down car game. I am trying to configure my game loop so my car-like physics work on a square shape. I have working code, but it is not doing what I want, and I am not sure whether my game loop is incorrect, my code for the square is incorrect, or both! Could someone help? I have been trying to work this out for over a day now. I have attached my .cpp file if you wish to run it for yourself. This is my C++ and OpenGL code:

     int mouse_x = 0, mouse_y = 0;
     bool LeftPressed = false;
     int screenWidth = 1080, screenHeight = 960;
     bool keys[256];

     float radiansFromDegrees(float deg) { return deg * (M_PI / 180.0f); }
     float degreesFromRadians(float rad) { return rad / (M_PI / 180.0f); }

     bool game_is_running = true;
     const int TICKS_PER_SECOND = 50;
     const int SKIP_TICKS = 1000 / TICKS_PER_SECOND;
     const int MAX_FRAMESKIP = 10;
     DWORD next_game_tick = GetTickCount();
     int loops;

     typedef struct {
         float x, y;
         float dx, dy;
         float ang;
     } Car;

     // OPENGL FUNCTION PROTOTYPES
     void display(const Car& car);        // called in WinMain to draw everything to the screen
     void reshape(int width, int height); // called when the window is resized
     void init();                         // called in WinMain when the program starts
     void processKeys(Car& car);          // called in WinMain to process keyboard input
     void update(Car& car);               // called in WinMain to update variables

     /************* START OF OPENGL FUNCTIONS ****************/
     void display(const Car& car)
     {
         const float w = 50.0f;
         const float h = 50.0f;
         glClear(GL_COLOR_BUFFER_BIT);
         glLoadIdentity();
         glTranslatef(100, 100, 0);
         glBegin(GL_POLYGON);
         glVertex2f(car.x, car.y);
         glVertex2f(car.x + w, car.y);
         glVertex2f(car.x + w, car.y + h);
         glVertex2f(car.x, car.y + h);
         glEnd();
         glFlush();
     }

     void reshape(int width, int height) // Resize the OpenGL window
     {
         // store the sizes so the mouse coordinates match;
         // we will use these values to set the coordinate system
         screenWidth = width;
         screenHeight = height;
         glViewport(0, 0, width, height);             // Reset the current viewport
         glMatrixMode(GL_PROJECTION);                 // select the projection matrix stack
         glLoadIdentity();                            // reset it to an identity matrix
         gluOrtho2D(0, screenWidth, 0, screenHeight); // set the coordinate system for the window
         glMatrixMode(GL_MODELVIEW);                  // select the modelview matrix stack
         glLoadIdentity();                            // reset it to an identity matrix
     }

     void init()
     {
         glClearColor(1.0, 1.0, 0.0, 0.0); // sets the clear colour to yellow;
         // glClear(GL_COLOR_BUFFER_BIT) in the display function
         // will clear the buffer to this colour.
     }

     void processKeys(Car& car)
     {
         if (keys[VK_UP]) {
             float cdx = sinf(radiansFromDegrees(car.ang));
             float cdy = -cosf(radiansFromDegrees(car.ang));
             car.dx += cdx;
             car.dy += cdy;
         }
         if (keys[VK_DOWN]) {
             float cdx = sinf(radiansFromDegrees(car.ang));
             float cdy = -cosf(radiansFromDegrees(car.ang));
             car.dx += -cdx;
             car.dy += -cdy;
         }
         if (keys[VK_LEFT])  { car.ang -= 2; }
         if (keys[VK_RIGHT]) { car.ang += 2; }
     }

     void update(Car& car)
     {
         car.x += car.dx * next_game_tick;
     }

     My WinMain code:

     /******************* WIN32 FUNCTIONS ***************************/
     int WINAPI WinMain(HINSTANCE hInstance,     // Instance
                        HINSTANCE hPrevInstance, // Previous Instance
                        LPSTR lpCmdLine,         // Command Line Parameters
                        int nCmdShow)            // Window Show State
     {
         MSG msg;           // Windows Message Structure
         bool done = false; // Bool Variable To Exit Loop

         Car car;
         car.x = 220; car.y = 140;
         car.dx = 0;  car.dy = 0;
         car.ang = 0;

         AllocConsole();
         FILE *stream;
         freopen_s(&stream, "CONOUT$", "w", stdout);

         // Create Our OpenGL Window
         if (!CreateGLWindow("OpenGL Win32 Example", screenWidth, screenHeight)) {
             return 0; // Quit If Window Was Not Created
         }

         while (!done) // Loop That Runs While done == false
         {
             if (PeekMessage(&msg, NULL, 0, 0, PM_REMOVE)) // Is There A Message Waiting?
             {
                 if (msg.message == WM_QUIT) // Have We Received A Quit Message?
                 {
                     done = true;
                     break;
                 }
                 else // If Not, Deal With Window Messages
                 {
                     TranslateMessage(&msg); // Translate The Message
                     DispatchMessage(&msg);  // Dispatch The Message
                 }
             }
             else // If There Are No Messages
             {
                 if (keys[VK_ESCAPE]) done = true;
                 void processKeys(Car& car); // process keyboard
                 while (game_is_running)
                 {
                     loops = 0;
                     while (GetTickCount() > next_game_tick && loops < MAX_FRAMESKIP)
                     {
                         update(car);                  // update variables
                         next_game_tick += SKIP_TICKS;
                         loops++;
                     }
                     display(car);     // Draw The Scene
                     SwapBuffers(hDC); // Swap Buffers (Double Buffering)
                 }
             }
         }

         // Shutdown
         KillGLWindow();           // Kill The Window
         return (int)(msg.wParam); // Exit The Program
     }

     // WIN32 message handler - useful for responding to user input and other events.
     LRESULT CALLBACK WndProc(HWND hWnd,     // Handle For This Window
                              UINT uMsg,     // Message For This Window
                              WPARAM wParam, // Additional Message Information
                              LPARAM lParam) // Additional Message Information
     {
         switch (uMsg) // Check For Windows Messages
         {
         case WM_CLOSE:          // Did We Receive A Close Message?
             PostQuitMessage(0); // Send A Quit Message
             return 0;
         case WM_SIZE:           // Resize The OpenGL Window
             reshape(LOWORD(lParam), HIWORD(lParam)); // LoWord=Width, HiWord=Height
             return 0;
         case WM_LBUTTONDOWN:
             mouse_x = LOWORD(lParam);
             mouse_y = screenHeight - HIWORD(lParam);
             LeftPressed = true;
             break;
         case WM_LBUTTONUP:
             LeftPressed = false;
             break;
         case WM_MOUSEMOVE:
             mouse_x = LOWORD(lParam);
             mouse_y = screenHeight - HIWORD(lParam);
             break;
         case WM_KEYDOWN:             // Is A Key Being Held Down?
             keys[wParam] = true;     // If So, Mark It As true
             return 0;
         case WM_KEYUP:               // Has A Key Been Released?
             keys[wParam] = false;    // If So, Mark It As false
             return 0;
         }
         // Pass All Unhandled Messages To DefWindowProc
         return DefWindowProc(hWnd, uMsg, wParam, lParam);
     }

     game.cpp
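     Two details in the code above are worth isolating. Inside WinMain, "void processKeys(Car& car);" declares a function instead of calling it, so input is never processed; and update() scales velocity by next_game_tick, the absolute tick count, rather than by the fixed timestep. A minimal sketch of the fixed-timestep version (names as in the post; the dt constant is mine):

     // Each pass of the inner loop advances the simulation by one fixed tick.
     void update(Car& car)
     {
         const float dt = SKIP_TICKS / 1000.0f; // 0.02 s per tick at 50 ticks/second
         car.x += car.dx * dt;                  // not car.dx * next_game_tick
         car.y += car.dy * dt;
     }

     // ...and in WinMain, call the input handler instead of re-declaring it:
     processKeys(car);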
  2. Hi guys, there are many ways to do light culling in tile-based shading. I've been playing with this idea for a while and just want to throw it out there. Because tile frustums are generally small compared to the light radius, I tried using a cone test to reduce the false positives introduced by the commonly used sphere-frustum test. On top of that, I use distance to camera rather than depth for the near/far test (aka sliced by spheres). This method can be naturally extended to clustered light culling as well. The following image shows the general idea. Performance-wise I get around a 15% improvement over the sphere-frustum test. You can also see how a single light performs in the following, from left to right: (1) standard rendering of a point light; then tiles that passed (2) the sphere-frustum test, (3) the cone test, and (4) the spherical-sliced cone test. I put the details in my blog post (https://lxjk.github.io/2018/03/25/Improve-Tile-based-Light-Culling-with-Spherical-sliced-Cone.html), GLSL source code included! Eric
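     For orientation, here is one common sphere-vs-cone test in GLSL (a generic formulation for context, not the exact code from the linked post):

     // Cone apex at the camera origin, unit-length axis, half-angle theta.
     // Conservative: returns false only when the sphere is certainly outside.
     bool sphereIntersectsCone(vec3 center, float radius, vec3 axis,
                               float sinTheta, float cosTheta, float coneRange)
     {
         float axial = dot(center, axis);                               // distance along the axis
         float ortho = sqrt(max(dot(center, center) - axial * axial, 0.0));
         float d = cosTheta * ortho - sinTheta * axial;                 // signed distance to the cone surface
         bool angleCull = d > radius;
         bool frontCull = axial > coneRange + radius;
         bool backCull  = axial < -radius;
         return !(angleCull || frontCull || backCull);
     }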
  3. Hello guys, please tell me: why does my Wavefront (OBJ) model not show? I have already checked, and I get no errors.

     using OpenTK;
     using System.Collections.Generic;
     using System.IO;
     using System.Text;

     namespace Tutorial_08.net.sourceskyboxer
     {
         public class WaveFrontLoader
         {
             private static List<Vector3> inPositions;
             private static List<Vector2> inTexcoords;
             private static List<Vector3> inNormals;

             private static List<float> positions;
             private static List<float> texcoords;
             private static List<int> indices;

             public static RawModel LoadObjModel(string filename, Loader loader)
             {
                 inPositions = new List<Vector3>();
                 inTexcoords = new List<Vector2>();
                 inNormals = new List<Vector3>();
                 positions = new List<float>();
                 texcoords = new List<float>();
                 indices = new List<int>();
                 int nextIdx = 0;

                 using (var reader = new StreamReader(File.Open("Contents/" + filename + ".obj", FileMode.Open), Encoding.UTF8))
                 {
                     string line = reader.ReadLine();
                     int i = reader.Read();
                     while (true)
                     {
                         string[] currentLine = line.Split();
                         if (currentLine[0] == "v")
                         {
                             Vector3 pos = new Vector3(float.Parse(currentLine[1]), float.Parse(currentLine[2]), float.Parse(currentLine[3]));
                             inPositions.Add(pos);
                             if (currentLine[1] == "t")
                             {
                                 Vector2 tex = new Vector2(float.Parse(currentLine[1]), float.Parse(currentLine[2]));
                                 inTexcoords.Add(tex);
                             }
                             if (currentLine[1] == "n")
                             {
                                 Vector3 nom = new Vector3(float.Parse(currentLine[1]), float.Parse(currentLine[2]), float.Parse(currentLine[3]));
                                 inNormals.Add(nom);
                             }
                         }
                         if (currentLine[0] == "f")
                         {
                             Vector3 pos = inPositions[0];
                             positions.Add(pos.X);
                             positions.Add(pos.Y);
                             positions.Add(pos.Z);
                             Vector2 tc = inTexcoords[0];
                             texcoords.Add(tc.X);
                             texcoords.Add(tc.Y);
                             indices.Add(nextIdx);
                             ++nextIdx;
                         }
                         reader.Close();
                         return loader.loadToVAO(positions.ToArray(), texcoords.ToArray(), indices.ToArray());
                     }
                 }
             }
         }
     }

     I have also tried another method, but it doesn't show either, and I'm getting frustrated because no OpenTK developers will help me. Please help me fix this. My download (mega.nz) should be the original, but I have had no success... I added the blend source and png file here, and I have tried and tried... PS: Why is our community not active? I have been waiting a long time. Thanks!
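     For reference: the loop above closes the reader and returns after the first line, compares "vt"/"vn" against currentLine[1] instead of currentLine[0], and always reads element 0 when handling faces. A sketch of a corrected parse loop, assuming triangulated "f pos/uv/normal" faces (RawModel, Loader, and loadToVAO are the poster's own types):

     string line;
     while ((line = reader.ReadLine()) != null)
     {
         string[] t = line.Split(new[] { ' ' }, StringSplitOptions.RemoveEmptyEntries);
         if (t.Length == 0) continue;
         switch (t[0])
         {
             case "v":  inPositions.Add(new Vector3(float.Parse(t[1]), float.Parse(t[2]), float.Parse(t[3]))); break;
             case "vt": inTexcoords.Add(new Vector2(float.Parse(t[1]), float.Parse(t[2]))); break;
             case "vn": inNormals.Add(new Vector3(float.Parse(t[1]), float.Parse(t[2]), float.Parse(t[3]))); break;
             case "f":
                 foreach (string vertex in new[] { t[1], t[2], t[3] })
                 {
                     string[] idx = vertex.Split('/');            // OBJ indices are 1-based
                     Vector3 pos = inPositions[int.Parse(idx[0]) - 1];
                     Vector2 uv  = inTexcoords[int.Parse(idx[1]) - 1];
                     positions.Add(pos.X); positions.Add(pos.Y); positions.Add(pos.Z);
                     texcoords.Add(uv.X);  texcoords.Add(uv.Y);
                     indices.Add(nextIdx++);
                 }
                 break;
         }
     }
     // only after the whole file is consumed:
     return loader.loadToVAO(positions.ToArray(), texcoords.ToArray(), indices.ToArray());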
  4. Hello everyone, I have a problem with texturing:

     using System;
     using OpenTK;
     using OpenTK.Input;
     using OpenTK.Graphics;
     using OpenTK.Graphics.OpenGL4;
     using System.Drawing;
     using System.Reflection;

     namespace Tutorial_05
     {
         class Game : GameWindow
         {
             private static int WIDTH = 1200;
             private static int HEIGHT = 720;
             private static KeyboardState keyState;

             private int vaoID;
             private int vboID;
             private int iboID;

             private Vector3[] vertices =
             {
                 new Vector3(-0.5f,  0.5f, 0.0f), // V0
                 new Vector3(-0.5f, -0.5f, 0.0f), // V1
                 new Vector3( 0.5f, -0.5f, 0.0f), // V2
                 new Vector3( 0.5f,  0.5f, 0.0f)  // V3
             };

             private Vector2[] texcoords =
             {
                 new Vector2(0, 0),
                 new Vector2(0, 1),
                 new Vector2(1, 1),
                 new Vector2(1, 0)
             };

             private int[] indices =
             {
                 0, 1, 3,
                 3, 1, 2
             };

             private string vertsrc = @"#version 450 core
                 in vec3 position;
                 in vec2 textureCoords;
                 out vec2 pass_textureCoords;
                 void main(void)
                 {
                     gl_Position = vec4(position, 1.0);
                     pass_textureCoords = textureCoords;
                 }";

             private string fragsrc = @"#version 450 core
                 in vec2 pass_textureCoords;
                 out vec4 out_color;
                 uniform sampler2D textureSampler;
                 void main(void)
                 {
                     out_color = texture(textureSampler, pass_textureCoords);
                 }";

             private int programID;
             private int vertexShaderID;
             private int fragmentShaderID;
             private int textureID;
             private Bitmap texsrc;

             public Game() : base(WIDTH, HEIGHT, GraphicsMode.Default, "Tutorial 05 - Texturing",
                 GameWindowFlags.Default, DisplayDevice.Default, 4, 5, GraphicsContextFlags.Default)
             {
             }

             protected override void OnLoad(EventArgs e)
             {
                 base.OnLoad(e);
                 CursorVisible = true;

                 GL.GenVertexArrays(1, out vaoID);
                 GL.BindVertexArray(vaoID);

                 GL.GenBuffers(1, out vboID);
                 GL.BindBuffer(BufferTarget.ArrayBuffer, vboID);
                 GL.BufferData(BufferTarget.ArrayBuffer, (IntPtr)(vertices.Length * Vector3.SizeInBytes), vertices, BufferUsageHint.StaticDraw);

                 GL.GenBuffers(1, out iboID);
                 GL.BindBuffer(BufferTarget.ElementArrayBuffer, iboID);
                 GL.BufferData(BufferTarget.ElementArrayBuffer, (IntPtr)(indices.Length * sizeof(int)), indices, BufferUsageHint.StaticDraw);

                 vertexShaderID = GL.CreateShader(ShaderType.VertexShader);
                 GL.ShaderSource(vertexShaderID, vertsrc);
                 GL.CompileShader(vertexShaderID);

                 fragmentShaderID = GL.CreateShader(ShaderType.FragmentShader);
                 GL.ShaderSource(fragmentShaderID, fragsrc);
                 GL.CompileShader(fragmentShaderID);

                 programID = GL.CreateProgram();
                 GL.AttachShader(programID, vertexShaderID);
                 GL.AttachShader(programID, fragmentShaderID);
                 GL.LinkProgram(programID);

                 // Loading texture from embedded resource
                 texsrc = new Bitmap(Assembly.GetEntryAssembly().GetManifestResourceStream("Tutorial_05.example.png"));

                 textureID = GL.GenTexture();
                 GL.BindTexture(TextureTarget.Texture2D, textureID);
                 GL.TexParameter(TextureTarget.Texture2D, TextureParameterName.TextureMagFilter, (int)All.Linear);
                 GL.TexParameter(TextureTarget.Texture2D, TextureParameterName.TextureMinFilter, (int)All.Linear);
                 GL.TexImage2D(TextureTarget.Texture2D, 0, PixelInternalFormat.Rgba, texsrc.Width, texsrc.Height, 0,
                     PixelFormat.Bgra, PixelType.UnsignedByte, IntPtr.Zero);

                 System.Drawing.Imaging.BitmapData bitmap_data = texsrc.LockBits(
                     new Rectangle(0, 0, texsrc.Width, texsrc.Height),
                     System.Drawing.Imaging.ImageLockMode.ReadOnly,
                     System.Drawing.Imaging.PixelFormat.Format32bppRgb);
                 GL.TexSubImage2D(TextureTarget.Texture2D, 0, 0, 0, texsrc.Width, texsrc.Height,
                     PixelFormat.Bgra, PixelType.UnsignedByte, bitmap_data.Scan0);
                 texsrc.UnlockBits(bitmap_data);

                 GL.Enable(EnableCap.Texture2D);

                 GL.BufferData(BufferTarget.TextureBuffer, (IntPtr)(texcoords.Length * Vector2.SizeInBytes), texcoords, BufferUsageHint.StaticDraw);

                 GL.BindAttribLocation(programID, 0, "position");
                 GL.BindAttribLocation(programID, 1, "textureCoords");
             }

             protected override void OnResize(EventArgs e)
             {
                 base.OnResize(e);
                 GL.Viewport(0, 0, ClientRectangle.Width, ClientRectangle.Height);
             }

             protected override void OnUpdateFrame(FrameEventArgs e)
             {
                 base.OnUpdateFrame(e);
                 keyState = Keyboard.GetState();
                 if (keyState.IsKeyDown(Key.Escape))
                 {
                     Exit();
                 }
             }

             protected override void OnRenderFrame(FrameEventArgs e)
             {
                 base.OnRenderFrame(e);

                 // Prepare the background
                 GL.Clear(ClearBufferMask.ColorBufferBit);
                 GL.ClearColor(Color4.Red);

                 // Draw triangles
                 GL.EnableVertexAttribArray(0);
                 GL.EnableVertexAttribArray(1);
                 GL.BindVertexArray(vaoID);
                 GL.UseProgram(programID);
                 GL.BindBuffer(BufferTarget.ArrayBuffer, vboID);
                 GL.VertexAttribPointer(0, 3, VertexAttribPointerType.Float, false, 0, IntPtr.Zero);
                 GL.ActiveTexture(TextureUnit.Texture0);
                 GL.BindTexture(TextureTarget.Texture3D, textureID);
                 GL.BindBuffer(BufferTarget.ElementArrayBuffer, iboID);
                 GL.DrawElements(BeginMode.Triangles, indices.Length, DrawElementsType.UnsignedInt, 0);
                 GL.DisableVertexAttribArray(0);
                 GL.DisableVertexAttribArray(1);

                 SwapBuffers();
             }

             protected override void OnClosed(EventArgs e)
             {
                 base.OnClosed(e);
                 GL.DeleteVertexArray(vaoID);
                 GL.DeleteBuffer(vboID);
             }
         }
     }

     I can't remember where I am supposed to add GL.Uniform2();
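     Two things stand out when reading this code (my diagnosis, not verified against the full project). The texcoords are uploaded to BufferTarget.TextureBuffer and no VertexAttribPointer is ever set for attribute 1, so the shader never receives UVs; and the draw call binds TextureTarget.Texture3D although the texture was created as Texture2D. Also, GL.BindAttribLocation only takes effect if called before GL.LinkProgram. A sketch of the missing texcoord setup under that reading:

     // In OnLoad, after the position VBO: a second ArrayBuffer for UVs, described as attribute 1.
     int tcboID;
     GL.GenBuffers(1, out tcboID);
     GL.BindBuffer(BufferTarget.ArrayBuffer, tcboID);
     GL.BufferData(BufferTarget.ArrayBuffer, (IntPtr)(texcoords.Length * Vector2.SizeInBytes),
                   texcoords, BufferUsageHint.StaticDraw);
     GL.VertexAttribPointer(1, 2, VertexAttribPointerType.Float, false, 0, IntPtr.Zero);

     // In OnRenderFrame: bind the texture with the target it was created with,
     // and point the sampler uniform at unit 0.
     GL.ActiveTexture(TextureUnit.Texture0);
     GL.BindTexture(TextureTarget.Texture2D, textureID);
     GL.Uniform1(GL.GetUniformLocation(programID, "textureSampler"), 0);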
  5. Hello everyone! @80bserver8, nice job; I found your work through a Google search. How did you port it from JavaScript/WebGL to C# with OpenTK? When I search Google, all I get are Unity 3D results. I really want to understand this. I want to start with OpenTK, but where can I learn about porting JavaScript WebGL code to C#? Thanks!
  6. Hi, I draw into an OpenGL framebuffer. It all works, but it eats FPS (frames per second), so I wonder: could I execute the framebuffer drawing only every 5th-10th loop iteration or so? Many thanks
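     A minimal sketch of that idea in C++: redraw the offscreen framebuffer only every Nth frame and reuse its color texture in between (renderSceneToFBO, drawFullscreenQuad, and the handles are illustrative names):

     const int kFboUpdateInterval = 5;          // redraw every 5th frame
     static unsigned frameCounter = 0;

     if (frameCounter % kFboUpdateInterval == 0) {
         glBindFramebuffer(GL_FRAMEBUFFER, fbo);
         renderSceneToFBO();                    // the expensive pass
         glBindFramebuffer(GL_FRAMEBUFFER, 0);
     }
     drawFullscreenQuad(fboColorTexture);       // cheap: sample the cached result
     ++frameCounter;

     This works when the framebuffer's content changes slowly; anything strongly view-dependent will visibly lag behind the camera.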
  7. hi all, how do I implement this type of effect? What is this effect called; is it considered volumetric lighting? What are the options for doing this? a. A billboard? But I want this to have a 3D effect, so that when we rotate the camera we still get that 3D feel. b. A transparent 3D mesh? Could we animate it as well? I need your expert advice. Additionally: 2. how do I implement things like a fireball projectile (shot from a monster): a billboard texture or a 3D mesh? Note: I'm using OpenGL ES 2.0 on mobile. thanks!
  8. Trying to get texture buffer objects (TBOs) working. I'm planning on updating a buffer every frame with instance matrices; for now I have stored a single color. I stripped my code down to a simple example. Right now the color I am getting is black. I'm wondering if there is something dumb I need to know about TBOs. I have zero glGetError() issues. The buffer should definitely contain data, so I wonder if there isn't something wrong with how the texture is bound. (One thing missing from my example: I am calling glUniform1i to bind texture unit 11 to the "instanceMatrixBuffer" sampler in the shader.)

     glGenVertexArrays(1, &VAO_Handle);
     glBindVertexArray(VAO_Handle);
     glBindBuffer(GL_ARRAY_BUFFER, VBO_Handle);
     glEnableVertexAttribArray(0);
     glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, (void*)this->m_vboLayout.positionDataOffset);
     glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, VBO_Index_Handle);
     glBindVertexArray(0);

     ...later

     glBindVertexArray(VAO_Handle);
     static bool doOnce = true;
     if (doOnce)
     {
         doOnce = false;
         glGenBuffers(1, &TBO_Buffer_Handle);
         glBindBuffer(GL_TEXTURE_BUFFER, VBO_Index);
         float data[4] = {1.0, 0.0, 1.0, 1.0};
         glBufferData(GL_TEXTURE_BUFFER, sizeof(float)*4, data, GL_STATIC_DRAW);
         glGenTextures(1, &TBO_Texture_Handle);
         glBindTexture(GL_TEXTURE_BUFFER, Texture_Index);
         glTexBuffer(GL_TEXTURE_BUFFER, GL_RGBA32F, VBO_Index);
     }
     glActiveTexture(GL_TEXTURE11);
     glBindBuffer(GL_TEXTURE_BUFFER, VBO_Index);
     glBindTexture(GL_TEXTURE_BUFFER, Texture_Index);
     glDrawElementsInstanced(GL_TRIANGLES, meshToRender->num_faces*3, GL_UNSIGNED_INT, 0, instanceCount);

     GLSL:
     vec4 Color = texelFetch(instanceMatrixBuffer, 0);
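     For comparison, a minimal texture-buffer setup with consistent handle names (the snippet above generates TBO_Buffer_Handle but then binds VBO_Index, and generates TBO_Texture_Handle but binds Texture_Index, which may just be a transcription slip but would explain an empty texture):

     GLuint tbo = 0, tboTex = 0;
     glGenBuffers(1, &tbo);
     glBindBuffer(GL_TEXTURE_BUFFER, tbo);
     const float data[4] = { 1.0f, 0.0f, 1.0f, 1.0f };
     glBufferData(GL_TEXTURE_BUFFER, sizeof(data), data, GL_STATIC_DRAW);

     glGenTextures(1, &tboTex);
     glActiveTexture(GL_TEXTURE11);                   // select the unit before binding
     glBindTexture(GL_TEXTURE_BUFFER, tboTex);
     glTexBuffer(GL_TEXTURE_BUFFER, GL_RGBA32F, tbo); // attach the buffer storage to the texture

     // The samplerBuffer uniform takes the unit index, set through its location:
     glUseProgram(program);
     glUniform1i(glGetUniformLocation(program, "instanceMatrixBuffer"), 11);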
  9. Hi guys, a quick question. I have a 3D grid sector made out of AABB boxes (see picture below). When I turn the light on, the light catches the extremities of the boxes, creating ugly line artifacts. Just wondering if there's a way to solve this problem. Thanks again, Mike
  10. I spent the last few days implementing deferred rendering and then physically based shading in my C++ engine, to see if I'd be able to pull this off. My rendering setup is as follows:

     Position buffer (RGB, 16-bit float, world space)
     Normal buffer (RGB, 16-bit float, world space)
     Albedo buffer (RGB, 8-bit)
     Roughness/metalness buffer (one in the R component, one in the G component)

     It's not an efficient setup, but I wanted to keep it simple and optimize it later, after I get everything to work. Here is the shader I'm using:

     #version 330

     in vec2 vTexcoord;
     out vec4 outputF;

     uniform sampler2D gPosition;
     uniform sampler2D gNormal;
     uniform sampler2D gAlbedo;
     uniform sampler2D gMetallicRoughness;
     uniform vec2 uPixelSize;
     uniform float uRadius;
     uniform vec3 uColor;
     uniform vec3 uLightPos;
     uniform vec3 uCameraPosition;

     const float PI = 3.141592653589793;

     float BRDF_Lambert(float NdotL)
     {
         return NdotL;
     }

     float BRDF_D_GGX(float NdotH, float roughness)
     {
         float roughness2 = roughness * roughness;
         float NdotH2 = NdotH * NdotH;
         float r1 = (NdotH2 - 1.0) / (roughness2 * NdotH2);
         float r2 = 1.0 / (PI * (roughness2 * NdotH2) * roughness2);
         return exp(r1) * r2;
     }

     // NdotV seems to be correct (instead of HdotV)
     float BRDF_F_FresnelSchlick(float NdotV, float F0)
     {
         return (F0 + (1.0f - F0) * (pow(1.0f - max(NdotV, 0.0f), 5.0f)));
     }

     float BRDF_G_SchlickGGX(float NdotV, float roughness)
     {
         float k = (roughness * roughness) / 2.0f;
         // float k = roughness;
         return (NdotV) / (NdotV * (1.0f - k) + k);
     }

     // geometric shadowing - Cook-Torrance
     float BRDF_G_Smith(float NdotV, float NdotL, float roughness)
     {
         NdotV = max(NdotV, 0.0f);
         NdotL = max(NdotL, 0.0f);
         return BRDF_G_SchlickGGX(NdotV, roughness) * BRDF_G_SchlickGGX(NdotL, roughness);
     }

     float calcAttenuation(float distToFragment, float lightRadius)
     {
         float att = clamp(1.0 - distToFragment * distToFragment / (lightRadius * lightRadius), 0.0, 1.0);
         att *= att;
         return att;
     }

     void main()
     {
         vec2 texCoord = vec2(gl_FragCoord.x * uPixelSize.x, gl_FragCoord.y * uPixelSize.y);
         vec3 fragPos = texture2D(gPosition, texCoord).rgb;
         vec3 fragNormal = texture2D(gNormal, texCoord).rgb;
         vec3 fragAlbedo = texture2D(gAlbedo, texCoord).rgb;
         vec2 fragMetallicRoughness = texture2D(gMetallicRoughness, texCoord).rg;
         float fragMetallic = fragMetallicRoughness.r;
         float fragRoughness = fragMetallicRoughness.g;
         fragRoughness = max(fragRoughness, 0.05f); // if the value is 0 it doesn't reflect anything

         //--------------
         vec3 fragToLightNormal = uLightPos - fragPos;
         vec3 N = normalize(fragNormal);                    // normal vector
         vec3 L = normalize(fragToLightNormal);             // light vector
         vec3 V = normalize(uCameraPosition - fragPos.xyz); // eye vector
         vec3 H = normalize(L + V);                         // half vector

         float NdotH = max(dot(N, H), 0.0f);
         float NdotV = max(dot(N, V), 0.0f);
         float NdotL = max(dot(N, L), 0.0f);
         float VdotH = max(dot(V, H), 0.0f);

         //------------------
         float F0 = 0.04f; // assumption
         float D = BRDF_D_GGX(NdotH, fragRoughness);          // normal distribution
         float F = BRDF_F_FresnelSchlick(NdotV, F0);          // Fresnel
         float G = BRDF_G_Smith(NdotV, NdotL, fragRoughness); // geometric shadowing

         vec3 specularColor = mix(vec3(F0, F0, F0), fragAlbedo, fragMetallic);
         float specularContribution = (D * F * G) / (4.0f * NdotL * NdotV);

         float lightNormalLength = length(fragToLightNormal);
         float attenuation = calcAttenuation(lightNormalLength, uRadius);
         vec3 radiance = attenuation * uColor;

         vec3 finalColor = (fragAlbedo * (1.0f - F) + specularContribution * specularColor) * radiance * BRDF_Lambert(NdotL);
         outputF = vec4(finalColor, 1.0f);
     }

     For a while I had trouble with this part, but it seems to work (correct me if you find issues here):

     float D = BRDF_D_GGX(NdotH, fragRoughness);          // normal distribution
     float F = BRDF_F_FresnelSchlick(NdotV, F0);          // Fresnel
     float G = BRDF_G_Smith(NdotV, NdotL, fragRoughness); // geometric shadowing

     The issue I'm having is with the last bit:

     vec3 specularColor = mix(vec3(F0, F0, F0), fragAlbedo, fragMetallic);
     float specularContribution = (D * F * G) / (4.0f * NdotL * NdotV);
     float lightNormalLength = length(fragToLightNormal);
     float attenuation = calcAttenuation(lightNormalLength, uRadius);
     vec3 radiance = attenuation * uColor;
     vec3 finalColor = (fragAlbedo * (1.0f - F) + specularContribution * specularColor) * radiance * BRDF_Lambert(NdotL);
     outputF = vec4(finalColor, 1.0f);

     I'm not really sure I'm doing it right. I've done a bit of research online (looking up tutorials, etc.), but it seems everyone does PBR slightly differently. As an example, my materials have a roughness and a metallic texture; sometimes roughness was implemented while metallic values weren't (the F0 value was used, although I don't know if it's the same thing). Sometimes I read about energy conservation, implemented by multiplying the kD factor by 1.0 - metalness:

     float kD = 1.0f - kS;
     kD *= 1.0f - fragMetallic; // energy conservation

     This confuses me a bit, because I'm already multiplying the value by 1.0f - F0 (and doing so in my code results in darker overall colors):

     vec3 finalColor = (fragAlbedo * (1.0f - F) * (1.0 - fragMetallic) + specularContribution * specularColor) * radiance * BRDF_Lambert(NdotL);

     I think I'm handling the roughness/metallic values incorrectly in this code. So my question is: how exactly do I have to combine the albedo, roughness, metallic, and normal/lighting data in the last step to get correct results? Can someone glance over this code and point out what could be at fault? (Specifically the combination block quoted above.)
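     For comparison, here is the combination step in one widely used formulation (the Cook-Torrance split most tutorials derive from Karis's course notes; it is not the only valid choice). It reuses the D and G terms computed above, but makes Fresnel a vec3 driven by VdotH, and applies both the (1 - F) and (1 - metallic) factors to the diffuse term, which is exactly the energy-conservation step the post asks about:

     vec3 F0v = mix(vec3(0.04), fragAlbedo, fragMetallic); // base reflectivity
     vec3 Fv  = F0v + (1.0 - F0v) * pow(1.0 - VdotH, 5.0); // Fresnel from VdotH

     vec3 kS = Fv;                                         // energy going to specular
     vec3 kD = (vec3(1.0) - kS) * (1.0 - fragMetallic);    // metals get no diffuse

     vec3 specular = (D * Fv * G) / max(4.0 * NdotV * NdotL, 0.001); // guard divide-by-zero
     vec3 Lo = (kD * fragAlbedo / PI + specular) * radiance * NdotL;
     outputF = vec4(Lo, 1.0);

     In this version the darker result the post mentions is expected for metals: their diffuse contribution really should drop to zero, with the surface color moved into F0.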
  11. Hi, we are having problems with our current mirror reflection implementation. At the moment we do it very simply: for the i-th frame, we calculate the reflection vectors given the view point and some predefined points on the mirror surface (position and normal). Then, using a least-squares fit, we find the point with the minimum distance from all these reflection vectors; this becomes our virtual view point (with the right orientation). After that, we render offscreen to a texture, setting the OpenGL camera at the virtual view point, and finally we use the rendered texture on the mirror surface. So far this has always been fine, but now we have stronger constraints on accuracy. What are our best options, given that:

     - we have a dynamic scene; the mirror and parts of the scene can change continuously from frame to frame
     - we have about 3k points (with normals) per mirror, calculated offline using a CAD program (such as Catia)
     - all the mirrors are perfectly spherical (with different radii vertically and horizontally) and always convex
     - a scene can have up to 10 mirrors
     - it should be fast enough for VR (HTC Vive) on the fastest GPUs (desktop only)

     Looking around, some papers talk about deriving a caustic surface offline, but I don't know if that suits my case. Another paper used acceleration structures to detect the intersections between the reflection vectors and the scene, and then adjusted the corresponding texture coordinates; this looks the most accurate, but also very heavy computationally. Other than that, I couldn't find anything up-to-date or exhaustive. Can you help me? Thanks in advance
  12. Hello all, I am currently working on a game engine for use with my game development, and I would like it to be as flexible as possible. As such, the exact requirements for how things should work can't be nailed down to a specific implementation; for now I am looking for a good default, average-case design. Here is what I have implemented:

     - Deferred rendering using OpenGL
     - Arbitrary number of lights and shadow mapping
     - Each rendered object, as defined by a set of geometry, textures, animation data, and a model matrix, is rendered with its own draw call
     - Skeletal animation implemented on the GPU
     - Model matrix transformation implemented on the GPU
     - Frustum and octree culling for optimization

     Here are my questions and concerns:

     - Doing the skeletal animation on the GPU currently requires doing the skinning for each object multiple times per frame: once for the initial geometry rendering and once for the shadow-map rendering of each light for which it is not culled. This seems very inefficient. Is there a way to do skeletal animation on the GPU only once across these render calls?
     - Without doing the model matrix transformation on the CPU, I fail to see how I can easily batch objects with the same textures and shaders into a single draw call without passing a ton of matrix data to the GPU (an array of model matrices, then an index for each vertex into that array for transformation purposes? See the sketch below.)
     - If I do the matrix transformations on the CPU, it seems I can't really do the skinning on the GPU, as the pre-transformed vertices would wreak havoc with the calculations, so this seems unviable unless I am missing something.

     Overall, the simplest solution seems to be doing all of the vertex manipulation on the CPU and passing pre-transformed data to the GPU, with vertex shaders that do basically nothing. That doesn't seem like the most efficient use of the graphics hardware, but it could potentially reduce the number of draw calls needed. Really, I am looking for advice on how to proceed, and on how something like this is typically handled. Are the multiple draw calls and skinning calculations not a huge deal? I would LIKE to save as much of the CPU's time per frame as possible, so it can be tasked with other things and CPU resources stay open for the rest of the engine. However, that becomes a moot point if the GPU becomes a bottleneck.
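     The sketch mentioned above: per-instance model matrices passed as instanced vertex attributes, one common way to batch identical meshes without per-object uniforms (instanceVBO, indexCount, and instanceCount are illustrative names). A mat4 attribute occupies four consecutive attribute slots:

     glBindBuffer(GL_ARRAY_BUFFER, instanceVBO);  // tightly packed mat4 per instance
     for (int i = 0; i < 4; ++i) {
         glEnableVertexAttribArray(4 + i);        // slots 4..7 hold the matrix columns
         glVertexAttribPointer(4 + i, 4, GL_FLOAT, GL_FALSE, sizeof(float) * 16,
                               (const void*)(sizeof(float) * 4 * i));
         glVertexAttribDivisor(4 + i, 1);         // advance once per instance, not per vertex
     }
     glDrawElementsInstanced(GL_TRIANGLES, indexCount, GL_UNSIGNED_INT, 0, instanceCount);

     In the vertex shader the matrix arrives as a single "in mat4 aModel;", and skinning can still run before it is applied, which sidesteps the pre-transformed-vertex problem from the third bullet.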
  13. In other words: how do I use the base DirectX or OpenGL APIs to make a physics-based destruction simulation? Will it be just clever rendering, or is something else required?
  14. Hi all, this is my first post on this forum. First of all, I want to say that I've searched many posts on this forum about this specific topic, without success, so I'm writing another one... I'm a beginner. I want to use the GPU geometry clipmaps algorithm to visualize virtually infinite terrain. I have already used vertex texture fetch with a single sampler2D successfully. I've read many papers on the subject, and they all say that EVERY level of a geometry clipmap has its own texture. What does this mean exactly? Do I have to upload a sampler2DArray to the graphics card? With a single sampler2D it is conceptually simple: create a VBO and IBO on the CPU (the VBO contains only the positions on the X-Z plane, not the heights) and upload to the GPU the texture containing the elevations; in the vertex shader I then sample, for every vertex, the height at its UV coordinate. But I can't imagine how to reproduce the various 2D footprints for every level of the clipmap. The only way I can imagine is the following: upload the finest texture (the entire heightmap) to the GPU, and create on the CPU, for each level of the clipmap, the 2D footprints of the entire clipmap, so that all clipmap levels exist on the CPU in terms of the X-Z plane. In the vertex shader, sampling these values is simple using vertex texture fetch. So, how can I sample a sampler2DArray in the vertex shader, instead of uploading a sampler2D of the entire clipmap? Sorry for my VERY bad English; I hope I have been clear.
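     A sketch of the vertex-texture-fetch half of that, assuming the levels are uploaded as layers of a GL_TEXTURE_2D_ARRAY (uLevel, uUVScale, and uUVOffset are illustrative per-draw uniforms):

     #version 330 core
     layout(location = 0) in vec2 aPosXZ;   // footprint vertex on the X-Z plane
     uniform sampler2DArray uHeightmaps;    // one layer per clipmap level
     uniform float uLevel;                  // which layer this draw samples
     uniform vec2 uUVScale;                 // maps X-Z to this level's 0..1 UV window
     uniform vec2 uUVOffset;
     uniform mat4 uMVP;

     void main()
     {
         vec2 uv = aPosXZ * uUVScale + uUVOffset;
         // explicit LOD: vertex shaders have no derivatives for implicit LOD
         float h = textureLod(uHeightmaps, vec3(uv, uLevel), 0.0).r;
         gl_Position = uMVP * vec4(aPosXZ.x, h, aPosXZ.y, 1.0);
     }

     On the CPU side, the array is created once with glTexImage3D(GL_TEXTURE_2D_ARRAY, ...) and each level's elevations go into their own layer via glTexSubImage3D.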
  15. Hello everyone, I have been going over a number of books and examples that deal with GLSL. After viewing the source code, it's common to see something like this:

     class Model {
     public:
         Model();
         void render();
     private:
         GLSL glsl_program;
     };

     // .cpp
     Model::Model() {
         glsl_program.compileAndLinkShaders();
     }

     void Model::render() {
         glsl_program.use();
         // render something
         glsl_program.unUse();
     }

     Is this how a shader program should be used in real-time applications? For example, if I have a particle class, do I want to compile and link a vertex and fragment shader for every particle that's created? It seems to a noob such as myself that this might not be the best approach for real-time applications. If I am correct, what is the best workaround? Thanks so much for all the help, Mike
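     One common workaround is to make the program shared: compile and link once, and have every object (or particle) merely bind it. A sketch reusing the GLSL wrapper class from the example (ParticleSystem is an illustrative name):

     class ParticleSystem {
     public:
         ParticleSystem() {
             if (!s_initialized) {             // first instance compiles the program
                 s_program.compileAndLinkShaders();
                 s_initialized = true;
             }
         }
         void render() {
             s_program.use();                  // all particles reuse one program
             // draw every particle here, ideally in a single batched call
             s_program.unUse();
         }
     private:
         static GLSL s_program;                // one program object, shared
         static bool s_initialized;
     };

     GLSL ParticleSystem::s_program;
     bool ParticleSystem::s_initialized = false;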
  16. I'm having some difficulty understanding how data would flow or get inserted into a multi-threaded OpenGL renderer where there is a thread pool, a render thread, and an update thread (possibly main). My understanding is that the thread pool will continually execute jobs and assemble the results, and when done, send them off to be rendered, where I can further sort them and achieve some cheap form of statelessness. I don't want anything overly complicated or too fine-grained: no fibers, job stealing, etc. My end goal is simply to have my renderer isolated in its own thread, concerned only with drawing and swapping buffers. My questions are:

     1. At what point in this pipeline are resources created? Say I have a class

        class CCommandList
        {
            void SetVertexBuffer(...);
            void SetIndexBuffer(...);
            void SetVertexShader(...);
            void SetPixelShader(...);
        };

        borrowed from an existing post here. I would need to generate a VAO at some point and call glGenBuffers etc., especially if I start with an empty scene. If my context lives on another thread, how do I call these commands if the command list is only supposed to be a collection of state plus which command to use? I don't think the render thread should do this and somehow add a task to the queue, or am I wrong? Or could I do some variation where I do the loading in a thread with a shared context, and from there generate a command that holds the handles to the resources needed?

     2. How do I know all my jobs are done? I'm working with C++; is this as simple as knowing how many objects there are in the scene, incrementing a counter for every task that gets added, and when it matches the aforementioned count, signaling the renderer that the command list is ready? I was thinking a condition_variable or something similar would suffice to alert the render thread that work is ready. (See the sketch below.)

     3. Does all work come from a single queue that the thread pool constantly cycles over? With the notion of jobs, we are basically sending the same work repeatedly, right? Do all jobs need to be added to a single persistent queue to be submitted over and over again?

     4. Are resources destroyed with commands? Likewise with initializing, and assuming #3 is correct, removing an item from the scene would mean removing it from the job queue, no? Would I need to send a one-time command to the renderer to clean up?
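     For question 2, a counter plus condition_variable is indeed a common shape. A minimal sketch (names are illustrative):

     #include <atomic>
     #include <condition_variable>
     #include <mutex>

     std::atomic<int> pendingJobs{0};
     std::mutex mtx;
     std::condition_variable cv;

     void submitJob() { pendingJobs.fetch_add(1, std::memory_order_relaxed); }

     void onJobFinished() {                    // called by worker threads
         if (pendingJobs.fetch_sub(1, std::memory_order_acq_rel) == 1) {
             std::lock_guard<std::mutex> lock(mtx);
             cv.notify_one();                  // last job done: command list is ready
         }
     }

     void renderThreadWaitForFrame() {
         std::unique_lock<std::mutex> lock(mtx);
         cv.wait(lock, [] { return pendingJobs.load(std::memory_order_acquire) == 0; });
     }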
  17. I am starting to get into Linux X11/GLX programming, but in every C example I have found, there is this XVisualInfo parameter always passed to XCreateWindow. Can I control this parameter later on, when the window has already been created? What I want is to change my own non-GLX window into a GLX window without recreating it. Is that possible? On Win32 this works just fine: to create a rendering context later on, I simply find and set up the pixel format from a pixel format descriptor, create the context, and am ready to go. I am asking because if that doesn't work, I need to change a few things to support both approaches (create a context from an existing window, or create a context for a new window).
  18. Good evening, I want to make a 2D game which involves displaying some debug information, especially for collision, enemy sights, and so on. First I thought about all the shapes I will need for debugging purposes: circles, rectangles, lines, polygons. I am really stuck right now because of a fundamental question: where do I store the vertex positions for each line (object)? Currently I am not using a model matrix, because I am using an orthographic projection and set the final position within the VBO. That means that if I add a new line, I have to expand the "points" array and re-upload it (recall glBufferData) every time. The other method would be to use a model matrix and a fixed VBO for a line, but it would also be messy to create a line exactly from (0,0) to (100,20), calculating the rotation and scale to make it fit. If I proceed with option 1, "updating the array each frame", I was thinking of having 4 draw calls every frame: one each for the lines VAO, the polygons VAO, and so on. (See the sketch below.) In addition, I am planning to use some sort of ECS-based architecture, so the other question would be: should I treat those debug objects as entities/components? To me it would make sense to treat them as entities, but that creates a new issue with the previous array approach, because each would then have, for example, a transform and a render component (a special render component for debug objects, with no texture etc.). For me, the transform component is also just a matrix, but how would I then define a line? Treating them as components wouldn't be a good idea in my eyes, because then I would always need an entity. Then again, an entity is just an ID!? So maybe it's a component? Regards, LifeArtist
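     A sketch of option 1, kept simple: queue line vertices into a CPU array during the frame, then orphan-and-refill one dynamic VBO and issue a single GL_LINES draw (names are illustrative; GL headers and a bound VAO/shader are assumed):

     #include <vector>

     std::vector<float> gDebugLineVerts;        // x,y per point, 2 points per line

     void addDebugLine(float x0, float y0, float x1, float y1) {
         gDebugLineVerts.insert(gDebugLineVerts.end(), { x0, y0, x1, y1 });
     }

     void flushDebugLines(GLuint vbo) {
         glBindBuffer(GL_ARRAY_BUFFER, vbo);
         // re-specifying the store each frame "orphans" the old data,
         // so the driver need not stall on the previous frame's contents
         glBufferData(GL_ARRAY_BUFFER, gDebugLineVerts.size() * sizeof(float),
                      gDebugLineVerts.data(), GL_DYNAMIC_DRAW);
         glDrawArrays(GL_LINES, 0, (GLsizei)gDebugLineVerts.size() / 2);
         gDebugLineVerts.clear();
     }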
  19. Hello. I am coding a small thing in my spare time. All I want to achieve is to load a heightmap (as the lowest possible walking terrain), some static meshes (elements of the environment), and a dynamic character (meaning I can move, collide with the heightmap/static meshes, and hold a varying item in a hand). I have a bunch of questions, or rather problems I can't find solutions to myself. Nearly all deal with graphics/GPU, not the coding part; my C++ is at a high enough level. Let's go:

     - Heightmap: I obviously want it textured; the size is hardcoded to 256x256 squares. I can't have one huge texture stretched over the entire terrain, because every pixel would be enormous. That's why I decided to use two specific textures: first a tileset consisting of 16 square tiles (UV ranges from 0 to 0.25 for the first tile, and so on), and second a 256x256 buffer with values 0-15 giving the index of the tileset tile for every heightmap square. (See the sketch below.) The problem is: how do I blend the edges nicely, and make some computationally cheap changes so it's not obvious there are only 16 tiles? Is it possible to generate such terrain with an existing program?

     - Collisions: I want to use a bounding sphere and an AABB. But should I store them per model or per entity instance? Say I have 20 identical trees spawned from the same tree model, but every entity has its own transformation (position, scale, etc.). Storing a collision component per instance grants faster access and is precalculated and pre-transformed (it takes additional memory, but who cares?), so I should stick with that, right? What should I do if an object is dynamically rotated? The AABB is no longer axis-aligned, and recalculating the per-vertex min/max every time the object rotates or scales is pretty expensive, right?

     - Drawing AABBs: a problem similar to the above (storing AABB data per instance or per model). This time, in my opinion, per model is enough, since every instance also does not have its own vertex buffer but uses the shared one (so 20 trees share a reference to one tree model). So rendering an AABB is a matter of taking the model's AABB, transforming it with the instance matrix, and voila. What about the AABB vertex buffer? (This is more of a cosmetic question; I'm just curious, and bumped into it while writing this.) Is it better to make it 8 points and an index buffer (12 lines), or only 2 vertices with min/max x/y/z and have the shaders dynamically generate the 6 other vertices and draw the box? Or should there just be ONE 1x1x1 cube template, moved/scaled per entity?

     - What if one model has a diffuse texture and a normal map, and another has only diffuse? Should I pass some bool flag to the shader with that info, or just assume that my game supports only diffuse maps without fancy stuff?

     There were several more questions, but I forgot or solved them while writing this. Thanks in advance
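     The tile lookup from the heightmap bullet, as a fragment-shader sketch (uniform names are illustrative). This naive version shows the seam problem the post anticipates: without padded borders in the atlas, filtering bleeds between neighboring tiles, and the index map must use nearest filtering:

     uniform sampler2D uTileAtlas;   // 4x4 grid of 16 tiles
     uniform sampler2D uIndexMap;    // 256x256, tile index 0..15 in R (nearest filtering)
     in vec2 vTerrainUV;             // 0..1 across the whole terrain
     out vec4 fragColor;

     void main()
     {
         float idx = texture(uIndexMap, vTerrainUV).r * 255.0;       // back to 0..15
         vec2 tileOrigin = vec2(mod(idx, 4.0), floor(idx / 4.0)) * 0.25;
         vec2 inTile = fract(vTerrainUV * 256.0) * 0.25;             // UV inside this square
         fragColor = texture(uTileAtlas, tileOrigin + inTile);
     }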
  20. Sampling a floating-point texture where the alpha channel holds 4 bytes of data packed into the float: I don't know how to cast the raw memory so it is treated as an integer, so that I can perform bit-shifting operations.

     int rgbValue = int(textureSample.w); // 4 bytes of data packed as a color
     // the algorithm might not be correct, and endianness might need switching
     vec3 extractedData = vec3(
         rgbValue & 0xFF000000,
         (rgbValue << 8) & 0xFF000000,
         (rgbValue << 16) & 0xFF000000);
     extractedData /= 255.0f;
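     If GLSL 3.30 or later is available, the usual answer is floatBitsToUint, which reinterprets the raw bits; int(textureSample.w) truncates the numeric value and cannot recover packed bytes. A sketch (same caveat about endianness/packing order as in the post):

     uint bits = floatBitsToUint(textureSample.w);   // reinterpret, not convert
     vec3 extractedData = vec3(
         float((bits >> 24) & 0xFFu),
         float((bits >> 16) & 0xFFu),
         float((bits >>  8) & 0xFFu)) / 255.0;

     Note that smuggling arbitrary bytes through a floating-point render target is fragile anyway (bit patterns that happen to be NaNs or denormals may not survive); an integer texture format avoids the round trip.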
  21. OpenGL Uniforms

    While writing a simple renderer using OpenGL, I ran into an issue with the glGetUniformLocation function: for some reason, the location comes back as -1. Does anyone have any idea what I should do?
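    For context, -1 from glGetUniformLocation usually means one of three things: the name is misspelled, the program failed to link, or the uniform is declared but never actually used, in which case the driver strips it. A sketch of the checks ("uColor" is an illustrative name):

    glLinkProgram(program);
    GLint linked = GL_FALSE;
    glGetProgramiv(program, GL_LINK_STATUS, &linked);    // a failed link also yields -1
    GLint loc = glGetUniformLocation(program, "uColor"); // exact name as written in GLSL
    if (loc == -1) {
        // misspelled, optimized out as unused, or queried on the wrong program object
    }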
  22. Hello everyone! I'm trying to complete the OpenGL tutorial http://www.opengl-tutorial.org/beginners-tutorials/tutorial-5-a-textured-cube/ and I've hit a problem loading a texture from a DDS file onto my triangle. Here is my code (basically from the tutorial): Load DDS from file (Texture.cpp): https://pastebin.com/Y5TKPvue Vertex shader: https://pastebin.com/2pPXQkS9 Fragment shader: https://pastebin.com/4nF2jVMy DDS file: https://drive.google.com/open?id=1yYF2oyLbqn-OMB_QxZKyzKwBwxl_9sCb Rendered window: https://imgur.com/a/1pP92 In the main file, when drawing the triangle, I call texture.Bind(). I also tried manually setting UV points in the fragment shader, and that seems to work (the color changes based on the coordinates). Any ideas would be really appreciated. Regards, Esentiel
  23. Hi all. I have been looking for a real-time global illumination algorithm to use in my game. I've found voxel cone tracing, and I'm debating whether or not it's an algorithm worth investing my time researching and implementing. I have doubts for the following reasons:

     - I see a lot of people say it's really hard to implement.
     - Apparently this algorithm requires an Nvidia extension to work efficiently, according to the original paper (I highly doubt that, though).
     - Barely real-time performance, meaning it may be too slow to use in a game.

     So, in order to determine whether I should invest time in voxel cone tracing, I want to ask the following questions:

     - Is the algorithm itself flexible enough that I can increase performance by tweaking it (probably lowering the GI quality at the same time, but I don't care)?
     - Can I implement it without any driver requirements or special extensions, as the paper claims?
  24. Consider the following situation:

     - We have an FBO with two identical color attachments.
     - Bind shader program 1 and render an object to FBO attachment 0.
     - Bind the texture on attachment 0 for sampling.
     - Bind shader program 2 and draw a full-screen quad. In the fragment shader, we sample from the texture on attachment 0 and write its value to the texture on attachment 1.

     Can framebuffer objects be used in this way? The reason I'm considering this is to reduce the number of FBOs I create; I'm experimenting to see if I can perform all of my rendering passes with a single FBO equipped with multiple attachments. In my current implementation this setup does not seem to work as expected, and I'm trying to determine whether there's a problem with my implementation or whether this is even possible at all. Any insight would be appreciated!
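     As I read the GL spec, sampling a texture that is attached to the currently bound draw framebuffer constitutes a rendering feedback loop and is formally undefined, even when writes are routed away from that attachment; that could explain the unexpected results, and detaching attachment 0 for that pass, or ping-ponging between two FBOs, avoids the hazard. For completeness, the write-routing itself would look like this (drawFullscreenQuad and attachment0Tex are illustrative names):

     GLenum bufs[2] = { GL_NONE, GL_COLOR_ATTACHMENT1 };
     glDrawBuffers(2, bufs);                       // fragment output location 1 -> attachment 1
     glActiveTexture(GL_TEXTURE0);
     glBindTexture(GL_TEXTURE_2D, attachment0Tex); // the texture being sampled
     drawFullscreenQuad();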
  25. Hi all! I'm trying to implement a sun shafts effect via post-processing in my 3D engine, but I have some artifacts in the final image (please see the attached images). The effect consists of the following passes: 1) scene depth pass; 2) "shafts pass", using the depth-pass texture plus the RGBA back buffer texture; 3) shafts-pass texture plus the RGBA back buffer texture.

     Shafts shader for pass 2:

     uniform sampler2D FullSampler;  // RGBA back buffer
     uniform sampler2D DepthSampler;
     varying vec2 tex;

     #ifndef saturate
     float saturate(float val)
     {
         return clamp(val, 0.0, 1.0);
     }
     #endif

     void main(void)
     {
         vec2 uv = tex;
         float sceneDepth = texture2D(DepthSampler, uv.xy).r;
         vec4 scene = texture2D(FullSampler, tex);
         float fShaftsMask = (1.0 - sceneDepth);
         gl_FragColor = vec4(scene.xyz * saturate(sceneDepth), fShaftsMask);
     }

     Final shader:

     uniform sampler2D FullSampler;  // RGBA back buffer
     uniform sampler2D BlurSampler;  // shafts sampler
     varying vec4 Sun_pos;
     const vec4 ShaftParams = vec4(0.1, 2.0, 0.1, 2.0);
     varying vec2 Tex_UV;

     #ifndef saturate
     float saturate(float val)
     {
         return clamp(val, 0.0, 1.0);
     }
     #endif

     vec4 blendSoftLight(vec4 a, vec4 b)
     {
         vec4 c = 2.0 * a * b + a * a * (1.0 - 2.0 * b);
         vec4 d = sqrt(a) * (2.0 * b - 1.0) + 2.0 * a * (1.0 - b);
         // TODO: check how Crysis actually does this
         // return (b < 0.5) ? c : d;
         return any(lessThan(b, vec4(0.5, 0.5, 0.5, 0.5))) ? c : d;
     }

     void main(void)
     {
         vec4 sun_pos = Sun_pos;
         vec2 sunPosProj = sun_pos.xy;
         // float sign = sun_pos.w;
         float sign = 1.0;

         vec2 sunVec = sunPosProj.xy - (Tex_UV.xy - vec2(0.5, 0.5));
         float sunDist = saturate(sign) * saturate(1.0 - saturate(length(sunVec) * ShaftParams.y));
         sunVec *= ShaftParams.x * sign;

         vec4 accum;
         vec2 tc = Tex_UV.xy;
         tc += sunVec;
         accum = texture2D(BlurSampler, tc);
         tc += sunVec;
         accum += texture2D(BlurSampler, tc) * 0.875;
         tc += sunVec;
         accum += texture2D(BlurSampler, tc) * 0.75;
         tc += sunVec;
         accum += texture2D(BlurSampler, tc) * 0.625;
         tc += sunVec;
         accum += texture2D(BlurSampler, tc) * 0.5;
         tc += sunVec;
         accum += texture2D(BlurSampler, tc) * 0.375;
         tc += sunVec;
         accum += texture2D(BlurSampler, tc) * 0.25;
         tc += sunVec;
         accum += texture2D(BlurSampler, tc) * 0.125;

         accum *= 0.25 * vec4(sunDist, sunDist, sunDist, 1.0);
         accum.w += 1.0 - saturate(saturate(sign * 0.1 + 0.9));

         vec4 cScreen = texture2D(FullSampler, Tex_UV.xy);
         vec4 cSunShafts = accum;
         float fShaftsMask = saturate(1.00001 - cSunShafts.w) * ShaftParams.z * 2.0;
         float fBlend = cSunShafts.w;

         vec4 sunColor = vec4(0.9, 0.8, 0.6, 1.0);
         accum = cScreen + cSunShafts.xyzz * ShaftParams.w * sunColor * (1.0 - cScreen);
         accum = blendSoftLight(accum, sunColor * fShaftsMask * 0.5 + 0.5);
         gl_FragColor = accum;
     }

     Demo project: Demo Project. The post-process shaders are in Shaders/SunShaft/. What am I doing wrong? Thanks!
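     One detail in the final shader that may contribute to the artifacts: any(lessThan(b, vec4(0.5))) collapses the per-component select (b < 0.5) ? c : d into a single branch for the whole vector. A per-component sketch of the same blend:

     vec4 blendSoftLight(vec4 a, vec4 b)
     {
         vec4 c = 2.0 * a * b + a * a * (1.0 - 2.0 * b);
         vec4 d = sqrt(a) * (2.0 * b - 1.0) + 2.0 * a * (1.0 - b);
         return mix(d, c, step(b, vec4(0.5)));  // c where b <= 0.5, d elsewhere
     }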