• Advertisement
  • Popular Tags

  • Popular Now

  • Advertisement
  • Similar Content

    • By codelyoko373
      I wasn't sure if this would be the right place for a topic like this so sorry if it isn't.
      I'm currently working on a project for Uni using FreeGLUT to make a simple solar system simulation. I've got to the point where I've implemented all the planets and have used a Scene Graph to link them all together. The issue I'm having with now though is basically the planets and moons orbit correctly at their own orbit speeds.
      I'm not really experienced with using matrices for stuff like this so It's likely why I can't figure out how exactly to get it working. This is where I'm applying the transformation matrices, as well as pushing and popping them. This is within the Render function that every planet including the sun and moons will have and run.
      if (tag != "Sun") { glRotatef(orbitAngle, orbitRotation.X, orbitRotation.Y, orbitRotation.Z); } glPushMatrix(); glTranslatef(position.X, position.Y, position.Z); glRotatef(rotationAngle, rotation.X, rotation.Y, rotation.Z); glScalef(scale.X, scale.Y, scale.Z); glDrawElements(GL_TRIANGLES, mesh->indiceCount, GL_UNSIGNED_SHORT, mesh->indices); if (tag != "Sun") { glPopMatrix(); } The "If(tag != "Sun")" parts are my attempts are getting the planets to orbit correctly though it likely isn't the way I'm meant to be doing it. So I was wondering if someone would be able to help me? As I really don't have an idea on what I would do to get it working. Using the if statement is truthfully the closest I've got to it working but there are still weird effects like the planets orbiting faster then they should depending on the number of planets actually be updated/rendered.
    • By Jens Eckervogt
      Hello everyone, 
      I have problem with texture
      using System; using OpenTK; using OpenTK.Input; using OpenTK.Graphics; using OpenTK.Graphics.OpenGL4; using System.Drawing; using System.Reflection; namespace Tutorial_05 { class Game : GameWindow { private static int WIDTH = 1200; private static int HEIGHT = 720; private static KeyboardState keyState; private int vaoID; private int vboID; private int iboID; private Vector3[] vertices = { new Vector3(-0.5f, 0.5f, 0.0f), // V0 new Vector3(-0.5f, -0.5f, 0.0f), // V1 new Vector3(0.5f, -0.5f, 0.0f), // V2 new Vector3(0.5f, 0.5f, 0.0f) // V3 }; private Vector2[] texcoords = { new Vector2(0, 0), new Vector2(0, 1), new Vector2(1, 1), new Vector2(1, 0) }; private int[] indices = { 0, 1, 3, 3, 1, 2 }; private string vertsrc = @"#version 450 core in vec3 position; in vec2 textureCoords; out vec2 pass_textureCoords; void main(void) { gl_Position = vec4(position, 1.0); pass_textureCoords = textureCoords; }"; private string fragsrc = @"#version 450 core in vec2 pass_textureCoords; out vec4 out_color; uniform sampler2D textureSampler; void main(void) { out_color = texture(textureSampler, pass_textureCoords); }"; private int programID; private int vertexShaderID; private int fragmentShaderID; private int textureID; private Bitmap texsrc; public Game() : base(WIDTH, HEIGHT, GraphicsMode.Default, "Tutorial 05 - Texturing", GameWindowFlags.Default, DisplayDevice.Default, 4, 5, GraphicsContextFlags.Default) { } protected override void OnLoad(EventArgs e) { base.OnLoad(e); CursorVisible = true; GL.GenVertexArrays(1, out vaoID); GL.BindVertexArray(vaoID); GL.GenBuffers(1, out vboID); GL.BindBuffer(BufferTarget.ArrayBuffer, vboID); GL.BufferData(BufferTarget.ArrayBuffer, (IntPtr)(vertices.Length * Vector3.SizeInBytes), vertices, BufferUsageHint.StaticDraw); GL.GenBuffers(1, out iboID); GL.BindBuffer(BufferTarget.ElementArrayBuffer, iboID); GL.BufferData(BufferTarget.ElementArrayBuffer, (IntPtr)(indices.Length * sizeof(int)), indices, BufferUsageHint.StaticDraw); vertexShaderID = 
GL.CreateShader(ShaderType.VertexShader); GL.ShaderSource(vertexShaderID, vertsrc); GL.CompileShader(vertexShaderID); fragmentShaderID = GL.CreateShader(ShaderType.FragmentShader); GL.ShaderSource(fragmentShaderID, fragsrc); GL.CompileShader(fragmentShaderID); programID = GL.CreateProgram(); GL.AttachShader(programID, vertexShaderID); GL.AttachShader(programID, fragmentShaderID); GL.LinkProgram(programID); // Loading texture from embedded resource texsrc = new Bitmap(Assembly.GetEntryAssembly().GetManifestResourceStream("Tutorial_05.example.png")); textureID = GL.GenTexture(); GL.BindTexture(TextureTarget.Texture2D, textureID); GL.TexParameter(TextureTarget.Texture2D, TextureParameterName.TextureMagFilter, (int)All.Linear); GL.TexParameter(TextureTarget.Texture2D, TextureParameterName.TextureMinFilter, (int)All.Linear); GL.TexImage2D(TextureTarget.Texture2D, 0, PixelInternalFormat.Rgba, texsrc.Width, texsrc.Height, 0, PixelFormat.Bgra, PixelType.UnsignedByte, IntPtr.Zero); System.Drawing.Imaging.BitmapData bitmap_data = texsrc.LockBits(new Rectangle(0, 0, texsrc.Width, texsrc.Height), System.Drawing.Imaging.ImageLockMode.ReadOnly, System.Drawing.Imaging.PixelFormat.Format32bppRgb); GL.TexSubImage2D(TextureTarget.Texture2D, 0, 0, 0, texsrc.Width, texsrc.Height, PixelFormat.Bgra, PixelType.UnsignedByte, bitmap_data.Scan0); texsrc.UnlockBits(bitmap_data); GL.Enable(EnableCap.Texture2D); GL.BufferData(BufferTarget.TextureBuffer, (IntPtr)(texcoords.Length * Vector2.SizeInBytes), texcoords, BufferUsageHint.StaticDraw); GL.BindAttribLocation(programID, 0, "position"); GL.BindAttribLocation(programID, 1, "textureCoords"); } protected override void OnResize(EventArgs e) { base.OnResize(e); GL.Viewport(0, 0, ClientRectangle.Width, ClientRectangle.Height); } protected override void OnUpdateFrame(FrameEventArgs e) { base.OnUpdateFrame(e); keyState = Keyboard.GetState(); if (keyState.IsKeyDown(Key.Escape)) { Exit(); } } protected override void OnRenderFrame(FrameEventArgs e) { 
base.OnRenderFrame(e); // Prepare for background GL.Clear(ClearBufferMask.ColorBufferBit); GL.ClearColor(Color4.Red); // Draw traingles GL.EnableVertexAttribArray(0); GL.EnableVertexAttribArray(1); GL.BindVertexArray(vaoID); GL.UseProgram(programID); GL.BindBuffer(BufferTarget.ArrayBuffer, vboID); GL.VertexAttribPointer(0, 3, VertexAttribPointerType.Float, false, 0, IntPtr.Zero); GL.ActiveTexture(TextureUnit.Texture0); GL.BindTexture(TextureTarget.Texture3D, textureID); GL.BindBuffer(BufferTarget.ElementArrayBuffer, iboID); GL.DrawElements(BeginMode.Triangles, indices.Length, DrawElementsType.UnsignedInt, 0); GL.DisableVertexAttribArray(0); GL.DisableVertexAttribArray(1); SwapBuffers(); } protected override void OnClosed(EventArgs e) { base.OnClosed(e); GL.DeleteVertexArray(vaoID); GL.DeleteBuffer(vboID); } } } I can not remember where do I add GL.Uniform2();
    • By Jens Eckervogt
      Hello everyone
      For @80bserver8 nice job - I have found Google search. How did you port from Javascript WebGL to C# OpenTK.?
      I have been searched Google but it shows f***ing Unity 3D. I really want know how do I understand I want start with OpenTK But I want know where is porting of Javascript and C#?
       
      Thanks!
    • By mike44
      Hi
      I draw in a OpenGL framebuffer. All is fine but it eats FPS (frames per second), hence I wonder if I could execute the framebuffer drawing only every 5-10th loop or so?
      Many thanks
       
    • By cebugdev
      hi all,
      how to implement this type of effect ? 
      Also what is this effect called? this is considered volumetric lighting?
      what are the options of doing this? 
      a. billboard? but i want this to have the 3D effect that when we rotate the camera we can still have that 3d feel.
      b. a transparent 3d mesh? and we can animate it as well?
      need your expert advise.
       
      additional:
      2. how to implement things like fireball projectile (shot from a monster) (billboard texture or a 3d mesh)?
       
      Note: im using OpenGL ES 2.0 on mobile. 
       
      thanks!
  • Advertisement
  • Advertisement
Sign in to follow this  

OpenGL 3D Projection to 2D screen space

This topic is 990 days old which is more than the 365 day threshold we allow for new replies. Please post a new topic.

If you intended to correct an error in the post then please contact us.

Recommended Posts

Just for kicks, I'm writing a software renderer. No particular reason, just for kicks.

 

In my DirectX engine or my OpenGL engine, doing perspective or orthographic rendering is simply a matter of generating the corresponding matrix, perspective or orthographic. Both rendering APIs handle this without any additional information.

 

Here's my code to convert perspective projection to screen space.

((trans.vert.x / trans.vert.z) * m_screenHalf.w) + m_screenHalfS.w + 0.5f;
((trans.vert.y / trans.vert.z) * m_screenHalf.h) + m_screenHalfS.h + 0.5f;

The problem is, this same code can't be used for orthographic projection. For orthographic projection, you don't need to divide by z so I was assuming z would translate to 1. The orthographic matrix is described as below.

yScale = cot(fovY/2)
xScale = yScale / aspect ratio
xScale     0          0              0
0        yScale       0              0
0        0        zf/(zn-zf)        -1
0        0        zn*zf/(zn-zf)      0

So to correct this problem, I would simply do the below.

(trans.vert.x * m_screenHalf.w) + m_screenHalfS.w + 0.5f;
(trans.vert.y * m_screenHalf.h) + m_screenHalfS.h + 0.5f;

So my question is, why do I need to differentiate in my code between the perspective and orthographic projections when DirectX or OpenGL works seamlessly? For DirectX or OpenGL, you can give your geometry any z value and it seems to ignore it. I'm getting z values I can't use for orthographic projection.

Share this post


Link to post
Share on other sites
Advertisement

Those are two different projections, and the reason OpenGL and D3D don't need to change their code is that they use matrices for the projection — so a change in the matrix changes the projection. The Z component is still used; it is not ignored, nor is it set to 1.

Share this post


Link to post
Share on other sites

Those are two different projections, and the reason OpenGL and D3D don't need to change their code is that they use matrices for the projection — so a change in the matrix changes the projection. The Z component is still used; it is not ignored, nor is it set to 1.


This is an area that I am totally rusty on. If the z coordinate is used, wouldn't it have an effect on the final projection?

Share this post


Link to post
Share on other sites

I set up some test DirectX code, generated an orthographic matrix and translated some points. The z is being set to some value that otherwise shouldn't work, but yet it's passed down the pipeline and all is well. Since this is done in the shader, DirectX or OpenGL only receives the translated verts, so I don't know how or why this works.

Share this post


Link to post
Share on other sites

The basics of the two projection methods are completely different. In orthogonal projection the z does not affect the outcome's x, y values.

 

The point is first translated and then the x, y, and z are only scaled depending on the input of the projection matrix, and this translation and scaling is the same for all the points, independent of their z values.

 

https://msdn.microsoft.com/en-us/library/windows/desktop/bb205347(v=vs.85).aspx

 

as you can see the matrix is a simple combination of translation matrix and a scaling matrix.

 

So the z is not ignored, nor is it set to a constant value. If the z were ignored, the depth (z) test wouldn't be possible.

Edited by IYP

Share this post


Link to post
Share on other sites

Looking at the homogeneous vectors and projection matrices, thing is that the step in question does not divide by z but by w. Now w depends on z (within view space) in case of perspective projection, but it does not in case of orthogonal projection. So also in orthogonal projection there is still a varying z (used for depth test) although x and y are not affected by the value of z.

Edited by haegarr

Share this post


Link to post
Share on other sites
Sign in to follow this  

  • Advertisement