• Advertisement
  • Popular Tags

  • Popular Now

  • Advertisement
  • Similar Content

    • By codelyoko373
      I wasn't sure if this would be the right place for a topic like this so sorry if it isn't.
      I'm currently working on a project for Uni using FreeGLUT to make a simple solar system simulation. I've got to the point where I've implemented all the planets and have used a Scene Graph to link them all together. The issue I'm having now, though, is getting the planets and moons to orbit correctly at their own orbit speeds.
      I'm not really experienced with using matrices for stuff like this so It's likely why I can't figure out how exactly to get it working. This is where I'm applying the transformation matrices, as well as pushing and popping them. This is within the Render function that every planet including the sun and moons will have and run.
      if (tag != "Sun") { glRotatef(orbitAngle, orbitRotation.X, orbitRotation.Y, orbitRotation.Z); } glPushMatrix(); glTranslatef(position.X, position.Y, position.Z); glRotatef(rotationAngle, rotation.X, rotation.Y, rotation.Z); glScalef(scale.X, scale.Y, scale.Z); glDrawElements(GL_TRIANGLES, mesh->indiceCount, GL_UNSIGNED_SHORT, mesh->indices); if (tag != "Sun") { glPopMatrix(); } The "if (tag != "Sun")" parts are my attempts at getting the planets to orbit correctly, though it likely isn't the way I'm meant to be doing it. So I was wondering if someone would be able to help me, as I really don't have an idea of what I would do to get it working. Using the if statement is truthfully the closest I've got to it working, but there are still weird effects, like the planets orbiting faster than they should depending on the number of planets actually being updated/rendered.
    • By Jens Eckervogt
      Hello everyone, 
      I have problem with texture
      using System; using OpenTK; using OpenTK.Input; using OpenTK.Graphics; using OpenTK.Graphics.OpenGL4; using System.Drawing; using System.Reflection; namespace Tutorial_05 { class Game : GameWindow { private static int WIDTH = 1200; private static int HEIGHT = 720; private static KeyboardState keyState; private int vaoID; private int vboID; private int iboID; private Vector3[] vertices = { new Vector3(-0.5f, 0.5f, 0.0f), // V0 new Vector3(-0.5f, -0.5f, 0.0f), // V1 new Vector3(0.5f, -0.5f, 0.0f), // V2 new Vector3(0.5f, 0.5f, 0.0f) // V3 }; private Vector2[] texcoords = { new Vector2(0, 0), new Vector2(0, 1), new Vector2(1, 1), new Vector2(1, 0) }; private int[] indices = { 0, 1, 3, 3, 1, 2 }; private string vertsrc = @"#version 450 core in vec3 position; in vec2 textureCoords; out vec2 pass_textureCoords; void main(void) { gl_Position = vec4(position, 1.0); pass_textureCoords = textureCoords; }"; private string fragsrc = @"#version 450 core in vec2 pass_textureCoords; out vec4 out_color; uniform sampler2D textureSampler; void main(void) { out_color = texture(textureSampler, pass_textureCoords); }"; private int programID; private int vertexShaderID; private int fragmentShaderID; private int textureID; private Bitmap texsrc; public Game() : base(WIDTH, HEIGHT, GraphicsMode.Default, "Tutorial 05 - Texturing", GameWindowFlags.Default, DisplayDevice.Default, 4, 5, GraphicsContextFlags.Default) { } protected override void OnLoad(EventArgs e) { base.OnLoad(e); CursorVisible = true; GL.GenVertexArrays(1, out vaoID); GL.BindVertexArray(vaoID); GL.GenBuffers(1, out vboID); GL.BindBuffer(BufferTarget.ArrayBuffer, vboID); GL.BufferData(BufferTarget.ArrayBuffer, (IntPtr)(vertices.Length * Vector3.SizeInBytes), vertices, BufferUsageHint.StaticDraw); GL.GenBuffers(1, out iboID); GL.BindBuffer(BufferTarget.ElementArrayBuffer, iboID); GL.BufferData(BufferTarget.ElementArrayBuffer, (IntPtr)(indices.Length * sizeof(int)), indices, BufferUsageHint.StaticDraw); vertexShaderID = 
GL.CreateShader(ShaderType.VertexShader); GL.ShaderSource(vertexShaderID, vertsrc); GL.CompileShader(vertexShaderID); fragmentShaderID = GL.CreateShader(ShaderType.FragmentShader); GL.ShaderSource(fragmentShaderID, fragsrc); GL.CompileShader(fragmentShaderID); programID = GL.CreateProgram(); GL.AttachShader(programID, vertexShaderID); GL.AttachShader(programID, fragmentShaderID); GL.LinkProgram(programID); // Loading texture from embedded resource texsrc = new Bitmap(Assembly.GetEntryAssembly().GetManifestResourceStream("Tutorial_05.example.png")); textureID = GL.GenTexture(); GL.BindTexture(TextureTarget.Texture2D, textureID); GL.TexParameter(TextureTarget.Texture2D, TextureParameterName.TextureMagFilter, (int)All.Linear); GL.TexParameter(TextureTarget.Texture2D, TextureParameterName.TextureMinFilter, (int)All.Linear); GL.TexImage2D(TextureTarget.Texture2D, 0, PixelInternalFormat.Rgba, texsrc.Width, texsrc.Height, 0, PixelFormat.Bgra, PixelType.UnsignedByte, IntPtr.Zero); System.Drawing.Imaging.BitmapData bitmap_data = texsrc.LockBits(new Rectangle(0, 0, texsrc.Width, texsrc.Height), System.Drawing.Imaging.ImageLockMode.ReadOnly, System.Drawing.Imaging.PixelFormat.Format32bppRgb); GL.TexSubImage2D(TextureTarget.Texture2D, 0, 0, 0, texsrc.Width, texsrc.Height, PixelFormat.Bgra, PixelType.UnsignedByte, bitmap_data.Scan0); texsrc.UnlockBits(bitmap_data); GL.Enable(EnableCap.Texture2D); GL.BufferData(BufferTarget.TextureBuffer, (IntPtr)(texcoords.Length * Vector2.SizeInBytes), texcoords, BufferUsageHint.StaticDraw); GL.BindAttribLocation(programID, 0, "position"); GL.BindAttribLocation(programID, 1, "textureCoords"); } protected override void OnResize(EventArgs e) { base.OnResize(e); GL.Viewport(0, 0, ClientRectangle.Width, ClientRectangle.Height); } protected override void OnUpdateFrame(FrameEventArgs e) { base.OnUpdateFrame(e); keyState = Keyboard.GetState(); if (keyState.IsKeyDown(Key.Escape)) { Exit(); } } protected override void OnRenderFrame(FrameEventArgs e) { 
base.OnRenderFrame(e); // Prepare for background GL.Clear(ClearBufferMask.ColorBufferBit); GL.ClearColor(Color4.Red); // Draw traingles GL.EnableVertexAttribArray(0); GL.EnableVertexAttribArray(1); GL.BindVertexArray(vaoID); GL.UseProgram(programID); GL.BindBuffer(BufferTarget.ArrayBuffer, vboID); GL.VertexAttribPointer(0, 3, VertexAttribPointerType.Float, false, 0, IntPtr.Zero); GL.ActiveTexture(TextureUnit.Texture0); GL.BindTexture(TextureTarget.Texture3D, textureID); GL.BindBuffer(BufferTarget.ElementArrayBuffer, iboID); GL.DrawElements(BeginMode.Triangles, indices.Length, DrawElementsType.UnsignedInt, 0); GL.DisableVertexAttribArray(0); GL.DisableVertexAttribArray(1); SwapBuffers(); } protected override void OnClosed(EventArgs e) { base.OnClosed(e); GL.DeleteVertexArray(vaoID); GL.DeleteBuffer(vboID); } } } I can not remember where do I add GL.Uniform2();
    • By Jens Eckervogt
      Hello everyone
      For @80bserver8: nice job - I found it through a Google search. How did you port from JavaScript WebGL to C# OpenTK?
      I have searched Google, but it only shows Unity 3D. I really want to get started with OpenTK, but I want to know where I can find a porting guide between JavaScript and C#?
       
      Thanks!
    • By mike44
      Hi
      I draw in an OpenGL framebuffer. All is fine, but it eats FPS (frames per second), hence I wonder if I could execute the framebuffer drawing only every 5-10th loop or so?
      Many thanks
       
    • By cebugdev
      hi all,
      how to implement this type of effect ? 
      Also, what is this effect called? Is this considered volumetric lighting?
      what are the options of doing this? 
      a. billboard? but i want this to have the 3D effect that when we rotate the camera we can still have that 3d feel.
      b. a transparent 3d mesh? and we can animate it as well?
      need your expert advice.
       
      additional:
      2. how to implement things like fireball projectile (shot from a monster) (billboard texture or a 3d mesh)?
       
      Note: im using OpenGL ES 2.0 on mobile. 
       
      thanks!
  • Advertisement
  • Advertisement
Sign in to follow this  

OpenGL Learning

This topic is 4397 days old which is more than the 365 day threshold we allow for new replies. Please post a new topic.

If you intended to correct an error in the post then please contact us.

Recommended Posts

In learning real-time 3D graphics programming with OpenGL, how can I tell what methods are antiquated? For example, I found out that masking is not really used. I also assume the stipple and bitmap stuff probably isn't used that much anymore. What about fog? What about the selection and feedback modes? And frame buffers are the replacement for pixel buffers? What about indexed color modes and associated functions and pixel manipulation? Vertex buffers are preferred over vertex arrays now? Also what are pixel fragments, is this a rasterized primitive in whole or in part? Also is it easier to go OGL -> D3D or D3D -> OGL in terms of which you learned first? Also I can never understand RHW and how W changes as it goes through the pipeline and what it is used for. Is there any way to understand this without writing a complete software renderer?

Share this post


Link to post
Share on other sites
Advertisement
Of course, answering your questions is influenced heavily by application in mind and partly also by personal preference. The following is my sight.

Quote:
Original post by Boder
For example, I found out that masking is not really used. I also assume the stipple and bitmap stuff probably isn't used that much anymore.

Stenciling still has the occasional use case. E.g. recently there was a thread asking about on-the-fly CSG. Bitmaps are sometimes used e.g. for fonts, but there are faster methods. I personally use small bitmaps for displaying markers for vertices, axes, control points, and so on in my editor. In short, the usefulness depends on your application.

Quote:
Original post by Boder
What about fog?

The fog you presumably mean is a pure view dependent thing, say there is no volume restriction (like "ground fog" or such). Hence its use cases are restricted. I use the fog only for view depth restriction in my engine currently.

Quote:
Original post by Boder
What about the selection and feedback modes?

In the past 30 days I remember of 3 threads dealing with them, so it can't be totally obsolete ;) I personally don't use them atm.

Quote:
Original post by Boder
And frame buffers are the replacement for pixel buffers?

No. Frame buffer is the normal color pixel buffer (say the x/y part of the display, extended by the depth buffer into the 3rd dimension): An "on-screen" rendering target. Pixel buffers are a kind of "off-screen" rendering target for algorithms that need to render intermediate steps somewhere.

Quote:
Original post by Boder
What about indexed color modes and associated functions and pixel manipulation?

Nowadays I think indexed color modes are seldom used.

Quote:
Original post by Boder
Vertex buffers are preferred over vertex arrays now?

In general, yes.

Quote:
Original post by Boder
Also is it easier to go OGL -> D3D or D3D -> OGL in terms of which you learned first?

Well, D3D already provides help to deal with many details (like vectors and matrices) what OGL doesn't. If you have programmed OGL you were forced to do much stuff yourself, and hence you must have background knowledge then. So IMHO using D3D later may be easier. On the other hand, there're people saying that due to the same reason it is easier to learn D3D first, since it allows a faster way of doing things.

Quote:
Original post by Boder
Also I can never understand RHW and how W changes as it goes through the pipeline and what it is used for. Is there any way to understand this without writing a complete software renderer?

Err, now you've got me ;) What does RHW mean? Okay, w is the typical variable name for the homogeneous co-ordinate. Do you mean that? If so, understanding homogeneous co-ordinates is IMHO easy if you already have some background in vector math, and knowing affine transformations may also help but isn't really required (otherwise you'll have to learn it alongside).

Share this post


Link to post
Share on other sites
Quote:
Original post by haegarr
Quote:
Original post by Boder
Also is it easier to go OGL -> D3D or D3D -> OGL in terms of which you learned first?

Well, D3D already provides help to deal with many details (like vectors and matrices) what OGL doesn't. If you have programmed OGL you were forced to do much stuff yourself, and hence you must have background knowledge then. So IMHO using D3D later may be easier. On the other hand, there're people saying that due to the same reason it is easier to learn D3D first, since it allows a faster way of doing things.


Things that speak for OpenGL IMHO:

- immediate mode: For learning and experimenting it is wonderful

- low setup overhead: Create a window, add a gl context, set up the perspective transformation and start drawing.

- clean and (mostly) consistent interface: It is easy to guess how something is called after a while, which is always a Good Thing.

- well documented: also a Good Thing

As for the math, you have to learn it anyway; doing so from the start is better, because you will already know it when you really need it.

I would suggest OGL -> D3D, but I think it doesn't really matter that much.

Take it all with a grain of salt, since I haven't used D3D and don't plan on doing so. My eyes hurt just from looking at D3D code (and it's not supported on my OS anyway).

Share this post


Link to post
Share on other sites
Sign in to follow this  

  • Advertisement