juxie

Using my own matrix in OpenGL


Hi, I have been quite confused by matrix operations when developing games. I decided to learn them properly, but I am running into a little confusion now. I created the following code using OpenGL & GLUT: main.cpp
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <GL/glut.h>
#include <GL/glu.h>
#include <GL/gl.h>
#include <time.h>

#include "Cube.h"
#include "Vector4.h"
#include "Matrix16.h"

// Set the size of the OpenGL Window
double winL = -300;
double winR = 300;
double winB = -300;
double winT = 300;

double start;	// clock() reading at the previous frame
double last;	// clock() reading at the current frame
double now;	// time elapsed since the previous frame, in seconds

void UpdateScene(void);
void DrawScene(void);
void DrawAxis(void);
void renderBitmapString(float x, float y, void *font,char *string);
void Keyboard(int key, int x, int y);

CCube cube(5);

// This idle callback is continuously called between events.
void Idle(void)
{
	glutPostRedisplay();	// schedule a redraw; GLUT will call DrawScene
}


void renderBitmapString(float x, float y, void *font,char *string)
{
  
  char *c;
  glRasterPos2f(x, y);
  for (c=string; *c != '\0'; c++) {
    glutBitmapCharacter(font, *c);
  }
}  /* end renderBitmapString() */

void 
UpdateScene(void)
{
	last = clock();
	now = (last - start) / CLOCKS_PER_SEC;
	start = last;

	cube.Update(now);
}

void
DrawScene(void)
{
	UpdateScene();
	glLoadIdentity();
	gluLookAt(0, 0, 80, 0, 0, 0, 0, 1, 0);

	glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

	// Your drawing code here

	DrawAxis();

	cube.Render();

	// End drawing code

	glutSwapBuffers();
}

void
DrawAxis(void)
{
	glBegin(GL_LINES);
	glColor3f(1, 0, 0);
	glVertex3f(-200, 0, 0);
	glVertex3f(200, 0, 0);

	glColor3f(0, 1, 0);
	glVertex3f(0, -200, 0);
	glVertex3f(0, 200, 0);

	glColor3f(0, 0, 1);
	glVertex3f(0, 0, -200);
	glVertex3f(0, 0, 200);
	glEnd();
}

void
Init(void)
{
	glShadeModel(GL_SMOOTH);
	glClearColor(0.0f, 0.0f, 0.0f, 0.0f);
	glClearDepth(1.0f);
	glEnable(GL_DEPTH_TEST);
	glDepthFunc(GL_LEQUAL);
	glHint(GL_PERSPECTIVE_CORRECTION_HINT, GL_NICEST);

	glMatrixMode(GL_PROJECTION);
	gluPerspective(45, (winR - winL) / (winT - winB), 1, 1000);
	glMatrixMode(GL_MODELVIEW);

	start = clock();

	glColorMaterial(GL_FRONT, GL_AMBIENT_AND_DIFFUSE);	
	glEnable ( GL_COLOR_MATERIAL ) ;

	glEnable(GL_LIGHTING);
	float pos[4] = {0, 100, 0, 1};
	glLightfv(GL_LIGHT0, GL_POSITION, pos);
	glEnable(GL_LIGHT0);
}

void
Reshape(int width, int height)
{
	glViewport(0, 0, width, height);
}

/* ARGSUSED1 */
void
Key(unsigned char key, int x, int y)
{
	switch (key) {
	case 27:
		exit(0);
		break;
	case ' ':
		glutIdleFunc(Idle);
		break;
	case 'q':
		cube.Rotate(-5, 1, 0, 0, 1);
		break;
	case 'w':
		cube.Rotate(5, 1, 0, 0, 1);
		break;
	case 'a':
		cube.Rotate(-5, 0, 1, 0, 1);
		break;
	case 's':
		cube.Rotate(5, 0, 1, 0, 1);
		break;
	case 'z':
		cube.Rotate(-5, 0, 0, 1, 1);
		break;
	case 'x':
		cube.Rotate(5, 0, 0, 1, 1);
		break;
	case 'r':
		cube.Translate(-5, 0, 0, 2);
		break;
	case 'f':
		cube.Translate(5, 0, 0, 2);
		break;
	case 'e':
		cube.Translate(-5, 0, 0, 1);
		break;
	case 'd':
		cube.Translate(5, 0, 0, 1);
		break;
	case 'l':
		cube.Scale(2, 2, 2);
		break;
	default:
		cube.Reset();
		break;
	}
}

void Keyboard(int key, int x, int y)
{
	switch(key)
	{
	case GLUT_KEY_UP:
		cube.Rotate(-5, 1, 0, 0, 2);
		break;

	case GLUT_KEY_DOWN:
		cube.Rotate(5, 1, 0, 0, 2);
		break;

	case GLUT_KEY_LEFT:
		cube.Rotate(-5, 0, 1, 0, 2);
		break;

	case GLUT_KEY_RIGHT:
		cube.Rotate(5, 0, 1, 0, 2);
		break;

	case GLUT_KEY_PAGE_UP:
		cube.Rotate(-5, 0, 0, 1, 2);
		break;

	case GLUT_KEY_PAGE_DOWN:
		cube.Rotate(5, 0, 0, 1, 2);
		break;
	}
}


int
main(int argc, char **argv)
{
	glutInit(&argc, argv);
	glutInitWindowSize((int)(winR - winL), (int)(winT - winB));
	glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB | GLUT_DEPTH);	// request a depth buffer to match glEnable(GL_DEPTH_TEST)
	glutCreateWindow("Physics");

	Init();

	glutReshapeFunc(Reshape);
	glutKeyboardFunc(Key);
	glutSpecialFunc(Keyboard);
	glutDisplayFunc(DrawScene);
	glutIdleFunc(Idle);

	glutMainLoop();
	return 0;             /* ANSI C requires main to return int. */
}

Matrix16.h
#pragma once
#include <iostream>
#include <math.h>	// for cos, sin and fmod used below
using namespace std;

#include "Vector4.h"

static const float PI = 3.14159265359f;

inline float DEG2RAD(float a)
{
	return (PI/180*(a));
}

inline float RAD2DEG(float a)
{
	return (180/PI*(a));
}

class CMatrix16
{
public:
	CMatrix16(void);
	CMatrix16(float m11, float m12, float m13, float m14,
			float m21, float m22, float m23, float m24,
			float m31, float m32, float m33, float m34,
			float m41, float m42, float m43, float m44);
	~CMatrix16(void);

	// Modification
	static CMatrix16 & Add(const CMatrix16 & m1, const CMatrix16 & m2, CMatrix16 & result);
	static CMatrix16 & Subtract(const CMatrix16 & m1, const CMatrix16 & m2, CMatrix16 & result);
	static CMatrix16 & Multiply(const CMatrix16 & m1, const CMatrix16 & m2, CMatrix16 & result);
	static CMatrix16 & Multiply(const CMatrix16 & m1, float multiplyBy, CMatrix16 & result);
	static CVector4 & Multiply(const CMatrix16 & m1, const CVector4 & v, CVector4 & result);
	static CMatrix16 & Transpose(const CMatrix16 & m1, CMatrix16 & result);
	//static CMatrix16 & Invert(const CMatrix16 & m1, CMatrix16 & result);
	static CMatrix16 & SetIdentity(CMatrix16 & m1);
	static float Determinant(const CMatrix16 & m1);

	inline static CMatrix16 Translation(const CVector4 & v)
	{
		return Translation(v.X(), v.Y(), v.Z());
	}

	inline static CMatrix16 Translation(float x, float y, float z)
	{
		return CMatrix16(1, 0, 0, 0,
						 0, 1, 0, 0,
						 0, 0, 1, 0,
						 x, y, z, 1);
	}

	inline static CMatrix16 Scale(float x, float y, float z)
	{
		return CMatrix16(x, 0, 0, 0,
						 0, y, 0, 0,
						 0, 0, z, 0,
						 0, 0, 0, 1);
	}

	// Axis-angle rotation; (x, y, z) is assumed to be a unit vector.
	inline static CMatrix16 Rotation(float angle, float x, float y, float z)
	{
		angle = fmod(angle, 360.0f);	// wrap the angle into (-360, 360) degrees
		angle = DEG2RAD(angle);
		float c = cos(angle);
		float s = sin(angle);
		return CMatrix16(c + x * x * (1 - c), x * y * (1 - c) - s * z, x * z * (1 - c) + s * y, 0,
							x * y * (1 - c) + s * z, c + y * y * (1 - c), y * z * (1 - c) - s * x, 0,
							x * z * (1 - c) - s * y, y * z * (1 - c) + s * x, c + z * z * (1 - c), 0,
							0, 0, 0, 1);
	}

	inline CMatrix16 & Transpose() { CMatrix16 temp(*this); return CMatrix16::Transpose(temp, (*this)); }
	//inline CMatrix16 & Invert() { CMatrix16 temp((*this)); return CMatrix16::Invert(temp, (*this)); } 
	inline CMatrix16 & SetIdentity() { return CMatrix16::SetIdentity((*this)); }
	inline float Determinant() const { return CMatrix16::Determinant((*this)); }

	// Auxiliaries
	CMatrix16 & operator+=(const CMatrix16 & m1) { return CMatrix16::Add((*this), m1, (*this)); }
	CMatrix16 & operator-=(const CMatrix16 & m1) { return CMatrix16::Subtract((*this), m1, (*this)); }
	CMatrix16 & operator*=(const CMatrix16 & m1) { CMatrix16 temp((*this)); return CMatrix16::Multiply(temp, m1, (*this)); }
	CMatrix16 & operator*=(const float multiplyBy) { CMatrix16 temp((*this)); return CMatrix16::Multiply(temp, multiplyBy, (*this)); }

	// Conversion: writes the matrix in OpenGL's column-major layout,
	// so _m41.._m43 land in m[12..14], the translation slots.
	void ToArray(float * m) const 
	{
		m[0] = _m11;	m[4] = _m21;	m[8] = _m31;	m[12] = _m41;
		m[1] = _m12;	m[5] = _m22;	m[9] = _m32;	m[13] = _m42;
		m[2] = _m13;	m[6] = _m23;	m[10] = _m33;	m[14] = _m43;
		m[3] = _m14;	m[7] = _m24;	m[11] = _m34;	m[15] = _m44;
	}

	// Output and Input
	void Write(ostream & out) const
	{
		out << "[\t" << _m11 << ",\t" << _m12 << ",\t" << _m13 << ",\t" << _m14 << "\t]" << endl
			<< "[\t" << _m21 << ",\t" << _m22 << ",\t" << _m23 << ",\t" << _m24 << "\t]" << endl
			<< "[\t" << _m31 << ",\t" << _m32 << ",\t" << _m33 << ",\t" << _m34 << "\t]" << endl
			<< "[\t" << _m41 << ",\t" << _m42 << ",\t" << _m43 << ",\t" << _m44 << "\t]" << endl;
	}

	void Read(istream & in)
	{
		char ch;
		in >> _m11 >> ch >> _m12 >> ch >> _m13 >> ch >> _m14
			>> _m21 >> ch >> _m22 >> ch >> _m23 >> ch >> _m24
			>> _m31 >> ch >> _m32 >> ch >> _m33 >> ch >> _m34
			>> _m41 >> ch >> _m42 >> ch >> _m43 >> ch >> _m44;
	}

	float _m11, _m12, _m13, _m14,
		  _m21, _m22, _m23, _m24,
		  _m31, _m32, _m33, _m34,
		  _m41, _m42, _m43, _m44;
};

inline CMatrix16 operator+ (const CMatrix16 & m1, const CMatrix16 & m2) { CMatrix16 m; return CMatrix16::Add(m1, m2, m); }
inline CMatrix16 operator- (const CMatrix16 & m1, const CMatrix16 & m2) { CMatrix16 m; return CMatrix16::Subtract(m1, m2, m); }
inline CMatrix16 operator* (const CMatrix16 & m1, const CMatrix16 & m2) { CMatrix16 m; return CMatrix16::Multiply(m1, m2, m); }
inline CMatrix16 operator* (const CMatrix16 & m1, float multiplyBy) { CMatrix16 m; return CMatrix16::Multiply(m1, multiplyBy, m); }
inline CMatrix16 operator* (float multiplyBy, const CMatrix16 & m1) { CMatrix16 m; return CMatrix16::Multiply(m1, multiplyBy, m); }
inline CVector4 operator* (const CMatrix16 & m1, const CVector4 & v) { CVector4 temp; return CMatrix16::Multiply(m1, v, temp); }
inline CVector4 operator* (const CVector4 & v, const CMatrix16 & m1) { CVector4 temp; return CMatrix16::Multiply(m1, v, temp); }

inline ostream & operator<< (ostream & out, const CMatrix16 & mat) { mat.Write(out); return out; }
inline istream & operator>> (istream & in, CMatrix16 & mat) { mat.Read(in); return in; }

Matrix16.cpp
#include ".\matrix16.h"

CMatrix16::CMatrix16(void)
:	_m11(0), _m12(0), _m13(0), _m14(0),
	_m21(0), _m22(0), _m23(0), _m24(0),
	_m31(0), _m32(0), _m33(0), _m34(0),
	_m41(0), _m42(0), _m43(0), _m44(0)
{
}

CMatrix16::CMatrix16(float m11, float m12, float m13, float m14,
					float m21, float m22, float m23, float m24,
					float m31, float m32, float m33, float m34,
					float m41, float m42, float m43, float m44)
:	_m11(m11), _m12(m12), _m13(m13), _m14(m14),
	_m21(m21), _m22(m22), _m23(m23), _m24(m24),
	_m31(m31), _m32(m32), _m33(m33), _m34(m34),
	_m41(m41), _m42(m42), _m43(m43), _m44(m44)
{
}

CMatrix16::~CMatrix16(void)
{
}

CMatrix16 & CMatrix16::Add(const CMatrix16 & m1, const CMatrix16 & m2, CMatrix16 & result)
{
	result._m11 = m1._m11 + m2._m11;
	result._m12 = m1._m12 + m2._m12;
	result._m13 = m1._m13 + m2._m13;
	result._m14 = m1._m14 + m2._m14;

	result._m21 = m1._m21 + m2._m21;
	result._m22 = m1._m22 + m2._m22;
	result._m23 = m1._m23 + m2._m23;
	result._m24 = m1._m24 + m2._m24;

	result._m31 = m1._m31 + m2._m31;
	result._m32 = m1._m32 + m2._m32;
	result._m33 = m1._m33 + m2._m33;
	result._m34 = m1._m34 + m2._m34;

	result._m41 = m1._m41 + m2._m41;
	result._m42 = m1._m42 + m2._m42;
	result._m43 = m1._m43 + m2._m43;
	result._m44 = m1._m44 + m2._m44;

	return result;
}

CMatrix16 & CMatrix16::Subtract(const CMatrix16 & m1, const CMatrix16 & m2, CMatrix16 & result)
{
	result._m11 = m1._m11 - m2._m11;
	result._m12 = m1._m12 - m2._m12;
	result._m13 = m1._m13 - m2._m13;
	result._m14 = m1._m14 - m2._m14;

	result._m21 = m1._m21 - m2._m21;
	result._m22 = m1._m22 - m2._m22;
	result._m23 = m1._m23 - m2._m23;
	result._m24 = m1._m24 - m2._m24;

	result._m31 = m1._m31 - m2._m31;
	result._m32 = m1._m32 - m2._m32;
	result._m33 = m1._m33 - m2._m33;
	result._m34 = m1._m34 - m2._m34;

	result._m41 = m1._m41 - m2._m41;
	result._m42 = m1._m42 - m2._m42;
	result._m43 = m1._m43 - m2._m43;
	result._m44 = m1._m44 - m2._m44;

	return result;
}

CMatrix16 & CMatrix16::Multiply(const CMatrix16 & m1, const CMatrix16 & m2, CMatrix16 & result)
{
	result._m11 = m1._m11 * m2._m11 + m1._m12 * m2._m21 + m1._m13 * m2._m31 + m1._m14 * m2._m41;
	result._m12 = m1._m11 * m2._m12 + m1._m12 * m2._m22 + m1._m13 * m2._m32 + m1._m14 * m2._m42;
	result._m13 = m1._m11 * m2._m13 + m1._m12 * m2._m23 + m1._m13 * m2._m33 + m1._m14 * m2._m43;
	result._m14 = m1._m11 * m2._m14 + m1._m12 * m2._m24 + m1._m13 * m2._m34 + m1._m14 * m2._m44;

	result._m21 = m1._m21 * m2._m11 + m1._m22 * m2._m21 + m1._m23 * m2._m31 + m1._m24 * m2._m41;
	result._m22 = m1._m21 * m2._m12 + m1._m22 * m2._m22 + m1._m23 * m2._m32 + m1._m24 * m2._m42;
	result._m23 = m1._m21 * m2._m13 + m1._m22 * m2._m23 + m1._m23 * m2._m33 + m1._m24 * m2._m43;
	result._m24 = m1._m21 * m2._m14 + m1._m22 * m2._m24 + m1._m23 * m2._m34 + m1._m24 * m2._m44;

	result._m31 = m1._m31 * m2._m11 + m1._m32 * m2._m21 + m1._m33 * m2._m31 + m1._m34 * m2._m41;
	result._m32 = m1._m31 * m2._m12 + m1._m32 * m2._m22 + m1._m33 * m2._m32 + m1._m34 * m2._m42;
	result._m33 = m1._m31 * m2._m13 + m1._m32 * m2._m23 + m1._m33 * m2._m33 + m1._m34 * m2._m43;
	result._m34 = m1._m31 * m2._m14 + m1._m32 * m2._m24 + m1._m33 * m2._m34 + m1._m34 * m2._m44;

	result._m41 = m1._m41 * m2._m11 + m1._m42 * m2._m21 + m1._m43 * m2._m31 + m1._m44 * m2._m41;
	result._m42 = m1._m41 * m2._m12 + m1._m42 * m2._m22 + m1._m43 * m2._m32 + m1._m44 * m2._m42;
	result._m43 = m1._m41 * m2._m13 + m1._m42 * m2._m23 + m1._m43 * m2._m33 + m1._m44 * m2._m43;
	result._m44 = m1._m41 * m2._m14 + m1._m42 * m2._m24 + m1._m43 * m2._m34 + m1._m44 * m2._m44;

	return result;
}

CMatrix16 & CMatrix16::Multiply(const CMatrix16 & m1, float multiplyBy, CMatrix16 & result)
{
	result._m11 = m1._m11 * multiplyBy;
	result._m12 = m1._m12 * multiplyBy;
	result._m13 = m1._m13 * multiplyBy;
	result._m14 = m1._m14 * multiplyBy;

	result._m21 = m1._m21 * multiplyBy;
	result._m22 = m1._m22 * multiplyBy;
	result._m23 = m1._m23 * multiplyBy;
	result._m24 = m1._m24 * multiplyBy;

	result._m31 = m1._m31 * multiplyBy;
	result._m32 = m1._m32 * multiplyBy;
	result._m33 = m1._m33 * multiplyBy;
	result._m34 = m1._m34 * multiplyBy;

	result._m41 = m1._m41 * multiplyBy;
	result._m42 = m1._m42 * multiplyBy;
	result._m43 = m1._m43 * multiplyBy;
	result._m44 = m1._m44 * multiplyBy;

	return result;
}

// Treats v as a row vector with an implicit w of 1: result = v * m1.
CVector4 & CMatrix16::Multiply(const CMatrix16 & m1, const CVector4 & v, CVector4 &result)
{
	result.X(m1._m11 * v.X() + m1._m21 * v.Y() + m1._m31 * v.Z() + m1._m41);
	result.Y(m1._m12 * v.X() + m1._m22 * v.Y() + m1._m32 * v.Z() + m1._m42);
	result.Z(m1._m13 * v.X() + m1._m23 * v.Y() + m1._m33 * v.Z() + m1._m43);

	return result;
}

CMatrix16 & CMatrix16::Transpose(const CMatrix16 & m1, CMatrix16 & result)
{
	result._m11 = m1._m11;
	result._m12 = m1._m21;
	result._m13 = m1._m31;
	result._m14 = m1._m41;

	result._m21 = m1._m12;
	result._m22 = m1._m22;
	result._m23 = m1._m32;
	result._m24 = m1._m42;

	result._m31 = m1._m13;
	result._m32 = m1._m23;
	result._m33 = m1._m33;
	result._m34 = m1._m43;

	result._m41 = m1._m14;
	result._m42 = m1._m24;
	result._m43 = m1._m34;
	result._m44 = m1._m44;

	return result;
}

/*CMatrix16 & CMatrix16::Invert(const CMatrix16 & m1, CMatrix16 & result)
{
	float temp = 1 / m1.Determinant();

	result._m11 = m1._m22 * m1._m33 * m1._m44 + m1._m23 * m1._m34 * m1._m42 + m1._m24 * m1._m32 * m1._m43
				- m1._m22 * m1._m34 * m1._m43 - m1._m23 * m1._m32 * m1._m44 - m1._m24 * m1._m33 * m1._m42;
	result._m21 = m1._m12 * m1._m34 * m1._m43 + m1._m13 * m1._m32 * m1._m44 + m1._m14 * m1._m33 * m1._m42
				- m1._m12 * m1._m33 * m1._m44 - m1._m13 * m1._m34 * m1._m42 - m1._m14 * m1._m32 * m1._m43;
	result._m31 = m1._m12 * m1._m23 * m1._m44 + m1._m13 * m1._m24 * m1._m42 + m1._m14 * m1._m22 * m1._m43
				- m1._m12 * m1._m24 * m1._m43 - m1._m13 * m1._m22 * m1._m44 - m1._m14 * m1._m23 * m1._m42;
	result._m41 = m1._m12 * m1._m24 * m1._m33 + m1._m13 * m1._m22 * m1._m34 + m1._m14 * m1._m23 * m1._m32
				- m1._m12 * m1._m23 * m1._m34 - m1._m13 * m1._m24 * m1._m32 - m1._m14 * m1._m22 * m1._m33;

	result._m12 = m1._m21 * m1._m34 * m1._m43 + m1._m23 * m1._m31 * m1._m44 + m1._m24 * m1._m33 * m1._m41
				- m1._m21 * m1._m33 * m1._m44 - m1._m23 * m1._m34 * m1._m41 - m1._m24 * m1._m31 * m1._m43;
	result._m22 = m1._m11 * m1._m33 * m1._m44 + m1._m13 * m1._m34 * m1._m41 + m1._m14 * m1._m31 * m1._m43
				- m1._m11 * m1._m34 * m1._m43 - m1._m13 * m1._m31 * m1._m44 - m1._m14 * m1._m33 * m1._m41;
	result._m32 = m1._m11 * m1._m24 * m1._m43 + m1._m13 * m1._m21 * m1._m44 + m1._m14 * m1._m23 * m1._m41
				- m1._m11 * m1._m23 * m1._m44 - m1._m13 * m1._m24 * m1._m41 - m1._m14 * m1._m21 * m1._m43;
	result._m42 = m1._m11 * m1._m23 * m1._m34 + m1._m13 * m1._m24 * m1._m31 + m1._m14 * m1._m21 * m1._m33
				- m1._m11 * m1._m24 * m1._m33 - m1._m13 * m1._m21 * m1._m34 - m1._m14 * m1._m23 * m1._m31;

	result._m13 = m1._m21 * m1._m32 * m1._m44 + m1._m22 * m1._m34 * m1._m41 + m1._m24 * m1._m31 * m1._m42
				- m1._m21 * m1._m34 * m1._m42 - m1._m22 * m1._m31 * m1._m44 - m1._m24 * m1._m32 * m1._m41;
	result._m23 = m1._m11 * m1._m34 * m1._m42 + m1._m12 * m1._m31 * m1._m44 + m1._m14 * m1._m32 * m1._m41
				- m1._m11 * m1._m32 * m1._m44 - m1._m12 * m1._m34 * m1._m41 - m1._m14 * m1._m31 * m1._m42;
	result._m33 = m1._m11 * m1._m22 * m1._m44 + m1._m12 * m1._m24 * m1._m41 + m1._m14 * m1._m21 * m1._m42
				- m1._m11 * m1._m24 * m1._m42 - m1._m12 * m1._m21 * m1._m44 - m1._m14 * m1._m22 * m1._m41;
	result._m43 = m1._m11 * m1._m24 * m1._m32 + m1._m12 * m1._m21 * m1._m34 + m1._m14 * m1._m22 * m1._m31
				- m1._m11 * m1._m22 * m1._m34 - m1._m12 * m1._m24 * m1._m31 - m1._m14 * m1._m21 * m1._m32;

	result._m14 = m1._m21 * m1._m33 * m1._m42 + m1._m22 * m1._m31 * m1._m43 + m1._m23 * m1._m32 * m1._m41
				- m1._m21 * m1._m32 * m1._m43 - m1._m22 * m1._m33 * m1._m41 - m1._m23 * m1._m31 * m1._m42;
	result._m24 = m1._m11 * m1._m32 * m1._m43 + m1._m12 * m1._m33 * m1._m41 + m1._m13 * m1._m31 * m1._m42
				- m1._m11 * m1._m33 * m1._m42 - m1._m12 * m1._m31 * m1._m43 - m1._m13 * m1._m32 * m1._m41;
	result._m34 = m1._m11 * m1._m23 * m1._m42 + m1._m12 * m1._m21 * m1._m43 + m1._m13 * m1._m22 * m1._m41
				- m1._m11 * m1._m22 * m1._m43 - m1._m12 * m1._m23 * m1._m41 - m1._m13 * m1._m21 * m1._m42;
	result._m44 = m1._m11 * m1._m22 * m1._m33 + m1._m12 * m1._m23 * m1._m31 + m1._m13 * m1._m21 * m1._m32
				- m1._m11 * m1._m23 * m1._m32 - m1._m12 * m1._m21 * m1._m33 - m1._m13 * m1._m22 * m1._m31;

	result = result * temp;

	return result;
}*/

CMatrix16 & CMatrix16::SetIdentity(CMatrix16 & m1)
{
	m1._m11 = 1;	m1._m12 = 0;	m1._m13 = 0;	m1._m14 = 0;
	m1._m21 = 0;	m1._m22 = 1;	m1._m23 = 0;	m1._m24 = 0;
	m1._m31 = 0;	m1._m32 = 0;	m1._m33 = 1;	m1._m34 = 0;
	m1._m41 = 0;	m1._m42 = 0;	m1._m43 = 0;	m1._m44 = 1;

	return m1;
}

float CMatrix16::Determinant(const CMatrix16 & m1)
{
	return m1._m11 * m1._m22 * m1._m33 * m1._m44 + m1._m11 * m1._m23 * m1._m34 * m1._m42 + m1._m11 * m1._m24 * m1._m32 * m1._m43 +
			m1._m12 * m1._m21 * m1._m34 * m1._m43 + m1._m12 * m1._m23 * m1._m31 * m1._m44 + m1._m12 * m1._m24 * m1._m33 * m1._m41 +
			m1._m13 * m1._m21 * m1._m32 * m1._m44 + m1._m13 * m1._m22 * m1._m34 * m1._m41 + m1._m13 * m1._m24 * m1._m31 * m1._m42 +
			m1._m14 * m1._m21 * m1._m33 * m1._m42 + m1._m14 * m1._m22 * m1._m31 * m1._m43 + m1._m14 * m1._m23 * m1._m32 * m1._m41 -
			m1._m11 * m1._m22 * m1._m34 * m1._m43 - m1._m11 * m1._m23 * m1._m32 * m1._m44 - m1._m11 * m1._m24 * m1._m33 * m1._m42 -
			m1._m12 * m1._m21 * m1._m33 * m1._m44 - m1._m12 * m1._m23 * m1._m34 * m1._m41 - m1._m12 * m1._m24 * m1._m31 * m1._m43 -
			m1._m13 * m1._m21 * m1._m34 * m1._m42 - m1._m13 * m1._m22 * m1._m31 * m1._m44 - m1._m13 * m1._m24 * m1._m32 * m1._m41 -
			m1._m14 * m1._m21 * m1._m32 * m1._m43 - m1._m14 * m1._m22 * m1._m33 * m1._m41 - m1._m14 * m1._m23 * m1._m31 * m1._m42;
}

Vector4.h
#pragma once
#include <iostream>
using namespace std;

#include <math.h>

class CVector4
{
public:
	// Constructors
	CVector4(void);
	CVector4(float x, float y, float z, float w = 1);
	~CVector4(void);

	// Selectors
	inline float X() const { return _x; }
	inline float Y() const { return _y; }
	inline float Z() const { return _z; }

	// Mutators
	inline void X(float x) { _x = x; }
	inline void Y(float y) { _y = y; }
	inline void Z(float z) { _z = z; }

	// Magnitude
	inline float Length() const { return sqrt(LengthSq()); }
	inline float LengthSq() const { return _x * _x + _y * _y + _z * _z; }
	inline void Zero() { X(0); Y(0); Z(0); }

	// Unit (normalization; no definitions were posted in this thread)
	CVector4 & Unit();
	static CVector4 & Unit(const CVector4 & v, CVector4 & result);

	// Modification
	static float Dot(const CVector4 & v1, const CVector4 & v2);
	static CVector4 & Cross(const CVector4 & v1, const CVector4 & v2, CVector4 & result);
	static CVector4 & Add(const CVector4 & v1, const CVector4 & v2, CVector4 & result);
	static CVector4 & Subtract(const CVector4 & v1, const CVector4 & v2, CVector4 & result);
	static CVector4 & Multiply(const CVector4 & v, float multiplyBy, CVector4 & result);
	static CVector4 & Invert(CVector4 & v);

	// Auxiliaries
	CVector4 & operator+=(const CVector4 & v) { return CVector4::Add((*this), v, (*this)); }
	CVector4 & operator-=(const CVector4 & v) { return CVector4::Subtract((*this), v, (*this)); }
	CVector4 & operator*=(const CVector4 & v) { CVector4 temp((*this)); return CVector4::Cross(temp, v, (*this)); }
	CVector4 & operator*=(float multiplyBy) { CVector4 temp((*this)); return CVector4::Multiply(temp, multiplyBy, (*this)); }
	CVector4 & operator/=(float divideBy) { CVector4 temp((*this)); return CVector4::Multiply(temp, (1 / divideBy), (*this)); }

	// Input and output
	void Write(ostream & out) const { out << "[" << _x << "," << _y << "," << _z <<  "]"; }
	void Read(istream & in) { char ch; in >> ch >> _x >> ch >> _y >> ch >> _z >> ch; }

private:
	float _x, _y, _z, _w;
};

inline CVector4 operator+(const CVector4 & v1, const CVector4 & v2) { CVector4 v; return CVector4::Add(v1, v2, v); }
inline CVector4 operator-(const CVector4 & v1, const CVector4 & v2) { CVector4 v; return CVector4::Subtract(v1, v2, v); }
inline CVector4 operator*(const CVector4 & v1, const CVector4 & v2) { CVector4 v; return CVector4::Cross(v1, v2, v); }
inline CVector4 operator*(const CVector4 & v1, float multiplyBy) { CVector4 v; return CVector4::Multiply(v1, multiplyBy, v); }
inline CVector4 operator/(const CVector4 & v1, float divideBy) { CVector4 v; float m = 1 / divideBy; return CVector4::Multiply(v1, m, v); }
inline float operator|(const CVector4 & v1, const CVector4 & v2) { return CVector4::Dot(v1, v2); }

inline ostream & operator<<(ostream & out, const CVector4 & v) { v.Write(out); return out; }
inline istream & operator>>(istream & in, CVector4 & v) { v.Read(in); return in; }

Vector4.cpp
#include ".\vector4.h"

CVector4::CVector4(void) 
: _x(0), _y(0), _z(0), _w(1)
{
}

CVector4::CVector4(float x, float y, float z, float w) 
: _x(x), _y(y), _z(z), _w(w)
{
}

CVector4::~CVector4(void)
{
}

float CVector4::Dot(const CVector4 & v1, const CVector4 & v2)
{
	return v1.X() * v2.X() + v1.Y() * v2.Y() + v1.Z() * v2.Z();
}

CVector4 & CVector4::Cross(const CVector4 & v1, const CVector4 & v2, CVector4 & result)
{
	result.X(v1.Y() * v2.Z() - v1.Z() * v2.Y());
	result.Y(v1.Z() * v2.X() - v1.X() * v2.Z());
	result.Z(v1.X() * v2.Y() - v1.Y() * v2.X());

	return result;
}

CVector4 & CVector4::Add(const CVector4 & v1, const CVector4 & v2, CVector4 & result)
{
	result.X(v1.X() + v2.X());
	result.Y(v1.Y() + v2.Y());
	result.Z(v1.Z() + v2.Z());

	return result;
}

CVector4 & CVector4::Subtract(const CVector4 & v1, const CVector4 & v2, CVector4 & result)
{
	result.X(v1.X() - v2.X());
	result.Y(v1.Y() - v2.Y());
	result.Z(v1.Z() - v2.Z());

	return result;
}

CVector4 & CVector4::Multiply(const CVector4 & v, float multiplyBy, CVector4 & result)
{
	result.X(v.X() * multiplyBy);
	result.Y(v.Y() * multiplyBy);
	result.Z(v.Z() * multiplyBy);

	return result;
}

CVector4 & CVector4::Invert(CVector4 & v)
{
	v.X(-v.X());
	v.Y(-v.Y());
	v.Z(-v.Z());

	return v;
}

cube.h
#pragma once
#include "Matrix16.h"
#include "Vector4.h"

class CCube
{
public:
	CCube(void);
	CCube(float size);
	~CCube(void);

	float GetSize() const { return _size; }

	void Update(float dt);
	void Render() const;

	void DrawAxis() const;

	// Order 1 = rotate in the cube's local space (pre-multiply, applied first);
	// Order 2 = rotate in world space (post-multiply, applied last).
	void Rotate(float angle, float x, float y, float z, int Order)
	{
		switch(Order)
		{
		case 1:
			_r = CMatrix16::Rotation(angle, x, y, z) * _r;
			break;
		case 2:
			_r = _r * CMatrix16::Rotation(angle, x, y, z);
			break;
		}

		MarkDirty();
	}

	// Order 1 = translate along the cube's local axes; Order 2 = along world axes.
	void Translate(float x, float y, float z, int Order)
	{
		switch(Order)
		{
		case 1:
			_t += (CVector4(x, y, z) * _r);
			break;
		case 2:
			_t += CVector4(x, y, z);
			break;
		}

		MarkDirty();
	}

	void Scale(float x, float y, float z)
	{
		_s._m11 *= x;
		_s._m22 *= y;
		_s._m33 *= z;

		MarkDirty();
	}

	const CMatrix16 & GetTransform() const
	{
		if(_dirty)
		{
			// Compose scale then rotation (row-vector order: v * S * R);
			// scaling after rotating would apply the scale along world axes.
			_trans = _s * _r;
			
			_trans._m41 = _t.X();
			_trans._m42 = _t.Y();
			_trans._m43 = _t.Z();
			_trans._m44 = 1;
			
			_dirty = false;
		}

		return _trans;
	}

	void MarkDirty() { _dirty = true; }

	void Reset()
	{
		_s.SetIdentity();
		_r.SetIdentity();
		_t.Zero();

		MarkDirty();
		GetTransform();
	}

private:
	float _size;
	mutable bool _dirty;

	CMatrix16 _s;
	CMatrix16 _r;
	CVector4 _t;

	mutable CMatrix16 _trans;
};

cube.cpp
#include "Cube.h"
#include <GL/glut.h>

CCube::CCube(void)
:	_size(1), _dirty(true), _s(), _r(), _t(), _trans()
{
	_s.SetIdentity();
	_r.SetIdentity();

	GetTransform();
}

CCube::CCube(float size)
:	_size(size), _dirty(true), _s(), _r(), _t(), _trans()
{
	_s.SetIdentity();
	_r.SetIdentity();

	GetTransform();
}

CCube::~CCube(void)
{
}

void CCube::Update(float dt)
{
}

void CCube::Render() const
{
	glPushMatrix();

	float m[16];
	GetTransform().ToArray(m);
	glMultMatrixf(m);
	
	DrawAxis();

	glColor3f(1, 0, 0);
	glutSolidCube(_size);
	glPopMatrix();
}

void CCube::DrawAxis(void) const
{
	glBegin(GL_LINES);	
	glColor3f(1, 1, 0);
	glVertex3f(-30, 0, 0);
	glVertex3f(30, 0, 0);

	glColor3f(1, 0, 1);
	glVertex3f(0, -30, 0);
	glVertex3f(0, 30, 0);

	glColor3f(0, 1, 1);
	glVertex3f(0, 0, -30);
	glVertex3f(0, 0, 30);
	glEnd();
}

Keys:
Q & W - rotate anticlockwise/clockwise about the local X axis
A & S - rotate anticlockwise/clockwise about the local Y axis
Z & X - rotate anticlockwise/clockwise about the local Z axis
E & D - translate along the local X axis
R & F - translate along the world X axis
Up & Down arrows - rotate about the world X axis
Left & Right arrows - rotate about the world Y axis
Pg Up & Pg Down - rotate about the world Z axis

I am not sure whether the translation part is wrong or the rotation. When I translate, say, by (20, 0, 0) and then rotate 90 degrees about world-space Y, the object still rotates in its own space. I would expect it to orbit around the world-space Y axis. Could anyone please give me some pointers?

First of all, how come in the Translate function the second "order" case doesn't have _r on either side?
Now, the problem is obviously with the order of the matrix multiplication. If you want to rotate in world space, R is the rotation matrix, and P is the current matrix, then you do R*P. The last column of P should be the position (_t in your code). I think the problem is that after you change _t with a translation you don't change the last column of _r, so when you rotate _r you aren't taking the translation into account.
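
To spell out that last point (a small worked step of my own, in the column-vector notation used above): write the cube's full transform as a block matrix

P = [ R0  t ]        R * P = [ R*R0  R*t ]
    [ 0   1 ]                [ 0     1   ]

Pre-multiplying by the world rotation R rotates the translation t along with the orientation, which is exactly the orbiting behaviour; if t is kept outside the matrix being rotated, the position can never be affected.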

Quote:
Original post by daniel_i_l
First of all, how come in the Translate function the second "order" case doesn't have _r on either side?
Now, the problem is obviously with the order of the matrix multiplication. If you want to rotate in world space, R is the rotation matrix, and P is the current matrix, then you do R*P. The last column of P should be the position (_t in your code). I think the problem is that after you change _t with a translation you don't change the last column of _r, so when you rotate _r you aren't taking the translation into account.


Hi daniel,

The second 'order' case in the Translate function is meant to translate in world space.
I am not quite sure if that's done correctly.
I tried modifying only the Translate function to:


void Translate(float x, float y, float z, int Order)
{
	switch(Order)
	{
	case 1:
		_t += (CVector4(x, y, z) * _r);
		break;
	case 2:
		_t += CVector4(x, y, z);
		break;
	}

	_r._m41 = _t.X();
	_r._m42 = _t.Y();
	_r._m43 = _t.Z();

	MarkDirty();
}



the rotation works as I thought it should, i.e. the cube orbits around world-space Y.
However, when I translate along world X after that, the object jumps back to the position it had before the orbit and then moves.

Any idea about this?

Thanks.
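
One way to avoid that (a minimal sketch of my own, untested, built on the CMatrix16 class posted above and its row-vector convention v' = v * M): keep rotation and translation together in a single matrix, so that every world-space operation composes with the whole transform, translation included.

#include "Matrix16.h"

// Hypothetical replacement for the separate _r / _t members of CCube:
// one matrix that always carries both rotation and translation.
class CTransform
{
public:
	CTransform() { _m.SetIdentity(); }

	// World-space ops compose AFTER the current transform (post-multiply),
	// so a world rotation also rotates the stored translation -> orbit.
	void RotateWorld(float angle, float x, float y, float z)
	{ _m = _m * CMatrix16::Rotation(angle, x, y, z); }

	void TranslateWorld(float x, float y, float z)
	{ _m = _m * CMatrix16::Translation(x, y, z); }

	// Local-space ops compose BEFORE the current transform (pre-multiply).
	void RotateLocal(float angle, float x, float y, float z)
	{ _m = CMatrix16::Rotation(angle, x, y, z) * _m; }

	void TranslateLocal(float x, float y, float z)
	{ _m = CMatrix16::Translation(x, y, z) * _m; }

	const CMatrix16 & Get() const { return _m; }

private:
	CMatrix16 _m;
};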

First of all, OpenGL's matrices are stored in the following order:


/*
00,04,08,12
01,05,09,13
02,06,10,14
03,07,11,15
*/

float mat[16];
glMultMatrixf(mat);



Elements 12, 13, 14 hold the translation vector.

If you perform matrix transformations, remember that the last matrix multiplied onto the matrix stack is the first one applied to your vertices.

e.g.:
rot(0,1,0,90)*trans(1,1,1) will translate your vertices by (1,1,1) and then rotate the translated vertices around the Y axis, with the origin (0,0,0) as the rotation pivot.


If you want to rotate your object in object coordinates, you need to first translate its center to the origin, rotate it, and translate it back.
e.g.:
trans(1,1,1)*rot(0,1,0,90)*trans(-1,-1,-1)
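
To make the pivot trick concrete (a sketch of my own in fixed-function GL; drawObject() is a hypothetical draw routine):

glPushMatrix();
// the calls appear in the REVERSE of the order they act on the vertices
glTranslatef(1.0f, 1.0f, 1.0f);     // 3) move the pivot back where it was
glRotatef(90.0f, 0.0f, 1.0f, 0.0f); // 2) rotate about the origin
glTranslatef(-1.0f, -1.0f, -1.0f);  // 1) bring the pivot (1,1,1) to the origin
drawObject();                       // hypothetical draw call
glPopMatrix();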




Quote:
Original post by Basiror
First of all, OpenGL's matrices are stored in the following order:

*** Source Snippet Removed ***

Elements 12, 13, 14 hold the translation vector.

If you perform matrix transformations, remember that the last matrix multiplied onto the matrix stack is the first one applied to your vertices.

e.g.:
rot(0,1,0,90)*trans(1,1,1) will translate your vertices by (1,1,1) and then rotate the translated vertices around the Y axis, with the origin (0,0,0) as the rotation pivot.


If you want to rotate your object in object coordinates, you need to first translate its center to the origin, rotate it, and translate it back.
e.g.:
trans(1,1,1)*rot(0,1,0,90)*trans(-1,-1,-1)


Hi Basiror,

Thanks for your reply.
I was told about pre and post-multiply to achieve world and local transformation.
Would you advise doing pre or post-multiply for performing world and local matrix?

Kindly advice from anyone is welcome too, please.
I have been struggling and reading a lot of articles on matrices.
I understand the basic but when it comes to applying transformation, I would still get confused.
Hope could get some helps.
Thanks.

By convention, OpenGL matrices are post-multiplied.

Think about it like this:

Example post multiply(OPENGL):
1) Modelview; glTranslate(...);
2) Modelview*trans(...); glRotate(....);
3) Modelview*trans(...)*rotate(...);

Post multiply == multiply from right
Pre multiply == multiply from left

Example pre multiply:
1) trans(...); glRotate(....);
2) trans(...)*rotate(...); premult(modelview);
3) Modelview*trans(...)*rotate(...);
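
Note that glMultMatrixf itself always post-multiplies (C = C * M), so the pre-multiply case has to be done by hand; a rough sketch, assuming the modelview matrix is current:

float M[16];	// the matrix you want applied last (pre-multiplied)
float C[16];
// ... fill M ...
glGetFloatv(GL_MODELVIEW_MATRIX, C);	// read the current matrix back
glLoadMatrixf(M);	// current = M
glMultMatrixf(C);	// current = M * C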


That's all.

Some notes:
Your rotation matrix is orthogonal:
-> the transpose of the matrix is its inverse
-> the column vectors are perpendicular to each other
e.g.: a . b == 0 -> a is perpendicular to b

The determinant of a diagonal matrix is the product of its diagonal entries.

There is really a lot of information I could give you, but I would suggest you get the lecture notes of a university math course; there are tons of them on the net.
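
A quick way to check the orthogonality claim with the classes posted above (a throwaway sketch of my own): multiply a rotation by its transpose and confirm you get the identity.

#include <iostream>
#include "Matrix16.h"

int main()
{
	CMatrix16 R = CMatrix16::Rotation(37, 0, 0, 1);
	CMatrix16 Rt = R;
	Rt.Transpose();	// in-place transpose, using the member from Matrix16.h
	cout << (R * Rt);	// should print (approximately) the identity matrix
	return 0;
}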

Actually, you can use any order for storing matrices; OpenGL does not enforce a particular ordering.

The only thing I'd like to add is this link. Hope that helps.

Quote:
Original post by Basiror
By convention, OpenGL matrices are post-multiplied.

Think about it like this:

Example post multiply(OPENGL):
1) Modelview; glTranslate(...);
2) Modelview*trans(...); glRotate(....);
3) Modelview*trans(...)*rotate(...);

Post multiply == multiply from right
Pre multiply == multiply from left

Example pre multiply:
1) trans(...); glRotate(....);
2) trans(...)*rotate(...); premult(modelview);
3) Modelview*trans(...)*rotate(...);


That's all.

Some notes:
Your rotation matrix is orthogonal:
-> the transpose of the matrix is its inverse
-> the column vectors are perpendicular to each other
e.g.: a . b == 0 -> a is perpendicular to b

The determinant of a diagonal matrix is the product of its diagonal entries.

There is really a lot of information I could give you, but I would suggest you get the lecture notes of a university math course; there are tons of them on the net.


I was actually looking at the implementation in the Ogre3D engine too.
In there, the order of matrix multiplication depends on the transform space that is specified.
I wanted to re-create that behaviour to understand matrix transformations.
I was thinking that doing it that way would be much more flexible in terms of implementing user interaction.
Any advice on that?
Thanks.

Quote:
Original post by _neutrin0_
Actually, you can use any order for storing matrices; OpenGL does not enforce a particular ordering.

The only thing I'd like to add is this link. Hope that helps.


Of course you can use whatever order you wish, but remember to take care about sequential access to the matrix elements when multiplying matrices with vectors.

I would adopt an internal order that lets you use OpenGL matrices directly, for the sake of simplicity.

For my personal projects I multiply from the right by convention.
It's just more intuitive and most frameworks I have worked with do it the same way.

Quote:
Original post by Basiror
For my personal projects I multiply from the right by convention.
It's just more intuitive and most frameworks I have worked with do it the same way.
I'm kind of nitpicking here, but for the benefit of the OP I'd like to suggest that neither left-multiplication nor right-multiplication is any more intuitive than the other. One or the other may be more intuitive to a particular person (depending on how they prefer to think about sequential transforms), but neither is objectively better than the other (or at least I haven't come across any convincing arguments to this effect).

Also, I personally find the terms 'right multiply' and 'left multiply' to be somewhat confusing; the terms 'row-vector notation' and 'column-vector notation' are, on the other hand, unambiguous.

May I ask for more advice, please?

With OpenGL, we have glTranslatef(), glRotatef(), and glScalef(), which have to be called within glPushMatrix() and glPopMatrix().
I find that this might limit my key interaction, as I will need to fix the function calls before I render the object.
For example, in the sample I created above, I want the object to be able to change its direction (rotate in its own space), move forward according to that direction, orbit a point, and then move left and right.
Therefore I decided to keep my own matrix.

Any advice, please?

Quote:
Original post by juxie
May I ask for more advice, please?

With OpenGL, we have glTranslatef(), glRotatef(), and glScalef(), which have to be called within glPushMatrix() and glPopMatrix().
I find that this might limit my key interaction, as I will need to fix the function calls before I render the object.
For example, in the sample I created above, I want the object to be able to change its direction (rotate in its own space), move forward according to that direction, orbit a point, and then move left and right.
Therefore I decided to keep my own matrix.

Any advice, please?
Whether or not you use your own math functions or OpenGL's has nothing to do with when, whether, or how an object transform can be modified. I tend to find using one's own math code to be more flexible and convenient than relying exclusively on OpenGL function calls, but there's no fundamental difference in functionality between the two methods (at least not of the sort you describe above).
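
For instance (a sketch of my own, not from the original post), a transform kept in a CMatrix16 mixes freely with GL's own calls:

glPushMatrix();
glTranslatef(10.0f, 0.0f, 0.0f);	// a GL-side transform (hypothetical offset)
float m[16];
cube.GetTransform().ToArray(m);	// the object's own matrix...
glMultMatrixf(m);	// ...multiplied onto the same stack
glutSolidCube(5.0f);	// draw with the combined transform
glPopMatrix();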

Quote:
Original post by Brother Bob
Where did you get the idea from that you have to call matrix functions within a glPush/PopMatrix pair? You're free not to if you don't want to.


I mean, if I don't call the matrix functions within a glPush/PopMatrix pair, they will affect the view matrix, right?

Quote:
Original post by jyk

Whether or not you use your own math functions or OpenGL's has nothing to do with when, whether, or how an object transform can be modified. I tend to find using one's own math code to be more flexible and convenient than relying exclusively on OpenGL function calls, but there's no fundamental difference in functionality between the two methods (at least not of the sort you describe above).


I am really sorry for my confusion.
Is it right to say that OpenGL matrices always work in local space, whereas DirectX works in world space?

How can I flexibly combine world transforms and local transforms?

Quote:
Original post by juxie
Is it right to say that OpenGL matrices always work in local space, whereas DirectX works in world space?
I've heard people say that, but I'm not exactly sure what they mean by it, and in any case I would argue that it's not really accurate (OpenGL and DirectX deal with transforms in basically the same way).

I think what people are referring to here is the difference in notational convention between the two APIs (row-vector vs. column-vector notation) and the implications for multiplication order.

When using the DirectX math library this is directly evident, but in OpenGL everything happens 'under the hood'. It could probably be argued that OpenGL itself doesn't really assume a notational convention; rather, it is simply the case that transforms are applied in the opposite of the order in which the corresponding function calls appear in the code. (Most OpenGL references use column-vector notation, however, so this is how people tend to think of things when working with OpenGL transform functions.)
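
For instance (my own illustration): to rotate an object by R and then translate it by T, the two notations read

column vectors:  v' = T * R * v 	(applied right to left)
row vectors:     v' = v * R * T 	(applied left to right)

and in OpenGL the calls for that same transform appear as glTranslatef(...); glRotatef(...); draw(); - the opposite of the order in which they act on the vertices.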

It's all a bit confusing, but I think the first thing you need to understand is that OpenGL and D3D/DirectX are fundamentally the same in terms of how they deal with transforms. I say 'fundamentally' because there are a number of superficial differences - for example, D3D maintains separate world and view matrices, while OpenGL combines them into a single modelview matrix - but the concepts are essentially the same.

Quote:
Original post by jyk
Quote:
Original post by Basiror
For my personal projects I multiply from the right by convention.
It's just more intuitive and most frameworks I have worked with do it the same way.
I'm kind of nitpicking here, but for the benefit of the OP I'd like to suggest that neither left-multiplication nor right-multiplication is any more intuitive than the other. One or the other may be more intuitive to a particular person (depending on how they prefer to think about sequential transforms), but neither is objectively better than the other (or at least I haven't come across any convincing arguments to this effect).

Also, I personally find the terms 'right multiply' and 'left multiply' to be somewhat confusing; the terms 'row-vector notation' and 'column-vector notation' are, on the other hand, unambiguous.


The western world writes from left to right.
So let's say you multiply from the left: you'd have to write down the entire matrix stack again if you forgot a transformation and there was no room left to insert it.

Just an example, but frankly, in every math/computer graphics lecture I have attended so far we multiply from the right.

Quote:
Original post by Basiror
The western world writes from left to right.
So let's say you multiply from the left: you'd have to write down the entire matrix stack again if you forgot a transformation and there was no room left to insert it.

Just an example, but frankly, in every math/computer graphics lecture I have attended so far we multiply from the right.
Just to make sure I'm not misunderstanding, when you say 'multiply from the right' are you referring to column-vector notation, or row-vector notation?

Quote:
Original post by jyk
I've heard people say that, but I'm not exactly sure what they mean by it, and in any case I would argue that it's not really accurate (OpenGL and DirectX deal with transforms in basically the same way).

I think what people are referring to here is the difference in notational convention between the two APIs (row-vector vs. column-vector notation) and the implications for multiplication order.

When using the DirectX math library this is directly evident, but in OpenGL everything happens 'under the hood'. It could probably be argued that OpenGL itself doesn't really assume a notational convention; rather, it is simply the case that transforms are applied in the opposite of the order in which the corresponding function calls appear in the code. (Most OpenGL references use column-vector notation, however, so this is how people tend to think of things when working with OpenGL transform functions.)

It's all a bit confusing, but I think the first thing you need to understand is that OpenGL and D3D/DirectX are fundamentally the same in terms of how they deal with transforms. I say 'fundamentally' because there are a number of superficial differences - for example, D3D maintains separate world and view matrices, while OpenGL combines them into a single modelview matrix - but the concepts are essentially the same.


I feel bad that I am still pretty confused.
I have tried reading quite a number of articles, but they seem to touch only on simpler transformations.
I am not sure when I have done a transformation correctly and when I have done it incorrectly.

Is there anywhere I can read up on this?

Thanks.

Quote:
Original post by jyk
Quote:
Original post by Basiror
The western world writes from left to right.
So let's say you multiply from the left: you'd have to write down the entire matrix stack again if you forgot a transformation and there was no room left to insert it.

Just an example, but frankly, in every math/computer graphics lecture I have attended so far we multiply from the right.
Just to make sure I'm not misunderstanding, when you say 'multiply from the right' are you referring to column-vector notation, or row-vector notation?


column

Quote:
Original post by juxie
Quote:
Original post by jyk
I've heard people say that, but I'm not exactly sure what they mean by it, and in any case I would argue that it's not really accurate (OpenGL and DirectX deal with transforms in basically the same way).

I think what people are referring to here is the difference in notational convention between the two APIs (row-vector vs. column-vector notation) and the implications for multiplication order.

When using the DirectX math library this is directly evident, but in OpenGL everything happens 'under the hood'. It could probably be argued that OpenGL itself doesn't really assume a notational convention; rather, it is simply the case that transforms are applied in the opposite of the order in which the corresponding function calls appear in the code. (Most OpenGL references use column-vector notation, however, so this is how people tend to think of things when working with OpenGL transform functions.)

It's all a bit confusing, but I think the first thing you need to understand is that OpenGL and D3D/DirectX are fundamentally the same in terms of how they deal with transforms. I say 'fundamentally' because there are a number of superficial differences - for example, D3D maintains separate world and view matrices, while OpenGL combines them into a single modelview matrix - but the concepts are essentially the same.


I feel bad that I am still pretty confused.
I have tried reading quite a number of articles, but they seem to touch only on simpler transformations.
I am not sure when I have done a transformation correctly and when I have done it incorrectly.

Is there anywhere I can read up on this?

Thanks.


Do you have a math program like Octave installed?
I would suggest you do some transformations manually and examine the results, to get an intuition for how they behave.
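
The same kind of experiment also works with the CMatrix16 class from this thread (a throwaway sketch; the headers are the ones posted above):

#include <iostream>
#include "Matrix16.h"
#include "Vector4.h"

int main()
{
	CMatrix16 R = CMatrix16::Rotation(90, 0, 1, 0);
	CMatrix16 T = CMatrix16::Translation(20, 0, 0);

	// With the row-vector convention (v' = v * M), T * R swings the
	// translated point around world Y, while R * T does not:
	cout << "T * R =" << endl << (T * R) << endl;
	cout << "R * T =" << endl << (R * T) << endl;

	CVector4 p(0, 0, 0);
	cout << "origin through T * R: " << (p * (T * R)) << endl;
	return 0;
}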

Quote:
Original post by Basiror

Do you have a math program like Octave installed?
I would suggest you do some transformations manually and examine the results, to get an intuition for how they behave.


I have just downloaded Octave.
I will give it a try, and hopefully it will help me understand matrices better.
Thanks, everyone.

Quote:
Original post by Basiror
Quote:
Original post by jyk
Just to make sure I'm not misunderstanding, when you say 'multiply from the right' are you referring to column-vector notation, or row-vector notation?
column
Oh, ok. Well, I'm with you on column-vector notation, and it does seem that most academic references use this convention (as do, of course, most OpenGL references). However, since DirectX (which uses row-vector notation) is so prevalent, I would guess that row-vector and column-vector notation are used about equally overall.

As far as intuitiveness goes, row vector advocates point out that sequences of transforms read naturally from left to right when written using row-vector notation, while column vector advocates sometimes use the counter-argument that column vector notation closely mirrors function composition.
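
(To illustrate both arguments with my own example: in row-vector notation v' = v * R * T reads left to right in the order the transforms act, while in column-vector notation v' = T(R(v)) = (T * R) * v looks exactly like composing the functions T and R.)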

Again, I tend to use column vectors myself, but I'm still not sure if one or the other convention can be said to be more intuitive (or more prevalent overall) than the other.

