Just a portion of the whole thing I'm working on (implementing most of the pipeline), but since I'm doing it in pieces, here's where I'm at. I had some 'camera' functions that could translate, rotate, or translate while maintaining a fixed view, like so:
void Camera::MoveCamera()
{
if( (moveLeft || moveRight) && !(moveLeft && moveRight) )
{
float s = (moveRight ? speed : -speed);
Vector3 vVector = view - pos;
Vector3 vOrthoVector;
vOrthoVector.v[0] = -vVector.v[2];
vOrthoVector.v[2] = vVector.v[0];
pos.v[0] = pos.v[0] + vOrthoVector.v[0] * s;
pos.v[2] = pos.v[2] + vOrthoVector.v[2] * s;
if( !viewLocked )
{
view.v[0] = view.v[0] + vOrthoVector.v[0] * s;
view.v[2] = view.v[2] + vOrthoVector.v[2] * s;
}
}
if( (moveForward || moveBack) && !(moveForward && moveBack) )
{
float s = (moveForward ? speed : -speed);
Vector3 vVector = view - pos;
pos.v[0] = pos.v[0] + vVector.v[0] * s;
pos.v[2] = pos.v[2] + vVector.v[2] * s;
if( !viewLocked )
{
view.v[0] = view.v[0] + vVector.v[0] * s;
view.v[2] = view.v[2] + vVector.v[2] * s;
}
}
}
void Camera::Rotate()
{
if(!viewLocked)
{
if( (rotateLeft || rotateRight) && !(rotateLeft && rotateRight) )
{
float s = (rotateLeft ? -speed : speed);
Vector3 vVector = view - pos;
view.v[2] = (float)(pos.v[2] + sin(s)*vVector.v[0] + cos(s)*vVector.v[2]);
view.v[0] = (float)(pos.v[0] + cos(s)*vVector.v[0] - sin(s)*vVector.v[2]);
}
if( (rotateUp || rotateDown) && !(rotateUp && rotateDown) )
{
float s = (rotateUp ? -speed : speed);
Vector3 vVector = view - pos;
view.v[2] = (float)(pos.v[2] + sin(s)*vVector.v[1] + cos(s)*vVector.v[2]);
view.v[1] = (float)(pos.v[1] + cos(s)*vVector.v[1] - sin(s)*vVector.v[2]);
}
}
}
Worked perfectly, using this drawing implementation (in the display func):
// Working reference path: reset MODELVIEW, update the camera from input,
// then let GLU build the view transform directly from pos/view/up.
glMatrixMode( GL_MODELVIEW );
glLoadIdentity();
camera.MoveCamera();
camera.Rotate();
// gluLookAt post-multiplies the current (identity) matrix with a rotation
// whose rows are side / up / -forward, followed by a translation of -eye.
gluLookAt(camera.pos.v[0], camera.pos.v[1], camera.pos.v[2],
camera.view.v[0], camera.view.v[1], camera.view.v[2],
camera.up.v[0], camera.up.v[1], camera.up.v[2]);
// ...cut out code for object rotation - works fine... //
// ...cut out code for object rotation - works fine... //
Now, here is my replacement implementation segment:
// Replacement path: build the combined modelview matrix by hand, then
// upload it with glLoadMatrixf instead of using gluLookAt.
glMatrixMode( GL_MODELVIEW );
glLoadIdentity();
make4x4Identity( obj.transform );
make4x4Identity( camera.transform );
// ...cut out code for object rotation - works fine... //
//// ---- Camera transform ----------------
camera.MoveCamera();
camera.Rotate();
// Builds the gluLookAt-style view matrix into camera.transform.
camera.CreateTransformFromVectors();
// apply camera transform
// NOTE(review): if MultiplyTransformByMatrix post-multiplies (A = A * B),
// this line computes Model * View, but the fixed-function equivalent is
// ModelView = View * Model -- the camera matrix must end up on the LEFT
// of the object's transform. In the gluLookAt version the view matrix is
// put on the stack BEFORE the object transform is multiplied in; this
// reversed operand order is the most likely cause of the wrong result.
// Confirm which side MultiplyTransformByMatrix multiplies on.
obj.MultiplyTransformByMatrix(camera.transform);
//// ---- Camera transform ----------------
glLoadMatrixf( obj.transform );
Obviously I'm looking to just create a knock-off gluLookAt() method. Here it is:
// Hand-rolled gluLookAt(): derives an orthonormal camera basis from
// pos/view/up and folds the resulting view transform into this camera's
// transform (rotation, then a translation of -pos).
void Camera::CreateTransformFromVectors()
{
Vector3 forward; // the direction the camera is pointing
Vector3 camUp; // the upward direction of the camera
Vector3 side; // vector pointing out from the side of the camera
float m[16];
// Build the basis exactly as gluLookAt does: normalize forward, take
// side = forward x up, then recompute up = side x forward.
forward = view - pos;
camUp = up;
forward = Normalize(forward);
side = Cross(forward, camUp);
side = Normalize(side);
camUp = Cross(side, forward);
camUp = Normalize(camUp);
make4x4Identity(m);
// OpenGL matrices are column-major, so m[0], m[4], m[8] form the first
// ROW. Rows here are side / camUp / -forward, which matches the matrix
// documented for gluLookAt -- the rotation part itself looks correct.
m[0] = side.v[0];
m[4] = side.v[1];
m[8] = side.v[2];
m[1] = camUp.v[0];
m[5] = camUp.v[1];
m[9] = camUp.v[2];
m[2] = -forward.v[0];
m[6] = -forward.v[1];
m[10] = -forward.v[2];
// NOTE(review): gluLookAt is equivalent to glMultMatrix(R) followed by
// glTranslate(-eye), and BOTH of those POST-multiply the current matrix
// (result = current * R * T). This sequence reproduces gluLookAt only if
// MultiplyTransformByMatrix and ApplyTranslation also post-multiply. If
// ApplyTranslation pre-multiplies, you get T * R instead of R * T, which
// translates in world space rather than eye space -- confirm the operand
// order of both helpers (their bodies are not shown here). Also check the
// call site: the finished camera.transform must be applied to the LEFT of
// the object's model transform (ModelView = View * Model).
MultiplyTransformByMatrix(m);
ApplyTranslation( CreateVector3(-pos.v[0], -pos.v[1], -pos.v[2]) );
}
The question I have is this: what's wrong with my lookAt function such that it's not creating the right transform matrix? I haven't touched the MoveCamera() or Rotate() functions at all — and, as I said, I know the math for them is right because everything works when I use gluLookAt().
Help!