bool OBB_OBB_Intersection(OBB* A, OBB* B, float& s) { float ra, rb; XMMATRIX R, AbsR; // Compute rotation matrix expressing b in a’s coordinate frame for (int i = 0; i < 3; i++) for (int j = 0; j < 3; j++) R.r[i].m128_f32[j] = XMVector3Dot(A->GetOrientation().r[i], B->GetOrientation().r[i]).m128_f32[0]; // Compute translation vector t XMVECTOR t = (XMLoadFloat3(&B->GetCenter()) - XMLoadFloat3(&A->GetCenter())); // Bring translation into a’s coordinate frame t = XMVectorSet(XMVector3Dot(t, A->GetOrientation().r[0]).m128_f32[0], XMVector3Dot(t, A->GetOrientation().r[1]).m128_f32[0], XMVector3Dot(t, A->GetOrientation().r[2]).m128_f32[0], 1.0f); // Compute common subexpressions. Add in an epsilon term to // counteract arithmetic errors when two edges are parallel and // their cross product is (near) null const float EPSILON = 1.0e-6f; for (int i = 0; i < 3; i++) for (int j = 0; j < 3; j++) AbsR.r[i].m128_f32[j] = abs(R.r[i].m128_f32[j]) + EPSILON; // Test axes L = A0, L = A1, L = A2 for (int i = 0; i < 3; i++) { ra = XMLoadFloat3(&A->GetExtents()).m128_f32[i]; rb = B->GetExtents().x * AbsR.r[i].m128_f32[0] + B->GetExtents().y * AbsR.r[i].m128_f32[1] + B->GetExtents().z * AbsR.r[i].m128_f32[2]; if (abs(t.m128_f32[i]) > ra + rb) return false; s = abs(t.m128_f32[i]) - (ra + rb); } // Test axes L = B0, L = B1, L = B2 for (int i = 0; i < 3; i++) { ra = A->GetExtents().x * AbsR.r[0].m128_f32[i] + A->GetExtents().y * AbsR.r[1].m128_f32[i] + A->GetExtents().z * AbsR.r[2].m128_f32[i]; rb = XMLoadFloat3(&B->GetExtents()).m128_f32[i]; if (abs(t.m128_f32[0] * R.r[0].m128_f32[i] + t.m128_f32[1] * R.r[1].m128_f32[i] + t.m128_f32[2] * R.r[2].m128_f32[i]) > ra + rb) return false; s = abs(t.m128_f32[0] * R.r[0].m128_f32[i] + t.m128_f32[1] * R.r[1].m128_f32[i] + t.m128_f32[2] * R.r[2].m128_f32[i]) - (ra + rb); } // Test axis L = A0 x B0 ra = A->GetExtents().y * AbsR.r[2].m128_f32[0] + A->GetExtents().z * AbsR.r[1].m128_f32[0]; rb = B->GetExtents().y * AbsR.r[0].m128_f32[2] + 
B->GetExtents().z * AbsR.r[0].m128_f32[1]; if (abs(t.m128_f32[2] * R.r[1].m128_f32[0] - t.m128_f32[1] * R.r[2].m128_f32[0]) > ra + rb) return false; s = abs(t.m128_f32[2] * R.r[1].m128_f32[0] - t.m128_f32[1] * R.r[2].m128_f32[0]) - (ra + rb); // Test axis L = A0 x B1 ra = A->GetExtents().y * AbsR.r[2].m128_f32[1] + A->GetExtents().z * AbsR.r[1].m128_f32[1]; rb = B->GetExtents().x * AbsR.r[0].m128_f32[2] + B->GetExtents().z * AbsR.r[0].m128_f32[0]; if (abs(t.m128_f32[2] * R.r[1].m128_f32[1] - t.m128_f32[1] * R.r[2].m128_f32[1]) > ra + rb) return false; s = abs(t.m128_f32[2] * R.r[1].m128_f32[1] - t.m128_f32[1] * R.r[2].m128_f32[1]) - (ra + rb); // Test axis L = A0 x B2 ra = A->GetExtents().y * AbsR.r[2].m128_f32[2] + A->GetExtents().z * AbsR.r[1].m128_f32[2]; rb = B->GetExtents().x * AbsR.r[0].m128_f32[1] + B->GetExtents().y * AbsR.r[0].m128_f32[0]; if (abs(t.m128_f32[2] * R.r[1].m128_f32[2] - t.m128_f32[1] * R.r[2].m128_f32[2]) > ra + rb) return false; s = abs(t.m128_f32[2] * R.r[1].m128_f32[2] - t.m128_f32[1] * R.r[2].m128_f32[2]) - (ra + rb); // Test axis L = A1 x B0 ra = A->GetExtents().x * AbsR.r[2].m128_f32[0] + A->GetExtents().z * AbsR.r[0].m128_f32[0]; rb = B->GetExtents().y * AbsR.r[1].m128_f32[2] + B->GetExtents().z * AbsR.r[1].m128_f32[1]; if (abs(t.m128_f32[0] * R.r[2].m128_f32[0] - t.m128_f32[2] * R.r[0].m128_f32[0]) > ra + rb) return false; s = abs(t.m128_f32[0] * R.r[2].m128_f32[0] - t.m128_f32[2] * R.r[0].m128_f32[0]) - (ra + rb); // Test axis L = A1 x B1 ra = A->GetExtents().x * AbsR.r[2].m128_f32[1] + A->GetExtents().z * AbsR.r[0].m128_f32[1]; rb = B->GetExtents().x * AbsR.r[1].m128_f32[2] + B->GetExtents().z * AbsR.r[1].m128_f32[0]; if (abs(t.m128_f32[0] * R.r[2].m128_f32[1] - t.m128_f32[2] * R.r[0].m128_f32[1]) > ra + rb) return false; s = abs(t.m128_f32[0] * R.r[2].m128_f32[1] - t.m128_f32[2] * R.r[0].m128_f32[1]) - (ra + rb); // Test axis L = A1 x B2 ra = A->GetExtents().x * AbsR.r[2].m128_f32[2] + A->GetExtents().z * AbsR.r[0].m128_f32[2]; rb 
= B->GetExtents().x * AbsR.r[1].m128_f32[1] + B->GetExtents().y * AbsR.r[1].m128_f32[0]; if (abs(t.m128_f32[0] * R.r[2].m128_f32[2] - t.m128_f32[2] * R.r[0].m128_f32[2]) > ra + rb) return false; s = abs(t.m128_f32[0] * R.r[2].m128_f32[2] - t.m128_f32[2] * R.r[0].m128_f32[2]) - (ra + rb); // Test axis L = A2 x B0 ra = A->GetExtents().x * AbsR.r[1].m128_f32[0] + A->GetExtents().y * AbsR.r[0].m128_f32[0]; rb = B->GetExtents().y * AbsR.r[2].m128_f32[2] + B->GetExtents().z * AbsR.r[2].m128_f32[1]; if (abs(t.m128_f32[1] * R.r[0].m128_f32[0] - t.m128_f32[0] * R.r[1].m128_f32[0]) > ra + rb) return false; s = abs(t.m128_f32[1] * R.r[0].m128_f32[0] - t.m128_f32[0] * R.r[1].m128_f32[0]) - (ra + rb); // Test axis L = A2 x B1 ra = A->GetExtents().x * AbsR.r[1].m128_f32[1] + A->GetExtents().y * AbsR.r[0].m128_f32[1]; rb = B->GetExtents().x * AbsR.r[2].m128_f32[2] + B->GetExtents().z * AbsR.r[2].m128_f32[0]; if (abs(t.m128_f32[1] * R.r[0].m128_f32[1] - t.m128_f32[0] * R.r[1].m128_f32[1]) > ra + rb) return false; s = abs(t.m128_f32[1] * R.r[0].m128_f32[1] - t.m128_f32[0] * R.r[1].m128_f32[1]) - (ra + rb); // Test axis L = A2 x B2 ra = A->GetExtents().x * AbsR.r[1].m128_f32[2] + A->GetExtents().y * AbsR.r[0].m128_f32[2]; rb = B->GetExtents().x * AbsR.r[2].m128_f32[1] + B->GetExtents().y * AbsR.r[2].m128_f32[0]; if (abs(t.m128_f32[1] * R.r[0].m128_f32[2] - t.m128_f32[0] * R.r[1].m128_f32[2]) > ra + rb) return false; s = abs(t.m128_f32[1] * R.r[0].m128_f32[2] - t.m128_f32[0] * R.r[1].m128_f32[2]) - (ra + rb); // Since no separating axis is found, the OBBs must be intersecting return true; } int main() { float sep; OBB objA(XMFLOAT3(0,10,10),XMFLOAT3(.5f,.5f,.5f), XMMatrixRotationY(0)), objB(XMFLOAT3(0,11.8f,10.0f),XMFLOAT3(.5f,.5f,.5f), XMMatrixRotationY(0)); if (OBB_OBB_Intersection(&objA, &objB, sep)) cout << "intersecting is found; a separation" << sep << endl; system("pause"); return 0; }

]]>

I have two curves. One is hand-drawn and the other is a smoothed version of the hand-drawn one. The data of each curve is stored in two separate vector arrays.

Time Delta is also stored in the handdrawn curve vector, so i can replay the drawing process and so that it looks natural.

Now i need to transfer the Time Delta from Curve 1 (Raw input) to Curve 2 (the already smoothed curve).

Sometimes the size of the first vector is larger and sometimes smaller than the second vector.

(Depends on the input draw speed)

So my question is: How do i fill the whole vector PenSmoot.time with the correct interpolated values?

**Case 1: Input vector is larger**

PenInput.time[0] = 0 PenSmoot.time[0] = 0 PenInput.time[1] = 5 PenSmoot.time[1] = ? PenInput.time[2] = 12 PenSmoot.time[2] = ? PenInput.time[3] = 2 PenSmoot.time[3] = ? PenInput.time[4] = 50 PenSmoot.time[4] = ? PenInput.time[5] = 100 PenInput.time[6] = 20 PenInput.time[7] = 3 PenInput.time[8] = 9 PenInput.time[9] = 33

**Case 2: Input vector is smaller**

PenInput.time[0] = 0 PenSmoot.time[0] = 0 PenInput.time[1] = 5 PenSmoot.time[1] = ? PenInput.time[2] = 12 PenSmoot.time[2] = ? PenInput.time[3] = 2 PenSmoot.time[3] = ? PenInput.time[4] = 50 PenSmoot.time[4] = ? PenSmoot.time[5] = ? PenSmoot.time[6] = ? PenSmoot.time[7] = ? PenSmoot.time[8] = ? PenSmoot.time[9] = ?

]]>

I have several questions but I'll just start with the hopefully easy ones.

The first is in reference to section 3, equation (3) from the paper. Here is that section for reference:

I am focusing on objects composed of affine frames, so 12 DOF per frame.

Question 1: In equation (3), the p bar variable (undeformed position) does not have a subscript on it, which sounds to me like there is only one no matter how many frames there are. However, my understanding of traditional skinning is that there is in fact one "undeformed" position per frame. Should there not be an "i" subscript on p bar? Is this possibly an error in the paper or am I missing something here?

Question 2: With regards to "p bar star" (vector of polynomials), specifically p^1, they have [1, x, y, z]^T. Is the x, y, z here referring to the x,y,z of the undeformed position? Assuming the "A" matrix is a typical 3x4 affine matrix, the translation part usually goes in the last column. If so, why would they have a "1" as the first component of "p bar star"? Shouldn't the 1 be the last component ([x, y, z, 1])?

Thanks,

Chris

]]>The common approach I know of to this is sweeping/raycasting ahead to find the next piece of ground they will hit or should be on.

I could dive in but I'm also aware that there are a number of edge cases and pitfalls to be aware of. I've tried to find some tutorials that look into it but haven't been able to turn up anything at all. Does anyone know a good reference for this?

Cheers

]]>

The easy way to decompose an affine transformation matrix is to:

1) Compute the length of the column (or row), that gives the scale on X, Y and Z.

2) Extract the last column (or row) to have the translation on X, Y and Z.

3) Last part is to normalize the 3x3 and create the quaternion from it.

Is it safe in all cases for an affine transformation matrix?

What about shear/skew?

Thanks! ]]>

Box2d has discrete & continuous collision detection. We first integrate our shapes' velocity, and using a 'conservative advancement' algorithm, we advance the shapes & find the time of impact (TOI).

It then uses GJK & barycentric coordinates to find the closest point on the shapes. If they're overlapping we use EPA or SAT (and don't get a contact point? Or run GJK again?)

It then builds an incremental manifold (i probably have this wrong) adding a point each frame.

Then once all collision points have been collected for the frame, they go through the collision solver that takes each collision, & adds an impulse to the shape to bring the relative velocity <= 0. We go through the list of contacts several times (the iterative rigid body solver), to stabilise the simulation.

I'm not sure what it uses for discrete collisions?

What things I don't understand is

1. When do we know we don't need a collision point anymore? When the collision points are moving away?

2. How do we know a contact point is unique with gjk/barycentric coords? Or do we just add them and wait till they get pulled off the list?

3. If the toi is less than one & we want to use the rest up, do we run the whole collision routine again for a max say 4 times, to use it up, and where does this fit into the iterative solver?

I kind of just want to get an overview of how a physics engine fits together and the parts or algorithms we need. Its just for a hobby engine, and with understanding as a priority. Any help would be great, and sorry if the question is a bit over simplified.

]]>

I've found a lot of great articles here and on the web around FPS style games and using the 'cameras' frustum to calculate if something is visible or not.

What my issue is:

Camera is largely irrelevant as i have a math/server side question: given player 1 is at 0,0,0, and player 2 is at 0,1,1 there are no obstacles - given each player has a frustum facing each other - they 'can see' each other (two people looking at each other on a flat surface).

If i introduce obstacles and alter angles - say now P1 can see P2 but P2 cannot see in return - either due to occlusion / blocking by another 3d world object - or for other reasons (angles/etc).

I cannot wrap my head around how i can do these calculations server side with math only - my game has a core mechanic riding on it - but i'm not even sure how to begin researching as most topics seem to worry about culling for the purposes of frame rates etc.

Essentially (and assume this is true): i need server side to know geometries, positions and fields of view; it should then be able to say 'can p1 see p2?', 'can p2 see p1?' - ideally just using the underlying math - something i'm struggling to get when i look at the likes of a unity thread; https://answers.unity.com/questions/8003/how-can-i-know-if-a-gameobject-is-seen-by-a-partic.html

*For the record i'm using nodeJS server side (for now); and front end is JS/canvas - i don't mind too much about implementation; if someone has any idea what math i should research or begin looking for it'd be a massive help*

thank you, fellow gamedevs

]]>My pleasure to be here, My question is a bit embarrassing,

I am a guy with a good background in math/physics, but I only have knowledge of C programming (and a little bit of C++) and have only written small programs for console applications. I have not done serious things in programming, but at least I am not scared of it.

The thing is that I purchased Dr Eberly's Game physics and the book was in perfect condition but there was no cd-rom. I contacted him and he helped me a lot.He provided me with the content of CD_ROM and introduced me to this forum.

But I am still badly stuck. I know nothing about project and sln and these kind of jargons and when I follow the instructions I get either no answer or lots of errors.

I have installed non-commercial version of visual studio 2019 and still no result.

I totally gave up but wanted to give one more try.

First of all is there anyone who struggled like me? What was the first step on running those physics related simulations?

Will you please help me get started in right direction?

Thanks a lot in advance for any and all help.

]]>Assume, after pass 1 we have Color1 for Pixel (1, 1), after pass 2 we have Color2 for Pixel (1,1) - how should Color1 and Color2 be combined to result in an accurate color?

]]>glm::vec4 mouseClip = {ev.cursor.x * 2 / 640.f - 1, 1 - ev.cursor.y * 2 / 480.f, 0, 1}; glm::mat4 inv = glm::inverse(game::camera->projection * game::camera->transform); glm::vec4 p = inv * mouseClip; p.x /= p.w; p.y /= p.w; p.z /= p.w; glm::vec3 direction = glm::normalize(glm::vec3{p});

I can guarantee that mouseClip's x and y is correct (from [-1, -1] to [1, 1]).

glm::vec3 intPos, intNor; if(glm::intersectRaySphere(glm::vec3{game::camera->transform[3]}, direction, glm::vec3{0, 0, 0}, 1, intPos, intNor)) { printf("intersect\n"); }

intersectRaySphere does as it says; here is its documentation.

Problem is that.. well.. basically everything is off. There is nothing I can accurately describe, it seems as if the camera's transformation is always exaggerated. I know this is all very vague, please ask questions if you have any.

]]>I have a simple (probably) math problem but I'm not very good at math. My question is if there's a ball that initially rises 2 meters per second upwards and the rising slows down one meter on each second, how long it takes for the ball to stop rising?

formula for this would be nice!

thanks!

// Sequential-impulse contact solver step for one manifold between bodies A and B.
// For each contact point it applies an impulse along the contact normal that
// drives the relative normal velocity toward zero, plus a Baumgarte-style bias
// that bleeds positional (penetration) error back in over time.
//
// NOTE(review): the 1.17f factor below is an ad-hoc fudge. The restitution
// term (the commented-out `velocityBias += -e * Cdot;` line) is disabled, so
// bounciness here presumably comes only from this multiplier — confirm, and
// prefer re-enabling restitution over scaling the impulse.
void Manifold::ApplyImpulse( float dt )
{
	for(int i = 0; i < contact_count; ++i)
	{
		// Contact point in world space (w = 1 so it transforms as a point).
		XMVECTOR p = XMVectorSet(contacts[i].x,contacts[i].y,contacts[i].z, 1.0f);
		// World-space velocity of each body evaluated at the contact point.
		XMVECTOR padot = A->GetVelocityAtPoint(p);
		XMVECTOR pbdot = B->GetVelocityAtPoint(p);
		XMVECTOR n = normal;
		// Lever arms from each body's position to the contact point.
		XMVECTOR ra = (p - A->GetPosition());
		XMVECTOR rb = (p - B->GetPosition());
		// Inverse masses; mass <= 0 is treated as static (inverse mass 0).
		float term1 = A->GetMass()>0.0f ? (1.0f / A->GetMass()) : 0.0f;
		float term2 = B->GetMass()>0.0f ? (1.0f / B->GetMass()) : 0.0f;
		float invdt = dt>0.0f ? (1/dt) : 0.0f;
		// Add constraint error to the velocity constraint.
		// C clamps penetration beyond a 0.015 slop; the *.1f scales the
		// Baumgarte correction. Assumes `penetration` is negative when the
		// bodies overlap — TODO confirm the sign convention.
		float C = min(0.0f, penetration + 0.015f);
		float velocityBias = 0.0f;
		velocityBias += -invdt * C *.1f;
		// Compute normal mass: K is the effective (scalar) mass of the
		// constraint along n, including the rotational inertia terms.
		XMVECTOR rnA = XMVector3Cross(ra, n);
		XMVECTOR rnB = XMVector3Cross(rb, n);
		float K = term1 + term2 +
			XMVector3Dot(rnA, XMVector4Transform(rnA, A->GetIInverse())).m128_f32[0] +
			XMVector3Dot(rnB, XMVector4Transform(rnB, B->GetIInverse())).m128_f32[0];
		float invK = K > 0.0f ? 1.0f / K : 0.0f;
		// Relative velocity at the contact; Cdot is its component along n.
		XMVECTOR dv = padot - pbdot;
		float Cdot = XMVector3Dot(dv, n).m128_f32[0];
		// Disabled restitution term (e = coefficient of restitution):
		//if (Cdot < -1.0f) { velocityBias += -e * Cdot; }
		float impulse = -invK * (Cdot - velocityBias);
		// NOTE(review): 1.17f fudge factor — see header comment. Also note
		// the impulse is not clamped to be non-negative (accumulated-impulse
		// clamping), so contacts can "pull" — TODO confirm intended.
		XMVECTOR P = 1.17f*impulse * n;
		A->AddImpulse(P);
		B->AddImpulse(-P);
		A->AddImpulsiveTorque(XMVector3Cross(ra, P));
		B->AddImpulsiveTorque(-XMVector3Cross(rb, P));
		/* Disabled friction (tangent) constraints.
		   NOTE(review): likely cause of the sliding seen when this block is
		   enabled — `hi`/`lo` (the mu * normal-impulse friction cone bounds)
		   are computed but `timpulse` is never clamped to [lo, hi], so the
		   tangential impulse is unbounded.
		tangent[0] = XMVector3Orthogonal(n);
		tangent[1] = XMVector3Cross(tangent[0], n);;
		// Add friction constraints.
		{
			padot = A->GetVelocityAtPoint(p);
			pbdot = B->GetVelocityAtPoint(p);
			ra = (p - A->GetPosition());
			rb = (p - B->GetPosition());
			XMVECTOR rt1A = XMVector3Cross(ra, tangent[0]);
			XMVECTOR rt1B = XMVector3Cross(rb, tangent[0]);
			XMVECTOR rt2A = XMVector3Cross(ra, tangent[1]);
			XMVECTOR rt2B = XMVector3Cross(rb, tangent[1]);
			float K1 = term1 + term2 +
				XMVector3Dot(rt1A, XMVector4Transform(rt1A, A->GetIInverse())).m128_f32[0] +
				XMVector3Dot(rt1B, XMVector4Transform(rt1B, B->GetIInverse())).m128_f32[0];
			float K2 = term1 + term2 +
				XMVector3Dot(rt2A, XMVector4Transform(rt2A, A->GetIInverse())).m128_f32[0] +
				XMVector3Dot(rt2B, XMVector4Transform(rt2B, B->GetIInverse())).m128_f32[0];
			tangentMasses[0] = K1 > 0.0f ? 1.0f / K1 : 0.0f;
			tangentMasses[1] = K2 > 0.0f ? 1.0f / K2 : 0.0f;
		}
		// Solve tangent constraints.
		for (int j = 0; j < 2; ++j)
		{
			float hi = sf * impulse;
			float lo = -hi;
			dv = padot - pbdot;
			Cdot = XMVector3Dot(tangent[j], dv).m128_f32[0];
			float timpulse = -tangentMasses[j] * Cdot;
			P = timpulse * tangent[j];
			A->AddImpulse(P);
			B->AddImpulse(-P);
			A->AddImpulsiveTorque(XMVector3Cross(ra, P));
			B->AddImpulsiveTorque(-XMVector3Cross(rb, P));
		}*/
	}
}

I had to multiply impulse by 1.17f to get bounciness. why?

I have attached two videos. with and without the tangent code (^^^^^ the code I have commented out)

with the tangent code all objects start to slide. why?

1st video >> with full apply impulse code

2nd video >> w/o tangent code

]]>Box2d uses the incident edge, reference edge, and clipping to determine a contact point. It's a very complex method and hard to understand.

However, matter.js use a very simply method to do the same thing. just find the vertex that contained by opposite polygon with hill-climbing:

var verticesB = SAT._findSupports(bodyA, bodyB, collision.normal), supports = []; // find the supports from bodyB that are inside bodyA if (Vertices.contains(bodyA.vertices, verticesB[0])) supports.push(verticesB[0]); if (Vertices.contains(bodyA.vertices, verticesB[1])) supports.push(verticesB[1]); // find the supports from bodyA that are inside bodyB if (supports.length < 2) { var verticesA = SAT._findSupports(bodyB, bodyA, Vector.neg(collision.normal)); if (Vertices.contains(bodyB.vertices, verticesA[0])) supports.push(verticesA[0]); if (supports.length < 2 && Vertices.contains(bodyB.vertices, verticesA[1])) supports.push(verticesA[1]); }

and it works well in demo.

My question is: why does box2d use such complex method? It's clear that matter.js's method is better. Or there are some potential shortcomings in matter.js's method?

]]>

In the example below:

**Screen 1** shows a non-rotated typical "top down 2D" scene, where the grey objects can be sorted based on their screen Y position, drawn from top to bottom, so in this case B, A, C.

**Screen 2** shows the situation after the player/camera rotates clockwise, with the original screen up vector (green) and the new up vector (red). In this situation, sorting by object Y position will not work, because A should now be drawn after C.

What I need to do is to sort the objects not only by their Y position, but also relative to the new up vector and/or camera rotation, but I'm unsure how to handle this. Appreciate any help you can give!

]]>

by floating i do not mean water floating but hover/air floating where the object is bouncing up and down like say a floating/flying island.

Whats the per frame computation of it?

thank you in advance

]]>In the second image, the program needs to determine to project line A onto the viewing line first. This allows B, when projected, to cover up the part of A that doesn't need to be seen. I cannot think of a way to determine which lines to project first, because for every idea I think of, I can come up with an example that it will not work with. For example, if we just do some sort of distance formula to let's say, the midpoint of every line, that will not work for something like in the third picture. In this case, B should be projected first so A can cover up the part of B.

Whatever the solution is, it must also work for any amount of lines in front of the player, so I just need the order in which to display the lines. Thank you.

]]>

So it feels actually 3d ?

For example if this is our ball texture:

Do i have to add actually 3 d models( sphere ) or is there another easy way

]]>

And i've seen people mention they use a LOD system so there is less performance wasted on distance meshes from the camera. But from the algorithm - this makes no sense.

The FFT equation requires a length (mesh vertex count along x and y) and resolution, if you change either one you get totally different result for the same input seed at any given time. So how exactly is that an optimisation or at all beneficial - it makes no sense.....

I can't seem to find many people who understand the math to answer this question which is such a frustration to say the least.

]]>