2D Texture Mapped Polygons in XNA

working_polygon.png

I'm trying to build a framework on top of XNA that lets me render 2D texture-mapped polygons. SpriteBatch is very fast and well optimized, but it only lets me draw quads. I have tried using the GraphicsDevice to render the polygons and then rendering all the quads afterwards, but it is giving me anomalies. The first picture above shows a really simple scene with a grass texture mapped onto a polygon, and everything renders perfectly. However, if I put a SpriteBatch Begin/End call sequence before or after drawing the polygon, I get results like this. Note: the lines are being drawn by SpriteBatch as they should be, but the texture coordinates seem to be forced into quads.

not_working.png

Here's the early and unfinished code I've written so far.


public class TexturedPolygon : gxtIDraw, IDisposable
{
private bool visible;
public bool Visible { get { return visible; } set { visible = value; } }

private float depth;
public float Depth { get { return depth; } set { depth = value; } }

private Color colorOverlay;
public Color ColorOverlay { get { return colorOverlay; } set { SetColor(value); } }

private gxtPolygon localPolygon;

private gxtPolygon worldPolygon;
public gxtPolygon Polygon { get { return worldPolygon; } set { worldPolygon = value; } }

private float rotation;
public float Rotation { get { return rotation; } set { SetRotation(value); } }

private Vector2 position;
public Vector2 Position { get { return position; } set { SetPosition(value); } }

private IndexBuffer indexBuffer;
private VertexBuffer vertexBuffer;
BasicEffect effect;

private Texture2D texture;
public Texture2D Texture { get { return texture; } set { texture = value; effect.Texture = value; } }

public bool TextureEnabled { get { return effect.TextureEnabled; } set { effect.TextureEnabled = value; } }

int[] indicesArray;
VertexPositionColorTexture[] verts;

public TexturedPolygon(gxtPolygon polygon, GraphicsDevice graphics)
{
visible = true;
depth = 0.0f;
colorOverlay = Color.White;
rotation = 0.0f;
position = Vector2.Zero;

indexBuffer = new IndexBuffer(graphics, typeof(int), 3 + ((polygon.NumVertices - 3) * 3), BufferUsage.WriteOnly);
vertexBuffer = new VertexBuffer(graphics, typeof(VertexPositionColorTexture), polygon.NumVertices, BufferUsage.WriteOnly);
effect = new BasicEffect(graphics);
effect.LightingEnabled = false;
effect.VertexColorEnabled = false; // important
effect.View = Matrix.CreateLookAt(new Vector3(0, 0, -1.0f), new Vector3(0.0f, 0.0f, 0.0f), -Vector3.Up);

effect.Projection = Matrix.CreateOrthographic(800.0f, 600.0f, 0.1f, 1.5f);
SetupPolygon(polygon);
Triangulate(polygon.v);
SetPosition(position);
SetRotation(rotation);
}

private void SetupPolygon(gxtPolygon polygon)
{
worldPolygon = polygon;

// calc local poly and AABB using centroid translation
Vector2 centroid = polygon.GetCentroid();
Vector2[] localVertices = new Vector2[polygon.NumVertices];
for (int i = 0; i < localVertices.Length; i++)
{
localVertices[i] = polygon.v[i] - centroid;
}
localPolygon = new gxtPolygon(localVertices);
//localAABB = gxtGeometry.ComputeAABB(localVertices);

// applies existing position and rotation characteristics to the
// world polygon
Matrix rotationMatrix;
Matrix.CreateRotationZ(rotation, out rotationMatrix);
for (int i = 0; i < polygon.NumVertices; i++)
{
worldPolygon.v[i] = Vector2.Transform(localPolygon.v[i], rotationMatrix) + position;
}

effect.World = Matrix.CreateWorld(new Vector3(position.X, position.Y, 0.0f), -Vector3.UnitZ, Vector3.Up);
}

private void Triangulate(Vector2[] vertices)
{
// setup vertex buffer
verts = new VertexPositionColorTexture[vertices.Length];
for (int i = 0; i < vertices.Length; i++)
{
verts[i] = new VertexPositionColorTexture(new Vector3(vertices[i].X, vertices[i].Y, 0.0f), colorOverlay, new Vector2(0.0f, 0.0f)); // texture coords are set later in CalculateUVCoords
}
vertexBuffer.SetData<VertexPositionColorTexture>(verts);

// setup index buffer, uses proper triangulation
List<int> indices = new List<int>(3 + ((vertices.Length - 3) * 3));
for (int i = 1, j = 2; j < vertices.Length; i = j, j++)
{
indices.Add(0);
indices.Add(i);
indices.Add(j);
}
indicesArray = indices.ToArray();
indexBuffer.SetData<int>(indicesArray);
}

public void SetRotation(float rad)
{
if (rotation == rad) return;

rotation = rad;
Matrix rotMat;
Matrix.CreateRotationZ(rotation, out rotMat);
for (int i = 0; i < worldPolygon.NumVertices; i++)
{
worldPolygon.v[i] = Vector2.Transform(localPolygon.v[i], rotMat) + position;
}
}

public void Translate(Vector2 t)
{
if (t == Vector2.Zero) return;

position += t;
worldPolygon.Translate(t);
// effect.world must be the centroid
effect.World = Matrix.CreateWorld(new Vector3(position.X, position.Y, 0.0f), -Vector3.UnitZ, Vector3.Up);
}

public void SetColor(Color color)
{
if (color == colorOverlay)
return;
colorOverlay = color;
for (int i = 0; i < verts.Length; i++)
{
verts[i].Color = colorOverlay;
}
vertexBuffer.SetData<VertexPositionColorTexture>(verts);
}

public void SetPosition(Vector2 pos)
{
if (position == pos) return;

Vector2 prevPos = position;
Vector2 trans = pos - prevPos;
Translate(trans);
}

public void CalculateUVCoords()
{
Vector2 topLeft = new Vector2(-texture.Width * 0.5f, -texture.Height * 0.5f);
Vector2 oneOverSizeVector = new Vector2(1.0f / texture.Width, 1.0f / texture.Height);
for (int i = 0; i < verts.Length; i++)
{
verts[i].TextureCoordinate = Vector2.Multiply(localPolygon.v[i] - topLeft, oneOverSizeVector);
}
vertexBuffer.SetData<VertexPositionColorTexture>(verts);
}

public void Draw(GraphicsDevice graphics)
{
graphics.RasterizerState = RasterizerState.CullNone;
graphics.SetVertexBuffer(vertexBuffer);
graphics.Indices = indexBuffer;

foreach (EffectPass pass in effect.CurrentTechnique.Passes)
{
pass.Apply();
graphics.DrawIndexedPrimitives(PrimitiveType.TriangleList, 0, 0, vertexBuffer.VertexCount, 0, indicesArray.Length / 3);
}
}

public void Dispose()
{
texture.Dispose();
effect.Dispose();
vertexBuffer.Dispose();
indexBuffer.Dispose();
}
}


So, my major questions are as follows:
1) Can I use the GraphicsDevice and SpriteBatch together?
2) How might I fix this problem, if I can? Am I missing a clear or reset call or something?
3) I like to use front-to-back depth-based rendering, as is available in SpriteBatch. How might I accomplish this so render depths are correct for everything?
<shameless blog plug>
A Floating Point
</shameless blog plug>
OK, after more hacking away at it, I figured out the issue with the texture coordinates. I wasn't wrapping or clamping them when doing my calculations, so this was entirely my fault. It now works in conjunction with SpriteBatch, the only possible exception being the depth-based rendering.

3) I like to use front-to-back depth-based rendering, as is available in SpriteBatch. How might I accomplish this so render depths are correct for everything?


There are a few ways to do sorting:

  1. Always explicitly render things back to front
  2. If you never use blending (only fully opaque triangles or alpha test) you can use a Z-buffer for this, assuming you set it up properly.
  3. Sort your objects before drawing them.

There are a variety of ways to accomplish sorting, but the simplest to start out with is to add everything you are going to draw to a list (e.g. a List<TexturedPolygon>) and sort it by depth (e.g. List.Sort() with a custom comparison that compares depths), as in the sketch below. There are more advanced things you can get into if you have performance issues, like radix sorts, which are O(kn) instead of O(n log n). Or, if your game only has a small number of discrete depths that objects can be drawn at (e.g. background/enemies/player/foreground), you can keep a separate list for each, which means you don't have to sort at all, just draw the lists in the correct order. However, in my experience 2D games rarely have enough things on screen to make a simple depth sort take up a lot of time.
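For example, the simple list-based version might look something like this (just a sketch, assuming the TexturedPolygon class from the original post and a graphicsDevice reference in scope):

List<TexturedPolygon> drawList = new List<TexturedPolygon>();
// ... add everything you intend to draw this frame ...

// Sort by Depth ascending; flip the comparison if you want the opposite order.
drawList.Sort((a, b) => a.Depth.CompareTo(b.Depth));

foreach (TexturedPolygon poly in drawList)
{
    if (poly.Visible)
        poly.Draw(graphicsDevice);
}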

OK, after more hacking away at it, I figured out the issue with the texture coordinates. I wasn't wrapping or clamping them when doing my calculations, so this was entirely my fault. It now works in conjunction with SpriteBatch, the only possible exception being the depth-based rendering.

Also, do you mean you modified the texture coordinates you are drawing with now, or did you just fix the texture addressing state you're drawing with? You should be able to just modify the appropriate SamplerState.AddressU/AddressV value (or do the equivalent thing in an effect file) so they wrap instead of clamp; you shouldn't have to modify the actual vertex data you're sending to the hardware. Apologies if I misunderstood and you're already doing that ;)
The issue wasn't just your texture coordinates; they would've been fine as long as you set your sampler states to wrap rather than clamp. Remember that SpriteBatch does set render states (the sampler state for the first texture is set to clamp in U/V).
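For instance, right before drawing the polygon you could do something like this (a sketch using XNA 4.0's built-in state objects; texturedPolygon is just whatever instance of the class above you're drawing):

// SpriteBatch leaves sampler 0 set to clamp; switch it back to wrap so UV
// coordinates outside [0, 1] tile across the polygon instead of smearing.
graphics.SamplerStates[0] = SamplerState.LinearWrap;
texturedPolygon.Draw(graphics);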

When you're drawing after using a SpriteBatch, especially 3D geometry, make sure you reset the render states that SpriteBatch changes if you need them to be different. Shawn Hargreaves posted a helpful list of the states that are changed by drawing a SpriteBatch.
Also, SpriteBatch comes with a flag, SaveRenderStates (or something like that), that restores the render states at the End call back to what they were at the Begin call.

However, when you are rendering ANYTHING you must make sure your render states are set. Default render states cannot be depended on unless you know nothing else in your code changes them, and if someone else is going to use your code, it's very error-prone to expect them to know that.
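In XNA 4.0 terms, resetting the states SpriteBatch touches before doing your own drawing might look roughly like this (a sketch with the built-in state objects; substitute whatever your own rendering actually needs):

// States SpriteBatch changes in XNA 4.0; restore the ones your drawing depends on.
graphics.BlendState = BlendState.Opaque;
graphics.DepthStencilState = DepthStencilState.Default;
graphics.RasterizerState = RasterizerState.CullCounterClockwise;
graphics.SamplerStates[0] = SamplerState.LinearWrap;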

Game making is godlike

LinkedIn profile: http://ar.linkedin.com/pub/andres-ricardo-chamarra/2a/28a/272


A piece of advice: create a PolygonBatch object that you can draw your polygons with. Give it a list of polygons and have it draw them all in one batch; this way you minimize shader and render state changes.
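A minimal sketch of what such a PolygonBatch might look like, assuming the TexturedPolygon class from the original post (the class name and interface here are made up for illustration):

public class PolygonBatch
{
    private readonly GraphicsDevice graphics;
    private readonly List<TexturedPolygon> polygons = new List<TexturedPolygon>();

    public PolygonBatch(GraphicsDevice graphics)
    {
        this.graphics = graphics;
    }

    public void Begin()
    {
        polygons.Clear();
    }

    public void Draw(TexturedPolygon polygon)
    {
        polygons.Add(polygon);
    }

    public void End()
    {
        // Set the shared states once instead of once per polygon.
        graphics.RasterizerState = RasterizerState.CullNone;
        graphics.SamplerStates[0] = SamplerState.LinearWrap;

        // Sort by depth so everything still layers correctly.
        polygons.Sort((a, b) => a.Depth.CompareTo(b.Depth));

        foreach (TexturedPolygon poly in polygons)
        {
            if (poly.Visible)
                poly.Draw(graphics);
        }
    }
}

In a fuller version the batch would own a single shared BasicEffect and just swap the texture per polygon, rather than each polygon carrying its own effect as in the original code; that's where most of the state-change savings would come from.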



Also, SpriteBatch comes with a flag, SaveRenderStates (or something like that), that restores the render states at the End call back to what they were at the Begin call.

However, when you are rendering ANYTHING you must make sure your render states are set. Default render states cannot be depended on unless you know nothing else in your code changes them, and if someone else is going to use your code, it's very error-prone to expect them to know that.





That depends on which version of XNA the original poster is using. That enum was removed in XNA 4.0, which, judging by the code, appears to be the version in use.

On the topic of renderstates:

1. It's generally a better idea to just set the render states you need, e.g. if you don't care about which BlendState is set, then there's no need to change it after you draw a SpriteBatch.

2. As Nexus pointed out, batching so you don't have to change your states that much is a very good thing. This can also go hand in hand with sorting geometry, e.g. sort by material so you draw all the geometry that uses shader A, then shader B, and so on. I think shaders and textures are the two prime states to avoid switching every frame, although similar materials probably also have similar render states in most cases, so it may be better to just sort by material.

Edit: I'm referring to opaque objects here. You're probably not going to be able to batch as well for transparent/blended objects, which generally are sorted back-to-front regardless of their material.

3. Avoid redundant state setting. I cache state objects when I set them on the device, and each state object has a key associated with the values it represents. So if two objects have two different BlendStates but the states are, for all practical purposes, equal, the state only gets set on the device the first time (a rough sketch of this follows below). This method can break if you set states in your effect passes in your FX file, though (I prefer to manage them in the application).
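A rough sketch of that caching idea for BlendState (the string key and helper method here are made up for illustration; how you key the cache depends on how you build your state objects):

// Cache BlendStates by the values they represent so equal-but-distinct
// objects only cause one actual device state change.
private readonly Dictionary<string, BlendState> blendStateCache =
    new Dictionary<string, BlendState>();
private string currentBlendKey;

public void SetBlendState(GraphicsDevice device, string key, BlendState state)
{
    if (key == currentBlendKey)
        return; // the same values are already bound; skip the redundant set

    if (!blendStateCache.ContainsKey(key))
        blendStateCache.Add(key, state);

    device.BlendState = blendStateCache[key];
    currentBlendKey = key;
}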

