So I have the following method to generate rooms using what I assume is a flood fill algorithm:
private void SetupRooms() {
SimpleTimer t = new SimpleTimer();
List<Vector2> tilesToFloodFill = new List<Vector2>();
// first figure out which tiles need to be flood filled
for (int x = 0; x < _worldWidth; x++) {
for (int y = 0; y < _worldHeight; y++) {
if (_hasColliders[x, y]) {
_tiles[x, y].RoomIndex = WorldRoom.NO_ROOM_VALUE;
continue;
}
tilesToFloodFill.Add(new Vector2(x, y));
}
}
t.LogRunningTimeMilliseconds($"room flood fill first pass: ");
t.ResetStart();
Queue<Vector2> tilesToCheckAdjacentFor = new Queue<Vector2>();
int currentRoomIndex = 0;
int tilesProcessed = 0;
while (tilesToFloodFill.Count > 0) {
tilesToCheckAdjacentFor.Enqueue(tilesToFloodFill[0]);
tilesToFloodFill.Remove(tilesToFloodFill[0]);
while (tilesToCheckAdjacentFor.Count > 0) {
tilesProcessed++;
Vector2 tileToCheck = tilesToCheckAdjacentFor.Dequeue();
List<Vector2> adjacentTiles = GetAdjacentTilePositions(tileToCheck);
_tiles[(int) tileToCheck.x, (int) tileToCheck.y].RoomIndex = currentRoomIndex;
for (int i = 0; i < adjacentTiles.Count; i++) {
if (
_tiles[(int) adjacentTiles[i].x, (int) adjacentTiles[i].y].RoomIndex != WorldRoom.UNASSIGNED_ROOM_VALUE
) {
continue;
}
tilesToCheckAdjacentFor.Enqueue(adjacentTiles[i]);
tilesToFloodFill.Remove(adjacentTiles[i]);
_tiles[(int) adjacentTiles[i].x, (int) adjacentTiles[i].y].RoomIndex = currentRoomIndex;
}
}
_worldRooms.Add(new WorldRoom());
// current room should be filled, clean up for next room
currentRoomIndex++;
}
t.LogRunningTimeMilliseconds($"room flood fill (tiles processed: {tilesProcessed}): ");
Debug.Log($"total rooms: {currentRoomIndex}");
}
The general timing for this is:
- room flood fill first pass: : 1.8787ms
- room flood fill (tiles processed: 58463): : 6069.9302ms
While I only expect to have to run this algorithm on the entire map on the initial load of the map (and maybe a few special cases) I am still wondering if there is any way to optimize it to take less than 6 seconds to do it (I would ideally like to have maps 2 - 4 times bigger than the one I am currently testing on so I imagine it would be at 2 - 4 times slower on those maps).
Are there any optimization I can make here (including using a different algorithm) to optimize this process?