Sign in to follow this  
dmtuan

Are float4x4 arrays supported in Shader Model 2 (D3D9, vs_4_0_level_9_3)?

Recommended Posts

Hi,

 

I have this Vertex Shader...

 

Common.hlsl:

// Constant buffer to be updated by application per object (slot b0).
// Each float4x4 occupies four float4 constant registers.
cbuffer PerObject : register(b0)
{
    // Combined World * View * Projection matrix; takes object-space
    // positions straight to clip space in one mul.
    float4x4 WorldViewProjection;
    
    // We need the world matrix so that we can
    // calculate the lighting in world space
    float4x4 World;
    
    // Inverse transpose of world, used for
    // bringing normals into world space, especially
    // necessary where non-uniform scaling has been applied
    float4x4 WorldInverseTranspose;
};

// A simple directional light (e.g. the sun).
// Used as a member of the PerFrame constant buffer below.
struct DirectionalLight
{
    float4 Color;      // light color (rgba)
    float3 Direction;  // direction the light travels — confirm whether callers store to-light or from-light
};

// Constant buffer - updated once per frame (slot b1)
// Note: HLSL data is packed in such a
// way that it does not cross a 16-byte boundary
cbuffer PerFrame: register (b1)
{
    // The single directional light for the scene (color + direction).
    DirectionalLight Light;
    // Camera (eye) position in world space. Not referenced in this vertex
    // shader — presumably consumed by the pixel shader's specular terms; verify.
    float3 CameraPosition;
};

// Constant buffer to hold our material configuration (slot b2)
// Note: HLSL data is packed in such a
// way that it does not cross a 16-bytes boundary
cbuffer PerMaterial : register (b2)
{
    float4 MaterialAmbient;
    float4 MaterialDiffuse;
    float4 MaterialSpecular;
    // The two scalars below pack together into a single float4 register.
    float MaterialSpecularPower;
    bool HasTexture;           // whether the pixel shader should sample a texture
    float4 MaterialEmissive;
    // Per-material UV transform; VSMain applies it as a (float4x2) to the
    // texture coordinates.
    float4x4 UVTransform;
};

// Constant buffer to hold our skin matrices for each bone (slot b3).
// Note: 1024*64 = maximum bytes for a constant buffer in SM5;
// 50 matrices use only 50 * 64 = 3200 bytes of that.
// NOTE(review): on vs_4_0_level_9_3 this array is backed by D3D9-style
// constant registers (4 per float4x4) — confirm 50 matrices fit the
// target profile's register budget.
cbuffer PerArmature : register(b3)
{
    float4x4 Bones[50];
};

// Vertex Shader input structure (from Application)
struct VertexShaderInput
{
    float4 Position : SV_Position;// Position - xyzw
    float3 Normal : NORMAL;    // Normal - for lighting and mapping operations
    float4 Color : COLOR0;     // Color - vertex color, used to generate a diffuse color
    float2 TextureUV: TEXCOORD0; // UV - texture coordinate
    // NOTE(review): the application-side InputLayout feeds BLENDINDICES as
    // R32G32B32A32_Float (integer vertex formats are unavailable at feature
    // level 9_x) — consider declaring float4 here and casting to uint4 in
    // the shader; confirm on target hardware.
    uint4 SkinIndices : BLENDINDICES0; // blend indices
    float4 SkinWeights : BLENDWEIGHT0; // blend weights
};

// Pixel Shader input structure (from Vertex Shader).
// All members except SV_Position are interpolated across the triangle.
struct PixelShaderInput
{
    float4 Position : SV_Position;
    // Interpolation of combined vertex and material diffuse
    float4 Diffuse : COLOR;
    // Interpolation of vertex UV texture coordinate
    float2 TextureUV: TEXCOORD0;

    // We need the World Position and normal for light calculations
    // (normal is interpolated, so the pixel shader should renormalize it)
    float3 WorldNormal : NORMAL;
    float3 WorldPosition : WORLDPOS;
};

// Lambertian diffuse term: N.L clamped to [0,1], modulating the pixel's
// diffuse color.
//
// pixelDiffuse - interpolated diffuse color (vertex color * material diffuse)
// normal       - surface normal (assumed normalized)
// toLight      - unit vector from the surface point towards the light
// returns      - diffuse RGB contribution
float3 Lambert(float4 pixelDiffuse, float3 normal, float3 toLight)
{
    // Calculate diffuse color (using Lambert's Cosine Law - dot product of
    // light and normal). Saturate clamps the value to [0,1] so back-facing
    // geometry receives no light.
    // dot() yields a scalar, so store it in a float instead of splatting it
    // across a float3 as before.
    float diffuseAmount = saturate(dot(normal, toLight));
    return pixelDiffuse.rgb * diffuseAmount;
}

// Phong specular term: mirror the light direction about the normal and
// compare the reflection with the view direction.
//
// normal  - surface normal (assumed normalized)
// toLight - unit vector towards the light
// toEye   - unit vector towards the camera
float3 SpecularPhong(float3 normal, float3 toLight, float3 toEye)
{
    // reflect(i, n) computes i - 2 * n * dot(i, n); negate toLight so the
    // incident direction points into the surface.
    float3 r = reflect(-toLight, normal);

    // Keep the exponent strictly positive: a power of 0 would wash the
    // model out to black and white. Smaller powers give broader highlights.
    float exponent = max(MaterialSpecularPower, 0.00001f);
    float highlight = pow(saturate(dot(r, toEye)), exponent);

    return MaterialSpecular.rgb * highlight;
}

// Blinn-Phong specular term: uses the half-vector between the light and
// view directions instead of a true reflection vector.
//
// normal  - surface normal (assumed normalized)
// toLight - unit vector towards the light
// toEye   - unit vector towards the camera
float3 SpecularBlinnPhong(float3 normal, float3 toLight, float3 toEye)
{
    // Half-vector between the directions to the light and to the eye.
    float3 h = normalize(toLight + toEye);

    // saturate prevents back-face light reflection; clamp the exponent
    // away from zero (a power of 0 would render black and white).
    // Smaller specular power = larger specular highlight.
    float exponent = max(MaterialSpecularPower, 0.00001f);
    float amount = pow(saturate(dot(normal, h)), exponent);

    return MaterialSpecular.rgb * amount;
}

VS.hlsl:

#include "Common.hlsl"

// Blends up to four bone matrices by their weights and applies the result
// to the vertex position and normal, in place.
//
// weights  - blend weights; x is expected to hold the dominant weight
// bones    - indices into the Bones array (PerArmature cbuffer)
// position - vertex position, transformed in place when skinned
// normal   - vertex normal, rotated in place when skinned
void SkinVertex(float4 weights, uint4 bones, inout float4 position, inout float3 normal)
{
    // If there are skin weights apply vertex skinning
    // (weights.x == 0 is treated as "no skinning data for this vertex")
    if (weights.x != 0)
    {
        // Calculate the skin transform from up to four bones and weights.
        // NOTE(review): assumes the weights sum to 1; unnormalized weights
        // would scale the result — confirm the asset pipeline normalizes them.
        float4x4 skinTransform = Bones[bones.x] * weights.x +
            Bones[bones.y] * weights.y +
            Bones[bones.z] * weights.z +
            Bones[bones.w] * weights.w;
   
        // Apply skinning to vertex and normal
        position = mul(position, skinTransform);
        
        // Use only the upper 3x3 so translation does not affect the normal.
        // We assume here that the skin transform includes only uniform scaling (if any)
        normal = mul(normal, (float3x3)skinTransform);
    }
}

// Vertex shader main function: skins the vertex, then produces the
// clip-space position plus the world-space data the pixel shader needs
// for lighting.
PixelShaderInput VSMain(VertexShaderInput vertex)
{
    // Zero-initialize so any member we do not write is well defined.
    PixelShaderInput result = (PixelShaderInput)0;

    // Apply vertex skinning if any (modifies vertex.Position/Normal in place).
    // NOTE(review): SkinIndices is already declared uint4, so the (uint4)
    // cast is redundant as written — harmless, but confirm the intended
    // input type (feature level 9_x supplies float data).
    SkinVertex(vertex.SkinWeights, (uint4)vertex.SkinIndices, vertex.Position, vertex.Normal);

    // Vector-on-the-left mul: matrices are expected pre-transposed
    // (column-major default) by the application.
    result.Position = mul(vertex.Position, WorldViewProjection);
    result.Diffuse = vertex.Color * MaterialDiffuse;
    // Apply material UV transformation; only two output components are
    // needed, hence the (float4x2) truncation of UVTransform.
    result.TextureUV = mul(float4(vertex.TextureUV.x, vertex.TextureUV.y, 0, 1), (float4x2)UVTransform).xy;

    // We use the inverse transpose of the world so that if there is non uniform
    // scaling the normal is transformed correctly. We also use a 3x3 so that 
    // the normal is not affected by translation (i.e. a vector has the same direction
    // and magnitude regardless of translation)
    result.WorldNormal = mul(vertex.Normal, (float3x3)WorldInverseTranspose);
    
    // World-space position for per-pixel lighting.
    result.WorldPosition = mul(vertex.Position, World).xyz;
    
    return result;
}

Can anyone please tell me if Shader Model 2 — specifically shader profile vs_4_0_level_9_3 — supports arrays in constant buffers? I am speaking about line 53 in Common.hlsl. I have declared a float4x4 Bones[60] array to store bones for character skinning. But when I run the renderer on Windows Phone 8.1, the character is not skinned correctly (it is not animating, only staying in T-pose and moving through space). After days of debugging I've found out that when I load bones into the PerArmature cbuffer, only 1 bone gets in there. That is what is causing the weird movement rather than actual skinning.

 

So can anybody confirm, that you cannot store a float4x4 array in a cbuffer (Shader Model 2, D3D9)?... or explain to me what I might be doing wrong, that always only 1 bone loads into PerArmature cbuffer?

 

Thank you in advance.

Share this post


Link to post
Share on other sites

float4x4 for bone matrices is not appropriate, since a bone matrix is really a 3x4 matrix. I would strongly advise you to rework your code to send your matrices as a float4 matricesRows[60*3] array if you want to conform to older compiler versions smoothly. This way, if you send rows, you can achieve the 3x4 optimization — constructing float3x4 or float4x4 objects in the vertex shader from the rows is a trivial, fast operation — or you may use the rows for transforming directly. You will also be using the most native and stable uniform setters on the CPU side, since setting an array of float4s is what every GPU handles best.

Share this post


Link to post
Share on other sites

float4x4 for bone matricies is not apropriate, since a bone matrix is 3x4 matrix. I would strongly advice you to reform your code to send your matricies into a float4 matriciesrows[60*3] array if you want to conform to older compiler versions smoothly and not outperform them. This way, if you send rows, you can achive 3x4 optimization - constructing float3x4 or float4x4 objects in vertex shader from the rows is just trivial fast operation - or you may use the rows for transforming right away as well. And also this way you will use more native and stable uniform setters on cpu device, since setting array of float4's is what every gpu is ready for the most.

 

Thank you for your answer. Do u think that whas the reason for the problem, that I was experiencing? I still do not know if using float4x4 array is supported in DirectX Feature Level 9_3 (Shader Model 2). You're saying that float4 array should be fine then?

Share this post


Link to post
Share on other sites

Is there a way to check the content of the constant buffer in the Graphics Debugger (Visual Studio 2013)? I rewrote the code so the PerArmature cbuffer stores float4 matricesRows[numberOfBones * 3] now. In the shader code I am assembling the skinTransform matrix from the 3 rows and then do the skinning. 

 

This still did not solve the problem. The character is still skinning as if there were only 1 bone loaded in the cbuffer. I need to check what is really loaded in the constant buffer. Is there a way to check it?

Share this post


Link to post
Share on other sites

My guess then — since the character behaves as if it were influenced by only one bone — is that your uniform matrices and the way they are set from the CPU are correct; you should instead check your bone indices and weights in the vertex attributes, their declarations for the shader, and so on.

Share this post


Link to post
Share on other sites

I gess then - as if the character is influenced only by one bone - that your uniformal matrcies and their setting from cpu is rather correct, you should check your bone indicies and weights in vertex attributes, their declarations for the shader and so on.

 

A weird thing is, that if I run in the Emulator, it works fine. The character is animating just as it should. But when I try to run the exact same code in the real device, it is doing this weird thing. Anyway I will recheck the indicies and weights in the vertex attributes.

 

Could u please look over these for me? This is how I have it atm:

 

In shader:

// Vertex Shader input structure (from Application)
struct VertexShaderInput
{
    float4 Position : SV_Position;// Position - xyzw
    float3 Normal : NORMAL;    // Normal - for lighting and mapping operations
    float4 Color : COLOR0;     // Color - vertex color, used to generate a diffuse color
    float2 TextureUV: TEXCOORD0; // UV - texture coordinate
    uint4 SkinIndices : BLENDINDICES0; // blend indices
    float4 SkinWeights : BLENDWEIGHT0; // blend weights
};

This is in the engine:

// CPU-side vertex layout matching VertexShaderInput in the shader.
// Pack = 1 lays the fields out back-to-back with no padding so the byte
// offsets line up with the InputLayout declaration.
[StructLayout(LayoutKind.Sequential, Pack = 1)]
public struct Vertex
{
    public Vector3 Position;  // 12 bytes, offset 0
    public Vector3 Normal;    // 12 bytes, offset 12
    // NOTE(review): InputLayout reads this at offset 24 as R8G8B8A8_UNorm
    // (4 bytes) — confirm this Color type is exactly 4 bytes.
    public Color Color;
    public Vector2 UV;        // 8 bytes, offset 28 (assuming 4-byte Color)
    public Common.Mesh.SkinningVertex Skin; // indices + weights, offset 36
}

...

// Per-vertex skinning data: four bone indices and four blend weights.
[StructLayout(LayoutKind.Sequential, Pack = 1)]
public struct SkinningVertex
{
    // NOTE(review): these are 32-bit unsigned integers, but the InputLayout
    // declares BLENDINDICES as Format.R32G32B32A32_Float. The raw integer
    // bit patterns are then reinterpreted as floats, yielding garbage
    // indices — store these as floats (e.g. a Vector4) when targeting
    // feature level 9_x, which has no integer vertex formats.
    public uint BoneIndex0;
    public uint BoneIndex1;
    public uint BoneIndex2;
    public uint BoneIndex3;
    public float BoneWeight0;
    public float BoneWeight1;
    public float BoneWeight2;
    public float BoneWeight3;
}

Share this post


Link to post
Share on other sites



[StructLayout(LayoutKind.Sequential, Pack = 1)]
public struct SkinningVertex
{
public uint BoneIndex0;
public uint BoneIndex1;
public uint BoneIndex2;
public uint BoneIndex3;
public float BoneWeight0;
public float BoneWeight1;
public float BoneWeight2;
public float BoneWeight3;
}

I find this strange: if this structure really describes the GPU vertex buffer memory, then a uint per index and a float per weight is an incredible waste, since the standard is 4 bytes total for the 4 indices and 4 bytes for the 4 weights. You then usually declare them to the vertex shader as a float4 vector for the four weights and a float4 vector for the four indices, and in the vertex function you convert the indices' floating-point vector to an integer type before using it as an index into the uniform array, such as:

 

struct VertexShaderInput
{
    float4 Position : SV_Position;// Position - xyzw
    float3 Normal : NORMAL;    // Normal - for lighting and mapping operations
    float4 Color : COLOR0;     // Color - vertex color, used to generate a diffuse color
    float2 TextureUV: TEXCOORD0; // UV - texture coordinate
    float4 SkinIndices : BLENDINDICES0; // blend indices
    float4 SkinWeights : BLENDWEIGHT0; // blend weights
};

 

...

int4 indx=(int4)SkinIndices; // or check proper float to int conversion in HLSL

u_matricies[indx.x] // to refer with it

 

with weights you must usualy perform   

float influence = SkinWeights.x * (1.0f / 255.0f); // since weights tend to be unnormalized 0-255 byte values (note: integer 1/255 would evaluate to 0)

Share this post


Link to post
Share on other sites

All in all, you should post the vertex buffer declaration you establish on the CPU side. On lower-end GPU devices, only floating-point values actually enter the vertex function, whatever their declaration in the real vertex buffer memory — whether 16-bit integers, 8-bit integers or anything else; the CPU-side declaration just explains how to interpret the raw value so the actual floating-point value can be presented to the shader.

Share this post


Link to post
Share on other sites


I find this strange, if it is really the gpu vertex buffer memory explanation in this structure, then uint for a index and float for a weight is incredible waste, since standard is 4 bytes for  4 indicies, and 4 bytes for 4 weights. Then you usualy put vertex declaration to vertex shader as float4 vector for the four weights, and float4 vector for the four indicies, and in vertex function you convert the indicies floating vecor member to int type before using it as the index to uniform array, such as:
 
struct VertexShaderInput
{
    float4 Position : SV_Position;// Position - xyzw
    float3 Normal : NORMAL;    // Normal - for lighting and mapping operations
    float4 Color : COLOR0;     // Color - vertex color, used to generate a diffuse color
    float2 TextureUV: TEXCOORD0; // UV - texture coordinate
    float4 SkinIndices : BLENDINDICES0; // blend indices
    float4 SkinWeights : BLENDWEIGHT0; // blend weights
};
 
...
int4 indx=(int4)SkinIndices; // or check proper float to int conversion in HLSL
u_matricies[indx.x] // to refer with it
 
with weights you must usualy perform   
float influence=SkinWeights.x*(1/255)  ;// since weights tend to be unnormalized 0-255 byte values

 

I have it declared as I wrote in my previous post - the wasteful way as you said, but I use it in a shader as a float4 for weights and uint4 for indices, if you look at my very 1st post. I have the VSMain method there. Although, I will do the changes according to your advices.

Share this post


Link to post
Share on other sites


all and all, you should post your vertex buffer declaration you establish on cpu side. At lower gpu devices, only floating values are actual types that enter the vertex function, wheather they are of any declaration in the real vertex buffer memory, wheather 16 bit integers, or 8bit integers or whatever, cpu side declaration just explains how to interpret the number value to establish actual floating value to the shader.

 

Im sorry, I'm not 100% sure what you mean. You want me to do what?

Share this post


Link to post
Share on other sites

you have posted only definition of two structures, and a vertex layout in vertex shader. Actual interpretation of vertex buffer memory towards a shader are multiple untrivial dx calls.

 

I see, here is how I declare vertex buffer:

// Initialize vertex buffers: one GPU buffer per submesh, each built from
// the mesh's position/normal/color/UV streams plus optional skinning data.
            for (int indx = 0; indx < mesh.VertexBuffers.Count; indx++)
            {
                var vb = mesh.VertexBuffers[indx];
                Vertex[] vertices = new Vertex[vb.Length];
                for (var i = 0; i < vb.Length; i++)
                {
                    // Retrieve skinning information for vertex; meshes
                    // without skinning data get a default (zeroed) struct.
                    Common.Mesh.SkinningVertex skin = new Common.Mesh.SkinningVertex();
                    if (mesh.SkinningVertexBuffers.Count > 0)
                        skin = mesh.SkinningVertexBuffers[indx][i];

                    // Create vertex
                    vertices[i] = new Vertex(vb[i].Position, vb[i].Normal, vb[i].Color, vb[i].UV, skin);
                }

                // vertices is already an array — the previous ToArray() call
                // made a redundant full copy before upload.
                var buffer = ToDispose(Buffer.Create(device, BindFlags.VertexBuffer, vertices));
                buffer.DebugName = "VertexBuffer_" + indx.ToString();
                vertexBuffers.Add(buffer);
            }

As you can see here, I have List<Buffer> vertexBuffers (every submesh of a mesh gets its own vertex buffer). The line 88 is where I create a vertex buffer on the CPU side. In the for-cycle I load the info about 1 vertex. 

 

If you wanna see the InputLayout for the vertex shader, here it is:

// Input layout describing how the Vertex struct's bytes map to the
// shader's VertexShaderInput semantics. Offsets assume Pack = 1:
// Position 0, Normal 12, Color 24 (4 bytes), UV 28, indices 36, weights 52.
vertexLayout = ToDispose(new InputLayout(device,
                   bytecode.GetPart(ShaderBytecodePart.InputSignatureBlob).Data,
                new[]
                {
                    // "SV_Position" = vertex coordinate in object space
                    new SharpDX.Direct3D11.InputElement("SV_Position", 0, Format.R32G32B32_Float, 0, 0),
                    // "NORMAL" = the vertex normal
                    new SharpDX.Direct3D11.InputElement("NORMAL", 0, Format.R32G32B32_Float, 12, 0),
                    // "COLOR"
                    new SharpDX.Direct3D11.InputElement("COLOR", 0, Format.R8G8B8A8_UNorm, 24, 0),
                    // "UV"
                    new SharpDX.Direct3D11.InputElement("TEXCOORD", 0, Format.R32G32_Float, 28, 0),
                    // "BLENDINDICES"
                    // NOTE: commented line is for WinRT client, we must use Format.R32G32B32A32_Float (supported in 9_3)
                    //new InputElement("BLENDINDICES", 0, Format.R32G32B32A32_UInt, 36, 0), 
                    // NOTE(review): the float format below requires the buffer
                    // to contain float data at offset 36 — SkinningVertex
                    // currently stores uint indices there; see that struct.
                    new SharpDX.Direct3D11.InputElement("BLENDINDICES", 0, Format.R32G32B32A32_Float, 36, 0),
                    // "BLENDWEIGHT"
                    new SharpDX.Direct3D11.InputElement("BLENDWEIGHT", 0, Format.R32G32B32A32_Float, 52, 0),
                }));

Share this post


Link to post
Share on other sites

Seems legit, but my guess is that the actual vertex buffer data you bind for rendering really does have 4-byte packed indices and weights in it, so all 4 values get swallowed into the first component, somehow ending up as a 1.0 value in the shader. Any sane exporter would export it like that anyway, so you seem to have a correct art asset but an incorrect declaration in those calls; change them as I advised — 4 bytes for indices, 4 bytes for weights — and try it out.

 


new SharpDX.Direct3D11.InputElement("BLENDINDICES", 0, Format.R32G32B32A32_Float, 36, 0),
// "BLENDWEIGHT"
new SharpDX.Direct3D11.InputElement("BLENDWEIGHT", 0, Format.R32G32B32A32_Float, 52, 0),

Share this post


Link to post
Share on other sites
This:
...
public uint BoneIndex0;
public uint BoneIndex1;
public uint BoneIndex2;
public uint BoneIndex3;
...
is not
[tt]new SharpDX.Direct3D11.InputElement("BLENDINDICES", 0, Format.R32G32B32A32_Float, 36, 0),[/tt]

Share this post


Link to post
Share on other sites

This:

...
public uint BoneIndex0;
public uint BoneIndex1;
public uint BoneIndex2;
public uint BoneIndex3;
...
is not
[tt]new SharpDX.Direct3D11.InputElement("BLENDINDICES", 0, Format.R32G32B32A32_Float, 36, 0),[/tt]

 

true as well!

Share this post


Link to post
Share on other sites

Seems legit, but my gess is that the very vertex buffer data, you bound to render on, actualy does have  4byte indicies and verticies in itself, so all 4 values get swallowed into first component, somehow being 1.0 value in shader. Any sane exporter would export like that anyway, so you seem to have correct art asset, but incorrect declaration in those calls, change them to as I've adviced, 4 bytes/4bytes and try out

 

OH MY GOD IT WORKS!!! Finally after 2 weeks I will be able to sleep... You were right about the weird struct:

[StructLayout(LayoutKind.Sequential, Pack = 1)]
public struct SkinningVertex
{
    public uint BoneIndex0;
    public uint BoneIndex1;
    public uint BoneIndex2;
    public uint BoneIndex3;
    public float BoneWeight0;
    public float BoneWeight1;
    public float BoneWeight2;
    public float BoneWeight3;
}

This is what was causing the problem. The Indices and Weights weren't right. It was ok for Windows Part of the Universal, but for Windows Phone Part not. I can finally run the engine on Windows Phone device - with proper animating and skinning. Thank you so much. I will never be able to repay you. My live depends on this project and you solved this mystery, that no one else could. So again - thank you.

 

I removed this and replaced this with 2 Vector4... Vector4 Indices, Vector4 Weights and it's working.

Edited by dmtuan

Share this post


Link to post
Share on other sites

float4x4 for bone matricies is not apropriate, since a bone matrix is 3x4 matrix. I would strongly advice you to reform your code to send your matricies into a float4 matriciesrows[60*3] array if you want to conform to older compiler versions smoothly and not outperform them. This way, if you send rows, you can achive 3x4 optimization - constructing float3x4 or float4x4 objects in vertex shader from the rows is just trivial fast operation - or you may use the rows for transforming right away as well. And also this way you will use more native and stable uniform setters on cpu device, since setting array of float4's is what every gpu is ready for the most.

 

Can I please have 1 more question? I tried to implement this as you said - passing float4 matricesrows[bonesCount * 3] array instead of float4x4 bones[bonesCount]. I made the adjusment to the code and the matrices rows are loaded in the buffer fine. The problem is that SharpDX is row-major and HLSL is column-major.

 

So if I do this in the shader code:

bonesX = { MatricesRows[indices.x * 3], MatricesRows[indices.x * 3 + 1], MatricesRows[indices.x * 3 + 2], { 0, 0, 0, 1 } };

... the rows actually become columns. Of course I could just do this:

float4x4 bonesX = { { MatricesRows[indices.x * 3].x, MatricesRows[indices.x * 3 + 1].x, MatricesRows[indices.x * 3 + 2].x, 0 }, 
{ MatricesRows[indices.x * 3].y, MatricesRows[indices.x * 3 + 1].y, MatricesRows[indices.x * 3 + 2].y, 0 },
{ MatricesRows[indices.x * 3].z, MatricesRows[indices.x * 3 + 1].z, MatricesRows[indices.x * 3 + 2].z, 0 },
{ MatricesRows[indices.x * 3].w, MatricesRows[indices.x * 3 + 1].w, MatricesRows[indices.x * 3 + 2].w, 1 } };

... but there are too many instructions, and since we're in D3D9, we are limited to 256 instructions. So this won't work. Then I tried:

row_major float4x4 bonesX = { MatricesRows[indices.x * 3], MatricesRows[indices.x * 3 + 1], MatricesRows[indices.x * 3 + 2], { 0, 0, 0, 1 } };

I added row_major for every float4x4 I use in the shader, but still the result was the same as if there was no "row_major". It seems when I call position = mul(position, skinTransform); the multiplication still is like the skinTransform had no row_major.

 

Is there a short way (short in instruction counts) to compose a bone transform matrix, when I have it's original 3 rows? Or is there anything else I can do?

 

 

---

 

Oh wait... how stupid of me. There is transpose(matrix) fucntion. Sorry ^_^

Edited by dmtuan

Share this post


Link to post
Share on other sites

 


float4x4 for bone matricies is not apropriate, since a bone matrix is 3x4 matrix. I would strongly advice you to reform your code to send your matricies into a float4 matriciesrows[60*3] array if you want to conform to older compiler versions smoothly and not outperform them. This way, if you send rows, you can achive 3x4 optimization - constructing float3x4 or float4x4 objects in vertex shader from the rows is just trivial fast operation - or you may use the rows for transforming right away as well. And also this way you will use more native and stable uniform setters on cpu device, since setting array of float4's is what every gpu is ready for the most.

 

Can I please have 1 more question? I tried to implement this as you said - passing float4 matricesrows[bonesCount * 3] array instead of float4x4 bones[bonesCount]. I made the adjusment to the code and the matrices rows are loaded in the buffer fine. The problem is that SharpDX is row-major and HLSL is column-major.

 

So if I do this in the shader code:

bonesX = { MatricesRows[indices.x * 3], MatricesRows[indices.x * 3 + 1], MatricesRows[indices.x * 3 + 2], { 0, 0, 0, 1 } };

... the rows actually become columns. Of course I could just do this:

float4x4 bonesX = { { MatricesRows[indices.x * 3].x, MatricesRows[indices.x * 3 + 1].x, MatricesRows[indices.x * 3 + 2].x, 0 }, 
{ MatricesRows[indices.x * 3].y, MatricesRows[indices.x * 3 + 1].y, MatricesRows[indices.x * 3 + 2].y, 0 },
{ MatricesRows[indices.x * 3].z, MatricesRows[indices.x * 3 + 1].z, MatricesRows[indices.x * 3 + 2].z, 0 },
{ MatricesRows[indices.x * 3].w, MatricesRows[indices.x * 3 + 1].w, MatricesRows[indices.x * 3 + 2].w, 1 } };

... but there are too many instructions, and since we're in D3D9, we are limited to 256 instructions. So this won't work. Then I tried:

row_major float4x4 bonesX = { MatricesRows[indices.x * 3], MatricesRows[indices.x * 3 + 1], MatricesRows[indices.x * 3 + 2], { 0, 0, 0, 1 } };

I added row_major for every float4x4 I use in the shader, but still the result was the same as if there was no "row_major". It seems when I call position = mul(position, skinTransform); the multiplication still is like the skinTransform had no row_major.

 

Is there a short way (short in instruction counts) to compose a bone transform matrix, when I have it's original 3 rows? Or is there anything else I can do?

 

Just drop the higher-level algebra objects and allow yourself to work with the direct transformations. That said, your issue is narrow enough that there is no need to worry about it further — your shader functions are well optimized by now, as far as I can tell. :)

Share this post


Link to post
Share on other sites


Just drop advanced objects of algebra, alowing yourself to acomodate direct transformations. Though, your issue is too narrow, no worries to deal with it directly. You cannot beat out more optimizations as it is norowed down now though directly, your shader functions are well optimzed by now. I gess at least

 

Hopefully it is. Thanks to you :) I will keep an eye on it.

Share this post


Link to post
Share on other sites

Create an account or sign in to comment

You need to be a member in order to leave a comment

Create an account

Sign up for a new account in our community. It's easy!

Register a new account

Sign in

Already have an account? Sign in here.

Sign In Now

Sign in to follow this