Ugly HLSL output when converting pos->UV in VS, not in PS?

I'm an amateur trying to learn HLSL techniques. I'm currently trying to implement texture projection (making a movie projector) in a DX9 environment.

I'm running my vertices through an alternate view and projection and using the result as UV coordinates on a texture. However, I find that the coordinates are very different depending on whether I convert them from screen coordinates to texture coordinates in the vertex shader or in the pixel shader, and I don't know why. I suspect it may have something to do with some kind of automatic conversion happening between the vertex shader and the pixel shader.

I don't care much about performance, but I really want to do this calculation in the vertex shader so that I can shadow the projection, shadow-buffer style. But there are artifacts and clones that I can't live with.

I'm attaching two pics: one showing the artifacts when calculating the UV coordinates in the vertex shader, and one when calculating them in the pixel shader (which, other than the lack of shadowing, I'm happy with).

Here is the almost-complete code (I'm leaving out the wide variety of technique calls that all look the same). I'm never sure whether to whittle this down to what's relevant to save you some effort, or to leave it complete in case I turn out to be unqualified to be the one that whittles. There is a single line in the pixel shader that I'm uncommenting to replace the UV coordinates with those computed in the vertex shader.

I'm certain there are a lot of other things I'm doing poorly as well, and I'd appreciate any extra recommendations. I don't have access to the main executable, just the HLSL.

I greatly appreciate any help anyone is willing to offer.  Thanks for looking.
 

#define MOVIETEX "b.png"
//#define MOVIETEX "test.gif"
//#define MOVIETEX "NT.gif"

#define VSVRS vs_2_0
#define PSVRS ps_2_0    //animated textures don't work in v3.0
#define PI 3.14159265f
#define IDENTITYMATRIX {{1,0,0,0},{0,1,0,0},{0,0,1,0},{0, 0, 0, 1}}

#define BLACK float4(0,0,0,1)
#define CONT_MODEL_INSTANCE "Projector.pmx"

float4x4 cProjector    : CONTROLOBJECT < string name = CONT_MODEL_INSTANCE; string item = "Projector"; >;
float4 cFOV : CONTROLOBJECT < string name = CONT_MODEL_INSTANCE; string item = "FOV"; >;
float4 cBrightness : CONTROLOBJECT < string name = CONT_MODEL_INSTANCE; string item = "Brightness"; >;
float4 cCol : CONTROLOBJECT < string name = CONT_MODEL_INSTANCE; string item = "Color"; >;
float4 cNearFar : CONTROLOBJECT < string name = CONT_MODEL_INSTANCE; string item = "NearFar"; >;
float3 cZVec : CONTROLOBJECT < string name = CONT_MODEL_INSTANCE; string item = "NearFar"; >;
static float3 projWPos = float3(cProjector._41, cProjector._42, cProjector._43);

float4x4 WorldMatrix              : WORLD;
float4x4 ViewMatrix               : VIEW;
float4x4 ViewProjMatrix           : VIEWPROJECTION;
float4x4 WorldViewProjMatrix            : WORLDVIEWPROJECTION;
float4x4 ProjMatrix                        : PROJECTION;
float4 MaterialDiffuse   : DIFFUSE  < string Object = "Geometry"; >;
float3 MaterialAmbient   : AMBIENT  < string Object = "Geometry"; >;
float4 TextureAddValue  : ADDINGTEXTURE;
float4 TextureMulValue  : MULTIPLYINGTEXTURE;


texture MovieTex : ANIMATEDTEXTURE <
    string ResourceName = MOVIETEX;
>;

sampler MovieSamp = sampler_state {
    texture = <MovieTex>;
    MINFILTER = LINEAR;
    MAGFILTER = LINEAR;
    MIPFILTER = LINEAR;
    ADDRESSU  = BORDER;
    ADDRESSV  = BORDER;
    BORDERCOLOR = BLACK;
};


texture ObjectTexture: MATERIALTEXTURE;
sampler ObjTexSampler = sampler_state {
    texture = <ObjectTexture>;
    MINFILTER = LINEAR;
    MAGFILTER = LINEAR;
    MIPFILTER = LINEAR;
    ADDRESSU  = WRAP;
    ADDRESSV  = WRAP;
};

technique EdgeTec < string MMDPass = "edge"; > {        //disable
}


technique ShadowTec < string MMDPass = "shadow"; > {    //disable
}

technique ZplotTec <string MMDPass = "zplot";> {        //disable
}

float4x4 mat3tomat4 (float3x3 inpM) {
    float4x4 outp = IDENTITYMATRIX;
    outp._11 = inpM._11; outp._12 = inpM._12; outp._13 = inpM._13;
    outp._21 = inpM._21; outp._22 = inpM._22; outp._23 = inpM._23;
    outp._31 = inpM._31; outp._32 = inpM._32; outp._33 = inpM._33;
    outp._41 = 0.0f; outp._42 = 0.0f; outp._43 = 0.0f;
    outp._14 = 0.0f; outp._24 = 0.0f; outp._34 = 0.0f;
    return outp;
}

float4x4 invertTR4x4 (float4x4 inpM) {
//inverts a typical 4x4 matrix composed of only translations and rotations
    float4x4 invTr = IDENTITYMATRIX;
    invTr._41 = -inpM._41; invTr._42 = -inpM._42; invTr._43 = -inpM._43;
    float3x3 invRot3x3 = transpose((float3x3)inpM);
    float4x4 invRot4x4 = mat3tomat4(invRot3x3);
    float4x4 outpM = mul(invTr, invRot4x4);
    return outpM;
}

float4x4 getPerspProj (float2 Fov, float near, float far) {
//http://www.codinglabs.net/article_world_view_projection_matrix.aspx
//receives the full FOV angles in degrees
    Fov *= PI / 180.0f;
    float4x4 outp = IDENTITYMATRIX;
    outp._11 = 1.0f / tan(Fov.x / 2.0f);    //cotangent of the half-angle
    outp._22 = 1.0f / tan(Fov.y / 2.0f);
    outp._33 = -(far + near) / (far - near);
    outp._43 = (-2.0f * near * far) / (far - near);
    outp._34 = -1.0f;
    outp._44 = 0.0f;
    return outp;
}

struct BufferShadow_OUTPUT {
    float4 Pos      : POSITION;
    float4 PTex        : TEXCOORD0;        //texture coordinates in alternate projection
    float4 UV        : TEXCOORD1;
    float3 Normal   : TEXCOORD2;
    float3 PEye        : TEXCOORD3;
    float2 Tex        : TEXCOORD4;
    float4 wPos        : TEXCOORD5;
    float4 Color    : COLOR0;
};

BufferShadow_OUTPUT BufferShadow_VS(float4 Pos : POSITION, float3 Normal : NORMAL, float2 Tex : TEXCOORD0, float2 Tex2 : TEXCOORD1, uniform bool useTexture, uniform bool useSphereMap, uniform bool useToon)
{
    BufferShadow_OUTPUT Out = (BufferShadow_OUTPUT)0;
    
    Pos = mul( Pos, WorldMatrix );
    Out.PEye = cZVec - projWPos.xyz;  //easier than transforming Zvec
    Out.wPos = Pos;
    Out.Pos = mul(Pos, ViewProjMatrix);

    float4x4 invTR = invertTR4x4(cProjector);
    Out.PTex = mul(Pos, invTR);
    
    float4x4 altProj = getPerspProj((cFOV.xy)*cFOV.z, cNearFar.x, cNearFar.y);
    Out.PTex = mul(Out.PTex, altProj);
    
    Out.UV = Out.PTex;
    Out.UV.xyz /= Out.UV.w;
    Out.UV.x = (Out.UV.x + 0.5f)*2.0f;
    Out.UV.y = (-Out.UV.y + 0.5f)*2.0f;
    Out.UV.xy -= 0.5f;    //texture is centered on 0,0
        
    Out.Normal = normalize( mul( Normal, (float3x3)WorldMatrix ) );
    Out.Tex = Tex;
    Out.Color.rgb = MaterialAmbient;
    Out.Color.a = MaterialDiffuse.a;
    return Out;
}

float4 BufferShadow_PS(BufferShadow_OUTPUT IN, uniform bool useTexture, uniform bool useSphereMap, uniform bool useToon) : COLOR
{
    float4 Color = IN.Color;
    float3 PEn = normalize(IN.PEye); float3 Nn = normalize(IN.Normal);
    if ( useTexture ) {
        float4 TexColor = tex2D( ObjTexSampler, IN.Tex );
        TexColor.rgb = lerp(1, TexColor * TextureMulValue + TextureAddValue, TextureMulValue.a + TextureAddValue.a).rgb;
        Color *= TexColor;
    }
    float4 UV = IN.PTex;
    UV.xyz /= UV.w;
    UV.x = (UV.x + 0.5f) *2.0f;
    UV.y = (-UV.y+0.5f) * 2.0f;
    UV.xy -= 0.5f;
        
    //uncommenting seems like it should provide same output yet doesn't
    //UV = IN.UV;
    
    float4 projTex = tex2D(MovieSamp, UV.xy);
    Color = projTex;    //note: replaces (rather than modulates) the material color computed above
    Color.rgb *= pow(dot(Nn, PEn), 0.6f);
    Color.rgb *= cCol.rgb;
    Color.rgb *= cBrightness.x;
    if ((UV.z < 0.0f) || (UV.z > 1.0f) || (UV.x < 0.0f) || (UV.x > 1.0f) || (UV.y < 0.0f) || (UV.y > 1.0f)) {
        //outside the projector's frustum; the BORDER address mode alone gives me artifacts I don't understand
        return BLACK;
    }
    else { return Color; }
}

technique MainTecBS0 < string MMDPass = "object_ss"; bool UseTexture = false; bool UseSphereMap = false; bool UseToon = false; > {
    pass DrawObject {
        VertexShader = compile vs_3_0 BufferShadow_VS(false, false, false);
        PixelShader  = compile ps_3_0 BufferShadow_PS(false, false, false);
    }
}

 

Attached: vertexshader.png (UVs computed in the vertex shader, showing the artifacts), pixelshader.png (UVs computed in the pixel shader)

The results of your UV calculation are not going to interpolate linearly across a triangle. In other words, Lerp(CalcUV(Vtx0), CalcUV(Vtx1)) != CalcUV(Lerp(Vtx0, Vtx1)). You can either do the whole calculation in the pixel shader, or you can do everything before the homogeneous divide-by-w in the vertex shader and then perform the divide and the rest in the pixel shader. (You can actually bake the scale/offset that goes from [-1, 1] to [0, 1] range into your projection matrix, in which case you only need to do the divide-by-w in your pixel shader.)
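For concreteness, here's a minimal sketch of that split, reusing the variable names from the shader above (the [-1, 1] -> [0, 1] remap shown here is the standard one; the original code uses a slightly different scale/offset):

// Vertex shader: stop before the divide; homogeneous coordinates interpolate correctly
Out.PTex = mul(mul(Pos, invertTR4x4(cProjector)), altProj);

// Pixel shader: do the divide per-pixel, then remap to texture space
float4 UV = IN.PTex;
UV.xyz /= UV.w;
UV.x = ( UV.x + 1.0f) * 0.5f;
UV.y = (-UV.y + 1.0f) * 0.5f;
float4 projTex = tex2D(MovieSamp, UV.xy);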


Thanks, I think I see what you're saying. If I understand correctly, the UV coordinates won't quite be the same if I apply the scale and offset before the w divide, but that's probably an error in my current version. (This is the first time I've ever made a projection matrix, or even an inverse matrix, and I wanted to keep them clean, because I guess I'm scared I'll never be able to get close again :) But I'll make a new matrix and multiply it in before the w divide, which should let me make a shadow buffer.)

 

I don't know if you have any comments on anything else? With that 3x3-matrix-to-4x4 function, it feels like there should be a better way, one that I just don't know about. (The FOV also isn't behaving quite like what I'm treating it as, but that may be related to my scale+shift after the divide.)


Right: the results aren't the same under interpolation. You can imagine linear interpolation as taking two points on a 2D graph, and drawing a straight line through them (hence the "linear" name). For a linear function like y = 4x + 5 you could take two points, draw a line between them, and anywhere on that line will match the original function. However for a non-linear function like y = x^2, your straight line won't match the function and will therefore give you incorrect results. In your case you're dealing with a non-linear function due to using a perspective projection (linear transformations like rotation and translation are still ok).
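To put numbers on it, here's the same mismatch as a toy HLSL example:

// y = x*x, sampled at the endpoints x = 0 and x = 2
float y0 = 0.0f;                     // true value at x = 0
float y1 = 4.0f;                     // true value at x = 2
float yInterp = lerp(y0, y1, 0.5f);  // = 2, what straight-line interpolation gives at x = 1
float yTrue   = 1.0f;                // = 1, what the function actually is at x = 1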

You can do the 3x3->4x4 matrix a bit more concisely by using an alternate matrix constructor and some swizzles:

float4x4 mat3tomat4 (float3x3 inpM) {
    return float4x4(float4(inpM._11_12_13, 0.0f),
                    float4(inpM._21_22_23, 0.0f),
                    float4(inpM._31_32_33, 0.0f),
                    float4(0.0f, 0.0f, 0.0f, 1.0f));  // _44 = 1, so it stays a valid transform
}

The scale + shift is normal when you're calculating UV coordinates after applying a projection, since a typical projection matrix will give you coordinates in the range [-1, 1] while UVs are in [0, 1] space ([-1, 1] matches the normalized device coordinates expected by the rasterizer). If you want to create a projection matrix that produces [0, 1] coordinates after the divide-by-w, you can multiply your projection by a scale + translation matrix:

// Transforms from [-1,1] post-projection space to [0,1] UV space, and also
// flips the Y coordinate so that it puts [0, 0] in the top-left corner
float4x4 projScaleOffset = float4x4(float4(0.5f,  0.0f, 0.0f, 0.0f),
                                    float4(0.0f, -0.5f, 0.0f, 0.0f),
                                    float4(0.0f,  0.0f, 1.0f, 0.0f),
                                    float4(0.5f,  0.5f, 0.0f, 1.0f));
proj = mul(proj, projScaleOffset);    // proj is your existing projection matrix
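Applied to the projector setup in your shader, that might look like this (same variable names as above; tex2Dproj performs the divide-by-w as part of the fetch):

// Vertex shader: bake the remap into the projection, output homogeneous coords
float4x4 altProj = getPerspProj((cFOV.xy)*cFOV.z, cNearFar.x, cNearFar.y);
altProj = mul(altProj, projScaleOffset);
Out.PTex = mul(mul(Pos, invertTR4x4(cProjector)), altProj);

// Pixel shader: only the divide remains
float4 projTex = tex2Dproj(MovieSamp, IN.PTex);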

 


Thanks. I'm doing everything but the divide in the VS now and it looks good. I'm having some issues implementing my shadow buffer, but it's the kind of thing I'll probably need to play with for a few days (sleeping on it always seems to help). I appreciate the help regarding mat3tomat4: seeing that example will help me simplify other things I do as well.

 

Edit: Oh, I misunderstood something, but I see now. Rather than trying to fit my screen into the (0-1, 0-1) range, I should try to fit my texture into the -1 to 1 range. Doing this after the w-divide is appropriate.

 

Edit2: I believe everything is working, but I need to do more testing to be sure, and make sure I'm handling things like alpha.  There is something extremely magical about making my own shadow buffer for the first time.  Thank you again for all of your help.
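For anyone following along later: the depth-compare step of a shadow buffer built on the same projector matrices could look roughly like the sketch below. This is only an illustration; DepthMap (a depth texture rendered from the projector's point of view) and the bias constant are assumptions, not part of the shaders above.

// Hypothetical shadow test in the pixel shader; DepthMap and the bias are assumed
float2 shadowUV    = IN.PTex.xy / IN.PTex.w;       // projector-space UV (remap baked into the matrix)
float pixelDepth   = IN.PTex.z / IN.PTex.w;        // this pixel's depth as seen from the projector
float storedDepth  = tex2D(DepthMap, shadowUV).r;  // nearest depth the projector recorded
if (pixelDepth > storedDepth + 0.005f) {           // something closer blocks the projection
    return BLACK;
}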



