# DX11 [solved]My tessellation pipeline that draws nothing.

This topic is 1769 days old which is more than the 365 day threshold we allow for new replies. Please post a new topic.

## Recommended Posts

Ok, I believe that I am using the right blend state, and I believe that my ProjectionView matrix is ok, and I've eliminated relevant DX11 warnings and errors, but I still can't get anything to appear on screen. I'm greatly hampered by the fact that there is no way to debug my tessellation shaders with SharpDX and VS2012 express. This is my first stab at tessellation so I probably am doing something obviously wrong, so I would like to post my shaders to see if anything jumps out at anyone. What I am trying to do is draw instanced 3rd order bezier triangles. Besides the above, what other things might I check when nothing is drawn?

Here is the VS:

cbuffer PerFrameBuffer : register(b0)
{
//float4x4 World; This is just the identity matrix, so not needed
float4x4 ViewProjection;
float4 vecEye;
float4 LightDirection;
float4 LightColor;
};

struct VertexInput
{
//imediate data
float3 PositionLocal		: POSITION0;
int index					: BLENDINDICES;
//instance data
float3 Translation			: POSITION1;
float Altitudes[4]			: PSIZE;
};

struct VertexOutput
{
float3 PositionWorld	: POSITION;
float2 MainTexCoord		: TEXCOORD;
};

VertexOutput VS(VertexInput vin)
{
VertexOutput vout;

float3 transPos = vin.PositionLocal + vin.Translation;
transPos.y += vin.Altitudes[vin.index];

vout.MainTexCoord = transPos.xz;
vout.PositionWorld = transPos;

return vout;
}


Here is the pass thru Hull shader:


struct VertexOutput
{
float3 PositionWorld	: POSITION;
float2 MainTexCoord		: TEXCOORD;
};

struct PatchTess
{
float EdgeTess[3]	: SV_TessFactor;
float InsideTess	: SV_InsideTessFactor;
};

PatchTess PatchHS(InputPatch<VertexOutput,10> patch, uint patchID : SV_PrimitiveID)
{
PatchTess pt;

pt.EdgeTess[0] = 6;
pt.EdgeTess[1] = 6;
pt.EdgeTess[2] = 6;
pt.InsideTess = 6;

return pt;
}

struct HullOut
{
float3 PositionWorld	: POSITION;
float2 MainTexCoord		: TEXCOORD;
};

[domain("tri")]
[partitioning("fractional_even")]
[outputtopology("triangle_cw")]
[outputcontrolpoints(10)]
[patchconstantfunc("PatchHS")]
HullOut HS(InputPatch<VertexOutput,10> p, uint i : SV_OutputControlPointID, uint patchId : SV_PrimitiveID)
{
HullOut hout;

hout = p;

return hout;
}


Here is the domain shader, where the meat of the intended operation is. The math here might be wrong, but should still produce visible geometry:

#define p300 0
#define p030 1
#define p003 2
#define p210 3
#define p201 4
#define p120 5
#define p021 6
#define p012 7
#define p102 8
#define p111 9

#define U bary.x
#define V bary.y
#define W bary.z

cbuffer PerFrameBuffer : register(b0)
{
//float4x4 World; This is just the identity matrix, so not needed
float4x4 ViewProjection;
float4 vecEye;
float4 LightDirection;
float4 LightColor;
};

struct PatchTess
{
float EdgeTess[3]	: SV_TessFactor;
float InsideTess	: SV_InsideTessFactor;
};

struct HullOut
{
float3 PositionWorld	: POSITION;
float2 MainTexCoord		: TEXCOORD;
};

struct DomainOut
{
float4 PositionH		: SV_Position;
float3 Normal			: NORMAL0;
float3 Tangent			: TANGENT0;
float3 Bitangent		: BITANGENT0;
float3 View				: NORMAL1;
float2 MainTexCoord		: TEXCOORD0;
};

[domain("tri")]
DomainOut DS(PatchTess patchTess, float3 bary : SV_DomainLocation, const OutputPatch<HullOut,10> cp)
{
DomainOut dout;

float3 position =
cp[p300].PositionWorld * pow(U,3) +
cp[p030].PositionWorld * pow(V,3) +
cp[p003].PositionWorld * pow(W,3) +
cp[p210].PositionWorld * 3 * pow(U,2) * V +
cp[p201].PositionWorld * 3 * pow(U,2) * W +
cp[p120].PositionWorld * 3 * U * pow(V,2) +
cp[p021].PositionWorld * 3 * pow(V,2) * W +
cp[p012].PositionWorld * 3 * V * pow(W,2) +
cp[p102].PositionWorld * 3 * U * pow(W,2) +
cp[p111].PositionWorld * 6 * U * V * W;

float3 tangent =
cp[p300].PositionWorld * pow(U,2) +
cp[p120].PositionWorld * pow(V,2) +
cp[p102].PositionWorld * pow(W,2) +
cp[p201].PositionWorld * 2 * U * W +
cp[p210].PositionWorld * 2 * U * V +
cp[p111].PositionWorld * 2 * V * W;

float3 bitangent =
cp[p030].PositionWorld * pow(V,2) +
cp[p012].PositionWorld * pow(W,2) +
cp[p210].PositionWorld * pow(U,2) +
cp[p120].PositionWorld * 2 * U * V +
cp[p021].PositionWorld * 2 * V * W +
cp[p111].PositionWorld * 2 * U * W;

tangent = normalize(tangent);
bitangent = normalize(bitangent);
float3 normal = normalize(cross(tangent,bitangent));

dout.View = vecEye.xyz - position.xyz;
dout.PositionH = mul(float4(position,1.0f),ViewProjection);
dout.Normal = normal;
dout.Tangent = tangent;
dout.Bitangent = bitangent;

dout.MainTexCoord = cp[p300].MainTexCoord * U + cp[p030].MainTexCoord * V + cp[p003].MainTexCoord * W;

return dout;
}


Last is the pixel shader, which has a simple shading algorithm that has been disabled to merely produce one color. I'm really hoping to see that color 'Reddish' but it's not happening:

cbuffer PerFrameBuffer : register(b0)
{
//float4x4 World; This is just the identity matrix, so not needed
float4x4 ViewProjection;
float4 vecEye;
float4 LightDirection;
float4 LightColor;
};

struct DomainOut
{
float4 PositionH		: SV_Position;
float3 Normal			: NORMAL0;
float3 Tangent			: TANGENT0;
float3 Bitangent		: BITANGENT0;
float3 View				: NORMAL1;
float2 MainTexCoord		: TEXCOORD0;
};

float4 PS(DomainOut input) : SV_Target
{
float4 Color = float4 (0, 0, 0, 1);
float4 Reddish = float4 (0.8,0.5,0.8,1.0);
float4 specularColor = float4(1,1,1,1);

float4 diffuse = {LightColor.rgb, 1.0f};
float4 ambient = { 0.03f, 0.03f, 0.03f, 1.0f};

float3 LightDir = normalize(LightDirection.xyz);
float3 ViewDir = normalize(input.View);

float4 diff = saturate(dot(input.Normal, LightDir)); // diffuse component

// R = 2 * (N.L) * N - L
float3 Reflect = normalize(2 * diff * input.Normal - LightDir);
float specular = pow(saturate(dot(Reflect, ViewDir)), 15); // R.V^n

// I = Acolor + Dcolor * N.L + (R.V)n
Color += ambient + diffuse  * diff + specular * (specularColor * LightColor);

float4 Final = Color * Reddish;
Final.a = 1.0;

//return Final;
return Reddish;

}


##### Share on other sites

When I started out with tessellation, I found it helpful to really simplify things and then build up the complexity step by step.  I would suggest making your pixel shader just output a solid white color - then you can eliminate any shading errors or missing constant buffers from the pixel shader right off the bat.

Next I would actually eliminate the hull and domain shaders, and just pass your geometry from vertex shader to pixel shader.  Verify that your vertices are appearing in the location that you expect (this will likely require moving some transformation code from the DS to the VS, but you can move it back fairly easily later).  Instead of doing instanced rendering, try with just standard rendering instead (that will eliminate issues with your instance buffers).

Only once you have the geometry input to the tessellation stages verified, and then that the output stages from the tessellation stages verified, would I try to debug the hull and domain shaders.  Have you been able to verify these things yet?  If so, then we can move a little deeper into the investigation - if we know it is a tessellation issue or not!

##### Share on other sites

Thanks Jason. That sounds like a good idea. It's time to start paring things down. One thing I forgot to mention above was the setup of the depth buffer, as if maybe it was always failing the depth test or something, I've got that set up how it should be, I had forgotten to set the DepthStencilState, but fixing that didn't help. I'll do what you suggest, although I'll have to change alot of stuff, because my inputs are just a series of control points. I'll have to decide how to turn them into triangles, or just draw points.

##### Share on other sites

About the control points - just output them as regular points, and ensure their positions are as expected.  Don't expend too much energy on the verification - you are just making sure it is set up the way you expect.  That should really only require the changes in the shaders, plus the primitive topology.

##### Share on other sites

As this is my first dx11 app, I do have a related question. Is there anything else I am supposed to do to turn on the different pipeline stages other than setting them in the immediate context? In SharpDX you would say  context.HullShader.Set(hexCornerHS).

For instance let's say I want to use tessellation on one drawn element, and then only a vertex/pixel shader on another drawn element. Would I then have to set the hull/domain shaders to 'null' in order to disable them?

##### Share on other sites
1) Well for the tesselation you need hull and domain shader. Other than that: constant buffers, views etc for the correct stages.
2) Yes. Whenever you issue a draw call, everything that is currently set goes into it. It won't change until you change it.

Oh, and cephalo, I found something useful. With a pass-through geometry shader like this...
[maxvertexcount(3)]
void PassThroughTriangleGS(triangle DomainOut gin[3], inout TriangleStream<DomainOut> stream)
{
for(int i = 0; i < 3; i++)
{
stream.Append(gin);
}
stream.RestartStrip();
}

... you can actually force PIX to show what the tesselator spits out. It even highlights the whole patch

##### Share on other sites

interesting tip unbird. I thought that I couldn't have a geometry shader when the tessellation stages were active.

##### Share on other sites

Ok, I tried unbird's trick, and kindof unfortunately I got almost the exact geometry I expected in the post GS window. However, I don't understand why the viewport shows a mere dot. In actual rendering, I don't see a dot even though my pixel shader currently puts out one color.

[attachment=14875:PIXpic.png]

To see how this geometry looks right to me, here is a screenshot of the DirectX 9 version of this project: It's basically a hex terrain map.

[attachment=14876:HexMap.jpg]

##### Share on other sites

interesting tip unbird. I thought that I couldn't have a geometry shader when the tessellation stages were active.

There isn't any restriction like that.  You have to have a vertex shader and a pixel shader, but outside of that there is no hard rule except that the last stage before the rasterizer has to output the SV_Position semantic.

##### Share on other sites

The w-components of your post GS vertices is very large - that means that your geometry is very far away, and will appear small in your viewport.  Can you double check your view and projection matrices to make sure they are properly set?  Have you tried these matrices with a normal rendering setup (i.e. sans tessellation)?

##### Share on other sites

Hmm, I am starting to suspect the matrices. There is some confusion here because my camera class began with XNA, changed to SlimDX under DX9 and then to SharpDX under DX11, and I'm not sure if the methods with the same name are doing the same thing anymore. Here is the code to my update method in the camera.

        public override void Update()
{
Quaternion rot = Quaternion.RotationYawPitchRoll(input.Yaw,input.Pitch,input.Roll);
avatarRotation = rot * avatarRotation;

avatarPosition += Vector3.TransformCoordinate(new Vector3(0.0f, 0.0f, input.Velocity), Matrix.RotationQuaternion(avatarRotation));

Matrix rotMatrix = Matrix.RotationQuaternion(avatarRotation);
Vector3 transformedRef = Vector3.TransformCoordinate(thirdPersonRef, rotMatrix);

cameraPosition = transformedRef + avatarPosition;
//            Vector4 upVector4 = rotMatrix.get_Rows(1);
Vector4 upVector4 = rotMatrix.Row1;
Vector3 upVector3 = new Vector3(upVector4.X, upVector4.Y, upVector4.Z);
view = Matrix.LookAtRH(cameraPosition, avatarPosition, upVector3);
aspectRatio, 0.1f, 500.0f);

Matrix frustumMatrix = view * Matrix.PerspectiveFovRH(UsefulFunctions.ToRadians(50.0f),
aspectRatio, 0.1f, 500.0f);
frustum.BuildViewFrustum(frustumMatrix);

base.Update();
}



Hmm, when I debug this code, I am getting a different View matrix from my DX9 version than with my DX11 version. The Projection matrix is the same in both versions. I'm not sure what's going on here.

EDIT: I have another clue, when I move the camera at all with my controls, PIX showed no draw calls. I realized that my frustum checker was rejecting everything as being outside the frustum. That doesn't actually solve the problem, but it definately indicates that the matrices are wrong. I just need to know why I'm getting different matrices with the same code.

Edited by cephalo

##### Share on other sites

Ok, I found one mistake in my camera code. It turns out that SlimDX uses a zero base index on the row getter method, while SharpDX uses a one base index. You can see the commented out old SlimDX code above, instead of Row1, I needed Row2 to match the old Rows(1). Now that my initial up vector is (0,1,0) like it is supposed to be, all my matrices are the same between my DX9 version and my DX11 version. Unfortunately the problem still exists and I can't see anything. I'll have to test this again with my computer that allows PIX to run to see if my W values become sane. Although having the wrong up vector shouldn't make things invisible.

Edited by cephalo

##### Share on other sites
Shot in the dark: Forgot to transpose the matrices ?

How do you update your constant buffers ?

##### Share on other sites

Ok, fixing that glich with the up vector gets rid of the weird W values. Now that I think about it, if you have an up vector that is nowhere close to perpendicular with your view direction, you're going to get skewed results. Here is my new screenshot:

[attachment=14883:PIXpic2.png]

Shot in the dark: Forgot to transpose the matrices ?

How do you update your constant buffers ?

I used to know what that means... but I forget now. Transpose them where? Since this is a terrain map of sorts, it actually defines world space in a way. The world matrix for this data would be the identity matrix, so I left it out of my calculations. Tell me if I'm mistaken, but m * identity == m correct?

The constant buffer is something I had doubts about, according to PIX it appears that it is being updated, but it's hard to tell if it's being done correctly. My code for creating and updating is as follows:

            perFrameBuffer = new Buffer(Game.GraphicsDevice, new BufferDescription {
Usage = ResourceUsage.Dynamic,
SizeInBytes = PerFrameBuffer.SizeOf,
BindFlags = SharpDX.Direct3D11.BindFlags.ConstantBuffer,
CpuAccessFlags = CpuAccessFlags.Write
});


            //Take a snapshot of the camera properties so that they don't change in mid draw
Vector2 cameraPos = new Vector2(camera.Position.X, camera.Position.Z);
Matrix viewMatrix = camera.View;
Matrix projectionMatrix = camera.Projection;
Vector3 vecEye = camera.Position;

DataBox box = Game.GraphicsDevice.ImmediateContext.MapSubresource(perFrameBuffer, 0, MapMode.WriteDiscard, SharpDX.Direct3D11.MapFlags.None);
var cb = new PerFrameBuffer();
cb.ProjectionView = Matrix.Multiply(viewMatrix,projectionMatrix);
cb.VecEye = new Vector4(vecEye,1);
cb.LightDirection = new Vector4(0.0f, 0.6f, -0.9f, 1);
cb.LightColor = new Vector4(1.0f, 1.0f, 1.0f, 1.0f);

Utilities.Write(box.DataPointer, ref cb);
Game.GraphicsDevice.ImmediateContext.UnmapSubresource(perFrameBuffer, 0);



##### Share on other sites
Bullseye!

cb.ProjectionView = Matrix.Multiply(viewMatrix,projectionMatrix);


Should (usually) be

cb.ProjectionView = Matrix.Transpose(Matrix.Multiply(viewMatrix,projectionMatrix));

EDIT Ooops. put an invert first, sorry. Corrected

Explanation: Shaders default to column-major packing in constant buffers, unless you compile with a flag telling otherwise. There are also corresponding HLSL keywords. So, you have to transpose your matrices this way (The effect framework took care of that automatically).

Tell me if I'm mistaken, but m * identity == m correct?

Yup, thats fine. Identity is "the 1", the matrix multiplications neutral element. Edited by unbird

##### Share on other sites

Dang. I was really hoping that would fix it, but I still am not seeing anything. I'm away from my PIX computer at the moment unfortunately so I can't check to see if the viewport geometry has changed.

Interesting, I had no idea that the effect framework did that automatically. There's probably a lot of stuff that I have no concept to even check.

I have one question about the PIX windows.

The Post-GS window in the above example is almost exactly what I was hoping to see in my viewport. The triangle shaped gaps are there because I do the hex field in two passes, first with the triangle pointing up(uh..north) and then a pass pointing down, and the displayed geometry is only the first draw call out of two needed for a block of hexes.

Since the final screen positioning is all handled in the domain shader, before the geometry shader, why is the Post-GS window correct and the viewport window so messed up? What could be changing between the GS and the output if the GS is just a pass thru?

##### Share on other sites
Two things:
• Conversion from homogenous to cartesian coordinates aka perspective divide (x/w,y/w,z/w)
• Viewport transformation (denormalizing to fit to your render target)
Note: PIX does sort of a "zoom to extent" for every view except the last, so even if your vertices are in a very big or very small scale, you will see them all.

If your matrices are fine and the viewport too, you should now see something more sensible. But maybe you have still other bugs.

##### Share on other sites

Ooh! I'm getting closer. Check out the PIX. The viewport is now exactly right.

[attachment=14884:PIXpic3.png]

I still am not seeing anything on screen. Just in case the triangle windings are backwards, (I'm using clockwise) I tried to move the camera downward to look up at the terrain, but I was unable to do that. It's hard to know what the camera is doing when you can't see anything.

This is what happens when you code for 3 months straight without debugging, which I did because I didn't really know how to debug in this new environment. You have to debunk 50 unfounded theories one... at... a... time. Thanks everyone for sticking with me, I don't think I could have persevered without your help.

So the geometry appears correct. Period. What else could be the problem?

##### Share on other sites

I remember having a problem with my GShader sometime ago where i didnt set it to null.

This ended up that another call to a different vertex/pixel shader was using the GS Stage too displayed wierd stuff on the screen.

For testing i would set all the stages you are using for your tesselation to null after your are done with drawing.

And disable all other drawing you are doing beside your terrain rendering.

Edited by dxdude

##### Share on other sites

Can you do a PIX pixel history to see where the pixels generated from that geometry get dumped?  Typically you should be looking for something like a viewport not set (which appears not to be the case now) rasterizer state not correct (i.e. backface culling), scissor rect not set correctly (but the default state is neutral, so unless you set one it can't be the problem), and then depth test, stencil test, and blending.

Can you show us the rasterizer state and output merger state that are being used for your tessellation draw call?

##### Share on other sites

dxdude, I have turned off any other draw calls for now such as skybox etc. so what you mentioned should not be an issue.

Jason, I have a blend state, and a depthstencil state. I didn't know about the rasterizer state or the output merger state! So those are in some default condition. I'll have to look into that. As for pixel history, the only thing that shows up on the render window is the background color, so I'm not sure which pixel to click on. I'll try a few when I get a chance.

EDIT: Ok, I've now set this rasterizer state, but I still get no drawing:

            RasterizerStateDescription renderStateDesc = new RasterizerStateDescription();
renderStateDesc.CullMode = CullMode.None;
renderStateDesc.IsFrontCounterClockwise = false;
renderStateDesc.FillMode = FillMode.Solid;
RasterizerState rasterState = new RasterizerState(Game.GraphicsDevice, renderStateDesc);
Game.GraphicsDevice.ImmediateContext.Rasterizer.State = rasterState;


As for the output merger state, I can't find that exact terminology in SharpDX. Does it go by another name?

EDIT2: also, I am clearing the depthstencil buffer with this command for every frame:

       public static void Render()
{
elapsedTime += GameTime.ElapsedTimeLastUpdate;
if (maxFrameRate > 0 && ticksPerFrame > elapsedTime.Ticks)
return;
context.ClearDepthStencilView(depthView, DepthStencilClearFlags.Depth, 1.0f, 0);
context.ClearRenderTargetView(renderTarget, new Color4(0.25f, 0.5f, 1.0f, 1.0f));
//device.Clear(ClearFlags.Target | ClearFlags.ZBuffer, Color.CornflowerBlue, 1.0f, 0);
//device.BeginScene();

foreach (DrawableGameComponent comp in drawableGameComponents)
{
if(comp.Enabled)
comp.Render();
}

//device.EndScene();
swapChain.Present(0, PresentFlags.None);

elapsedTime = TimeSpan.Zero;

}



I'm not sure if these values are carried over from DX9, and that maybe they are not valid anymore.

Edited by cephalo

##### Share on other sites
There are a couple of differences, sometimes more, sometimes less options (no alpha test anymore, and of course no fixed function stuff). But the main difference is that the states come now in whole blocks, are - more logically - set to the corresponding pipeline stage and you can't change single values atomically. But IMO most of it is quite similar.

There's no output merger state. There are two states: blend state and depth stencil state. Jason probably meant that.

Hmmm, ah yeah. I think I found something. Please allow a little detour first.

The parameterless constructor for c# structs will set everything to default ("zero", depending on type to 0, 0.0f, 0.0, null, "none" for flags and enums, etc.).
And: There's no way to change that - you have to know !

This is where object initializers and intellisense come in handy.
Example:
var renderTargetBlendDescription = new RenderTargetBlendDescription()
{
BlendEnable = true,
x                                       // <- cursor position, type any letter here
};

Intellisense will then list all properties you haven't used so far. This way you won't miss any.

Long story short: If you're still using that blendstate here, you got a problem: It has a RenderTargetWriteMask of "None", meaning you don't write anything at all. You probably want ColorWriteMaskFlags.All.

Check your other state creations too.

PS: Grrrr, I'm so close to kill that forum editor, it even changes non-WISIWIG previews

##### Share on other sites

Wow that's good to know. I assumed that stuff I didn't set would have some sensible default value. The problem here is that there are tons of parameters, and I only know what a few of them are for. I did my best guess on what to put in there, and I still see nothing. Here are the new state creations, the first is in my initialization method and the second two are in my draw call:

            RasterizerStateDescription renderStateDesc = new RasterizerStateDescription
{
CullMode = CullMode.None,
DepthBias = 0,
DepthBiasClamp = 0,
FillMode = FillMode.Solid,
IsAntialiasedLineEnabled = false,
IsDepthClipEnabled = true,
IsFrontCounterClockwise = false,
IsMultisampleEnabled = true,
IsScissorEnabled = false,
SlopeScaledDepthBias = 0
};
RasterizerState rasterState = new RasterizerState(Game.GraphicsDevice, renderStateDesc);
Game.GraphicsDevice.ImmediateContext.Rasterizer.State = rasterState;

...

RenderTargetBlendDescription rendBlendDesc = new RenderTargetBlendDescription
{
DestinationAlphaBlend = BlendOption.Zero,
DestinationBlend = BlendOption.Zero,
IsBlendEnabled = true,
SourceAlphaBlend = BlendOption.One,
SourceBlend = BlendOption.One
};
BlendStateDescription blendDesc = new BlendStateDescription
{
AlphaToCoverageEnable = false,
IndependentBlendEnable = false
};
blendDesc.RenderTarget[0] = rendBlendDesc;
BlendState blendState = new BlendState(Game.GraphicsDevice, blendDesc);
Game.GraphicsDevice.ImmediateContext.OutputMerger.SetBlendState(blendState);

DepthStencilStateDescription depthDesc = new DepthStencilStateDescription
{
BackFace = new DepthStencilOperationDescription
{
Comparison = Comparison.Less,
DepthFailOperation = StencilOperation.Keep,
FailOperation = StencilOperation.Keep,
PassOperation = StencilOperation.Keep
},
DepthComparison = Comparison.Less,
FrontFace = new DepthStencilOperationDescription
{
Comparison = Comparison.Less,
DepthFailOperation = StencilOperation.Keep,
FailOperation = StencilOperation.Keep,
PassOperation = StencilOperation.Keep
},
IsDepthEnabled = true,
IsStencilEnabled = true,
};
DepthStencilState depthState = new DepthStencilState(Game.GraphicsDevice, depthDesc);
Game.GraphicsDevice.ImmediateContext.OutputMerger.SetDepthStencilState(depthState);



##### Share on other sites

Unbird is right - I meant the depth/stencil state and blend state combined when I referred to the output merger state.  Sorry for the confusion.

Regarding the default parameters, you can take a look at the description pages for each of the states, and they list the true defaults that should be sensible.  For example, here is the blend state description.  Once you get those initialized, it should be fairly easy to proceed with the debugging.

##### Share on other sites
One thing: Disable stencil.

IsStencilEnabled = false,

Also, try setting all states to null for once, maybe the default states will give you at least something. One at a time. Then all.

Edit: Sort of ninja'ed Edited by unbird

• 10
• 12
• 10
• 10
• 11
• ### Similar Content

• Hi, right now building my engine in visual studio involves a shader compiling step to build hlsl 5.0 shaders. I have a separate project which only includes shader sources and the compiler is the visual studio integrated fxc compiler. I like this method because on any PC that has visual studio installed, I can just download the solution from GitHub and everything just builds without additional dependencies and using the latest version of the compiler. I also like it because the shaders are included in the solution explorer and easy to browse, and double-click to open (opening files can be really a pain in the ass in visual studio run in admin mode). Also it's nice that VS displays the build output/errors in the output window.
Anyone with some experience in this?

• Hello!
Have a problem with reflection shader for D3D11:
1>engine_render_d3d11_system.obj : error LNK2001: unresolved external symbol IID_ID3D11ShaderReflection
#include <D3Dcompiler.h>
#include <D3DCompiler.inl>
#pragma comment(lib, "D3DCompiler.lib")
//#pragma comment(lib, "D3DCompiler_47.lib")
As MSDN tells me but still no fortune. I think lot of people did that already, what I missing?
where recommend to use SDK headers and libs before Wind SDK, but I am not using DirectX SDK for this project at all, should I?

• Hi there, this is my first post in what looks to be a very interesting forum.
I am using DirectXTK to put together my 2D game engine but would like to use the GPU depth buffer in order to avoid sorting back-to-front on the CPU and I think I also want to use GPU instancing, so can I do that with SpriteBatch or am I looking at implementing my own sprite rendering?

• I am trying to draw a screen-aligned quad with arbitrary sizes.

currently I just send 4 vertices to the vertex shader like so:
pDevCon->IASetPrimitiveTopology(D3D_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP);
pDevCon->Draw(4, 0);

then in the vertex shader I am doing this:
float4 main(uint vI : SV_VERTEXID) : SV_POSITION
{
float2 texcoord = float2(vI & 1, vI >> 1);
return float4((texcoord.x - 0.5f) * 2, -(texcoord.y - 0.5f) * 2, 0, 1);
}
that gets me a screen-sized quad...ok .. what's the correct way to get arbitrary sizes?...I have messed around with various numbers, but I think I don't quite get something in these relationships.
one thing I tried is:

float4 quad = float4((texcoord.x - (xpos/screensizex)) * (width/screensizex), -(texcoord.y - (ypos/screensizey)) * (height/screensizey), 0, 1);

.. where xpos and ypos is number of pixels from upper right corner..width and height is the desired size of the quad in pixels
this gets me somewhat close, but not right.. a bit too small..so I'm missing something ..any ideas?

.
• By Stewie.G
Hi,
I've been trying to implement a gaussian blur recently, it would seem the best way to achieve this is by running a bur on one axis, then another blur on the other axis.
I think I have successfully implemented the blur part per axis, but now I have to blend both calls with a proper BlendState, at least I think this is where my problem is.
Here are my passes:
D3DX11_TECHNIQUE_DESC techDesc; mBlockEffect->mTech->GetDesc( &techDesc ); for(UINT p = 0; p < techDesc.Passes; ++p) { deviceContext->IASetVertexBuffers(0, 2, bufferPointers, stride, offset); deviceContext->IASetIndexBuffer(mIB, DXGI_FORMAT_R32_UINT, 0); mBlockEffect->mTech->GetPassByIndex(p)->Apply(0, deviceContext); deviceContext->DrawIndexedInstanced(36, mNumberOfActiveCubes, 0, 0, 0); } No blur

PS_BlurV

PS_BlurH

P0 + P1

As you can see, it does not work at all.
I think the issue is in my BlendState, but I am not sure.
I've seen many articles going with the render to texture approach, but I've also seen articles where both shaders were called in succession, and it worked just fine, I'd like to go with that second approach. Unfortunately, the code was in OpenGL where the syntax for running multiple passes is quite different (http://rastergrid.com/blog/2010/09/efficient-gaussian-blur-with-linear-sampling/). So I need some help doing the same in HLSL :-)

Thanks!