DX11 pixel shader with multiple render targets

Recommended Posts

How do you define a pixel shader so it outputs to more than one render target? I'm trying this:

struct PS_P_OUTPUT
{
    float4 col0 : SV_Target0;
    float4 col1 : SV_Target1;
};


But it's not working. What have I got wrong?

That is correct... So you're either not correctly binding the render targets or something is wrong in your pixel shader...

-How are you binding multiple render targets?

-Can you post your pixel shader?
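
For reference, a minimal sketch of the usual binding pattern (rtv0, rtv1, and dsv here are placeholder views, not names from this thread):

[source]
// Bind two render targets in a single OMSetRenderTargets call;
// SV_Target0 writes to targets[0], SV_Target1 to targets[1].
ID3D11RenderTargetView* targets[2] = { rtv0, rtv1 };
dc->OMSetRenderTargets(2, targets, dsv);
[/source]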

The shader:

struct V2S_INPUT
{
    float4 pc : TEXCOORD0;
    // float2 c : TEXCOORD1;
};

struct P2S_INPUT
{
    float4 pos  : SV_POSITION;
    float3 nor  : TEXCOORD0;
    float3 wpos : TEXCOORD1;
};

struct G2S_INPUT
{
    float4 pos : POSITION;
    float3 nor : TEXCOORD0;
};

struct PS_P_OUTPUT
{
    float4 col0 : SV_Target0;
    float4 col1 : SV_Target1;
};


G2S_INPUT VS_P(V2S_INPUT Input)
{
    G2S_INPUT Output;

    // Reconstruct the voxel's world-space position from the packed
    // vertex data; pc.w is replicated into the normal.
    Output.pos = float4(float3(Input.pc.x * 255, Input.pc.y * 255, Input.pc.z * 255) * voxel_size + chunk_pos, 1);
    Output.nor = float3(Input.pc.w, Input.pc.w, Input.pc.w);

    return Output;
}



[maxvertexcount(4)]
void GS_P(point G2S_INPUT In[1], inout TriangleStream<P2S_INPUT> TriStream)
{
    P2S_INPUT Out;

    Out.nor.xyz = In[0].nor.xyz;

    // Camera-aligned axes taken from the view matrix, used to expand
    // each point into a screen-facing billboard quad (triangle strip).
    float3 up    = float3(view._12, view._22, view._32);
    float3 right = float3(view._11, view._21, view._31);

    float3 ppos;

    ppos = In[0].pos.xyz + up * voxel_size * 1.25f;
    Out.pos = mul(float4(ppos, 1), wvp);
    Out.wpos = In[0].pos.xyz;
    TriStream.Append(Out);

    ppos = In[0].pos.xyz + right * voxel_size * 1.25f + up * voxel_size * 1.25f;
    Out.pos = mul(float4(ppos, 1), wvp);
    Out.wpos = In[0].pos.xyz;
    TriStream.Append(Out);

    ppos = In[0].pos.xyz;
    Out.pos = mul(float4(ppos, 1), wvp);
    Out.wpos = In[0].pos.xyz;
    TriStream.Append(Out);

    ppos = In[0].pos.xyz + right * voxel_size * 1.25f;
    Out.pos = mul(float4(ppos, 1), wvp);
    Out.wpos = In[0].pos.xyz;
    TriStream.Append(Out);

    TriStream.RestartStrip();
}

PS_P_OUTPUT PS_P(P2S_INPUT Input)
{
    PS_P_OUTPUT pspo;

    // Target 0 gets the normal, target 1 gets the world-space position.
    pspo.col0 = float4(Input.nor, 1);
    pspo.col1 = float4(Input.wpos, 1);

    return pspo;
}



Binding the render targets:

// Set multiple render targets.
RTV mrt[2];

mrt[0] = rtv;
mrt[1] = ws_r;

dc->OMSetRenderTargets(2, mrt, dsv);
dc->ClearRenderTargetView(rtv, ClearColor);
dc->ClearRenderTargetView(ws_r, ClearColor);
dc->ClearDepthStencilView(dsv, D3D11_CLEAR_DEPTH, 1.0f, 0);




It's actually rendering the first render target, but not the second... If I switch them around it draws the opposite colour, but for some reason it won't draw the second target...
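
One hedged debugging suggestion: when one MRT slot silently receives nothing, the D3D11 debug layer will usually print a warning explaining why (for example, a resource still bound as a shader resource when it is set as a render target). A minimal sketch of enabling it at device creation, assuming a swap chain description `scd` alongside the `sc`/`dev`/`dc` names used in this thread:

[source]
// Create the device with the debug layer so binding hazards and
// invalid-parameter errors show up in the debug output window.
UINT flags = 0;
#ifdef _DEBUG
flags |= D3D11_CREATE_DEVICE_DEBUG;
#endif
HRESULT hr = D3D11CreateDeviceAndSwapChain(
    NULL, D3D_DRIVER_TYPE_HARDWARE, NULL, flags,
    NULL, 0, D3D11_SDK_VERSION,
    &scd, &sc, &dev, NULL, &dc);
[/source]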

The code you posted looks correct... How are you creating the render targets?

Also, can you use the [.source] or [.code] tags (without the dot)? So your source looks like this:
[source]
PS_P_OUTPUT PS_P(P2S_INPUT Input)
{
    PS_P_OUTPUT pspo;

    pspo.col0 = float4(Input.nor, 1);
    pspo.col1 = float4(Input.wpos, 1);

    return pspo;
}
[/source]

P.S: Why are you trying to store the world position?

I need world space because I'm writing a brush that paints 3D points, so I need the world position to place the brush sphere on the surface.
Note I could just render the whole thing again, but that would be computationally expensive, as it's a point cloud (rendered as lots of billboards, hence the GS)...
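
(For what it's worth, a sketch of how that world-position target could then be read back on the CPU to place the brush; `ws_tex` stands for the ID3D11Texture2D behind `ws_r`, and `mx`/`my` for the cursor position — these names are assumptions, not code from this thread:)

[source]
// Copy the texel under the cursor into a 1x1 staging texture, then map it.
D3D11_TEXTURE2D_DESC sdesc;
memset(&sdesc, 0, sizeof(sdesc));
sdesc.Width = 1;
sdesc.Height = 1;
sdesc.MipLevels = 1;
sdesc.ArraySize = 1;
sdesc.Format = DXGI_FORMAT_R32G32B32A32_FLOAT; // must match the target's format
sdesc.SampleDesc.Count = 1;
sdesc.Usage = D3D11_USAGE_STAGING;
sdesc.CPUAccessFlags = D3D11_CPU_ACCESS_READ;

ID3D11Texture2D* staging = NULL;
dev->CreateTexture2D(&sdesc, NULL, &staging);

// Source box: left, top, front, right, bottom, back.
D3D11_BOX box = { (UINT)mx, (UINT)my, 0, (UINT)mx + 1, (UINT)my + 1, 1 };
dc->CopySubresourceRegion(staging, 0, 0, 0, 0, ws_tex, 0, &box);

D3D11_MAPPED_SUBRESOURCE mapped;
if (SUCCEEDED(dc->Map(staging, 0, D3D11_MAP_READ, 0, &mapped)))
{
    float* wpos = (float*)mapped.pData; // xyz = world position, w = 1
    // ... place the brush sphere at wpos ...
    dc->Unmap(staging, 0);
}
staging->Release();
[/source]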

Here's the second render target.
I wrote this myself.
scw and sch are the screen dimensions, the same as the main render target.

[source]
if (1)
{
    int size_x = scw;
    int size_y = sch;
    ID3D11Texture2D* pTexture2D = NULL;

    // Describe a texture usable as both a render target and a shader resource.
    D3D11_TEXTURE2D_DESC desc;
    memset(&desc, 0, sizeof(D3D11_TEXTURE2D_DESC));
    desc.Width = (UINT)size_x;
    desc.Height = (UINT)size_y;
    desc.MipLevels = 1;
    desc.ArraySize = 1;
    desc.Format = DXGI_FORMAT_R32G32B32A32_FLOAT;
    desc.SampleDesc.Count = 1;
    desc.Usage = D3D11_USAGE_DEFAULT;
    desc.BindFlags = D3D11_BIND_SHADER_RESOURCE | D3D11_BIND_RENDER_TARGET;
    desc.CPUAccessFlags = 0;

    D3D11_SHADER_RESOURCE_VIEW_DESC srvDesc;
    srvDesc.Format = desc.Format;
    srvDesc.ViewDimension = D3D11_SRV_DIMENSION_TEXTURE2D;
    srvDesc.Texture2D.MostDetailedMip = 0;
    srvDesc.Texture2D.MipLevels = desc.MipLevels;

    D3D11_RENDER_TARGET_VIEW_DESC rtvdesc;
    memset(&rtvdesc, 0, sizeof(D3D11_RENDER_TARGET_VIEW_DESC));
    rtvdesc.Format = desc.Format;
    rtvdesc.ViewDimension = D3D11_RTV_DIMENSION_TEXTURE2D;

    // Create the texture plus its shader resource and render target views.
    dev->CreateTexture2D(&desc, NULL, &pTexture2D);
    dev->CreateShaderResourceView(pTexture2D, &srvDesc, &ws);
    dev->CreateRenderTargetView(pTexture2D, &rtvdesc, &ws_r);
}

[/source]
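
One thing worth flagging in the block above: the three creation calls discard their HRESULTs, so if any of them failed (say, an unsupported format/bind combination), `ws_r` would silently stay NULL and slot 1 would draw nothing. A sketch of checking them, using the same names:

[source]
HRESULT hr = dev->CreateTexture2D(&desc, NULL, &pTexture2D);
if (SUCCEEDED(hr))
    hr = dev->CreateShaderResourceView(pTexture2D, &srvDesc, &ws);
if (SUCCEEDED(hr))
    hr = dev->CreateRenderTargetView(pTexture2D, &rtvdesc, &ws_r);
if (FAILED(hr))
    OutputDebugStringA("second render target creation failed\n");
[/source]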



Here's the first.
It's different because I created it when I created the device (copy-pasted from a sample).
You get it from the swap chain (sc).

[source]

// Create a render target view
TEX pBackBuffer = NULL;
hr = sc->GetBuffer(0, __uuidof( ID3D11Texture2D ), ( LPVOID* )&pBackBuffer);

hr = dev->CreateRenderTargetView(pBackBuffer, NULL, &rtv);
pBackBuffer->Release();

[/source]
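
As an aside, the explicit rtvdesc in the earlier block is optional: just like this backbuffer case, passing NULL lets D3D11 derive the view description from the texture itself. A sketch with the earlier names:

[source]
// Equivalent view creation; the description is inferred from pTexture2D.
dev->CreateRenderTargetView(pTexture2D, NULL, &ws_r);
[/source]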

So as you can see, I am without a doubt confused... I actually did it mostly right... there must be something small stopping it from working.

Note, this is my first time ever using MRT, so it's understandable I stuffed it up in some small way.

Thanks for the help though, TiagoCosta.
