ybatra

DX11 Render target is clearing, but primitives are not rendering


// Initialize D3D
bool D3DRenderer::Initialize()
{
    HRESULT hr;
    D3D_DRIVER_TYPE   m_pDriverType;
    D3D_FEATURE_LEVEL m_pFeatureLevel;
 
    D3D_DRIVER_TYPE driverTypes[] =
    {
        D3D_DRIVER_TYPE_HARDWARE,
        D3D_DRIVER_TYPE_WARP,
        D3D_DRIVER_TYPE_REFERENCE,
    };
    UINT numDriverTypes = ARRAYSIZE( driverTypes );
 
    // Feature levels to request when creating the device
    D3D_FEATURE_LEVEL featureLevel[] =
    {
        D3D_FEATURE_LEVEL_11_0,
        D3D_FEATURE_LEVEL_10_1,
        D3D_FEATURE_LEVEL_10_0,
    };
    UINT num = ARRAYSIZE( featureLevel );
 
    // Create device, context and swapchain
    DXGI_SWAP_CHAIN_DESC sdDesc;
    ZeroMemory(&sdDesc, sizeof(sdDesc));
    sdDesc.BufferDesc.ScanlineOrdering        = DXGI_MODE_SCANLINE_ORDER_UNSPECIFIED;
    sdDesc.BufferDesc.Scaling                 = DXGI_MODE_SCALING_UNSPECIFIED;
    sdDesc.BufferDesc.RefreshRate.Numerator   = 60;
    sdDesc.BufferDesc.RefreshRate.Denominator = 1;
    sdDesc.BufferDesc.Width                   = m_pWindow->GetWidth();
    sdDesc.BufferDesc.Height                  = m_pWindow->GetHeight();
    sdDesc.BufferDesc.Format                  = DXGI_FORMAT_R8G8B8A8_UNORM;
    sdDesc.BufferCount                        = 1;
    sdDesc.BufferUsage                        = DXGI_USAGE_RENDER_TARGET_OUTPUT;
    sdDesc.SampleDesc.Count                   = 1;
    sdDesc.SampleDesc.Quality                 = 0;
    sdDesc.OutputWindow                       = m_pWindow->GetHwnd();
    sdDesc.Windowed                           = TRUE;
    
    for( UINT driverTypeIndex = 0; driverTypeIndex < numDriverTypes; driverTypeIndex++ )
    {
        m_pDriverType = driverTypes[driverTypeIndex];
        hr = D3D11CreateDeviceAndSwapChain( NULL, m_pDriverType, NULL, 0, featureLevel, num,
                                            D3D11_SDK_VERSION, &sdDesc, &m_pSwapChain, &m_pDevice,
                                            &m_pFeatureLevel, &m_pDeviceContext );
        if( SUCCEEDED( hr ) )
            break;
    }
    if( FAILED( hr ) )
        return false;
 
    // Make and set D3D11 render target objects
    hr = m_pSwapChain->GetBuffer(0, __uuidof(*m_pRTTexture2D), reinterpret_cast<void**>(&m_pRTTexture2D));
    CHECK_FAILURE(hr, "Could not create render target texture.");
 
    hr = m_pDevice->CreateRenderTargetView(m_pRTTexture2D, NULL, &m_pRTView);
    CHECK_FAILURE(hr, "Could not create render target view.");
 
 
    // Make and set depth stencil
    D3D11_TEXTURE2D_DESC dsTexDesc;
    ZeroMemory(&dsTexDesc, sizeof(dsTexDesc));
    dsTexDesc.Width              = m_pWindow->GetWidth();
    dsTexDesc.Height             = m_pWindow->GetHeight();
    dsTexDesc.MipLevels          = 1;
    dsTexDesc.ArraySize          = 1;
    dsTexDesc.Format             = DXGI_FORMAT_D32_FLOAT;
    dsTexDesc.SampleDesc.Count   = 1;
    dsTexDesc.SampleDesc.Quality = 0;
    dsTexDesc.Usage              = D3D11_USAGE_DEFAULT;
    dsTexDesc.BindFlags          = D3D11_BIND_DEPTH_STENCIL;
    dsTexDesc.CPUAccessFlags     = 0;
    dsTexDesc.MiscFlags          = 0;
    hr = m_pDevice->CreateTexture2D(&dsTexDesc, NULL, &m_pDSTexture2D);
    CHECK_FAILURE(hr, "Could not create depth stencil texture.");
 
    hr = m_pDevice->CreateDepthStencilView(m_pDSTexture2D, NULL, &m_pDSView);
    CHECK_FAILURE(hr, "Could not create depth stencil view.");
 
    m_pDeviceContext->OMSetRenderTargets(1, &m_pRTView, m_pDSView);
 
    // Set depth stencil state
    D3D11_DEPTH_STENCIL_DESC depthStencilDesc;
    ZeroMemory(&depthStencilDesc, sizeof(depthStencilDesc));
    depthStencilDesc.DepthEnable    = TRUE;
    depthStencilDesc.DepthWriteMask = D3D11_DEPTH_WRITE_MASK_ALL;
    depthStencilDesc.DepthFunc      = D3D11_COMPARISON_ALWAYS;
    depthStencilDesc.StencilEnable  = FALSE;
    hr = m_pDevice->CreateDepthStencilState(&depthStencilDesc, &m_pDSState);
    CHECK_FAILURE(hr, "Could not create depth stencil state.");
    m_pDeviceContext->OMSetDepthStencilState(m_pDSState, 0);
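    // Note: D3D11_COMPARISON_ALWAYS makes the depth test pass unconditionally,
    // so this state writes depth values but never rejects pixels based on them.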
 
    // Setup the viewport
    D3D11_VIEWPORT vp;
    ZeroMemory(&vp, sizeof(vp));
    vp.Width  = static_cast<FLOAT>( m_pWindow->GetWidth() );
    vp.Height = static_cast<FLOAT>( m_pWindow->GetHeight() );
    vp.MinDepth = 0.0f;
    vp.MaxDepth = 1.0f;
    vp.TopLeftX = 0;
    vp.TopLeftY = 0;
    m_pDeviceContext->RSSetViewports( 1, &vp );
 
    // Compile the vertex shader
    ID3DBlob* pVSBlob = NULL;
    hr = CompileShaderFromFile( L"Main.fx", "VS", "vs_4_0", &pVSBlob );
    CHECK_FAILURE(hr, "Could not compile vertex shader.");
 
    // Create the vertex shader
    hr = m_pDevice->CreateVertexShader( pVSBlob->GetBufferPointer(), pVSBlob->GetBufferSize(), NULL, &m_pVertexShader );
    CHECK_FAILURE(hr, "Could not create vertex shader.");
 
    // Define the input layout
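    // Element offsets match SimpleVertex: a 12-byte XMFLOAT3 position at offset 0,
    // followed by an XMFLOAT4 color at offset 12.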
    D3D11_INPUT_ELEMENT_DESC layout[] =
    {
        { "POSITION", 0, DXGI_FORMAT_R32G32B32_FLOAT, 0, 0, D3D11_INPUT_PER_VERTEX_DATA, 0 },
        { "COLOR", 0, DXGI_FORMAT_R32G32B32A32_FLOAT, 0, 12, D3D11_INPUT_PER_VERTEX_DATA, 0 },
    };
    UINT numElements = sizeof( layout ) / sizeof( layout[0] );
 
    // Create the input layout
    hr = m_pDevice->CreateInputLayout( layout, numElements, pVSBlob->GetBufferPointer(),
                                       pVSBlob->GetBufferSize(), &m_pVertexLayout );
    pVSBlob->Release();
    CHECK_FAILURE(hr, "Could not create input layout.");
 
    // Set the input layout
    m_pDeviceContext->IASetInputLayout( m_pVertexLayout );
 
    // Compile the pixel shader
    ID3DBlob* pPSBlob = NULL;
    hr = CompileShaderFromFile( L"Main.fx", "PS", "ps_4_0", &pPSBlob );
    CHECK_FAILURE(hr, "Could not compile pixel shader.");
 
    // Create the pixel shader
    hr = m_pDevice->CreatePixelShader( pPSBlob->GetBufferPointer(), pPSBlob->GetBufferSize(), NULL, &m_pPixelShader );
    pPSBlob->Release();
    CHECK_FAILURE(hr, "Could not create pixel shader.");
 
    // Create vertex buffer
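    // 12 vertices: two quads (6 vertices each) sharing the same positions;
    // the first quad is red, the second is white apart from one yellow vertex.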
    SimpleVertex vertices[] =
    {
        { XMFLOAT3( -1.0f, 1.0f, 0.0f ), XMFLOAT4( 1.0f, 0.0f, 0.0f, 1.0f ) },
        { XMFLOAT3( 1.0f, 1.0f, 0.0f ), XMFLOAT4( 1.0f, 0.0f, 0.0f, 1.0f ) },
        { XMFLOAT3( -1.0f,-1.0f, 1.0f ), XMFLOAT4( 1.0f, 0.0f, 0.0f, 1.0f ) },
        { XMFLOAT3( -1.0f, -1.0f, 1.0f ), XMFLOAT4( 1.0f, 0.0f, 0.0f, 1.0f ) },
        { XMFLOAT3( 1.0f, 1.0f, 0.0f ), XMFLOAT4( 1.0f, 0.0f, 0.0f, 1.0f ) },
        { XMFLOAT3( 1.0f,-1.0f, 1.0f ), XMFLOAT4( 1.0f, 0.0f, 0.0f, 1.0f ) },
 
        { XMFLOAT3( -1.0f, 1.0f, 0.0f ), XMFLOAT4( 1.0f, 1.0f, 1.0f, 1.0f ) },
        { XMFLOAT3( 1.0f, 1.0f, 0.0f ), XMFLOAT4( 1.0f, 1.0f, 1.0f, 1.0f ) },
        { XMFLOAT3( -1.0f,-1.0f, 1.0f ), XMFLOAT4( 1.0f, 1.0f, 1.0f, 1.0f ) },
        { XMFLOAT3( -1.0f, -1.0f, 1.0f ), XMFLOAT4( 1.0f, 1.0f, 1.0f, 1.0f ) },
        { XMFLOAT3( 1.0f, 1.0f, 0.0f ), XMFLOAT4( 1.0f, 1.0f, 0.0f, 1.0f ) },
        { XMFLOAT3( 1.0f,-1.0f, 1.0f ), XMFLOAT4( 1.0f, 1.0f, 1.0f, 1.0f ) },
    };
 
    D3D11_BUFFER_DESC bd;
    ZeroMemory(&bd, sizeof(bd));
    bd.Usage          = D3D11_USAGE_DEFAULT;
    bd.ByteWidth      = sizeof( vertices );
    bd.BindFlags      = D3D11_BIND_VERTEX_BUFFER;
    bd.CPUAccessFlags = 0;
    bd.MiscFlags      = 0;
 
    D3D11_SUBRESOURCE_DATA InitData;
    ZeroMemory(&InitData, sizeof(InitData));
    InitData.pSysMem = vertices;
    hr = m_pDevice->CreateBuffer( &bd, &InitData, &m_pVertexBuffer );
    CHECK_FAILURE(hr, "Could not create vertex buffer.");
 
    // Set vertex buffer
    UINT stride = sizeof( SimpleVertex );
    UINT offset = 0;
    m_pDeviceContext->IASetVertexBuffers( 0, 1, &m_pVertexBuffer, &stride, &offset );
 
    // Set primitive topology
    m_pDeviceContext->IASetPrimitiveTopology(D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST);
 
 
    // Create and set a rasterizer state with back-face culling disabled
    D3D11_RASTERIZER_DESC RSdesc;
    ZeroMemory(&RSdesc, sizeof(RSdesc));
    RSdesc.FillMode              = D3D11_FILL_SOLID;
    RSdesc.CullMode              = D3D11_CULL_NONE;
    RSdesc.FrontCounterClockwise = FALSE;
    RSdesc.DepthClipEnable       = TRUE;
    RSdesc.ScissorEnable         = FALSE;
    RSdesc.MultisampleEnable     = FALSE;
    RSdesc.AntialiasedLineEnable = FALSE;
 
    hr = m_pDevice->CreateRasterizerState(&RSdesc, &m_pRasterStateNoCulling);
    CHECK_FAILURE(hr, "Could not create rasterizer state.");
    m_pDeviceContext->RSSetState(m_pRasterStateNoCulling);
 
    // Create texture for staging resource (used to read the render target back on the CPU)
    D3D11_TEXTURE2D_DESC descStaging;
    ZeroMemory(&descStaging, sizeof(descStaging));
    descStaging.Width              = m_pWindow->GetWidth();
    descStaging.Height             = m_pWindow->GetHeight();
    descStaging.MipLevels          = 1;
    descStaging.ArraySize          = 1;
    descStaging.Format             = DXGI_FORMAT_R8G8B8A8_UNORM;
    descStaging.SampleDesc.Count   = 1;
    descStaging.SampleDesc.Quality = 0;
    descStaging.Usage              = D3D11_USAGE_STAGING;
    descStaging.BindFlags          = 0;
    descStaging.CPUAccessFlags     = D3D11_CPU_ACCESS_READ;
    descStaging.MiscFlags          = 0;
    hr = m_pDevice->CreateTexture2D(&descStaging, NULL, &m_sRTTexture2D);
    CHECK_FAILURE(hr, "Could not create texture for staging resource.");
 
    return true;
}
 
//--------------------------------------------------------------------------------------
// Helper for compiling shaders with the D3DCompiler API
//--------------------------------------------------------------------------------------
HRESULT D3DRenderer::CompileShaderFromFile( WCHAR* szFileName, LPCSTR szEntryPoint, LPCSTR szShaderModel, ID3DBlob** ppBlobOut )
{
    HRESULT hr = S_OK;
 
    DWORD dwShaderFlags = D3DCOMPILE_ENABLE_STRICTNESS;
#if defined( DEBUG ) || defined( _DEBUG )
    // Set the D3DCOMPILE_DEBUG flag to embed debug information in the shaders.
    // Setting this flag improves the shader debugging experience, but still allows 
    // the shaders to be optimized and to run exactly the way they will run in 
    // the release configuration of this program.
    dwShaderFlags |= D3DCOMPILE_DEBUG;
#endif
 
    ID3DBlob* pErrorBlob = NULL;
    hr = D3DCompileFromFile(szFileName, NULL, NULL, szEntryPoint, szShaderModel, 
        dwShaderFlags, 0, ppBlobOut, &pErrorBlob);
    if( FAILED(hr) )
    {
        if( pErrorBlob != NULL )
            OutputDebugStringA( (char*)pErrorBlob->GetBufferPointer() );
        if( pErrorBlob ) pErrorBlob->Release();
        return hr;
    }
    if( pErrorBlob ) pErrorBlob->Release();
 
    return S_OK;
}

void D3DRenderer::Run(TestCase* pTest)
{
    strcpy_s(m_lastError, "");
    ClearRenderTargetView();
    ClearDepthStencilView();
 
    // Render before NVAPI test enabled
    m_pDeviceContext->VSSetShader( m_pVertexShader, NULL, 0 );
    m_pDeviceContext->PSSetShader( m_pPixelShader, NULL, 0 );
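    // Draw the first 6 vertices of the bound vertex buffer,
    // i.e. the two triangles of the first (red) quad defined in Initialize()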
    m_pDeviceContext->Draw( 6, 0 );

}

You will not get many responses with a post like this.  In general, you should try to describe the problem more thoroughly (and not only in the subject line...).  What have you tried, and what symptoms do you see?  Where did your code come from - was it based on a known working program, or is it completely new code?

 

Information like this will help others help you.

I tried rendering some geometry onto the back buffer with Z testing enabled. I found that ClearRenderTargetView() is clearing the render target (the back buffer), but the draw call is not succeeding; the primitives are not being drawn onto the back buffer.


Even when I disabled Z testing, I still got the same result.
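
One way to get more information when a draw call produces no output is to create the device with the D3D11 debug layer enabled and watch the debug output for warnings about unbound resources, mismatched input layouts, or discarded draws. A minimal sketch, assuming the same variables used in Initialize() above (m_pDriverType, featureLevel, num, sdDesc and the device/swap-chain members):

// Sketch only: enable the D3D11 debug layer during device creation.
// Assumes the variables from Initialize(); requires the SDK layers to be installed.
UINT createFlags = 0;
#if defined( DEBUG ) || defined( _DEBUG )
createFlags |= D3D11_CREATE_DEVICE_DEBUG;
#endif

hr = D3D11CreateDeviceAndSwapChain( NULL, m_pDriverType, NULL, createFlags, featureLevel, num,
                                    D3D11_SDK_VERSION, &sdDesc, &m_pSwapChain, &m_pDevice,
                                    &m_pFeatureLevel, &m_pDeviceContext );

// With the debug layer active, the runtime prints warnings and errors to the
// debugger output window whenever pipeline state is set up incorrectly for a draw.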
