
Search the Community

Showing results for tags 'DX11' in content posted in Graphics and GPU Programming.




Found 1418 results

  1. I'm trying to duplicate vertices using std::map to be used in a vertex buffer. I don't get the correct index buffer(myInds) or vertex buffer(myVerts). I can get the index array from FBX but it differs from what I get in the following std::map code. Any help is much appreciated. struct FBXVTX { XMFLOAT3 Position; XMFLOAT2 TextureCoord; XMFLOAT3 Normal; }; std::map< FBXVTX, int > myVertsMap; std::vector<FBXVTX> myVerts; std::vector<int> myInds; HRESULT FBXLoader::Open(HWND hWnd, char* Filename, bool UsePositionOnly) { HRESULT hr = S_OK; if (FBXM) { FBXIOS = FbxIOSettings::Create(FBXM, IOSROOT); FBXM->SetIOSettings(FBXIOS); FBXI = FbxImporter::Create(FBXM, ""); if (!(FBXI->Initialize(Filename, -1, FBXIOS))) { hr = E_FAIL; MessageBox(hWnd, (wchar_t*)FBXI->GetStatus().GetErrorString(), TEXT("ALM"), MB_OK); } FBXS = FbxScene::Create(FBXM, "REALMS"); if (!FBXS) { hr = E_FAIL; MessageBox(hWnd, TEXT("Failed to create the scene"), TEXT("ALM"), MB_OK); } if (!(FBXI->Import(FBXS))) { hr = E_FAIL; MessageBox(hWnd, TEXT("Failed to import fbx file content into the scene"), TEXT("ALM"), MB_OK); } FbxAxisSystem OurAxisSystem = FbxAxisSystem::DirectX; FbxAxisSystem SceneAxisSystem = FBXS->GetGlobalSettings().GetAxisSystem(); if(SceneAxisSystem != OurAxisSystem) { FbxAxisSystem::DirectX.ConvertScene(FBXS); } FbxSystemUnit SceneSystemUnit = FBXS->GetGlobalSettings().GetSystemUnit(); if( SceneSystemUnit.GetScaleFactor() != 1.0 ) { FbxSystemUnit::cm.ConvertScene( FBXS ); } if (FBXI) FBXI->Destroy(); FbxNode* MainNode = FBXS->GetRootNode(); int NumKids = MainNode->GetChildCount(); FbxNode* ChildNode = NULL; for (int i=0; i<NumKids; i++) { ChildNode = MainNode->GetChild(i); FbxNodeAttribute* NodeAttribute = ChildNode->GetNodeAttribute(); if (NodeAttribute->GetAttributeType() == FbxNodeAttribute::eMesh) { FbxMesh* Mesh = ChildNode->GetMesh(); if (UsePositionOnly) { NumVertices = Mesh->GetControlPointsCount();//number of vertices MyV = new XMFLOAT3[NumVertices]; for (DWORD j = 0; j < NumVertices; j++) { FbxVector4 Vertex = Mesh->GetControlPointAt(j);//Gets the control point at the specified index. MyV[j] = XMFLOAT3((float)Vertex.mData[0], (float)Vertex.mData[1], (float)Vertex.mData[2]); } NumIndices = Mesh->GetPolygonVertexCount();//number of indices MyI = (DWORD*)Mesh->GetPolygonVertices();//index array } else { FbxLayerElementArrayTemplate<FbxVector2>* uvVertices = NULL; Mesh->GetTextureUV(&uvVertices); int idx = 0; for (int i = 0; i < Mesh->GetPolygonCount(); i++)//polygon(=mostly triangle) count { for (int j = 0; j < Mesh->GetPolygonSize(i); j++)//retrieves number of vertices in a polygon { FBXVTX myVert; int p_index = 3*i+j; int t_index = Mesh->GetTextureUVIndex(i, j); FbxVector4 Vertex = Mesh->GetControlPointAt(p_index);//Gets the control point at the specified index. 
myVert.Position = XMFLOAT3((float)Vertex.mData[0], (float)Vertex.mData[1], (float)Vertex.mData[2]); FbxVector4 Normal; Mesh->GetPolygonVertexNormal(i, j, Normal); myVert.Normal = XMFLOAT3((float)Normal.mData[0], (float)Normal.mData[1], (float)Normal.mData[2]); FbxVector2 uv = uvVertices->GetAt(t_index); myVert.TextureCoord = XMFLOAT2((float)uv.mData[0], (float)uv.mData[1]); if ( myVertsMap.find( myVert ) != myVertsMap.end() ) myInds.push_back( myVertsMap[ myVert ]); else { myVertsMap.insert( std::pair<FBXVTX, int> (myVert, idx ) ); myVerts.push_back(myVert); myInds.push_back(idx); idx++; } } } } } } } else { hr = E_FAIL; MessageBox(hWnd, TEXT("Failed to create the FBX Manager"), TEXT("ALM"), MB_OK); } return hr; } bool operator < ( const FBXVTX &lValue, const FBXVTX &rValue) { if (lValue.Position.x != rValue.Position.x) return(lValue.Position.x < rValue.Position.x); if (lValue.Position.y != rValue.Position.y) return(lValue.Position.y < rValue.Position.y); if (lValue.Position.z != rValue.Position.z) return(lValue.Position.z < rValue.Position.z); if (lValue.TextureCoord.x != rValue.TextureCoord.x) return(lValue.TextureCoord.x < rValue.TextureCoord.x); if (lValue.TextureCoord.y != rValue.TextureCoord.y) return(lValue.TextureCoord.y < rValue.TextureCoord.y); if (lValue.Normal.x != rValue.Normal.x) return(lValue.Normal.x < rValue.Normal.x); if (lValue.Normal.y != rValue.Normal.y) return(lValue.Normal.y < rValue.Normal.y); return(lValue.Normal.z < rValue.Normal.z); }
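One thing worth checking here: FbxMesh::GetControlPointAt() expects a control point index, while 3*i+j indexes the flattened polygon-vertex list, so the two only coincide by accident. A minimal sketch of the likely fix, reusing the identifiers from the post above and assuming the mesh is triangulated:

```cpp
// Inside the per-corner loop: ask the FBX SDK which control point this
// polygon corner refers to, instead of assuming it is 3*i + j.
int cpIndex = Mesh->GetPolygonVertex(i, j);   // control point index for polygon i, corner j
if (cpIndex < 0)
    continue;                                 // unmapped corner, skip it

FbxVector4 Vertex = Mesh->GetControlPointAt(cpIndex);
myVert.Position = XMFLOAT3((float)Vertex.mData[0],
                           (float)Vertex.mData[1],
                           (float)Vertex.mData[2]);
```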
  2. Hi, I've been trying to implement a Gaussian blur recently. It would seem the best way to achieve this is by running a blur on one axis, then another blur on the other axis. I think I have successfully implemented the blur part per axis, but now I have to blend both calls with a proper BlendState; at least I think this is where my problem is. Here are my passes: RasterizerState DisableCulling { CullMode = BACK; }; BlendState AdditiveBlend { BlendEnable[0] = TRUE; BlendEnable[1] = TRUE; SrcBlend[0] = SRC_COLOR; BlendOp[0] = ADD; BlendOp[1] = ADD; SrcBlend[1] = SRC_COLOR; }; technique11 BlockTech { pass P0 { SetVertexShader(CompileShader(vs_5_0, VS())); SetGeometryShader(NULL); SetPixelShader(CompileShader(ps_5_0, PS_BlurV())); SetRasterizerState(DisableCulling); SetBlendState(AdditiveBlend, float4(0.0, 0.0, 0.0, 0.0), 0xffffffff); } pass P1 { SetVertexShader(CompileShader(vs_5_0, VS())); SetGeometryShader(NULL); SetPixelShader(CompileShader(ps_5_0, PS_BlurH())); SetRasterizerState(DisableCulling); } } D3DX11_TECHNIQUE_DESC techDesc; mBlockEffect->mTech->GetDesc( &techDesc ); for(UINT p = 0; p < techDesc.Passes; ++p) { deviceContext->IASetVertexBuffers(0, 2, bufferPointers, stride, offset); deviceContext->IASetIndexBuffer(mIB, DXGI_FORMAT_R32_UINT, 0); mBlockEffect->mTech->GetPassByIndex(p)->Apply(0, deviceContext); deviceContext->DrawIndexedInstanced(36, mNumberOfActiveCubes, 0, 0, 0); } (Attached screenshots: no blur, PS_BlurV only, PS_BlurH only, and P0 + P1 combined.) As you can see, it does not work at all. I think the issue is in my BlendState, but I am not sure. I've seen many articles going with the render-to-texture approach, but I've also seen articles where both shaders were called in succession and it worked just fine, and I'd like to go with that second approach. Unfortunately, the code was in OpenGL, where the syntax for running multiple passes is quite different (http://rastergrid.com/blog/2010/09/efficient-gaussian-blur-with-linear-sampling/). So I need some help doing the same in HLSL :-) Thanks!
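One detail that may explain the result: in a D3D11 blend state, fields you do not set keep their defaults, and the default DestBlend is ZERO, so the AdditiveBlend state above never lets the destination contribute (it is not actually additive). A conventional additive blend in effect syntax would look something like the sketch below. Note, though, that even with a correct blend state, additively blending two blur passes of the same source is not equivalent to the usual separable blur, which filters the output of the first pass via an intermediate render target.

```hlsl
// Hedged sketch: a conventional additive blend for render target 0.
// SrcBlend/DestBlend of ONE means "new colour + whatever is already in the target".
BlendState AdditiveBlend
{
    BlendEnable[0] = TRUE;
    SrcBlend[0]    = ONE;
    DestBlend[0]   = ONE;
    BlendOp[0]     = ADD;
};
```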
  3. Hello! I would like to introduce Diligent Engine, a project that I've been recently working on. Diligent Engine is a light-weight cross-platform abstraction layer between the application and the platform-specific graphics API. Its main goal is to take advantages of the next-generation APIs such as Direct3D12 and Vulkan, but at the same time provide support for older platforms via Direct3D11, OpenGL and OpenGLES. Diligent Engine exposes common front-end for all supported platforms and provides interoperability with underlying native API. Shader source code converter allows shaders authored in HLSL to be translated to GLSL and used on all platforms. Diligent Engine supports integration with Unity and is designed to be used as a graphics subsystem in a standalone game engine, Unity native plugin or any other 3D application. It is distributed under Apache 2.0 license and is free to use. Full source code is available for download on GitHub. Features: True cross-platform Exact same client code for all supported platforms and rendering backends No #if defined(_WIN32) ... #elif defined(LINUX) ... #elif defined(ANDROID) ... No #if defined(D3D11) ... #elif defined(D3D12) ... #elif defined(OPENGL) ... Exact same HLSL shaders run on all platforms and all backends Modular design Components are clearly separated logically and physically and can be used as needed Only take what you need for your project (do not want to keep samples and tutorials in your codebase? Simply remove Samples submodule. Only need core functionality? Use only Core submodule) No 15000 lines-of-code files Clear object-based interface No global states Key graphics features: Automatic shader resource binding designed to leverage the next-generation rendering APIs Multithreaded command buffer generation 50,000 draw calls at 300 fps with D3D12 backend Descriptor, memory and resource state management Modern c++ features to make code fast and reliable The following platforms and low-level APIs are currently supported: Windows Desktop: Direct3D11, Direct3D12, OpenGL Universal Windows: Direct3D11, Direct3D12 Linux: OpenGL Android: OpenGLES MacOS: OpenGL iOS: OpenGLES API Basics Initialization The engine can perform initialization of the API or attach to already existing D3D11/D3D12 device or OpenGL/GLES context. For instance, the following code shows how the engine can be initialized in D3D12 mode: #include "RenderDeviceFactoryD3D12.h" using namespace Diligent; // ... GetEngineFactoryD3D12Type GetEngineFactoryD3D12 = nullptr; // Load the dll and import GetEngineFactoryD3D12() function LoadGraphicsEngineD3D12(GetEngineFactoryD3D12); auto *pFactoryD3D11 = GetEngineFactoryD3D12(); EngineD3D12Attribs EngD3D12Attribs; EngD3D12Attribs.CPUDescriptorHeapAllocationSize[0] = 1024; EngD3D12Attribs.CPUDescriptorHeapAllocationSize[1] = 32; EngD3D12Attribs.CPUDescriptorHeapAllocationSize[2] = 16; EngD3D12Attribs.CPUDescriptorHeapAllocationSize[3] = 16; EngD3D12Attribs.NumCommandsToFlushCmdList = 64; RefCntAutoPtr<IRenderDevice> pRenderDevice; RefCntAutoPtr<IDeviceContext> pImmediateContext; SwapChainDesc SwapChainDesc; RefCntAutoPtr<ISwapChain> pSwapChain; pFactoryD3D11->CreateDeviceAndContextsD3D12( EngD3D12Attribs, &pRenderDevice, &pImmediateContext, 0 ); pFactoryD3D11->CreateSwapChainD3D12( pRenderDevice, pImmediateContext, SwapChainDesc, hWnd, &pSwapChain ); Creating Resources Device resources are created by the render device. 
The two main resource types are buffers, which represent linear memory, and textures, which use memory layouts optimized for fast filtering. To create a buffer, you need to populate BufferDesc structure and call IRenderDevice::CreateBuffer(). The following code creates a uniform (constant) buffer: BufferDesc BuffDesc; BufferDesc.Name = "Uniform buffer"; BuffDesc.BindFlags = BIND_UNIFORM_BUFFER; BuffDesc.Usage = USAGE_DYNAMIC; BuffDesc.uiSizeInBytes = sizeof(ShaderConstants); BuffDesc.CPUAccessFlags = CPU_ACCESS_WRITE; m_pDevice->CreateBuffer( BuffDesc, BufferData(), &m_pConstantBuffer ); Similar, to create a texture, populate TextureDesc structure and call IRenderDevice::CreateTexture() as in the following example: TextureDesc TexDesc; TexDesc.Name = "My texture 2D"; TexDesc.Type = TEXTURE_TYPE_2D; TexDesc.Width = 1024; TexDesc.Height = 1024; TexDesc.Format = TEX_FORMAT_RGBA8_UNORM; TexDesc.Usage = USAGE_DEFAULT; TexDesc.BindFlags = BIND_SHADER_RESOURCE | BIND_RENDER_TARGET | BIND_UNORDERED_ACCESS; TexDesc.Name = "Sample 2D Texture"; m_pRenderDevice->CreateTexture( TexDesc, TextureData(), &m_pTestTex ); Initializing Pipeline State Diligent Engine follows Direct3D12 style to configure the graphics/compute pipeline. One big Pipelines State Object (PSO) encompasses all required states (all shader stages, input layout description, depth stencil, rasterizer and blend state descriptions etc.) Creating Shaders To create a shader, populate ShaderCreationAttribs structure. An important member is ShaderCreationAttribs::SourceLanguage. The following are valid values for this member: SHADER_SOURCE_LANGUAGE_DEFAULT - The shader source format matches the underlying graphics API: HLSL for D3D11 or D3D12 mode, and GLSL for OpenGL and OpenGLES modes. SHADER_SOURCE_LANGUAGE_HLSL - The shader source is in HLSL. For OpenGL and OpenGLES modes, the source code will be converted to GLSL. See shader converter for details. SHADER_SOURCE_LANGUAGE_GLSL - The shader source is in GLSL. There is currently no GLSL to HLSL converter. To allow grouping of resources based on the frequency of expected change, Diligent Engine introduces classification of shader variables: Static variables (SHADER_VARIABLE_TYPE_STATIC) are variables that are expected to be set only once. They may not be changed once a resource is bound to the variable. Such variables are intended to hold global constants such as camera attributes or global light attributes constant buffers. Mutable variables (SHADER_VARIABLE_TYPE_MUTABLE) define resources that are expected to change on a per-material frequency. Examples may include diffuse textures, normal maps etc. Dynamic variables (SHADER_VARIABLE_TYPE_DYNAMIC) are expected to change frequently and randomly. This post describes the resource binding model in Diligent Engine. 
The following is an example of shader initialization: ShaderCreationAttribs Attrs; Attrs.Desc.Name = "MyPixelShader"; Attrs.FilePath = "MyShaderFile.fx"; Attrs.SearchDirectories = "shaders;shaders\\inc;"; Attrs.EntryPoint = "MyPixelShader"; Attrs.Desc.ShaderType = SHADER_TYPE_PIXEL; Attrs.SourceLanguage = SHADER_SOURCE_LANGUAGE_HLSL; BasicShaderSourceStreamFactory BasicSSSFactory(Attrs.SearchDirectories); Attrs.pShaderSourceStreamFactory = &BasicSSSFactory; ShaderVariableDesc ShaderVars[] = { {"g_StaticTexture", SHADER_VARIABLE_TYPE_STATIC}, {"g_MutableTexture", SHADER_VARIABLE_TYPE_MUTABLE}, {"g_DynamicTexture", SHADER_VARIABLE_TYPE_DYNAMIC} }; Attrs.Desc.VariableDesc = ShaderVars; Attrs.Desc.NumVariables = _countof(ShaderVars); Attrs.Desc.DefaultVariableType = SHADER_VARIABLE_TYPE_STATIC; StaticSamplerDesc StaticSampler; StaticSampler.Desc.MinFilter = FILTER_TYPE_LINEAR; StaticSampler.Desc.MagFilter = FILTER_TYPE_LINEAR; StaticSampler.Desc.MipFilter = FILTER_TYPE_LINEAR; StaticSampler.TextureName = "g_MutableTexture"; Attrs.Desc.NumStaticSamplers = 1; Attrs.Desc.StaticSamplers = &StaticSampler; ShaderMacroHelper Macros; Macros.AddShaderMacro("USE_SHADOWS", 1); Macros.AddShaderMacro("NUM_SHADOW_SAMPLES", 4); Macros.Finalize(); Attrs.Macros = Macros; RefCntAutoPtr<IShader> pShader; m_pDevice->CreateShader( Attrs, &pShader ); Creating the Pipeline State Object To create a pipeline state object, define instance of PipelineStateDesc structure. The structure defines the pipeline specifics such as if the pipeline is a compute pipeline, number and format of render targets as well as depth-stencil format: // This is a graphics pipeline PSODesc.IsComputePipeline = false; PSODesc.GraphicsPipeline.NumRenderTargets = 1; PSODesc.GraphicsPipeline.RTVFormats[0] = TEX_FORMAT_RGBA8_UNORM_SRGB; PSODesc.GraphicsPipeline.DSVFormat = TEX_FORMAT_D32_FLOAT; The structure also defines depth-stencil, rasterizer, blend state, input layout and other parameters. For instance, rasterizer state can be defined as in the code snippet below: // Init rasterizer state RasterizerStateDesc &RasterizerDesc = PSODesc.GraphicsPipeline.RasterizerDesc; RasterizerDesc.FillMode = FILL_MODE_SOLID; RasterizerDesc.CullMode = CULL_MODE_NONE; RasterizerDesc.FrontCounterClockwise = True; RasterizerDesc.ScissorEnable = True; //RSDesc.MultisampleEnable = false; // do not allow msaa (fonts would be degraded) RasterizerDesc.AntialiasedLineEnable = False; When all fields are populated, call IRenderDevice::CreatePipelineState() to create the PSO: m_pDev->CreatePipelineState(PSODesc, &m_pPSO); Binding Shader Resources Shader resource binding in Diligent Engine is based on grouping variables in 3 different groups (static, mutable and dynamic). Static variables are variables that are expected to be set only once. They may not be changed once a resource is bound to the variable. Such variables are intended to hold global constants such as camera attributes or global light attributes constant buffers. 
They are bound directly to the shader object: PixelShader->GetShaderVariable( "g_tex2DShadowMap" )->Set( pShadowMapSRV ); Mutable and dynamic variables are bound via a new object called Shader Resource Binding (SRB), which is created by the pipeline state: m_pPSO->CreateShaderResourceBinding(&m_pSRB); Dynamic and mutable resources are then bound through SRB object: m_pSRB->GetVariable(SHADER_TYPE_VERTEX, "tex2DDiffuse")->Set(pDiffuseTexSRV); m_pSRB->GetVariable(SHADER_TYPE_VERTEX, "cbRandomAttribs")->Set(pRandomAttrsCB); The difference between mutable and dynamic resources is that mutable ones can only be set once for every instance of a shader resource binding. Dynamic resources can be set multiple times. It is important to properly set the variable type as this may affect performance. Static variables are generally most efficient, followed by mutable. Dynamic variables are most expensive from performance point of view. This post explains shader resource binding in more details. Setting the Pipeline State and Invoking Draw Command Before any draw command can be invoked, all required vertex and index buffers as well as the pipeline state should be bound to the device context: // Clear render target const float zero[4] = {0, 0, 0, 0}; m_pContext->ClearRenderTarget(nullptr, zero); // Set vertex and index buffers IBuffer *buffer[] = {m_pVertexBuffer}; Uint32 offsets[] = {0}; Uint32 strides[] = {sizeof(MyVertex)}; m_pContext->SetVertexBuffers(0, 1, buffer, strides, offsets, SET_VERTEX_BUFFERS_FLAG_RESET); m_pContext->SetIndexBuffer(m_pIndexBuffer, 0); m_pContext->SetPipelineState(m_pPSO); Also, all shader resources must be committed to the device context: m_pContext->CommitShaderResources(m_pSRB, COMMIT_SHADER_RESOURCES_FLAG_TRANSITION_RESOURCES); When all required states and resources are bound, IDeviceContext::Draw() can be used to execute draw command or IDeviceContext::DispatchCompute() can be used to execute compute command. Note that for a draw command, graphics pipeline must be bound, and for dispatch command, compute pipeline must be bound. Draw() takes DrawAttribs structure as an argument. The structure members define all attributes required to perform the command (primitive topology, number of vertices or indices, if draw call is indexed or not, if draw call is instanced or not, if draw call is indirect or not, etc.). For example: DrawAttribs attrs; attrs.IsIndexed = true; attrs.IndexType = VT_UINT16; attrs.NumIndices = 36; attrs.Topology = PRIMITIVE_TOPOLOGY_TRIANGLE_LIST; pContext->Draw(attrs); Tutorials and Samples The GitHub repository contains a number of tutorials and sample applications that demonstrate the API usage. Tutorial 01 - Hello Triangle This tutorial shows how to render a simple triangle using Diligent Engine API. Tutorial 02 - Cube This tutorial demonstrates how to render an actual 3D object, a cube. It shows how to load shaders from files, create and use vertex, index and uniform buffers. Tutorial 03 - Texturing This tutorial demonstrates how to apply a texture to a 3D object. It shows how to load a texture from file, create shader resource binding object and how to sample a texture in the shader. Tutorial 04 - Instancing This tutorial demonstrates how to use instancing to render multiple copies of one object using unique transformation matrix for every copy. Tutorial 05 - Texture Array This tutorial demonstrates how to combine instancing with texture arrays to use unique texture for every instance. 
Tutorial 06 - Multithreading This tutorial shows how to generate command lists in parallel from multiple threads. Tutorial 07 - Geometry Shader This tutorial shows how to use a geometry shader to render a smooth wireframe. Tutorial 08 - Tessellation This tutorial shows how to use hardware tessellation to implement a simple adaptive terrain rendering algorithm. The AntTweakBar sample demonstrates how to use the AntTweakBar library to create a simple user interface. The Atmospheric scattering sample is a more advanced example. It demonstrates how Diligent Engine can be used to implement various rendering tasks: loading textures from files, using complex shaders, rendering to textures, using compute shaders and unordered access views, etc. The repository includes an Asteroids performance benchmark based on this demo developed by Intel. It renders 50,000 unique textured asteroids and lets you compare the performance of the D3D11 and D3D12 implementations. Every asteroid is a combination of one of 1000 unique meshes and one of 10 unique textures. Integration with Unity Diligent Engine supports integration with Unity through the Unity low-level native plugin interface. The engine relies on Native API Interoperability to attach to the graphics API initialized by Unity. After the Diligent Engine device and context are created, they can be used as usual to create resources and issue rendering commands. GhostCubePlugin shows an example of how Diligent Engine can be used to render a ghost cube that is only visible as a reflection in a mirror.
  4. I hope this is the right place to ask questions about DirectXTK which aren't really about graphics, if not please let me know a better place. Can anyone tell me why I cannot do this: DirectX::SimpleMath::Rectangle rectangle = {...}; RECT rect = rectangle; or RECT rect = static_cast<RECT>(rectangle); or const RECT rect(m_textureRect); despite Rectangle having the following operator RECT: operator RECT() { RECT rct; rct.left = x; rct.top = y; rct.right = (x + width); rct.bottom = (y + height); return rct; } VS2017 tells me: error C2440: 'initializing': cannot convert from 'const DirectX::SimpleMath::Rectangle' to 'const RECT' Thanks in advance
  5. Hi, I am working on a project where I'm trying to use Forward Plus rendering for point lights. I have a simple reflective scene with many point lights moving around it. I am using an effects file (.fx) to keep my shaders in one place. I am having a problem with my compute shader code: I cannot get it to calculate the tiles and lighting properly. Is anyone willing to help me set up my compute shader? Thank you in advance for any replies and interest!
  6. I am trying to draw a screen-aligned quad with arbitrary sizes. Currently I just send 4 vertices to the vertex shader like so: pDevCon->IASetPrimitiveTopology(D3D_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP); pDevCon->Draw(4, 0); Then in the vertex shader I am doing this: float4 main(uint vI : SV_VERTEXID) : SV_POSITION { float2 texcoord = float2(vI & 1, vI >> 1); return float4((texcoord.x - 0.5f) * 2, -(texcoord.y - 0.5f) * 2, 0, 1); } That gets me a screen-sized quad. What's the correct way to get arbitrary sizes? I have messed around with various numbers, but I think I don't quite get something in these relationships. One thing I tried is: float4 quad = float4((texcoord.x - (xpos/screensizex)) * (width/screensizex), -(texcoord.y - (ypos/screensizey)) * (height/screensizey), 0, 1); where xpos and ypos are the number of pixels from the upper right corner, and width and height are the desired size of the quad in pixels. This gets me somewhat close, but not right; it's a bit too small, so I'm missing something. Any ideas?
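A minimal sketch of the mapping, assuming xpos/ypos are the quad's top-left corner in pixels, width/height its size in pixels, and screensizex/screensizey the viewport size (all assumed to come from a constant buffer): convert the corner to a [0,1] screen position first, then remap to clip space where x runs from -1 to 1 and y is flipped.

```hlsl
float4 main(uint vI : SV_VERTEXID) : SV_POSITION
{
    float2 texcoord = float2(vI & 1, vI >> 1);                  // (0,0) (1,0) (0,1) (1,1)

    // Corner position in pixels, then normalised across the screen to [0,1].
    float2 pixelPos = float2(xpos, ypos) + texcoord * float2(width, height);
    float2 normPos  = pixelPos / float2(screensizex, screensizey);

    // [0,1] -> clip space: x to [-1,1], y flipped so 0 is the top of the screen.
    return float4(normPos.x * 2.0f - 1.0f, 1.0f - normPos.y * 2.0f, 0.0f, 1.0f);
}
```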
  7. Hi, right now building my engine in Visual Studio involves a shader compilation step to build HLSL 5.0 shaders. I have a separate project which only includes the shader sources, and the compiler is the Visual Studio-integrated fxc compiler. I like this method because on any PC that has Visual Studio installed, I can just download the solution from GitHub and everything just builds without additional dependencies, using the latest version of the compiler. I also like it because the shaders are included in the Solution Explorer and are easy to browse and double-click to open (opening files can be a real pain in the ass in Visual Studio run in admin mode). Also, it's nice that VS displays the build output/errors in the output window. But now I have the HLSL 6 compiler and want to build HLSL 6 shaders as well (and, as I understand it, I can also compile Vulkan-compatible shaders with it later). Any idea how to do this nicely? I want only a single project containing the shader sources, like it is now, but to build them for different targets. I guess adding separate build projects that reference the shader source project would be the way to go? But how would they determine the shader type of each source (e.g. pixel shader, compute shader, etc.)? Right now the shader project stores the shader type for each shader; how could other build projects reference that? Does anyone have experience with this?
  8. Hello! I have a problem with shader reflection for D3D11: 1>engine_render_d3d11_system.obj : error LNK2001: unresolved external symbol IID_ID3D11ShaderReflection I tried to add this: #include <D3D11Shader.h> #include <D3Dcompiler.h> #include <D3DCompiler.inl> #pragma comment(lib, "D3DCompiler.lib") //#pragma comment(lib, "D3DCompiler_47.lib") as MSDN tells me, but still no luck. I think a lot of people have done this already; what am I missing? I also found this article http://mattfife.com/?p=470 which recommends putting the SDK headers and libs before the Windows SDK, but I am not using the DirectX SDK for this project at all; should I?
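For what it's worth, the IID_* GUID symbols (including IID_ID3D11ShaderReflection) live in dxguid.lib rather than in D3DCompiler.lib, so linking that library usually resolves this particular LNK2001 without touching the legacy DirectX SDK:

```cpp
#include <d3d11shader.h>
#include <d3dcompiler.h>

#pragma comment(lib, "d3dcompiler.lib")
#pragma comment(lib, "dxguid.lib")   // defines IID_ID3D11ShaderReflection and friends
```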
  9. Hi there, this is my first post in what looks to be a very interesting forum. I am using DirectXTK to put together my 2D game engine but would like to use the GPU depth buffer in order to avoid sorting back-to-front on the CPU and I think I also want to use GPU instancing, so can I do that with SpriteBatch or am I looking at implementing my own sprite rendering? Thanks in advance!
  10. Back around 2006 I spent a good year or two reading books, articles on this site, and gobbling up everything game dev related I could. I started an engine in DX10 and got through the basics. I eventually gave up, because I couldn't do the harder things. Now, my C++ is 12 years stronger, my mind is trained better, and I am thinking of giving it another go. A lot has changed. There is no more SDK, there is evidently a DX Toolkit, XNA died, all the sweet sites I used to go to are 404, and Google searches all point to Unity and Unreal. I plainly don't like Unity or Unreal, but might learn them for reference. So, what is the current path? Does everyone pretty much use the DX Toolkit? Should I start there? I also read that DX12 is just expert-level DX11, so I guess I am going DX11. Is there a current and up-to-date list of learning resources anywhere? I am about tired of 404s.
  11. Is it possible to asynchronously create a Texture2D using DirectX 11? I have a native Unity plugin that downloads 8K textures from a server and displays them to the user for a VR application. This works well, but there's a large frame drop when calling CreateTexture2D. To remedy this, I've tried creating a separate thread that creates the texture, but the frame drop is still present. Is there anything else that I could do to prevent that frame drop from occurring?
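ID3D11Device is free-threaded, so CreateTexture2D itself can be called from a worker thread, but a large initial-data upload can still stall inside the driver when the texture is first used. One workaround worth trying (a sketch only, with hypothetical variable names) is to create the texture without initial data and stream the pixels up a slice of rows per frame with UpdateSubresource and a destination box:

```cpp
// Assumes an RGBA8 texture with no mips, 'pixels' (const BYTE*) pointing at the
// full image, and 'rowPitch' bytes per source row. Spread the copy over frames.
const UINT rowsPerFrame = 512;
static UINT nextRow = 0;                               // persists between frames

if (nextRow < height)
{
    UINT endRow = nextRow + rowsPerFrame;
    if (endRow > height) endRow = height;

    D3D11_BOX box = {};
    box.left  = 0;        box.right  = width;
    box.top   = nextRow;  box.bottom = endRow;
    box.front = 0;        box.back   = 1;

    context->UpdateSubresource(texture, 0, &box,
                               pixels + nextRow * rowPitch, rowPitch, 0);
    nextRow = endRow;
}
```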
  12. Hi, I've been trying to implement a basic Gaussian blur using the Gaussian formula, and here is what it looks like so far: float gaussian(float x, float sigma) { float pi = 3.14159; float sigma_square = sigma * sigma; float a = 1 / sqrt(2 * pi*sigma_square); float b = exp(-((x*x) / (2 * sigma_square))); return a * b; } My problem is that I don't quite know what sigma should be. It seems that if I provide a random value for sigma, the weights in my kernel won't add up to 1. So I ended up calling my gaussian function with sigma == 1, which gives me weights adding up to 1, but also a very subtle blur. Here is what my kernel looks like with sigma == 1: [0] 0.0033238872995488885 [1] 0.023804742479357766 [2] 0.09713820127276819 [3] 0.22585307043511713 [4] 0.29920669915475656 [5] 0.22585307043511713 [6] 0.09713820127276819 [7] 0.023804742479357766 [8] 0.0033238872995488885 I would have liked it to be more "rounded" at the top, or to have a better spread instead of wasting [0], [1], [2] on values below 0.1. Based on my experiments, the key to this is to provide a different sigma, but if I do, my kernel values no longer add up to 1, which results in a darker blur. I've found this post ... which helped me a bit, but I am really confused by the part where he divides sigma by 3. Can someone please explain how sigma works? How is it related to my kernel size, and how can I balance my weights with different sigmas, etc.? Thanks :-)
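Sigma controls how wide the bell is; the kernel radius is then usually chosen so the kernel covers about three standard deviations (that is the "divide by 3": for a radius r, sigma is roughly r / 3). Because the kernel is truncated, its weights will not sum to exactly 1 for an arbitrary sigma, so the standard fix is to normalise by dividing every weight by the sum. A small sketch:

```cpp
#include <cmath>
#include <vector>

// Build a 1D Gaussian kernel of the given radius. Sigma is derived from the
// radius (radius ~ 3 * sigma) and the weights are normalised to sum to 1,
// so no brightness is lost regardless of the sigma chosen.
std::vector<float> MakeGaussianKernel(int radius)
{
    const float sigma      = radius / 3.0f;
    const float twoSigmaSq = 2.0f * sigma * sigma;

    std::vector<float> weights(2 * radius + 1);
    float sum = 0.0f;
    for (int x = -radius; x <= radius; ++x)
    {
        // The 1/sqrt(2*pi*sigma^2) factor cancels out after normalisation.
        float w = std::exp(-(x * x) / twoSigmaSq);
        weights[x + radius] = w;
        sum += w;
    }
    for (float& w : weights)
        w /= sum;
    return weights;
}
```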
  13. I managed to convert the OpenGL code at http://john-chapman-graphics.blogspot.co.uk/2013/02/pseudo-lens-flare.html to HLSL, but unfortunately I don't know how to add it to my atmospheric scattering code (Sky - first image). Can anyone help me? I tried to bind the sky texture as an SRV and implement the lens flare code in the pixel shader, but I don't know how to separate them (second image).
  14. I'm trying to draw a circle using math: class coordenates { public: coordenates(float x=0, float y=0) { X = x; Y = y; } float X; float Y; }; coordenates RotationPoints(coordenates ActualPosition, double angle) { coordenates NewPosition; NewPosition.X = ActualPosition.X*sin(angle) - ActualPosition.Y*sin(angle); NewPosition.Y = ActualPosition.Y*cos(angle) + ActualPosition.X*cos(angle); return NewPosition; } But now I know this has a problem, because I don't use the origin. Even so, I'm having problems with how to rotate the point. These coordinates are floating point values between -1 and 1. Can anyone advise me further so I can create the circle?
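For reference, the standard 2D rotation about the origin is x' = x*cos(a) - y*sin(a), y' = x*sin(a) + y*cos(a); note that each formula mixes cos and sin, unlike the code above. To rotate around an arbitrary centre, move the centre to the origin first and move it back afterwards. A small sketch reusing the coordenates class from the post:

```cpp
#include <cmath>

// Rotate 'point' by 'angle' radians around 'center'.
coordenates RotateAround(coordenates point, coordenates center, double angle)
{
    float dx = point.X - center.X;                 // translate so the centre is the origin
    float dy = point.Y - center.Y;

    coordenates result;
    result.X = center.X + dx * (float)cos(angle) - dy * (float)sin(angle);
    result.Y = center.Y + dx * (float)sin(angle) + dy * (float)cos(angle);
    return result;
}
```

To trace the circle, start from a point such as {center.X + radius, center.Y} and rotate it around the centre in small angle steps, drawing a line segment between consecutive positions.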
  15. Hello, I have a problem with GPU skinning. From a COLLADA file I load my object with its vertices, the weights and bone indices for them, and the bones with their matrices. For every vertex I choose 4 weights and 4 bone indices. For every non-skinned vertex I use the weights 1 0 0 0 and bone indices 0 0 0 0 (index 0 in the bone matrices array is an identity matrix), and I check that all weight values together always add up to 1, or normalize them so they do. So far so good; my shader looks like this: bool HasBones; matrix BoneMatrices[256]; struct Vertex { float3 Position : POSITION; float3 Normal : NORMAL; float2 UV : TEXCOORD0; float3 Tangent : TANGENT; float4 Weights : WEIGHTS; int4 BoneIndices : BONEINDICES; }; float4 ApplyBoneTransform(Vertex input, float4 value) { if(HasBones) { float4x4 skinTransform = (float4x4)0; skinTransform += BoneMatrices[input.BoneIndices.x] * input.Weights.x; skinTransform += BoneMatrices[input.BoneIndices.y] * input.Weights.y; skinTransform += BoneMatrices[input.BoneIndices.z] * input.Weights.z; skinTransform += BoneMatrices[input.BoneIndices.w] * input.Weights.w; float4 position = mul(value, skinTransform); return position; } else return value; } Pixel vertexShader(Vertex input) { Pixel result = (Pixel) 0; float4 posWorld = mul(ApplyBoneTransform(input, float4(input.Position.xyz, 1.0f)), World); result.Position = mul(mul(posWorld, View), Projection); result.Normal = normalize(mul(ApplyBoneTransform(input, float4(input.Normal.xyz, 1.0f)), WorldIT)); result.UV = input.UV; result.View = ViewInverse[3] - mul(float4(input.Position.xyz, 1.0f), World); result.Tangent = normalize(mul(ApplyBoneTransform(input, float4(input.Tangent.xyz, 1.0f)), WorldIT).xyz); result.Binormal = normalize(cross(input.Normal, input.Tangent)); return result; } If I set HasBones to true, my object no longer draws correctly; I only see two dark triangles. I believe it depends on the bone matrices I load from the controller_lib of the COLLADA file and send to BoneMatrices in the shader, plus the identity matrix at index 0. Does anyone have an idea what I am doing wrong, and could you help explain it to me? I have attached the COLLADA file and images of the object drawn with HasBones = false and HasBones = true. Greets Benajmin Model.dae
  16. I have some code (not written by me) that is creating a window to draw stuff into using these:
     • CreateDXGIFactory1 to create an IDXGIFactory1
     • dxgi_factory->CreateSwapChain to create an IDXGISwapChain
     • D3D11CreateDevice to create an ID3D11Device and an ID3D11DeviceContext
     • Other code (that I don't quite understand) that creates various IDXGIAdapter1 and IDXGIOutput instances
     • Still other code (that I don't quite understand) that is creating some ID3D11RenderTargetView and ID3D11DepthStencilView instances and is doing something with those as well (possibly loading them into the graphics context somewhere, although I can't quite see where)
     What I want to do is to create a second window and draw stuff to that as well as to the main window (all drawing would happen on the one thread, with all the drawing to the sub-window happening in one block and outside of any rendering being done to the main window). Do I need to create a second IDXGISwapChain for my new window? Do I need to create a second ID3D11Device or different IDXGIAdapter1 and IDXGIOutput interfaces? How do I tell Direct3D which window I want to render to? Are there particular D3D11 functions I should be looking for that are involved in this? I am good with Direct3D9, but this is the first time I am working with Direct3D11 (and the guy who wrote the code has left our team, so I can't ask him for help).
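For what it's worth, a single ID3D11Device can drive any number of windows; there is no need for a second device, adapter or output. Each extra window just needs its own IDXGISwapChain (created with that window's HWND) and a render target view of its back buffer; you choose the window by binding that RTV with OMSetRenderTargets before drawing and by calling Present on that window's swap chain. A rough sketch, with hypothetical names (hSecondWnd, device, context) and no error handling:

```cpp
// One-time setup for the second window.
DXGI_SWAP_CHAIN_DESC sd = {};
sd.BufferCount       = 1;
sd.BufferDesc.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
sd.BufferUsage       = DXGI_USAGE_RENDER_TARGET_OUTPUT;
sd.OutputWindow      = hSecondWnd;
sd.SampleDesc.Count  = 1;
sd.Windowed          = TRUE;

IDXGISwapChain*         swapChain2  = nullptr;
ID3D11Texture2D*        backBuffer2 = nullptr;
ID3D11RenderTargetView* rtv2        = nullptr;
dxgi_factory->CreateSwapChain(device, &sd, &swapChain2);
swapChain2->GetBuffer(0, IID_PPV_ARGS(&backBuffer2));
device->CreateRenderTargetView(backBuffer2, nullptr, &rtv2);
backBuffer2->Release();

// Per frame, after the main window has been rendered and presented:
context->OMSetRenderTargets(1, &rtv2, nullptr /* or a depth view sized for this window */);
// ... set the viewport for the second window and issue its draw calls ...
swapChain2->Present(1, 0);
```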
  17. I have found an example here: http://xboxforums.create.msdn.com/forums/t/66208.aspx about sharing surfaces/textures created by DX11 into the ID2D1DCRenderTarget, however the above example is based on SwapChain bound to a window (hwnd) What I need is to draw a shared texture into GDI as this is the only plugin interface in an old software I have. Any ideas ? Here is what I do. SharedSurface::SharedSurface(): { //Initialize the ID2D1Factory object D2D1CreateFactory(D2D1_FACTORY_TYPE_SINGLE_THREADED, &pD2DFactory); //D2D1CreateFactory(D2D1_FACTORY_TYPE_MULTI_THREADED, &pD2DFactory); //initialize the ID2D1DCRenderTarget D2D1_RENDER_TARGET_PROPERTIES props = D2D1::RenderTargetProperties( D2D1_RENDER_TARGET_TYPE_HARDWARE, // D2D1_RENDER_TARGET_TYPE_DEFAULT, D2D1::PixelFormat( DXGI_FORMAT_B8G8R8A8_UNORM, D2D1_ALPHA_MODE_PREMULTIPLIED), 0, 0, D2D1_RENDER_TARGET_USAGE_GDI_COMPATIBLE, D2D1_FEATURE_LEVEL_DEFAULT ); HRESULT hr = pD2DFactory->CreateDCRenderTarget(&props, &pRT); DWORD createDeviceFlags = 0; createDeviceFlags |= D3D11_CREATE_DEVICE_DEBUG; ID3D11DeviceContext* context; D3D_FEATURE_LEVEL fl; DXGI_SWAP_CHAIN_DESC sd; ZeroMemory( &sd, sizeof( sd ) ); sd.BufferCount = 1; sd.BufferDesc.Width = width; sd.BufferDesc.Height = height; sd.BufferDesc.Format = DXGI_FORMAT_R8G8B8A8_UNORM; sd.BufferDesc.RefreshRate.Numerator = 60; sd.BufferDesc.RefreshRate.Denominator = 1; sd.BufferUsage = DXGI_USAGE_RENDER_TARGET_OUTPUT | DXGI_USAGE_SHARED; sd.OutputWindow = 0; // g_hWnd; sd.SampleDesc.Count = 1; sd.SampleDesc.Quality = 0; sd.Windowed = FALSE, // TRUE; hr = D3D11CreateDeviceAndSwapChain(nullptr, D3D_DRIVER_TYPE_HARDWARE, nullptr, createDeviceFlags, nullptr, 0, D3D11_SDK_VERSION, &sd, &pSwapChain, &mDevice, &fl, &context); hr = m_pSwapChain->GetBuffer(0, IID_PPV_ARGS(&pBackBuffer)); } bool SharedSurface::CreateTexture(ID3D11Device* device, UINT width, UINT height) { HRESULT hr; D3D11_TEXTURE2D_DESC desc; ZeroMemory(&desc, sizeof(desc)); desc.Width = width; desc.Height = height; desc.MipLevels = 1; desc.ArraySize = 1; desc.Format = DXGI_FORMAT_B8G8R8A8_UNORM; desc.SampleDesc.Count = 1; desc.MiscFlags = D3D11_RESOURCE_MISC_SHARED_KEYEDMUTEX; // D3D11_RESOURCE_MISC_SHARED; // desc.Usage = D3D11_USAGE_DEFAULT; desc.BindFlags = D3D11_BIND_SHADER_RESOURCE | D3D11_BIND_RENDER_TARGET; desc.CPUAccessFlags = 0; // D3D11_CPU_ACCESS_READ; // 0; if (device != nullptr) mDevice = device; hr = mDevice->CreateTexture2D(&desc, NULL, &pTexture); IDXGIResource * pDXGIResource = NULL; hr = pTexture->QueryInterface(__uuidof(IDXGIResource), (void **)&pDXGIResource); if SUCCEEDED(hr) { hr = pDXGIResource->GetSharedHandle(&sharedHandle); pDXGIResource->Release(); if SUCCEEDED(hr) { hr = pTexture->QueryInterface(__uuidof(IDXGIKeyedMutex), (LPVOID*)&pMutex); } } hr = pTexture->QueryInterface(__uuidof(IDXGISurface), (void **)&pSurface); FLOAT dpiX; FLOAT dpiY; pD2DFactory->GetDesktopDpi(&dpiX, &dpiY); D2D1_RENDER_TARGET_PROPERTIES props = D2D1::RenderTargetProperties(D2D1_RENDER_TARGET_TYPE_HARDWARE/*D2D1_RENDER_TARGET_TYPE_DEFAULT*/, D2D1::PixelFormat(DXGI_FORMAT_B8G8R8A8_UNORM, D2D1_ALPHA_MODE_PREMULTIPLIED),dpiX,dpiY); hr = pD2DFactory->CreateDxgiSurfaceRenderTarget(pBackBuffer,&props, &pBackBufferRT); DXGI_SURFACE_DESC sdesc; D2D1_BITMAP_PROPERTIES bp; ZeroMemory(&bp, sizeof(bp)); pSurface->GetDesc(&sdesc); bp.pixelFormat = D2D1::PixelFormat(sdesc.Format, D2D1_ALPHA_MODE_PREMULTIPLIED); hr = pBackBufferRT->CreateSharedBitmap(__uuidof(ID3D11Texture2D), pSurface,&bp, &pBitmap); return SUCCEEDED(hr); } void 
SharedSurface::Draw() { pBackBufferRT->BeginDraw(); pBackBufferRT->DrawBitmap(pBitmap); pBackBufferRT->EndDraw(); pBackBufferRT->Present(); } void SharedSurface::BindDC(HDC hdc, int width, int height) { RECT rct; rct.top = 0; rct.left = 0; rct.right = width; rct.bottom = height; pRT->BindDC(hdc, &rct); } // HOW TO EXCHANGE between pBackBufferRT and pRT ?
  18. Hi, I am trying to use shared textures with my rendering but with no success. I create texture which I share with another process which draws on that texture. Later on I want to use that texture in my rendering loop. //class members - once initialized, kept static during the rendering ID3D11ShaderResourceView* g_mTexture; ID3D11Texture2D *mTexture; bool MyTexture::CreateTexture(ID3D11Device* device, UINT width, UINT height, int targetWidth, int targetHeight) { HRESULT hr; D3D11_TEXTURE2D_DESC desc; ZeroMemory(&desc, sizeof(desc)); desc.Width = width; desc.Height = height; desc.MipLevels = desc.ArraySize = 1; desc.Format = DXGI_FORMAT_B8G8R8A8_UNORM; desc.SampleDesc.Count = 1; desc.MiscFlags = D3D11_RESOURCE_MISC_SHARED; // D3D11_RESOURCE_MISC_SHARED_KEYEDMUTEX desc.Usage = D3D11_USAGE_DEFAULT; desc.BindFlags = D3D11_BIND_RENDER_TARGET | D3D11_BIND_SHADER_RESOURCE; desc.CPUAccessFlags = 0; hr = device->CreateTexture2D( &desc, NULL, &mTexture ); if (SUCCEEDED(hr)) { D3D11_RENDER_TARGET_VIEW_DESC rtDesc; ZeroMemory(&rtDesc, sizeof(rtDesc)); rtDesc.Format = desc.Format; rtDesc.ViewDimension = D3D11_RTV_DIMENSION_TEXTURE2D; rtDesc.Texture2D.MipSlice = 0; D3D11_SHADER_RESOURCE_VIEW_DESC svDesc; ZeroMemory(&svDesc, sizeof(svDesc)); svDesc.Format = desc.Format; svDesc.Texture2D.MipLevels = 1; svDesc.Texture2D.MostDetailedMip = 0; svDesc.ViewDimension = D3D11_SRV_DIMENSION_TEXTURE2D; hr = device->CreateShaderResourceView(mTexture, &svDesc, &g_mTexture); } IDXGIResource * pDXGIResource; HRESULT hr = mTexture->QueryInterface(__uuidof(IDXGIResource), (void **)&pDXGIResource); if SUCCEEDED(hr) { hr = pDXGIResource->GetSharedHandle(&sharedHandle); pDXGIResource->Release(); if SUCCEEDED(hr) { OutputDebug(L"RequestSharedHandle: w=%d, h=%d, handle=%d", width, height, sharedHandle); return (unsigned long long) sharedHandle; } } .... } the problem is to use that shared handle during my rendering loop as the texture is always black, below are all the options I tried: 1) OPTION 1 (bare texture) In this option I simply tried to use mTexture object created with device->CreateTexture2D() that I shared with another process , so basically I left my legacy code untouched with the exception of CreateTexture where I modified D3D11_TEXTURE2D_DESC options for shared version. 
In my rendering loop I used g_mTexture created during init by CreateShaderResourceView ``` #!c++ pDeviceContext->PSSetShaderResources( 0, 1, &(*it_region)->g_mTexture ); ``` in the legacy (without shared texture) I simply mapped the texture, copied the bits and unmapped and was working fine: D3D11_MAPPED_SUBRESOURCE mappedResource; HRESULT hr = pDeviceContext->Map(mTexture, 0, D3D11_MAP_WRITE_DISCARD, 0, &mappedResource); if (SUCCEEDED(hr)) { BYTE* mappedData = reinterpret_cast<BYTE*>(mappedResource.pData); if (mappedData != NULL) // { for (UINT i = 0; i < h; ++i) { if (bUpsideDown) { memcpy(mappedData, bits, w * 4); mappedData -= mappedResource.RowPitch; bits += (UINT)w * 4; } else { memcpy(mappedData, bits, w * 4); mappedData += mappedResource.RowPitch; bits += (UINT)w * 4; } } } if ((*it_region)->mTexture != NULL) pDeviceContext->Unmap(mTexture, 0); 2) OPTION 2 - OpenSharedResource In this version I tried the get the handle to the shared texture using OpenSharedResource (with 2 combination) // Option 2.1 - get pTexture directly ID3D11Texture2D *pTexture; // temp handler HRESULT hr = mDevice->OpenSharedResource(sharedHandle, __uuidof(ID3D11Texture2D), (LPVOID*)&pTexture); // Option 2.2 get pTexture indirectly by using QueryInterface IDXGIResource* sharedResource = 0; HRESULT hr = mDevice->OpenSharedResource(sharedHandle, __uuidof(ID3D11Texture2D), (LPVOID*)&sharedResource); hr = sharedResource->QueryInterface(__uuidof(ID3D11Texture2D), (void**)(&pTexture)); OutputDebug(L"OpenSharedResource:%d\n", hr); Now, having new temporary pTexture handle I tried the following options (with the combination of the above options retrieving the shared pTexture) OPTION 2.3 - copy pTexure into mTexture #!c++ mDevice->CopyResource(mTexture,pTexture); pDeviceContext->PSSetShaderResources( 0, 1, &g_mTexture ); ``` OPTION 2.4 - create new temporary shader resource using temporary pTexture ``` #!c++ ID3D11ShaderResourceView* g_pTexture; // temp handler hr = device->CreateShaderResourceView(pTexture, &svDesc, &g_pTexture); pDeviceContext->PSSetShaderResources( 0, 1, &g_pTexture ); ``` OPTION 3 - MUTEX version Basically I tried all above options using D3D11_RESOURCE_MISC_SHARED_KEYEDMUTEX flag during texture creation and the following code to acquire the mutex: UINT acqKey = 1; UINT relKey = 0; DWORD timeOut = 5; IDXGIKeyedMutex pMutex; hr = pTexture->QueryInterface( __uuidof(IDXGIKeyedMutex), (LPVOID*)&pMutex ); DWORD result = pMutex->AcquireSync(acqKey, timeOut); if (result == WAIT_OBJECT_0) // Rendering using 2.2 and 2.3 options .... else // nothing here - skip frame result = pMutex->ReleaseSync(relKey)); if ( result == WAIT_OBJECT_0 ) return S_OK; NONE of those solutions worked for me, any HINTS ?
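For reference, the pattern that usually works with cross-process sharing (a sketch only, not tested against the code above): the producing side creates the texture with the shared misc flag and fills it by rendering to it or with UpdateSubresource (Map with WRITE_DISCARD is only valid on USAGE_DYNAMIC resources, so the legacy mapping path will not work on a DEFAULT shared texture); the consuming side opens the handle on its own device and creates its own SRV from the opened texture, not from the original one. With the keyed-mutex variant, both sides must also wrap every access in AcquireSync/ReleaseSync with coordinated keys.

```cpp
// Consumer side: 'sharedHandle' was received from the producing process,
// 'device'/'context' belong to this process (names assumed).
ID3D11Texture2D*          sharedTex = nullptr;
ID3D11ShaderResourceView* sharedSRV = nullptr;

HRESULT hr = device->OpenSharedResource(sharedHandle, __uuidof(ID3D11Texture2D),
                                        (void**)&sharedTex);
if (SUCCEEDED(hr))
    hr = device->CreateShaderResourceView(sharedTex, nullptr, &sharedSRV);

// Per frame, if D3D11_RESOURCE_MISC_SHARED_KEYEDMUTEX was used at creation
// (the acquire/release key values must match what the producer releases with):
IDXGIKeyedMutex* mutex = nullptr;
sharedTex->QueryInterface(__uuidof(IDXGIKeyedMutex), (void**)&mutex);
if (mutex && mutex->AcquireSync(0, 5) == S_OK)
{
    context->PSSetShaderResources(0, 1, &sharedSRV);
    // ... draw with the shared texture ...
    mutex->ReleaseSync(0);
}
```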
  19. While I am running my DirectX 11 engine in windowed mode, I can use the snipping tool to get a screen cap of it. However, if I run it in full screen, hit the print screen button, and try to copy-paste that into Paint or Photoshop, there is nothing there - print screen isn't capturing the image for some reason, and I can't use the snipping tool to get the screen cap while it's in full screen. Is there some setting or something that's not allowing Windows to detect the game while it's in full screen? It's very important to get these captures for my portfolio, and if I capture it in windowed mode, it looks really bad and low-res. Here's a link to the D3D class in my engine for reference: https://github.com/mister51213/DirectX11Engine/blob/master/DirectX11Engine/D3DClass.cpp Any help would be much appreciated, thanks!
  20. Would a 1024x16384 texture generally perform as well as a 4096x4096 texture? Memory-wise they should be the same, but perhaps having such a large dimension as 16384 would be worse anyway? I'm considering making a texture atlas like a long strip where it would be tileable in one direction.
  21. I am having trouble with the graphics device disconnecting with the reason DXGI_ERROR_DRIVER_INTERNAL_ERROR on some of our devices after our game terminal has been running for a few hours. Specifically the Intel HD 530, though a few of the other Intel-based GPUs will crash as well. The odd thing is that the same code will run indefinitely on the same computers if we put in a discrete card. It also runs without any problem on other Intel chipsets, even those that are quite a bit older. We have tried the basics like Windows updates and getting the latest drivers from Intel. We have also verified that this is not a computer-specific problem, as the problem persists across different computers with the same build. I have tried adding ID3D11DeviceContext::Flush calls around resource creation as suggested by https://software.intel.com/en-us/forums/graphics-driver-bug-reporting/topic/610376 with no apparent help. I have also verified that no graphics handles are being held onto for a very long time, and our GPU memory usage never gets much above 400 MB, which should be well within the ability of an integrated card to handle. We actually wrote a watchdog application to monitor that, and usually the device is removed while memory usage is lower than normal. I'm having a hard time finding any resources that would help us find the root problem, as DXGI_ERROR_DRIVER_INTERNAL_ERROR is not a very helpful error message. We are using the DirectX 11 API, and run on a variety of Windows-based computers, including both all-in-ones and desktops. I would appreciate any help or ideas anyone has, as we haven't been able to make much forward progress even after a few weeks of intensive debugging and engine changes.
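It will not fix the driver, but checking for removal right after Present and logging ID3D11Device::GetDeviceRemovedReason (ideally with the debug layer enabled on test machines) at least pins down which call triggers the removal and lets the application tear down and recreate the device instead of dying. A minimal sketch, with hypothetical helper names for the logging and recovery steps:

```cpp
HRESULT hr = swapChain->Present(1, 0);
if (hr == DXGI_ERROR_DEVICE_REMOVED || hr == DXGI_ERROR_DEVICE_RESET)
{
    // For DXGI_ERROR_DEVICE_REMOVED this returns the underlying cause,
    // e.g. DXGI_ERROR_DRIVER_INTERNAL_ERROR or DXGI_ERROR_DEVICE_HUNG.
    HRESULT reason = device->GetDeviceRemovedReason();
    LogDeviceRemoved(reason);          // hypothetical logging helper
    RecreateDeviceAndSwapChain();      // hypothetical recovery path
}
```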
  22. My shadows (drawn using a depth buffer which I later sample from in the shadow texture), seem to be detaching slightly from their objects. I looked this up and I think it's "peter panning," and they were saying you have to change the depth offset, but not sure how to do that. https://msdn.microsoft.com/en-us/library/windows/desktop/ee416324(v=vs.85).aspx Is there a fast way I can tweak the code to fix this? Should I change the "bias" perhaps, or something else? Thanks, Here is the code for the shadows, for reference: // ENTRY POINT float4 main(PixelInputType input) : SV_TARGET { float2 projectTexCoord; float depthValue; float lightDepthValue; //float4 lightColor = float4(0,0,0,0); float4 lightColor = float4(0.05,0.05,0.05,1); // Set the bias value for fixing the floating point precision issues. float bias = 0.001f; //////////////// SHADOWING LOOP //////////////// for(int i = 0; i < NUM_LIGHTS; ++i) { // Calculate the projected texture coordinates. projectTexCoord.x = input.vertex_ProjLightSpace[i].x / input.vertex_ProjLightSpace[i].w / 2.0f + 0.5f; projectTexCoord.y = -input.vertex_ProjLightSpace[i].y / input.vertex_ProjLightSpace[i].w / 2.0f + 0.5f; if((saturate(projectTexCoord.x) == projectTexCoord.x) && (saturate(projectTexCoord.y) == projectTexCoord.y)) { // Sample the shadow map depth value from the depth texture using the sampler at the projected texture coordinate location. depthValue = depthTextures[i].Sample(SampleTypeClamp, projectTexCoord).r; // Calculate the depth of the light. lightDepthValue = input.vertex_ProjLightSpace[i].z / input.vertex_ProjLightSpace[i].w; // Subtract the bias from the lightDepthValue. lightDepthValue = lightDepthValue - bias; // Compare the depth of the shadow map value and the depth of the light to determine whether to shadow or to light this pixel. // If the light is in front of the object then light the pixel, if not then shadow this pixel since an object (occluder) is casting a shadow on it. if(lightDepthValue < depthValue) { // Calculate the amount of light on this pixel. float lightIntensity = saturate(dot(input.normal, normalize(input.lightPos_LS[i]))); if(lightIntensity > 0.0f) { float spotlightIntensity = CalculateSpotLightIntensity(input.lightPos_LS[i], cb_lights[i].lightDirection, input.normal); //lightColor += (float4(1.0f, 1.0f, 1.0f, 1.0f) * lightIntensity) * .3f; // spotlight lightColor += float4(1.0f, 1.0f, 1.0f, 1.0f) /** lightIntensity*/ * spotlightIntensity * .3f; // spotlight } } } } return saturate(lightColor); } https://github.com/mister51213/DirectX11Engine/blob/master/DirectX11Engine/MultiShadows_ps.hlsl
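For reference, the detached "peter panning" look usually means the depth offset pushes receivers too far toward the light, so the two common remedies are reducing the bias (or making it slope-scaled so flat surfaces get less of it) and rendering back faces into the shadow map. The hardware version of the bias is set on the rasterizer state used for the depth-only shadow pass rather than subtracted in the pixel shader; a sketch with values that would need tuning:

```cpp
// Rasterizer state for the shadow-map (depth-only) pass.
D3D11_RASTERIZER_DESC rd = {};
rd.FillMode             = D3D11_FILL_SOLID;
rd.CullMode             = D3D11_CULL_FRONT;  // put back faces in the shadow map so the bias can shrink
rd.DepthClipEnable      = TRUE;
rd.DepthBias            = 0;                 // constant bias; raise only if acne appears
rd.SlopeScaledDepthBias = 1.0f;              // extra bias on steep (grazing) surfaces
rd.DepthBiasClamp       = 0.0f;

ID3D11RasterizerState* shadowRS = nullptr;
device->CreateRasterizerState(&rd, &shadowRS);
// context->RSSetState(shadowRS); while drawing the shadow map, then restore
// the normal state (and a smaller shader-side bias) for the main pass.
```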
  23. Hi! I'm pretty new to 3D programming, using the SharpDX wrapper to build a 3D world (for testing and learning). I am adding several visible camera objects (very rudimentary models) in order to visualize different views. Let's say I have a "world" floor grid covering the vectors {0,0,0 - 1,1,0}. I add a pretend camera "CAM2" object at {0.5, 1.5, -1.0}. I am looking at this world by setting the parameters for "CAM1", a world-view projection with position {0.0, 1.5, 1.5} and lookat {0.5, 0.0, 0.5} (looking down from the upper left towards the center of the floor). I would like to draw a line from the pretend camera "CAM2" model origin to the center of the floor, as it is projected through the "CAM1" view. Obviously a line from "CAM1" to the lookat point would be invisible, but I can't for the life of me figure out how to apply the correct conversions to the vector end point for "CAM2". As can be seen in the snapshot, the line (green) from "CAM2" points to... well... Russia?? :-D Can anyone help? BR Per
  24. Hi, so I imported some new models into my engine, and some of them show up with ugly seams or dark patches, while others look perfect (see pictures) I'm using the same shader for all of them, and all of these models have had custom UV mapped textures created for them, which should wrap fully around them, instead of using tiled textures. I have no idea why the custom UV mapped textures are mapping correctly on some, but not others. Possible causes are 1. Am I using the wrong SamplerState to sample the textures? (Im using SampleTypeClamp ) 2. The original models had quads, and were UV mapped by an artist in that state, then I reimported them into 3DS Max and reexported them as all triangles (my engine object loader only accepts triangles). 3. Could the original model UVs just be wrong? Please let me know if somebody can help identify this problem, I'm completely baffled. Thanks. For reference, here's a link to the shader being used to draw the problematic models and the shader code below. https://github.com/mister51213/DirectX11Engine/blob/master/DirectX11Engine/Light_SoftShadows_ps.hlsl ///////////// // DEFINES // ///////////// #define NUM_LIGHTS 3 ///////////// // GLOBALS // ///////////// // texture resource that will be used for rendering the texture on the model Texture2D shaderTextures[7];// NOTE - we only use one render target for drawing all the shadows here! // allows modifying how pixels are written to the polygon face, for example choosing which to draw. SamplerState SampleType; /////////////////// // SAMPLE STATES // /////////////////// SamplerState SampleTypeClamp : register(s0); SamplerState SampleTypeWrap : register(s1); /////////////////// // TYPEDEFS // /////////////////// // This structure is used to describe the lights properties struct LightTemplate_PS { int type; float3 padding; float4 diffuseColor; float3 lightDirection; //(lookat?) //@TODO pass from VS BUFFER? float specularPower; float4 specularColor; }; ////////////////////// // CONSTANT BUFFERS // ////////////////////// cbuffer SceneLightBuffer:register(b0) { float4 cb_ambientColor; LightTemplate_PS cb_lights[NUM_LIGHTS]; } ////////////////////// // CONSTANT BUFFERS // ////////////////////// // value set here will be between 0 and 1. cbuffer TranslationBuffer:register(b1) { float textureTranslation; //@NOTE = hlsl automatically pads floats for you }; // for alpha blending textures cbuffer TransparentBuffer:register(b2) { float blendAmount; }; struct PixelInputType { float4 vertex_ModelSpace : SV_POSITION; float2 tex : TEXCOORD0; float3 normal : NORMAL; float3 tangent : TANGENT; float3 binormal : BINORMAL; float3 viewDirection : TEXCOORD1; float3 lightPos_LS[NUM_LIGHTS] : TEXCOORD2; float4 vertex_ScrnSpace : TEXCOORD5; }; float4 main(PixelInputType input) : SV_TARGET { bool bInsideSpotlight = true; float2 projectTexCoord; float depthValue; float lightDepthValue; float4 textureColor; float gamma = 7.f; /////////////////// NORMAL MAPPING ////////////////// float4 bumpMap = shaderTextures[4].Sample(SampleType, input.tex); // Sample the shadow value from the shadow texture using the sampler at the projected texture coordinate location. projectTexCoord.x = input.vertex_ScrnSpace.x / input.vertex_ScrnSpace.w / 2.0f + 0.5f; projectTexCoord.y = -input.vertex_ScrnSpace.y / input.vertex_ScrnSpace.w / 2.0f + 0.5f; float shadowValue = shaderTextures[6].Sample(SampleTypeClamp, projectTexCoord).r; // Expand the range of the normal value from (0, +1) to (-1, +1). 
bumpMap = (bumpMap * 2.0f) - 1.0f; // Change the COORDINATE BASIS of the normal into the space represented by basis vectors tangent, binormal, and normal! float3 bumpNormal = normalize((bumpMap.x * input.tangent) + (bumpMap.y * input.binormal) + (bumpMap.z * input.normal)); //////////////// AMBIENT BASE COLOR //////////////// // Set the default output color to the ambient light value for all pixels. float4 lightColor = cb_ambientColor * saturate(dot(bumpNormal, input.normal) + .2); // Calculate the amount of light on this pixel. for(int i = 0; i < NUM_LIGHTS; ++i) { float lightIntensity = saturate(dot(bumpNormal, normalize(input.lightPos_LS[i]))); if(lightIntensity > 0.0f) { lightColor += (cb_lights[i].diffuseColor * lightIntensity) * 0.3; } } // Saturate the final light color. lightColor = saturate(lightColor); // TEXTURE ANIMATION - Sample pixel color from texture at this texture coordinate location. input.tex.x += textureTranslation; // BLENDING float4 color1 = shaderTextures[0].Sample(SampleTypeWrap, input.tex); float4 color2 = shaderTextures[1].Sample(SampleTypeWrap, input.tex); float4 alphaValue = shaderTextures[3].Sample(SampleTypeWrap, input.tex); //textureColor = saturate((alphaValue * color1) + ((1.0f - alphaValue) * color2)); textureColor = color1; // Combine the light and texture color. float4 finalColor = lightColor * textureColor * shadowValue * gamma; //if(lightColor.x == 0) //{ // finalColor = cb_ambientColor * saturate(dot(bumpNormal, input.normal) + .2) * textureColor; //} return finalColor; }
  25. I make DXGI adapters and monitors enumeration. The second monitor connected to my computer is Dell P2715Q, which has 3840*2160 resolution. However, the program reports it as 2560*1440, the second available resolution. Minimal code to reproduce: #include "stdafx.h" #include <Windows.h> #include <stdio.h> #include <tchar.h> #include <iostream> #include <DXGI.h> #pragma comment(lib, "DXGI.lib") using namespace std; int main() { IDXGIFactory1* pFactory1; HRESULT hr = CreateDXGIFactory1(__uuidof(IDXGIFactory1), (void**)(&pFactory1)); if (FAILED(hr)) { wcout << L"CreateDXGIFactory1 failed. " << endl; return 0; } for (UINT i = 0;; i++) { IDXGIAdapter1* pAdapter1 = nullptr; hr = pFactory1->EnumAdapters1(i, &pAdapter1); if (hr == DXGI_ERROR_NOT_FOUND) { // no more adapters break; } if (FAILED(hr)) { wcout << L"EnumAdapters1 failed. " << endl; return 0; } DXGI_ADAPTER_DESC1 desc; hr = pAdapter1->GetDesc1(&desc); if (FAILED(hr)) { wcout << L"GetDesc1 failed. " << endl; return 0; } wcout << L"Adapter: " << desc.Description << endl; for (UINT j = 0;; j++) { IDXGIOutput *pOutput = nullptr; HRESULT hr = pAdapter1->EnumOutputs(j, &pOutput); if (hr == DXGI_ERROR_NOT_FOUND) { // no more outputs break; } if (FAILED(hr)) { wcout << L"EnumOutputs failed. " << endl; return 0; } DXGI_OUTPUT_DESC desc; hr = pOutput->GetDesc(&desc); if (FAILED(hr)) { wcout << L"GetDesc1 failed. " << endl; return 0; } wcout << L" Output: " << desc.DeviceName << L" (" << desc.DesktopCoordinates.left << L"," << desc.DesktopCoordinates.top << L")-(" << (desc.DesktopCoordinates.right - desc.DesktopCoordinates.left) << L"," << (desc.DesktopCoordinates.bottom - desc.DesktopCoordinates.top) << L")" << endl; } } return 0; } Program output: Adapter: Intel(R) Iris(TM) Pro Graphics 6200 Output: \\.\DISPLAY1 (0,0)-(1920,1200) Output: \\.\DISPLAY2 (1920,0)-(2560,1440) DISPLAY2 is reported with low resolution. Environment: Windows 10 x64 Intel(R) Iris(TM) Pro Graphics 6200 DELL P2715Q What can cause this behavior: DirectX restrictions, video memory, display adapter, driver, monitor? How can I fix this and get full available resolution?
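The usual cause of this is DPI virtualization: when the process is not DPI-aware, Windows reports scaled desktop coordinates for high-DPI monitors, which matches the 2560x1440 figure for a 3840x2160 panel at 150% scaling. Declaring per-monitor DPI awareness (in the application manifest, or programmatically before any window or DXGI work) normally makes DXGI report the native resolution. A minimal sketch of the programmatic route:

```cpp
#include <ShellScalingApi.h>
#pragma comment(lib, "Shcore.lib")

int main()
{
    // Opt in to per-monitor DPI awareness before enumerating adapters/outputs,
    // so DesktopCoordinates are reported in physical pixels.
    SetProcessDpiAwareness(PROCESS_PER_MONITOR_DPI_AWARE);

    // ... CreateDXGIFactory1 / EnumAdapters1 / EnumOutputs as in the post ...
    return 0;
}
```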