caballero

Member Since 12 Mar 2011
Offline Last Active Feb 16 2012 01:30 PM
-----

Topics I've Started

Using multiple constant buffers

29 January 2012 - 02:04 PM

Hi,
I've started learning DX11 with the SlimDX framework.
Currently I'm trying to use two constant buffers, so that some information can be updated more frequently than the rest.

But with my current shader code I get some strange behaviour using the two constant buffers:
// C# structs mirroring the shader's cbuffer layouts
struct ShaderLightBuffer
{
    public Vector3 Ambient;
    public float Alpha;
    public Vector3 Diffuse;
    public float Shininess;
    public Vector3 Specular;
    public float padding;
    public Vector2 SpecularTextured;
    public Vector2 padding2;
}

struct ShaderMatrixBuffer
{
    public Matrix World;
    public Matrix WorldViewProjection;
    public Vector3 CameraPosition;
    public float padding1;
}
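
As a side note, both structs come out as multiples of 16 bytes, which matches HLSL's cbuffer packing and D3D11's size requirement for constant buffers. A quick sanity check I use (assuming SlimDX's vector/matrix types marshal without extra padding):

// ShaderLightBuffer:  4 float4 registers                   =  64 bytes
// ShaderMatrixBuffer: 2 float4x4 (64 each) + 1 float4 (16) = 144 bytes
System.Diagnostics.Debug.Assert(Marshal.SizeOf(typeof(ShaderLightBuffer)) == 64);
System.Diagnostics.Debug.Assert(Marshal.SizeOf(typeof(ShaderMatrixBuffer)) == 144);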

// buffer initialization
_light_constant_buffer = new Buffer(Device, Marshal.SizeOf(typeof(ShaderLightBuffer)), ResourceUsage.Default, BindFlags.ConstantBuffer, CpuAccessFlags.None, ResourceOptionFlags.None, 0);
_matrix_constant_buffer = new Buffer(Device, Marshal.SizeOf(typeof(ShaderMatrixBuffer)), ResourceUsage.Default, BindFlags.ConstantBuffer, CpuAccessFlags.None, ResourceOptionFlags.None, 0);

// render logic
ImmediateContext.InputAssembler.InputLayout = _input_layout;
ImmediateContext.InputAssembler.PrimitiveTopology = PrimitiveTopology.TriangleList;

ImmediateContext.InputAssembler.SetVertexBuffers(0, _buffer_bindings.ToArray());
ImmediateContext.VertexShader.Set(_vertex_shader);

ImmediateContext.VertexShader.SetConstantBuffer(_matrix_constant_buffer, 0);
ImmediateContext.VertexShader.SetConstantBuffer(_light_constant_buffer, 1);

ImmediateContext.Rasterizer.SetViewports(Engine.DeviceManager.Viewport);

ImmediateContext.PixelShader.Set(_pixel_shader);
ImmediateContext.PixelShader.SetConstantBuffer(_light_constant_buffer, 2);

ImmediateContext.OutputMerger.SetTargets(DepthBuffer, RenderTarget);

// .. set wvp, world, camera position to matrix buffer..
ShaderMatrixBuffer matrix_buffer = new ShaderMatrixBuffer();
matrix_buffer.WorldViewProjection = view_proj;
matrix_buffer.World = world_matrix;
matrix_buffer.CameraPosition = Engine.Camera.Eye;

// update matrix constant buffer
var matrix_stream = new DataStream(Marshal.SizeOf(typeof(ShaderMatrixBuffer)), true, true);
matrix_stream.Write(matrix_buffer);
matrix_stream.Position = 0;

ImmediateContext.UpdateSubresource(new DataBox(0, 0, matrix_stream), _matrix_constant_buffer, 0);
ImmediateContext.VertexShader.SetConstantBuffer(_matrix_constant_buffer, 0);

for (int i = 0; i < Materials.Count; i++)  // indexed loop, since _vertices_count is indexed by material
{
    var material = Materials[i];

    ShaderLightBuffer light_buffer = new ShaderLightBuffer();
    light_buffer.Alpha = 1f;
    light_buffer.Ambient = material.Ambient;
    light_buffer.Diffuse = material.Diffuse;
    light_buffer.Specular = material.Specular;
    light_buffer.Shininess = (int)material.Shininess;
    light_buffer.SpecularTextured = new Vector2(0);

    if (!string.IsNullOrEmpty(material.TextureFilename))
    {
        light_buffer.SpecularTextured = new Vector2(1);
        if (_shader_resource == null)
        {
            var texture = Texture2D.FromFile(Device, Path.Combine(material.Path, material.TextureFilename));
            _shader_resource = new ShaderResourceView(Device, texture);
        }

        ImmediateContext.PixelShader.SetShaderResource(_shader_resource, 0);
    }

    ImmediateContext.PixelShader.SetSampler(_sampler_state, 0);

    var light_stream = new DataStream(Marshal.SizeOf(typeof(ShaderLightBuffer)), true, true);
    light_stream.Write(light_buffer);
    light_stream.Position = 0;

    ImmediateContext.UpdateSubresource(new DataBox(0, 0, light_stream), _light_constant_buffer, 0);

    ImmediateContext.VertexShader.SetConstantBuffer(_light_constant_buffer, 1);
    ImmediateContext.VertexShader.SetConstantBuffer(_light_constant_buffer, 2);

    ImmediateContext.Draw(_vertices_count[i], 0);
}
// end render logic
Alright, I hope this code is straightforward: it just sets all the information for the pipeline stages, updates the general (matrix) constant buffer once, and updates the light cbuffer for each material used by the geometry.
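
For reference, on the shader side I would expect the cbuffer declarations to look roughly like this for the slots above to line up (a sketch only; the buffer names are illustrative, and as far as I know the compiler assigns cbuffer registers in declaration order when no explicit register() bindings are given, which may not match the application's slots):

// sketch: cbuffer declarations matching the C# structs, with explicit registers
cbuffer MatrixBuffer : register(b0)  // 144 bytes, updated once per frame
{
    float4x4 World;
    float4x4 WorldViewProjection;
    float3   CameraPosition;
    float    padding1;
};

cbuffer LightBuffer : register(b1)   // 64 bytes, updated once per material
{
    float3 MaterialAmbient;
    float  Alpha;
    float3 MaterialDiffuse;
    float  MaterialShininess;
    float3 MaterialSpecular;
    float  padding;
    float2 SpecularTextured;
    float2 padding2;
};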

Now for the odd part, the shader code:
PS_INPUT VS( VS_INPUT input )
{
PS_INPUT output;

// Transform the position into world space for lighting, and projected space
float4 vPosWorld = mul( float4(input.position,1), World );
output.position = mul( float4(input.position,1), WorldViewProjection );

// pass texture coordinate
output.texcoord = input.texcoord;

// transform normal into world space for lighting
float3 normal_world = mul( input.normal, (float3x3) World);
float3 light_vector = normalize( LightPosition - vPosWorld.xyz );

// compute the ambient and diffuse components of illumination
output.color.rgb = LightColor * MaterialAmbient;
output.color.rgb += LightColor * MaterialDiffuse * saturate( dot( light_vector, normal_world ) );

if( SpecularTextured.x > 0)
{
float3 camera = normalize( vPosWorld.xyz - CameraPosition );
float3 reflection = reflect ( light_vector, normal_world );
float phong_value = saturate( dot( reflection, camera ) );
output.color.rgb += MaterialSpecular * pow( phong_value, MaterialShininess );
}

// odd things happen if I comment out the next line -> I get the DEVICE_DRAW_CONSTANT_BUFFER_TOO_SMALL warning quoted in full below
output.color.rgb = MaterialDiffuse;
output.color.a = MaterialShininess;

return output;
}

float4 PS( PS_INPUT input ) : SV_Target
{
float4 output = input.color;
// Sample and modulate the texture
if ( SpecularTextured.y > 0 )
output.rgb *= MeshTexture.Sample( samLinear, input.texcoord );
return output;
}
If I set the output color directly to the diffuse value in the vertex shader (the last two assignments, which I added for debugging), my geometry is drawn correctly with the defined color.
But commenting out that color assignment gives me the following warning:
[7480] D3D11: WARNING: ID3D11DeviceContext::Draw: The size of the Constant Buffer at slot 1 of the Vertex Shader unit is too small (64 bytes provided, 144 bytes, at least, expected). This is OK, as out-of-bounds reads are defined to return 0. It is also possible the developer knows the missing data will not be used anyway. This is only a problem if the developer actually intended to bind a sufficiently large Constant Buffer for what the shader expects. [ EXECUTION WARNING #351: DEVICE_DRAW_CONSTANT_BUFFER_TOO_SMALL ]

This also happens if I use the UpdateSubresource method to update both constant buffers.
It seems that the second constant buffer update also overwrites the first constant buffer. Notably, the sizes in the warning match my structs: 64 bytes is the size of ShaderLightBuffer, while the 144 bytes expected at slot 1 is the size of ShaderMatrixBuffer.

I wanted to ask: how should multiple constant buffer updates be handled? Am I missing something?

FX Composer

01 August 2011 - 03:54 PM

Hi,
I wanted to ask whether there is a tutorial on accessing the depth buffer in NVIDIA's FX Composer, or further tutorials on topics like using MRTs and sampling from them afterwards.

I've looked at some samples from the shader library, but to me they all look a bit messy and are mixed up with SM3 code. I searched Google for a while, but so far I haven't found any further documentation on this topic.

Another question: do game developers use FX Composer, or do they develop their own shader authoring tools?

Are there any alternatives to FX Composer for developing SM4-and-above shaders? It's a pity that ATI stopped development on RenderMonkey; its UI isn't as overcrowded as FX Composer's.


cheers

[SlimDx 10] WPF

04 May 2011 - 06:24 AM

Hi Folks,
in all the C++ DX books, the authors describe creating a swap chain along with the device to manage the front buffer, which is presented, and the back buffer, into which the next frame is rendered. SwapChain.Present then finally swaps the two (or more) buffers.

But looking at the SlimDX 10 WPF sample, there is no DX10 swap chain creation; instead, a DX9 swap chain is created. From what I've read so far, this is because WPF's D3DImage doesn't support DX10.
And instead of calling SwapChain.Present to swap the buffers, the device is flushed (Device.Flush()), where the documentation only says that all queued commands are sent to the command buffer, which tells me nothing.
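
As far as I can tell, the per-frame flow in the sample is roughly the following (my reconstruction; _d3dImage, _surface_ptr, _device, and RenderScene are placeholder names):

// render into the shared DX9 surface, flush, then tell WPF the image changed
_d3dImage.Lock();
_d3dImage.SetBackBuffer(D3DResourceType.IDirect3DSurface9, _surface_ptr);

RenderScene();   // draw the scene into the render target backed by the shared surface

_device.Flush(); // instead of Present: WPF composites the surface itself, so we
                 // only make sure all queued commands reach the GPU
_d3dImage.AddDirtyRect(new Int32Rect(0, 0, _d3dImage.PixelWidth, _d3dImage.PixelHeight));
_d3dImage.Unlock();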

Can anyone explain the general idea of how this approach works?


Recently I developed a deferred rendering scenario in DX10 with SlimDX as a WPF application; it simply blurs the mesh objects in the scene.
After I finally got it working, it showed some strange VSync artifacts when moving the scene. When I google this, the advice is always that SwapChain.Present handles these errors, but obviously I have no swap chain at hand!

Has anyone tried to create a DX10 swap chain in a SlimDX-based WPF application?

[SlimDx] Generate adjacency

16 March 2011 - 03:28 AM

Hi, I've reimplemented loading an .obj file into a mesh object in C#.
At the end I reach the stage where I assign the vertex, index, and attribute buffers to the mesh object. After that I try to optimize the vertex/index buffers via the OptimizeInPlace method.
As far as I understand this optimization step, I first have to call the GenerateAdjacency method, which treats duplicate vertices (same position within a given epsilon) as identical when building the adjacency information.

But after calling the method I get a Direct3D9Exception (I'm still using a DX9 device) in BaseMesh::GenerateAdjacency, where the returned HRESULT indicates a failure.

Here's my current code snippet:
// Create the encapsulated mesh
_mesh = new SlimDX.Direct3D9.Mesh(device, _Indices.Count / 3, _Vertices.Count, MeshFlags.SystemMemory | MeshFlags.Use32Bit, PositionNormalTextureVertex.VertexElements);

// store the vertex buffer in the mesh
DataStream vertex_stream = _mesh.LockVertexBuffer(LockFlags.None);
vertex_stream.WriteRange(_Vertices.ToArray());
_mesh.UnlockVertexBuffer();

// store the index buffer
DataStream index_stream = _mesh.LockIndexBuffer(LockFlags.None);
index_stream.WriteRange(_Indices.ToArray());
_mesh.UnlockIndexBuffer();

// store the attribute buffer
DataStream attribute_stream = _mesh.LockAttributeBuffer(LockFlags.None);
attribute_stream.WriteRange(_Attributes.ToArray());
_mesh.UnlockAttributeBuffer();

_mesh.GenerateAdjacency(0.001f); // <-- the exception is thrown here
// _mesh.OptimizeInPlace(MeshOptimizeFlags.AttributeSort | MeshOptimizeFlags.VertexCache);


VertexElement is defined as:
public static readonly VertexElement[] VertexElements =
{
    new VertexElement(0, 0, DeclarationType.Float4, DeclarationMethod.Default, DeclarationUsage.Position, 0),
    new VertexElement(0, 16, DeclarationType.Float3, DeclarationMethod.Default, DeclarationUsage.Normal, 0),
    new VertexElement(0, 28, DeclarationType.Float2, DeclarationMethod.Default, DeclarationUsage.TextureCoordinate, 0),
    VertexElement.VertexDeclarationEnd
};
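
For reference, this declaration implies a 36-byte vertex (16 + 12 + 8), and the vertex struct has to match those offsets. A sketch of how I understand the mapping (field names illustrative):

// sequential layout matching the declaration:
// Float4 position at offset 0, Float3 normal at 16, Float2 texcoord at 28
[StructLayout(LayoutKind.Sequential)]
public struct PositionNormalTextureVertex
{
    public Vector4 Position; // 16 bytes, offset 0
    public Vector3 Normal;   // 12 bytes, offset 16
    public Vector2 TexCoord; //  8 bytes, offset 28
}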

The mesh is drawn correctly if I comment out the GenerateAdjacency call, so the rendering itself works fine.

Any ideas? Am I missing something?
Cheers
