Jump to content

  • Log In with Google      Sign In   
  • Create Account

Banner advertising on our site currently available from just $5!


1. Learn about the promo. 2. Sign up for GDNet+. 3. Set up your advert!


korvax

Member Since 20 Jun 2012
Offline Last Active Apr 19 2015 02:05 PM

Topics I've Started

Help with WorldViewProjection

25 September 2014 - 02:52 PM

Hi, I'm having a problem moving my clip-space position calculation out of the shader and onto the CPU side — that is, creating a WorldViewProjection matrix on the CPU and using that in the shaders.

 

 

The following shader code works fine 

PS Shader.

struct PS_INPUT
{
	float4 PositionSS : SV_POSITION;
	float2 TexCoord : TEXCOORD0;
};
//--------------------------------------------------------------------------------------
// Pixel Shader: outputs either the sampled diffuse texture or the flat
// material colour, selected by the isTextured flag from the per-object
// constant buffer.
//--------------------------------------------------------------------------------------
PSOutput main(in PS_INPUT input)
{
	PSOutput output;
	// Single conditional expression instead of an if/else branch.
	output.Color = isTextured
		? txDiffuse.Sample(samLinear, input.TexCoord)
		: diffuse;
	return output;
}
// Per-draw transform constants for the vertex shader, bound to
// constant-buffer slot 0.
cbuffer VSConstants : register(cb0 )
{
	// object -> world
	matrix World;
	// world -> view (camera)
	matrix View;
	// view -> clip
	matrix Projection;	
	// Combined World*View*Projection, precomputed on the CPU.
	// NOTE(review): "WordViewProjection" looks like a typo for
	// "WorldViewProjection"; the spelling is kept because both the VS
	// below and the CPU-side struct reference this exact name.
	matrix WordViewProjection;
}
//--------------------------------------------------------------------------------------
struct VS_INPUT
{
    float3 Position	: POSITION;
    float2 TexCoord	: TEXCOORD0;
    float3 Normal	: NORMAL;
};

struct VS_OUTPUT
{
	float4 PositionCS : SV_POSITION;
	float2 TexCoord : TEXCOORD0;
};

//--------------------------------------------------------------------------------------
// Vertex Shader: transforms the object-space position through World,
// View and Projection (in that order) into clip space and passes the
// texture coordinate through unchanged.
//--------------------------------------------------------------------------------------
VS_OUTPUT main(in VS_INPUT input)
{
	VS_OUTPUT output = (VS_OUTPUT)0;
	// Promote to homogeneous coordinates (w = 1 for a point).
	float4 worldPos = mul(float4(input.Position, 1.0f), World);
	float4 viewPos = mul(worldPos, View);
	output.PositionCS = mul(viewPos, Projection);
	output.TexCoord = input.TexCoord;
	return output;
}

But I want to replace 

output.PositionCS = mul(output.PositionCS, View);
output.PositionCS = mul(output.PositionCS, Projection);	

with 

output.PositionCS = mul(position, WordViewProjection);

and my calculations for ConstantBuffer is

// Builds the per-object VS constant-buffer contents.
// HLSL's default matrix layout is column-major while XMMATRIX is
// row-major, so every matrix is transposed before upload.
VSC_WORLD PipelineManagerDX11::CalculateModelSpace(const IRenderable3D &renderble)
{	
	XMMATRIX model_matrix = renderble.World.GetTransformationMatrix();
	VSC_WORLD vsS;	
	vsS.mWorld = XMMatrixTranspose(model_matrix);
	vsS.mView = XMMatrixTranspose(this->m_View);
	vsS.mProjection = XMMatrixTranspose(this->m_Projection);
	// BUG FIX: the old code computed (World*View) * (View*Projection),
	// which applies the view transform twice (W*V*V*P) and produces
	// garbage clip-space positions (hence the black output). The
	// combined matrix must be World * View * Projection, applied once.
	XMMATRIX world_view_projection = model_matrix * this->m_View * this->m_Projection;
	vsS.mWordViewProjection = XMMatrixTranspose(world_view_projection);
	return vsS;
}

I'm not sure what I'm doing wrong here, but it's not working: my output is completely black, so something is going badly wrong with the PositionCS calculation. I'm pretty sure I'm missing something basic, though I'm not sure what.

 

All the help is much appreciated. 


Problem with Constant Buffer Size.

22 September 2014 - 09:46 AM

HI, 

I have a question regarding Constant Buffers in Directx 11, I'm not sure if this should go to the beginners forum feel free to move it if so. 

Anyhow. Im getting the following error: 

 

D3D11 WARNING: ID3D11DeviceContext::DrawIndexed: The size of the Constant Buffer at slot 0 of the Pixel Shader unit is too small (48 bytes provided, 80 bytes, at least, expected). This is OK, as out-of-bounds reads are defined to return 0. It is also possible the developer knows the missing data will not be used anyway. This is only a problem if the developer actually intended to bind a sufficiently large Constant Buffer for what the shader expects.  [ EXECUTION WARNING #351: DEVICE_DRAW_CONSTANT_BUFFER_TOO_SMALL]

 

If I'm understanding this message correctly it says that my buffer is 48 bytes large but it should be 80 bytes.

 

This is how my buffer looks:

// CPU-side mirror of the pixel shader's cbLight constant buffer.
// Five float4 members = 5 * 16 = 80 bytes, matching the 80 bytes the
// shader expects, so the "48 bytes provided" warning points at a
// different (too small) buffer being bound to PS slot 0.
struct cbPerLight
{ 
XMFLOAT4 lightDirection;  //16 bytes
XMFLOAT4 lightColor;   //16 bytes 
XMFLOAT4 lightPosition; //16 bytes
// NOTE(review): the shader calls this member "lightAngles" (plural);
// only the name differs, the layout still matches.
XMFLOAT4 lightAngle; //16 bytes
XMFLOAT4 lightRange;      //16 bytes
};

 16*5 = 80 bytes. Each member is exactly 16 bytes, so none of them straddles a 16-byte alignment boundary.

 

This is how my PixelShader looks

//---------------------------------------------------------------------------------------------------------------------
// Deferred spot-light pass: samples the four G-buffer targets and
// returns one light's diffuse contribution with distance and cone
// attenuation.
Texture2D txDiffuse : register(t0);
Texture2D txNormal : register(t1);
Texture2D txDepth : register(t2);
Texture2D txSpecular : register(t3);
//---------------------------------------------------------------------------------------------------------------------
SamplerState samLinear0 : register( s0 );
SamplerState samLinear1 : register( s1 );
SamplerState samLinear2 : register( s2 );
SamplerState samLinear3 : register( s3 );
//---------------------------------------------------------------------------------------------------------------------
// Per-light constants: 5 x float4 = 80 bytes.
cbuffer cbLight : register(b0)
{
	float4 lightDirection;
	float4 lightColor;	
	float4 lightPosition;
	float4 lightAngles;   // assumes x = cos(inner), y = cos(outer) -- TODO confirm
	float4 lightRange;    // x = light range
}
//---------------------------------------------------------------------------------------------------------------------
struct PS_INPUT
{
	float4 position : SV_POSITION;
	float2 texcoord : TEXCOORD0;
};
//---------------------------------------------------------------------------------------------------------------------
float4 main(PS_INPUT input) : SV_Target0
{
	float4 diffuseAlbedo = txDiffuse.Sample(samLinear0, input.texcoord);
	float4 normal = txNormal.Sample(samLinear1, input.texcoord);
	float4 depth = txDepth.Sample(samLinear2, input.texcoord);
	// BUG FIX: specular was sampled from txDepth (copy-paste error);
	// it must come from txSpecular via its own sampler.
	float4 specular = txSpecular.Sample(samLinear3, input.texcoord);
	// NOTE(review): input.position is the SV_POSITION pixel coordinate
	// (viewport pixels, not world space), so lightPosition - input.position
	// mixes coordinate spaces. The surface position should be
	// reconstructed from the depth target instead -- likely a cause of
	// the black output. Left unchanged pending that larger fix.
	float4 L = lightPosition - input.position;
	float distance =  length(L);		
	L /= distance;
	// Linear distance falloff, clamped at zero beyond the range.
	float attenuation = max(0, 1.0f - (distance / lightRange.x));

	float4 L2 = lightDirection;
	float rho = dot(-L, L2);
	// Spot-cone falloff between the inner (x) and outer (y) cosines.
	attenuation *= saturate((rho - lightAngles.y) / (lightAngles.x - lightAngles.y));

	// Energy-conserving Lambert term (albedo / pi).
	const float diffuseNormalizationFactor = 1.0f / 3.14159265f;		
	float d1 = dot(normal, L);
	float nDotL = saturate(d1);
	float4 diffuse = nDotL * lightColor * diffuseAlbedo * diffuseNormalizationFactor;
	return diffuse * attenuation;
}


Its a light stage in my render, should render a spotlight when its finished (It not complete yet).

 

I have to say that I'm not really sure how constant buffers are laid out in memory, but I believe they are packed in 16-byte blocks, no variable may straddle a 16-byte boundary, and padding variables can be added to enforce this.

My output is currently black because some of the variables appear to be corrupt in the shader, which makes me think it has something to do with the buffer layout.

 

But in this case I'm not sure where it gets 48 bytes from. Does anyone have any suggestions?

 

thx for you help.


How is a system with render dependent resources structured.

11 April 2014 - 06:03 AM

Hi, this question is more of a structural and code based.

Lets say you have some sort of engine in lack of a better name, has as a rendersystem.  The Rendersystem could be OpenGL or DX based.

The engine has knowledge of some of the common render objects — Shader, ShaderView, meshes and so on — but it does not know what an OpenGL or DX shader is. How do more professional systems handle this?

 

I can think of two solutions, one is that the rendersystem has a list of all DX Shader and the Shaders in the engine has an ID to that list (alt 1),

Or the OpenGL shader could be a subclass of the engine shader, cast to an OpenGL shader every time it is used in the render system (alt 2).

 
(alt 1)

// Alt 1: the render system keeps a list of API-specific shaders and the
// engine-side shader carries only an ID into that list.
void OpenGL::ShaderInput(EngineShader* pShader)
{
	// BUG FIX: the lookup returns the OpenGL-side shader object, so the
	// local must be an OpenGLShader*, not OpenGL*.
	OpenGLShader* pOpenGLShader = GetOpenGLShader(pShader->ID);
}

 

(alt2)

// Alt 2: the API-specific shader subclasses the engine shader and is
// downcast at the point of use.
void OpenGL::ShaderInput(EngineShader* pShader)
{
	// BUG FIX: static_cast needs the pointer type (OpenGLShader*), the
	// local's type must match, and the call needs a terminating
	// semicolon. If EngineShader is polymorphic and the input might not
	// be an OpenGL shader, dynamic_cast would be the checked alternative.
	OpenGLShader* pOpenGLShader = static_cast<OpenGLShader*>(pShader);
	DoSomething(pOpenGLShader);
}

 

I just think both solutions seem slow and maybe a bit wasteful. 

any thoughts pls?


Deferred Render structure.

30 October 2013 - 03:34 PM

Hi,

 

I render each object to the G-buffer; after that I should render each light in the scene to the light buffer, and the results should be blended, so that the light buffer is a blended combination of all lights in the scene — correct?

 

So how do I blend the lightbuffers renderpass? Is this something that i should program in the shaderworld or in the cpu world? And how would a typical very simple lightbuffer look like should that just be a pixel shader?

 

Also currently my setup works fine for 1 light (:-P) and it goes something like this.

 

1) Render all object to the G-Buffer one by one, each object has the G-Buffer as there primary shader

2) Render a quad with the light buffer as a shader and a light as input. If I start rendering 1000 lights I can't render the quad 1000 times, so I should somehow run the "light shader" 1000 times without any geometry, and then, as a third and final step, render a quad and apply the blended result from the light shader to that quad — how would I do that?

// Lightbuffer
//--------------------------------------------------------------------------------
// Deferred light pass: samples the G-buffer targets and returns a simple
// directional-light Lambert term.
Texture2D txDiffuse : register(t0);
Texture2D txNormal : register(t1);
Texture2D txDepth : register(t2);
Texture2D txSpecular : register(t3);
//--------------------------------------------------------------------------------
SamplerState samLinear0 : register( s0 );
SamplerState samLinear1 : register( s1 );
SamplerState samLinear2 : register( s2 );
SamplerState samLinear3 : register( s3 );
//--------------------------------------------------------------------------------
// Per-light constants: 3 x float4 = 48 bytes.
cbuffer cbLight : register(b0)
{
	float4 lightDirection;
	float4 lightColor;
	float4 lightRange;	
}
//--------------------------------------------------------------------------------
struct PS_INPUT
{
	float4 position : SV_POSITION;
	float2 texcoord : TEXCOORD0;
};
//--------------------------------------------------------------------------------
float4 main(PS_INPUT input) : SV_Target
{
	float4 diffuse = txDiffuse.Sample(samLinear0, input.texcoord);
	float4 normal = txNormal.Sample(samLinear1, input.texcoord);
	float4 depth = txDepth.Sample(samLinear2, input.texcoord);
	// BUG FIX: specular was sampled from txDepth (copy-paste error);
	// it must come from txSpecular.
	float4 specular = txSpecular.Sample(samLinear3, input.texcoord);
	// N dot L for a directional light; lightDirection points from the
	// light, so it is negated.
	float irradiance = saturate(dot(normal, -lightDirection));
	return lightColor*irradiance*diffuse;
}


// G-BUFFER
// Geometry pass: writes albedo, normal, depth and specular into four
// simultaneous render targets (MRT) for the later light pass to read.
Texture2D txDiffuse : register( t0 );
SamplerState samLinear : register( s0 );

// Per-object material constants.
cbuffer cbPerObject : register(b0)
{
	float4 diffuse;
	float4 specular;
	// NOTE(review): isTextured is declared but never read below -- the
	// colour is always diffuse * texture sample; confirm that is intended.
	bool isTextured;	
}

//--------------------------------------------------------------------------------------
struct PS_INPUT
{
  float4 position	 : SV_POSITION;
  float2 texcoord	 : TEXCOORD0;	
	float4 normal	: NORMAL;
};
//--------------------------------------------------------------------------------------
// One member per G-buffer render target.
struct PSOutput
{
	float4 Color : SV_Target0;
  float4 Normal : SV_Target1;	
	float4 Depth : SV_Target2;
	float4 Specular : SV_Target3;
	
};
//--------------------------------------------------------------------------------------
// Pixel Shader
//--------------------------------------------------------------------------------------
PSOutput main(PS_INPUT input)  
{
	PSOutput output;
	output.Color = diffuse*txDiffuse.Sample(samLinear, input.texcoord);
	// Re-normalize: interpolation across the triangle denormalizes it.
	output.Normal = normalize(input.normal);
	output.Specular = specular;
	// Post-projection depth (z/w) -- non-linear; broadcast to all channels.
	output.Depth =  input.position.z / input.position.w;
	return output;
}

any help is much appreciated.


Deferred Render output, repeated.

29 October 2013 - 02:30 PM

Hi,

I have the basics of a simple deferred renderer up and running. I render the end result on a "full screen quad" with the output as a ShaderView — I'm not sure this is the best way to do it; if not, what is? My problem, though, as you can see in the picture, is that the result is repeated and not "stretched out". I think this has something to do with the texture or the ShaderView, but I can't find the problem.

 

This is my Quad.

UINT uFullWidth = 1024;
UINT uFullHeight = 768;
// Full-screen quad used to display the deferred result.
// BUG FIX: the texture coordinates were scaled by the screen size
// (0..1024 / 0..768). With a WRAP sampler that tiles the source texture
// hundreds of times -- exactly the "repeated, not stretched" output
// described above. UVs must stay in [0, 1].
// NOTE(review): positions are also scaled by the pixel size; if the
// quad's VS expects NDC, plain +/-1 corners (no scaling) would suffice.
Vertex vertices_fullquad[] = {
{ XMFLOAT3(-1.0f*uFullWidth, 1.0f*uFullHeight, 0.0f), XMFLOAT2(0.0f, 0.0f), XMFLOAT3(0.0f, 1.0f, 0.0f) },
{ XMFLOAT3(1.0f*uFullWidth, 1.0f*uFullHeight, 0.0f), XMFLOAT2(1.0f, 0.0f), XMFLOAT3(0.0f, 1.0f, 0.0f) },
{ XMFLOAT3(-1.0f*uFullWidth, -1.0f*uFullHeight, 0.0f), XMFLOAT2(0.0f, 1.0f), XMFLOAT3(0.0f, 1.0f, 0.0f) },
{ XMFLOAT3(1.0f*uFullWidth, -1.0f*uFullHeight, 0.0f), XMFLOAT2(1.0f, 1.0f), XMFLOAT3(0.0f, 1.0f, 0.0f) },
};

// BUG FIX: the index list was declared as waLightIndices but filled via
// quad_indices -- one consistent name, two triangles (0,1,3) and (0,2,3).
TArray<WORD> quad_indices;
quad_indices.add(0); quad_indices.add(1); quad_indices.add(3); quad_indices.add(0); quad_indices.add(2); quad_indices.add(3);

RenderTarget Texture

	// Create the off-screen texture the deferred passes render into.
	// It is bound both as a render target (geometry/light pass writes)
	// and as a shader resource (the full-screen quad samples it).
	ID3D11Texture2D* pTexture = nullptr;
	// Create a render target view
	D3D11_TEXTURE2D_DESC descTarget;
	ZeroMemory(&descTarget, sizeof(descTarget));
	descTarget.Width = uWidth;
	descTarget.Height = uHeight;
	// Single mip level -- the texture is sampled 1:1, no mip chain needed.
	descTarget.MipLevels = 1;
	descTarget.ArraySize = 1;
	// Full-precision float target (16 bytes/texel).
	descTarget.Format = DXGI_FORMAT_R32G32B32A32_FLOAT;
	// No MSAA.
	descTarget.SampleDesc.Count = 1;
	descTarget.Usage = D3D11_USAGE_DEFAULT;
	descTarget.BindFlags = D3D11_BIND_RENDER_TARGET | D3D11_BIND_SHADER_RESOURCE;
	descTarget.CPUAccessFlags = 0;
	descTarget.MiscFlags = 0;
	// NOTE(review): hr should be checked (FAILED(hr)) before using pTexture.
	HRESULT hr = m_pDevice->CreateTexture2D(&descTarget, nullptr, &pTexture);

ShaderView

	// Describe a shader-resource view over the render-target texture so
	// the quad pass can sample it; the format is read back from the
	// texture to keep the two in sync.
	// NOTE(review): srvDesc is not zero-initialised (only the fields used
	// by a TEXTURE2D view are set), and the CreateShaderResourceView call
	// itself is not shown here -- confirm it is made with this desc and
	// that its HRESULT is checked.
	pTexture->GetDesc(&descTarget);
	D3D11_SHADER_RESOURCE_VIEW_DESC srvDesc;
	srvDesc.Format = descTarget.Format;
	srvDesc.ViewDimension = D3D11_SRV_DIMENSION_TEXTURE2D;
	srvDesc.Texture2D.MostDetailedMip = 0;
	srvDesc.Texture2D.MipLevels = 1;

Light-Shader

//----------------------------------------------------------------------------------------------
// Deferred light pass (directional light): samples the G-buffer targets
// and returns the Lambert-lit colour for the full-screen quad.
Texture2D txDiffuse : register(t0);
Texture2D txNormal : register(t1);
Texture2D txDepth : register(t2);
Texture2D txSpecular : register(t3);
//----------------------------------------------------------------------------------------------
SamplerState samLinear0 : register( s0 );
SamplerState samLinear1 : register( s1 );
SamplerState samLinear2 : register( s2 );
SamplerState samLinear3 : register( s3 );
//----------------------------------------------------------------------------------------------
// Per-light constants: 3 x float4 = 48 bytes.
cbuffer cbLight : register(b0)
{
	float4 lightDirection;
	float4 lightColor;
	float4 lightRange;	
}
//---------------------------------------------------------------------------------------------------------------------
struct PS_INPUT
{
	float4 position : SV_POSITION;
	float2 texcoord : TEXCOORD0;
};
//---------------------------------------------------------------------------------------------------------------------
float4 main(PS_INPUT input) : SV_Target
{
	float4 diffuse = txDiffuse.Sample(samLinear0, input.texcoord);
	float4 normal = txNormal.Sample(samLinear1, input.texcoord);
	float4 depth = txDepth.Sample(samLinear2, input.texcoord);   //Not used currently
	// BUG FIX: this sampled txDepth instead of txSpecular (copy-paste
	// error). Harmless while unused, but wrong once specular is wired in.
	float4 specular = txSpecular.Sample(samLinear3, input.texcoord); //Not used currently
	float irradiance = saturate(dot(normal, -lightDirection));
	return lightColor*irradiance*diffuse;
}

G-Buffer

// Geometry pass: writes albedo, normal, depth and specular into four
// simultaneous render targets (MRT) for the light pass above to read.
Texture2D txDiffuse : register( t0 );
SamplerState samLinear : register( s0 );
// Per-object material constants.
cbuffer cbPerObject : register(b0)
{
	float4 diffuse;
	float4 specular;
	// NOTE(review): isTextured is declared but never read below -- the
	// colour is always diffuse * texture sample; confirm that is intended.
	bool isTextured;	
}

//--------------------------------------------------------------------------------------
struct PS_INPUT
{
  float4 position	 : SV_POSITION;
  float2 texcoord	 : TEXCOORD0;	
  float4 normal          : NORMAL;
};
//--------------------------------------------------------------------------------------
// One member per G-buffer render target.
struct PSOutput
{
  float4 Color    : SV_Target0;
  float4 Normal   : SV_Target1;	
  float4 Depth    : SV_Target2;
  float4 Specular : SV_Target3;
	
};
//--------------------------------------------------------------------------------------
// Pixel Shader
//--------------------------------------------------------------------------------------
PSOutput main(PS_INPUT input)  
{
	PSOutput output;
	output.Color = diffuse*txDiffuse.Sample(samLinear, input.texcoord);
	// Re-normalize: interpolation across the triangle denormalizes it.
	output.Normal = normalize(input.normal);
	output.Specular = specular;
	// Post-projection depth (z/w) -- non-linear; broadcast to all channels.
	output.Depth =  input.position.z / input.position.w;
	return output;
}

PARTNERS