Jump to content

  • Log In with Google      Sign In   
  • Create Account


simotix

Member Since 17 Aug 2007
Offline Last Active Apr 21 2011 10:45 PM

Topics I've Started

Template Type Factory

05 March 2011 - 12:21 AM

I am working on a factory that will have types added to it; however, if the class is not explicitly instantiated in the .exe that is executed (compile-time), then the type is not added to the factory. This is due to the fact that the static call is somehow not being made. Does anyone have any suggestions on how to fix this? Below are five very small files that I am putting into a lib; an .exe will then call this lib. If there are any suggestions on how I can get this to work, or maybe a better design pattern, please let me know. Here is basically what I am looking for:

1) A factory that can take in types
2) Auto registration to go in the classes .cpp file, any and all registration code should go in the class .cpp (for the example below, RandomClass.cpp) and no other files.

BaseClass.h
#pragma once

class BaseClass
{
public:
	// Virtual destructor: the factory allocates derived types with `new` and
	// hands them back as BaseClass*, so deleting through the base pointer
	// must dispatch to the derived destructor (otherwise it is undefined
	// behavior).
	virtual ~BaseClass() { }
};

RandomClass.h
#pragma once

#include "BaseClass.h"

// Example concrete type that self-registers with the factory.
class RandomClass : public BaseClass
{
private:
	// Registration token: its initializer (in RandomClass.cpp) runs during
	// static initialization and adds this type to TemplatedFactory. The
	// value itself is never read.
	static short ID;

public:
	RandomClass();
	virtual ~RandomClass();
};

RandomClass.cpp
#include "RandomClass.h"
#include "TemplatedFactory.h"

// Registers RandomClass with the factory at static-initialization time.
// NOTE(review): when this .cpp is archived into a static lib, the linker may
// discard the entire object file if nothing in the .exe references a symbol
// from it, so this initializer never runs — presumably the cause of the
// missing registration described above; force-link the object (or reference
// a symbol from it) to confirm.
short RandomClass::ID = TemplatedFactory::GetInstance().AddType<RandomClass>("RandomClass");

// Nothing to initialize; construction is intentionally trivial — the
// interesting work (registration) happens in the static ID initializer.
RandomClass::RandomClass() { }

// Nothing to release; destruction is intentionally trivial.
RandomClass::~RandomClass() { }

TemplateFactory.h
#pragma once

#include <map>
#include <string>
using std::map;

#include "BaseClass.h"

// Factory helper: default-constructs a Type on the heap and returns it
// through the common BaseClass* interface.
template<typename Type>
BaseClass* createType()
{
	return new Type;
}

// Singleton registry mapping a type name to a creation function that
// produces a BaseClass-derived instance.
class TemplatedFactory
{
private:
	typedef BaseClass* (*ComponentFactoryFuncPtr)();

	// Key by std::string, NOT const char*: a const char* key makes the map
	// compare pointer addresses rather than string contents, so a lookup
	// with an equal-but-distinct string (e.g. from another translation
	// unit) would silently miss, and duplicate entries could accumulate.
	typedef map<std::string, ComponentFactoryFuncPtr> map_type;

	map_type m_Map;

public:
	// Meyers-singleton accessor, defined in TemplateFactory.cpp.
	static TemplatedFactory &GetInstance();

	// Registers Type under typeName; returns a dummy short so the call can
	// be used as a static-member initializer. Defined below.
	template<typename Type>
	short AddType(const char* typeName);
};

template<typename Type>
short TemplatedFactory::AddType(const char* typeName)
{
	ComponentFactoryFuncPtr function = &createType<Type>;
	m_Map.insert(std::make_pair(typeName, function));

	return 0;
}

TemplateFactory.cpp
#include "TemplatedFactory.h"

// Meyers singleton: the function-local static is constructed on first use,
// which avoids the static-initialization-order problem between translation
// units that a namespace-scope instance would have.
TemplatedFactory &TemplatedFactory::GetInstance()
{
	static TemplatedFactory instance;
	return instance;
}

[DX11] Instancing problem due to Input Assembler

26 February 2011 - 02:19 AM

I am attempting to achieve instancing, and while I have read the documentation on it, I am getting an issue with the input layout. I will get the following errors when I try to render:

D3D11: ERROR: ID3D11DeviceContext::DrawIndexedInstanced: Input Assembler - Vertex Shader linkage error: Signatures between stages are incompatible. Semantic 'POSITION' of the input stage has a hardware register component mask that is not a subset of the output of the previous stage. [ EXECUTION ERROR #345: DEVICE_SHADER_LINKAGE_REGISTERMASK ]
D3D11: ERROR: ID3D11DeviceContext::DrawIndexedInstanced: Input Assembler - Vertex Shader linkage error: Signatures between stages are incompatible. The input stage requires Semantic/Index (NORMAL,0) as input, but it is not provided by the output stage. [ EXECUTION ERROR #342: DEVICE_SHADER_LINKAGE_SEMANTICNAME_NOT_FOUND ]
D3D11: ERROR: ID3D11DeviceContext::DrawIndexedInstanced: Input Assembler - Vertex Shader linkage error: Signatures between stages are incompatible. Semantic 'TEXCOORD' is defined for mismatched hardware registers between the output stage and input stage. [ EXECUTION ERROR #343: DEVICE_SHADER_LINKAGE_REGISTERINDEX ]
D3D11: ERROR: ID3D11DeviceContext::DrawIndexedInstanced: Input Assembler - Vertex Shader linkage error: Signatures between stages are incompatible. Semantic 'TRANSFORM' is defined for mismatched hardware registers between the output stage and input stage. [ EXECUTION ERROR #343: DEVICE_SHADER_LINKAGE_REGISTERINDEX ]
D3D11: ERROR: ID3D11DeviceContext::DrawIndexedInstanced: Input Assembler - Vertex Shader linkage error: Signatures between stages are incompatible. Semantic 'TRANSFORM' is defined for mismatched hardware registers between the output stage and input stage. [ EXECUTION ERROR #343: DEVICE_SHADER_LINKAGE_REGISTERINDEX ]
D3D11: ERROR: ID3D11DeviceContext::DrawIndexedInstanced: Input Assembler - Vertex Shader linkage error: Signatures between stages are incompatible. Semantic 'TRANSFORM' is defined for mismatched hardware registers between the output stage and input stage. [ EXECUTION ERROR #343: DEVICE_SHADER_LINKAGE_REGISTERINDEX ]
D3D11: ERROR: ID3D11DeviceContext::DrawIndexedInstanced: Input Assembler - Vertex Shader linkage error: Signatures between stages are incompatible. Semantic 'TRANSFORM' is defined for mismatched hardware registers between the output stage and input stage. [ EXECUTION ERROR #343: DEVICE_SHADER_LINKAGE_REGISTERINDEX ]

I create my layout with the following

		D3D11_INPUT_ELEMENT_DESC layoutInstanced[] =
		{
			{ "POSITION", 0, DXGI_FORMAT_R32G32B32_FLOAT, 0, 0, D3D11_INPUT_PER_VERTEX_DATA, 0 },
			{ "NORMAL", 0, DXGI_FORMAT_R32G32B32_FLOAT, 0, 12, D3D11_INPUT_PER_VERTEX_DATA, 0 },
			{ "TEXCOORD", 0, DXGI_FORMAT_R32G32_FLOAT, 0, 24, D3D11_INPUT_PER_VERTEX_DATA, 0 },
			{ "TRANSFORM", 0, DXGI_FORMAT_R32G32B32A32_FLOAT, 1, 0, D3D11_INPUT_PER_INSTANCE_DATA, 1 },
			{ "TRANSFORM", 1, DXGI_FORMAT_R32G32B32A32_FLOAT, 1, 16, D3D11_INPUT_PER_INSTANCE_DATA, 1 },
			{ "TRANSFORM", 2, DXGI_FORMAT_R32G32B32A32_FLOAT, 1, 32, D3D11_INPUT_PER_INSTANCE_DATA, 1 },
			{ "TRANSFORM", 3, DXGI_FORMAT_R32G32B32A32_FLOAT, 1, 48, D3D11_INPUT_PER_INSTANCE_DATA, 1 },
		};	

		HRESULT hResult = D3D11Device->CreateInputLayout( layoutInstanced, 7, d3d11InstanceLNTVS->GetD3DBlob()->GetBufferPointer(),
			d3d11InstanceLNTVS->GetD3DBlob()->GetBufferSize(), &m_LNVInstancedLayout );

Which I am checking the hResult for any errors. In my vertex shader I have

// Per-vertex + per-instance input consumed by the instanced vertex shader.
struct VS_INPUT_INSTANCED
{
	// NOTE(review): declared float4 while the input layout supplies
	// DXGI_FORMAT_R32G32B32_FLOAT (xyz only) — this matches error #345's
	// "component mask not a subset" complaint; confirm float3 vs float4.
	float4 Pos : POSITION;
	float3 Norm : NORMAL;
	float2 Tex : TEXCOORD0;
	// A float4x4 occupies four consecutive input registers (TRANSFORM0..3),
	// matching the four TRANSFORM rows in the input layout.
	float4x4 Transform : TRANSFORM;
};

As the layout. They both seem to be defined the same way, and when I looked in PIX, the vertex shader that is using VS_INPUT_INSTANCED is being called. Is there something I am missing here?

Simple Model File Format

18 February 2011 - 12:30 AM

I am trying to find a simple model file format that would hopefully have an exporter written for it for Maya. I will eventually be loading the file in my DirectX11 renderer, but my rendering is done through an abstraction layer, so it should really be graphics-API independent. I was reading about COLLADA files, but I keep hearing that the file format is "bloated". Are there any other suggestions for a file format for basic models that do not need animation support?

Crash Reports

13 February 2011 - 02:57 PM

Does anyone have any suggestions or tutorials for creating crash reports for a program? I want to start sending out a game I have been developing to people, but if the game crashes through error handling, I want those reports sent to an email address that I have registered (along with data collected from the program). Does anyone know how to do this?

Reconstruction from Depth, correct value to reconstruct

13 February 2011 - 01:57 PM

I have been attempting to correctly reconstruct the depth buffer, and I have been able to reconstruct the value. However, I believe I am reconstructing the wrong value. I have been following http://mynameismjp.wordpress.com/2010/09/05/position-from-depth-3/ but the terminology is not always clear.

Something that I noticed is that the value I have in my working model of Deferred Rendering is different from what I have in this version (for the position). I debugged the same pixel, and even though the value of positionVS from "length(positionVS)" in my rendering of the buffer is the same as the value in my lighting shader, this "positionVS" is different than if I were to generate the position like this (traditional):

	// Sample the stored depth value for this pixel.
	float depthVal = depthMap.Sample( sampPointClamp, texCoord ).r;

	// Rebuild a clip-space position from screen position + sampled depth.
	// assumes input.ScreenPosition.xy is already in [-1,1] NDC — TODO confirm
	float4 position;
	position.xy = input.ScreenPosition.xy;
	position.z  = depthVal;
	position.w = 1.0f;

	// Unproject back through the inverse view-projection, then undo the
	// perspective divide to recover the world-space position.
	position = mul(position, InvertViewProjection);
	position /= position.w;

This leads me to believe that maybe I am calculating positionVS from my vertex shader in my gbuffer wrong.

As an example, in my rendering of the GBuffer my position will be "input.VertexShaderOutput::PositionVS ( -0.496, -1.000, 1.496, 1.000 )". In the traditional way, my position will be "( -0.496, -0.000, -0.504, 1.000 )", whereas in this reconstruction method, it will be exactly "( -0.496, -1.000, 1.496, 1.000 )".

Here is my shader code; could someone please take a look at my terminology and make sure I am calculating the correct thing? I am transposing my world, view and projection matrices before I send them into my shader.

G Buffer
// Per-vertex attributes fed to the G-buffer vertex shader.
struct VertexShaderInput
{
	float4 Position : POSITION;   // object-space position (w is overwritten to 1 in the VS)
	float3 Normal : NORMAL;       // object-space normal
	float2 TexCoord : TEXCOORD0;  // diffuse texture coordinates
};

// Interpolants from the G-buffer vertex shader to its pixel shader.
struct VertexShaderOutput
{
	float4 PositionCS : SV_POSITION;  // clip-space position
	float4 PositionVS : Position;     // view-space position (custom semantic); its length becomes the stored depth
	float2 TexCoord : TEXCOORD0;
	float3 Normal : TEXCOORD1;        // world-space normal
};

// G-buffer vertex shader: transforms the vertex through world/view/projection
// and captures the view-space position for the depth render target.
VertexShaderOutput VertexShaderFunction(VertexShaderInput input)
{
	// Force w = 1 so the position transforms as a point.
	input.Position.w = 1.0f;

	VertexShaderOutput output = (VertexShaderOutput)0;

	// world -> view, snapshotting the view-space position before projection
	output.PositionCS = mul( input.Position, World );
	output.PositionCS = mul( output.PositionCS, View );
	output.PositionVS = output.PositionCS;
	output.PositionCS = mul( output.PositionCS, Projection );

	output.TexCoord = input.TexCoord;

	// Rotate the normal into world space (upper 3x3 only) and renormalize.
	output.Normal = mul( input.Normal, (float3x3)World );
	output.Normal = normalize(output.Normal);

	return output;
}

// G-buffer layout: three simultaneous render targets.
struct PixelShaderOutput
{
	float4 Color : SV_Target0;   // rgb = albedo, a = specular intensity
	float4 Normal : SV_Target1;  // rgb = normal packed to [0,1], a = specular power
	float4 Depth : SV_Target2;   // x = length of the view-space position
};

// G-buffer pixel shader: writes albedo + specular intensity, the packed
// world-space normal + specular power, and view-space distance as "depth".
// (Also removes the stray ';' that followed the closing brace.)
PixelShaderOutput PixelShaderFunction(VertexShaderOutput input)
{
	PixelShaderOutput output = (PixelShaderOutput)0;

	// Albedo in rgb; the alpha channel is repurposed for specular intensity.
	output.Color = txDiffuse.Sample( samLinear, input.TexCoord );
	output.Color.a = SpecularIntensity;

	// Pack the [-1,1] normal into [0,1] for storage in the render target.
	output.Normal.rgb = 0.5f * (normalize(input.Normal) + 1.0f);
	output.Normal.a = SpecularPower;

	// Store view-space DISTANCE (not raw z): the lighting pass reconstructs
	// position as CameraPosition + normalize(viewRay) * thisValue.
	output.Depth.x = length(input.PositionVS);

	return output;
}

Point Light:
// Light-volume vertex: position only.
struct VertexShaderInput
{
	float3 Position : POSITION0;
};

// Interpolants for the point-light pixel shader.
struct VertexShaderOutput
{
	float4 PositionCS : SV_POSITION;  // clip-space position
	float3 ViewRay : TEXCOORD0;       // world-space ray from the camera to the vertex
};

// Point-light vertex shader: transforms the light volume to clip space and
// emits the camera-to-vertex ray used for position reconstruction.
VertexShaderOutput VertexShaderFunction(VertexShaderInput input)
{
	VertexShaderOutput output;

	// Promote to homogeneous coordinates and move into world space.
	float4 worldPosition = mul( float4(input.Position, 1.0f), World );

	// Ray from the camera to this vertex, interpolated per pixel.
	output.ViewRay = worldPosition.xyz - CameraPosition;

	// Continue the chain: world -> view -> clip.
	output.PositionCS = mul( worldPosition, View );
	output.PositionCS = mul( output.PositionCS, Projection );

	return output;
}

// Point-light pixel shader: reconstructs the world-space position from the
// stored view distance, then applies N.L diffuse with linear attenuation.
float4 PixelShaderFunction(VertexShaderOutput input) : SV_Target
{
	// SV_POSITION xy arrives in pixels; scale into [0,1] texcoords.
	float2 texCoord = input.PositionCS.xy * InvTextureSize;

	// Reconstruct world position: camera + unit view ray * stored distance.
	float3 viewRay = normalize(input.ViewRay);
	float viewDistance = depthMap.Sample( sampPointClamp, texCoord ).x;
	float3 positionWS = CameraPosition + viewRay * viewDistance;

	// Unpack the [0,1]-encoded G-buffer normal back to [-1,1].
	float4 normalData = normalMap.Sample( sampPointClamp, texCoord);
	float3 normal = 2.0f * normalData.xyz - 1.0f;

	// FIX: the light direction must point from the surface TOWARD the light
	// (the original pointed away, negating N.L), and both N.L and the
	// attenuation must be clamped so pixels outside the radius or facing
	// away do not receive negative (darkening) light.
	float3 lightDir = normalize(PointLightPosition - positionWS);
	float d = length( PointLightPosition - positionWS);
	float nDl = saturate(dot(normal, lightDir));
	float atten = saturate(1.0f - d/PointLightRadius);

	float4 color = (atten * float4(PointLightColor.rgb, 1.0f)) + (nDl * atten);
	return color;
}

PARTNERS