Jump to content

  • Log In with Google      Sign In   
  • Create Account

We're offering banner ads on our site from just $5!

1. Details HERE. 2. GDNet+ Subscriptions HERE. 3. Ad upload HERE.


kgs

Member Since 26 Jul 2011
Offline Last Active May 29 2014 11:46 PM

Topics I've Started

Help! How do I release these resources completely?

20 April 2014 - 11:04 AM

Here is a class that load D2DBitmap and draw it.

// Loads a Direct2D bitmap from an image file (via WIC) and draws it.
//
// Ownership: mStateBlock and mBitmap are raw COM pointers owned by this
// class and released in the destructor. In-class initializers guarantee
// they are null until actually created — without them, destroying a
// sprite whose LoadResource() was never called releases an
// uninitialized pointer (undefined behavior).
class YHMSprite
{
public:
	// Creates the drawing state block; no bitmap is loaded until
	// LoadResource() is called.
	YHMSprite(const std::shared_ptr<DX::DeviceResources>& deviceResources);
	//YHMSprite(const std::shared_ptr<DX::DeviceResources>& deviceResources, LPCWSTR fileName);
	//YHMSprite(const std::shared_ptr<DX::DeviceResources>& deviceResources, Platform::String^ fileName);
	~YHMSprite();
	// Decodes the image file and creates the D2D bitmap from it.
	void LoadResource(LPCWSTR fileName);
	void Draw(float scale, float x, float y);
	void Draw(float scaleX, float scaleY, float x, float y);
	void Draw(float scaleX, float scaleY, float x, float y, float opacity);
	D2D1_SIZE_F GetBitmapSize();
private:
	std::shared_ptr<DX::DeviceResources> mDeviceResources;
	bool mIsResourceLoaded = false;
	ID2D1DrawingStateBlock *mStateBlock = nullptr;
	ID2D1Bitmap *mBitmap = nullptr;   // null until LoadResource succeeds
	D2D1_SIZE_F	mBitmapSize = {};
};

And I create these resources:

// Caches the shared device resources and creates the drawing state
// block. mBitmap/mStateBlock are explicitly nulled so the destructor
// can safely Release() them even when LoadResource() is never called
// or CreateDrawingStateBlock fails.
YHMSprite::YHMSprite(const std::shared_ptr<DX::DeviceResources>& deviceResources)
{
	mIsResourceLoaded = false;
	mBitmap = nullptr;
	mStateBlock = nullptr;
	mDeviceResources = deviceResources;
	mDeviceResources->GetD2DFactory()->CreateDrawingStateBlock(&mStateBlock);
}

// Decodes an image file with WIC and creates a 32bpp premultiplied-BGRA
// D2D bitmap from it. Every COM call is checked: the original code
// dereferenced the decoder/converter even when creation failed, which
// crashes on a missing or unreadable file. Safe to call more than once —
// a previously loaded bitmap is released first instead of leaking.
void YHMSprite::LoadResource(LPCWSTR fileName)
{
	// Release any bitmap from an earlier call before overwriting the pointer.
	if (mBitmap != nullptr)
	{
		mBitmap->Release();
		mBitmap = nullptr;
	}
	mIsResourceLoaded = false;

	ComPtr<IWICBitmapDecoder> wicBitmapDecoder;
	HRESULT hr = mDeviceResources->GetWicImagingFactory()->CreateDecoderFromFilename(fileName, nullptr, GENERIC_READ, WICDecodeMetadataCacheOnDemand, &wicBitmapDecoder);
	if (FAILED(hr)) return;
	ComPtr<IWICBitmapFrameDecode> wicBitmapFrame;
	hr = wicBitmapDecoder->GetFrame(0, &wicBitmapFrame);
	if (FAILED(hr)) return;
	ComPtr<IWICFormatConverter> wicFormatConverter;
	hr = mDeviceResources->GetWicImagingFactory()->CreateFormatConverter(&wicFormatConverter);
	if (FAILED(hr)) return;
	// Convert to the pixel format Direct2D expects.
	hr = wicFormatConverter->Initialize(wicBitmapFrame.Get(), GUID_WICPixelFormat32bppPBGRA, WICBitmapDitherTypeNone, nullptr, 0.0, WICBitmapPaletteTypeCustom);
	if (FAILED(hr)) return;
	// Defaults used when the file carries no resolution metadata.
	double DpiX = 72.0, DpiY = 72.0;
	wicFormatConverter->GetResolution(&DpiX, &DpiY);
	hr = mDeviceResources->GetD2DDeviceContext()->CreateBitmapFromWicBitmap(wicFormatConverter.Get(), BitmapProperties(PixelFormat(DXGI_FORMAT_B8G8R8A8_UNORM, D2D1_ALPHA_MODE_PREMULTIPLIED), static_cast<float>(DpiX), static_cast<float>(DpiY)), &mBitmap);
	if (FAILED(hr)) return;
	mBitmapSize = mBitmap->GetSize();
	mIsResourceLoaded = true;
}

In my code, I release these resources in the destructor:

// Releases the COM objects this sprite owns. The null checks matter:
// mBitmap only exists after a successful LoadResource(), and either
// Create* call may have failed — the original unconditional Release()
// calls crash (or corrupt the heap) in those cases.
YHMSprite::~YHMSprite()
{
	if (mBitmap != nullptr)
	{
		mBitmap->Release();
		mBitmap = nullptr;
	}
	if (mStateBlock != nullptr)
	{
		mStateBlock->Release();
		mStateBlock = nullptr;
	}
}
 
I release the resources like this:
YHMSprite* test = new YHMSprite(....);
delete test;
But it doesn't release test completely!
Thanks.

Question about post-processing

30 March 2014 - 08:12 PM

I'm working for a post-processing effect for a long time.

I use multi rendertarget to achieve this effect.

But I met a problem.

3tb_1403311010323ris512293.jpg

As the screenshot shows, I rendered the object two times — once with the glow effect and once without. They are in the same position, but the glow effect is just a 2D image, so the result looks unreal, like a 2D image laid over a 3D scene.

I want this effect looked like a 3D effect.

How can I fix it?


Need HELP! Question about post-processing.

21 March 2014 - 01:47 AM

I followed D3D Post Processing sample in windows 8.1.

And I have successfully rendered my object with post processing effect.

But now I've met a problem. I have many objects to render, and I want only some of them rendered with the post-processing effect. I know this problem is about render targets, but I don't know how to deal with it.

Following is my code:

// Renders the mesh's first geometry section directly to the back
// buffer — no intermediate render target, no post-processing passes.
void D3DPostProcessing::DrawWithoutPostProcessing()
{
	// Output-merger: bind and clear the back buffer + depth buffer.
	m_d3dContext->OMSetRenderTargets(1, m_d3dRenderTargetView.GetAddressOf(), m_d3dDepthStencilView.Get());
	const float clearColor[4] = { 0.071f, 0.040f, 0.561f, 1.0f };
	m_d3dContext->ClearRenderTargetView(m_d3dRenderTargetView.Get(), clearColor);
	m_d3dContext->ClearDepthStencilView(m_d3dDepthStencilView.Get(), D3D11_CLEAR_DEPTH, 1.0f, 0);

	// Input-assembler: layout, vertex/index buffers, topology.
	m_d3dContext->IASetInputLayout(m_inputLayout.Get());
	unsigned int vertexStride = sizeof(VertexPNTTBI);
	unsigned int vertexOffset = 0;
	m_d3dContext->IASetVertexBuffers(0, 1, mYHMMesh.Geometry[0].VertexBuffer.GetAddressOf(), &vertexStride, &vertexOffset);
	m_d3dContext->IASetIndexBuffer(mYHMMesh.Geometry[0].IndexBuffer.Get(), DXGI_FORMAT_R16_UINT, 0);
	m_d3dContext->IASetPrimitiveTopology(D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST);

	// Vertex-shader stage.
	m_d3dContext->VSSetShader(m_vertexShader.Get(), nullptr, 0);
	m_d3dContext->VSSetConstantBuffers(0, 1, m_constantBuffer.GetAddressOf());

	// Pixel-shader stage: shader, diffuse texture, sampler.
	m_d3dContext->PSSetShader(m_pixelShader.Get(), nullptr, 0);
	m_d3dContext->PSSetShaderResources(0, 1, mYHMMesh.Geometry[0].textureSVR.GetAddressOf());
	m_d3dContext->PSSetSamplers(0, 1, m_sampler.GetAddressOf());

	m_d3dContext->DrawIndexed(mYHMMesh.Geometry[0].IndexCount, 0, 0);
}

// Renders the mesh into the intermediate render target, then runs the
// glow chain (bright-pass/downsample, blur, combine) over that texture.
// The backbuffer viewport is saved before the draw and restored after,
// since the intermediate target may have different dimensions.
void D3DPostProcessing::DrawPostProcessing()
{
	// Render into the intermediate texture instead of the back buffer;
	// the glow passes below consume this texture as their input.
	m_d3dContext->OMSetRenderTargets(1, m_intermediateTextureRenderTargetView.GetAddressOf(), m_d3dDepthStencilView.Get());
	const float clearColor[4] = { 0.071f, 0.040f, 0.561f, 1.0f };
	m_d3dContext->ClearRenderTargetView(m_intermediateTextureRenderTargetView.Get(), clearColor);
	m_d3dContext->ClearDepthStencilView(m_d3dDepthStencilView.Get(), D3D11_CLEAR_DEPTH, 1.0f, 0);
	m_d3dContext->IASetInputLayout(m_inputLayout.Get());

	unsigned int stride = sizeof(VertexPNTTBI);
	unsigned int offset = 0;

	// Input-assembler state: first geometry section of the mesh,
	// 16-bit indices, triangle list.
	m_d3dContext->IASetVertexBuffers(0, 1, mYHMMesh.Geometry[0].VertexBuffer.GetAddressOf(), &stride, &offset);
	m_d3dContext->IASetIndexBuffer(mYHMMesh.Geometry[0].IndexBuffer.Get(), DXGI_FORMAT_R16_UINT, 0);
	m_d3dContext->IASetPrimitiveTopology(D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST);
	m_d3dContext->VSSetShader(m_vertexShader.Get(), nullptr, 0);
	m_d3dContext->VSSetConstantBuffers(0, 1, m_constantBuffer.GetAddressOf());
	// set the pixel shader stage state
	m_d3dContext->PSSetShader(m_pixelShader.Get(), nullptr, 0);
	m_d3dContext->PSSetShaderResources(0, 1, mYHMMesh.Geometry[0].textureSVR.GetAddressOf());
	m_d3dContext->PSSetSamplers(0, 1, m_sampler.GetAddressOf());
	// Save the backbuffer viewport
	D3D11_VIEWPORT oldViewPort[1];
	unsigned int numberOfViewPorts = 1;
	m_d3dContext->RSGetViewports(&numberOfViewPorts, oldViewPort);
	// Setup the viewport to match the Intermediate Render Target
	D3D11_VIEWPORT viewPort = { 0 };
	viewPort.Width = static_cast<float>(m_intermediateRenderTargetWidth);
	viewPort.Height = static_cast<float>(m_intermediateRenderTargetHeight);
	viewPort.MinDepth = 0.0f;
	viewPort.MaxDepth = 1.0f;
	viewPort.TopLeftX = 0;
	viewPort.TopLeftY = 0;
	m_d3dContext->RSSetViewports(1, &viewPort);
	m_d3dContext->DrawIndexed(mYHMMesh.Geometry[0].IndexCount, 0, 0);
	// Restore the backbuffer viewport
	m_d3dContext->RSSetViewports(numberOfViewPorts, oldViewPort);
	// Glow chain: downsample bright areas, blur them, then composite
	// the blurred result back over the scene.
	BrightPassDownFilter();
	RenderGlow();
	CombineGlow();
}

In my code, both objects are rendered with the post-processing effect. Before rendering the post-processing effect, I want to render one object without it.

Any help would be appreciated. Thanks.

And this is my full codeAttached File  FullCode.rar   510.02KB   20 downloads


NEED HELP!create_async() failed problem!

21 January 2014 - 08:26 AM

I'm using create_async to load bitmap.

This works fine in release mode, but fails in debug mode.

Following is my code

// Kick off background loading of the menu background sprite.
// Capture [this] explicitly instead of [&]: the lambda outlives the
// enclosing scope, so a blanket by-reference capture invites dangling
// references; only the members mBackGround/mDeviceResources are used.
// NOTE(review): this constructor makes WIC/D2D calls on a worker
// thread — confirm the D2D device context is safe for (or protected
// against) multithreaded use before relying on this in debug builds.
Concurrency::create_async([this]()
{
	mBackGround = new YHMSprite(mDeviceResources, L"Textures\\Menu\\background.png");
});

// Constructs the sprite and immediately loads the named image file.
// Every COM call is checked: the original dereferenced the decoder /
// converter even when creation failed (e.g. file not found), which is
// exactly the kind of null dereference that trips a debugger breakpoint.
YHMSprite::YHMSprite(shared_ptr<DX::DeviceResources> &deviceResources, LPCWSTR fileName)
{
	mIsResourceLoaded = false;
	mBitmap = nullptr;
	mStateBlock = nullptr;
	mDeviceResources = deviceResources;
	// The state block does not depend on the image; create it first so
	// it exists even when decoding fails below.
	mDeviceResources->GetD2DFactory()->CreateDrawingStateBlock(&mStateBlock);
	ComPtr<IWICBitmapDecoder> wicBitmapDecoder;
	HRESULT hr = mDeviceResources->GetWicImagingFactory()->CreateDecoderFromFilename(fileName, nullptr, GENERIC_READ, WICDecodeMetadataCacheOnDemand, &wicBitmapDecoder);
	if (FAILED(hr)) return;
	ComPtr<IWICBitmapFrameDecode> wicBitmapFrame;
	hr = wicBitmapDecoder->GetFrame(0, &wicBitmapFrame);
	if (FAILED(hr)) return;
	ComPtr<IWICFormatConverter> wicFormatConverter;
	hr = mDeviceResources->GetWicImagingFactory()->CreateFormatConverter(&wicFormatConverter);
	if (FAILED(hr)) return;
	// Convert to 32bpp premultiplied BGRA, the format D2D expects.
	hr = wicFormatConverter->Initialize(wicBitmapFrame.Get(), GUID_WICPixelFormat32bppPBGRA, WICBitmapDitherTypeNone, nullptr, 0.0, WICBitmapPaletteTypeCustom);
	if (FAILED(hr)) return;
	// Defaults used when the file carries no resolution metadata.
	double DpiX = 96.0, DpiY = 96.0;
	wicFormatConverter->GetResolution(&DpiX, &DpiY);
	hr = mDeviceResources->GetD2DDeviceContext()->CreateBitmapFromWicBitmap(wicFormatConverter.Get(), BitmapProperties(PixelFormat(DXGI_FORMAT_B8G8R8A8_UNORM, D2D1_ALPHA_MODE_PREMULTIPLIED), static_cast<float>(DpiX), static_cast<float>(DpiY)), &mBitmap);
	if (FAILED(hr)) return;
	mBitmapSize = mBitmap->GetSize();
	mIsResourceLoaded = true;
}

I don't know what's wrong with it.

My program triggered a breakpoint under debug mode.

 

Any help would be appreciated!

 


Need help!My skinned mesh work incorrectly in ARM device(Surface RT)

27 November 2013 - 09:09 PM

My program is a windows store DirectX app.

As the topic title says, my code works well on Win32. But when I debug my demo on my Surface RT, some vertex positions are wrong, as in the photo below.

3tb_131128104817o6rm512293.jpg

I have do some test about my program.

This is my VertexShaderInput:

// Per-vertex input for the skinned mesh. Semantics and order must match
// the CPU-side VertexPNTTBI struct and VertexLayoutDesc.
struct VertexShaderInput
{
    float3 pos : POSITION;
    float3 norm : NORMAL;
    float2 tex : TEXCOORD0;
    float4 Tangent	: TANGENT;
    float4 Weights    : WEIGHTS;      // four bone blend weights
    uint4 BoneIndices : BONEINDICES;  // indices into the bone palette
};

When I use only the pos, norm, and tex data (a mesh with no skinning data), my mesh displays correctly.

So I thought the problem might be BoneIndices — it's a uint4 type in my HLSL code.

For more detail info, I paste my full hlsl code:

// Per-object constants; gBoneTransforms is the skinning matrix palette
// (up to 59 bones).
cbuffer SimpleConstantBuffer : register(b0)
{
    matrix model;
    matrix view;
    matrix projection;
    matrix gBoneTransforms[59];
};

// Must match VertexPNTTBI / VertexLayoutDesc on the CPU side.
struct VertexShaderInput
{
    float3 pos : POSITION;
    float3 norm : NORMAL;
    float2 tex : TEXCOORD0;
    float4 Tangent : TANGENT;
    float4 Weights    : WEIGHTS;
    uint4 BoneIndices : BONEINDICES;
};

struct PixelShaderInput
{
    float4 pos : SV_POSITION;
    float3 norm : NORMAL;
    float2 tex : TEXCOORD0;
};

// Skins position and normal against up to four bones, then transforms
// the result through the model/view/projection chain.
PixelShaderInput SimpleVertexShader(VertexShaderInput input)
{
    PixelShaderInput vertexShaderOutput;

    float weights[4] = { 0.0f, 0.0f, 0.0f, 0.0f };
    weights[0] = input.Weights.x;
    weights[1] = input.Weights.y;
    weights[2] = input.Weights.z;
    weights[3] = input.Weights.w;
    float4 pos = float4(input.pos, 1.0f);
    float4 skinnedPos = float4(0.0f, 0.0f, 0.0f, 0.0f);
    float4 norm = float4(normalize(input.norm), 0.0f);
    // FIX: accumulate from zero. The old code started normFinal at
    // w = 1.0f, biasing the accumulated vector before any bone
    // contribution was added.
    float4 normFinal = float4(0.0f, 0.0f, 0.0f, 0.0f);
    for (int i = 0; i < 4; ++i)
    {
	skinnedPos += weights[i] * mul(pos, gBoneTransforms[input.BoneIndices[i]]);
	normFinal += weights[i] * mul(norm, gBoneTransforms[input.BoneIndices[i]]);
    }
    skinnedPos.w = 1.0f;
    pos = mul(skinnedPos, model);
    pos = mul(pos, view);
    pos = mul(pos, projection);
    norm = mul(normFinal, model);

    vertexShaderOutput.pos = pos;
    vertexShaderOutput.tex = input.tex;
    // FIX: output the model-transformed normal. The old code computed
    // mul(normFinal, model) into `norm` and then discarded it, emitting
    // the untransformed skinned normal instead.
    vertexShaderOutput.norm = normalize(norm.xyz);
    return vertexShaderOutput;
}
 

And the vertex structure in my code is:

// CPU-side vertex layout matching VertexShaderInput in the HLSL and
// the byte offsets declared in VertexLayoutDesc.
struct VertexPNTTBI
{
	XMFLOAT3 pos;          // offset 0
	XMFLOAT3 norm;         // offset 12
	XMFLOAT2 tex;          // offset 24
	XMFLOAT4 tan;          // offset 32
	XMFLOAT4 blendWeight;  // offset 48
	// offset 64; uploaded as DXGI_FORMAT_R8G8B8A8_UINT, which the input
	// assembler exposes to the shader as uint4 BoneIndices.
	BYTE blendIndice[4];
};

And my D3D11_INPUT_ELEMENT_DESC is:

// Input layout for VertexPNTTBI. Offsets follow the struct's field
// layout (12+12+8+16+16+4 bytes). BONEINDICES is R8G8B8A8_UINT — a
// UINT format (not UNORM) so the shader receives raw integer bone
// indices as uint4.
const D3D11_INPUT_ELEMENT_DESC VertexLayoutDesc[] =
{
     { "POSITION", 0, DXGI_FORMAT_R32G32B32_FLOAT, 0, 0,  D3D11_INPUT_PER_VERTEX_DATA, 0 },
     { "NORMAL",   0, DXGI_FORMAT_R32G32B32_FLOAT, 0, 12, D3D11_INPUT_PER_VERTEX_DATA, 0 },
     { "TEXCOORD", 0, DXGI_FORMAT_R32G32_FLOAT,    0, 24, D3D11_INPUT_PER_VERTEX_DATA, 0 },
     { "TANGENT", 0, DXGI_FORMAT_R32G32B32A32_FLOAT, 0, 32, D3D11_INPUT_PER_VERTEX_DATA, 0 },
     { "WEIGHTS", 0, DXGI_FORMAT_R32G32B32A32_FLOAT, 0, 48, D3D11_INPUT_PER_VERTEX_DATA, 0 },
     { "BONEINDICES", 0, DXGI_FORMAT_R8G8B8A8_UINT, 0, 64, D3D11_INPUT_PER_VERTEX_DATA, 0 },
};

This code works well on my local machine (Win32), but I don't know if there is something I must change between Win32 and ARM.

I have been working on this problem for 4 days.

Any help would be appreciated! Thanks.


PARTNERS