// Depth-stencil state for fullscreen post-processing passes:
// depth testing and depth writes are both disabled, since the
// downscale pass draws a screen-covering primitive at a fixed depth.
DepthStencilState DisableDepth
{
DepthEnable = FALSE;
DepthWriteMask = ZERO;
};
// Blend state that disables blending on render target 0 so the
// downscale result overwrites the destination texels directly.
BlendState DisableBlend
{
BlendEnable[0] = false;
};
//#############################################################################
//
// SAMPLERS
//
//#############################################################################
// Source texture to be downscaled, bound by the application to slot TEXTURE0.
Texture2D <float4>g_SourceTex : TEXTURE0;
// Bilinear min/mag/mip filtering with clamped addressing; a single bilinear
// fetch placed between texels averages a 2x2 neighborhood of the source.
SamplerState BilinearSampler
{
Filter = MIN_MAG_MIP_LINEAR;
AddressU = Clamp;
AddressV = Clamp;
};
//#############################################################################
// Vertex shader output / pixel shader input:
// clip-space position plus one texture coordinate set.
struct VS_Output
{
float4 Pos : SV_POSITION;
float2 Tex : TEXCOORD0;
};
//#############################################################################
//
// DOWNSCALE
//
//#############################################################################
// Pass-through vertex shader for the downscale pass. The application packs
// clip-space XY into Pos.xy and the texture coordinates into Pos.zw, so no
// transform is needed; depth is pinned to 0.5 (depth test is disabled anyway).
VS_Output VS_downscale(in float4 Pos : POSITION)
{
    VS_Output result = (VS_Output)0;
    result.Pos = float4(Pos.xy, 0.5f, 1.0f);
    result.Tex = Pos.zw;
    return result;
}
// 2x2 downscale pixel shader: one bilinear fetch positioned between four
// source texels (see BilinearSampler) returns their average, so each output
// pixel represents a 2x2 block of the source texture.
// Fix: removed the unused local `half4 sample = 0.0f;` — it was dead code,
// and `sample` is a reserved keyword (interpolation modifier) in SM4.1+.
float4 PS_downScale2x2(in VS_Output IN) : SV_TARGET0
{
    return g_SourceTex.Sample(BilinearSampler, IN.Tex.xy);
}
//=============================================================
// Single-pass downscale technique: draws a screen-covering primitive with
// VS_downscale, samples the source bilinearly in PS_downScale2x2, and
// disables blending and depth so the pass is a pure copy/average.
technique10 Downscale4x4Bilinear
{
pass p0
{
SetVertexShader ( CompileShader( vs_4_0, VS_downscale() ) );
SetGeometryShader( NULL );
SetPixelShader ( CompileShader( ps_4_0, PS_downScale2x2() ) );
// Blend factor is unused by DisableBlend; 0xFFFFFFFF enables all samples.
SetBlendState( DisableBlend, float4( 0.0f, 0.0f, 0.0f, 0.0f ), 0xFFFFFFFF );
SetDepthStencilState( DisableDepth, 0 );
}
}
This code is copied from the NVIDIA DX10 SDK "HDRRendering" sample.
vs input:
[attachment=7594:1.png]
vs output:
[attachment=7595:2.png]
for(int i = 1; i <= m_NumberOfTaps; i++){
m_SourceTex->SetResource( m_FilterTapsSRV[i-1] );
m_D3DDevice->RSSetViewports( 1, &m_DownsampleQuadCoords[i-1].Viewport );
m_D3DDevice->OMSetRenderTargets(1, &m_FilterTapsRTV, NULL);
m_D3DDevice->IASetVertexBuffers( 0, 1, &m_DownsampleQuadCoords[i-1].VBdata, &stride, &offset );
for( UINT p = 0; p < techDesc.Passes; ++p )
{
m_TechniqueDownscale->GetPassByIndex( p )->Apply(0);
m_D3DDevice->Draw(3, 0);
}
}
It renders a primitive with only three vertices — so why can it be used to downscale an entire surface?
Formerly, we needed to create a vertex buffer, an index buffer, and an input layout from this data:
struct FQuadVertex
{
float3 vPos;
float2 vTex;
};
static const FQuadVertex Vertices[4] =
{
{ float3(-1.0f, -1.0f, 0.0f), float2(0.0f, 1.0f) },
{ float3(-1.0f, 1.0f, 0.0f), float2(0.0f, 0.0f ) },
{ float3( 1.0f, 1.0f, 0.0f), float2(1.0f, 0.0f) },
{ float3( 1.0f, -1.0f, 0.0f), float2(1.0f, 1.0f) }
};
static const word_t Indices[6] =
{
0, 1, 2,
0, 2, 3
};
then, draw
DrawIndexed( EPT_TriangleList, 0, 0, 2 );
What is the difference between these two approaches?
Which is the better way?
In the first approach, some of the output texture coordinates are greater than 1.0 and the primitive is just a single triangle — how does it manage to cover the full screen? (Hint: a large triangle whose vertices lie outside the viewport is clipped to the viewport rectangle, so one oversized triangle can cover the whole render target.)