reference code:
// Depth pass vertex shader
output.vPositionCS = mul(input.vPositionOS, g_matWorldViewProj);
output.vDepthCS.xy = output.vPositionCS.zw;
// Depth pass pixel shader (output z/w)
return input.vDepthCS.x / input.vDepthCS.y;
In the first section, MJP demonstrates a traditional method to render depth. I can't understand why we must save zw into xy in the vertex shader and return x/y in the pixel shader — why can't we return
vPositionCS.z directly?
I use this code to render a depth buffer for shadow mapping, and it works well — is there any problem with it?
// Shadow-map depth pass, vertex stage.
// Transforms the object-space position into the light's clip space and
// emits it as SV_Position; the rasterizer derives depth from it downstream.
float4 DrawShadowMapVS( HStaticMeshPositionVertexFactory Input ):SV_Position
{
    float4 posWS = mul( float4(Input.vPos, 1.0f ), mWorld );
    return mul( posWS, mLightViewProj );
}
// Shadow-map depth pass, pixel stage.
// Echoes the rasterizer-interpolated depth (SV_Position.z as received by
// the pixel shader) into the RGB channels of the color target, alpha = 1.
float4 DrawShadowMapPS( float4 vPos : SV_Position ) : SV_Target0
{
    return float4( vPos.zzz, 1.0 );
}
// Depth-only vertex shader for shadow-map rendering.
// Transforms the object-space position first into world space, then into
// the light's clip space; the projected position is returned as SV_Position.
float4 DrawShadowMapVS( HStaticMeshPositionVertexFactory Input ):SV_Position
{
float4 vWorldPos = mul( float4(Input.vPos, 1.0f ), mWorld );
float4 oPos = mul( vWorldPos, mLightViewProj );
return oPos;
}
// Depth-only pixel shader for shadow-map rendering.
// Writes the interpolated SV_Position.z value into all three color
// channels of the render target, with alpha fixed at 1.
float4 DrawShadowMapPS( float4 vPos : SV_Position ) : SV_Target0
{
float z = vPos.z;
return float4(z,z,z,1.0);
}