DX11 - Volume Rendering - Article Misunderstanding


Hi guys! Again...

 

So I'm trying to follow an article/tutorial on volume rendering from GraphicsRunner (Great!) [ http://graphicsrunner.blogspot.dk/2009/01/volume-rendering-101.html ].

 

But there is something I don't understand, which is the following passage:

 

 

 

We could always calculate the intersection of the ray from the eye to the current pixel position with the cube by performing a ray-cube intersection in the shader. But a better and faster way to do this is to render the positions of the front and back facing triangles of the cube to textures. This easily gives us the starting and end positions of the ray, and in the shader we simply sample the textures to find the sampling ray.

 

So I understand that I have to render the back- and front-culled positions to individual textures, which I am doing and it works (it looks fine). But does he mean rendering the model's positions in view + projection space, or rendering them as a texture that can be sampled onto the cube with the cube's respective texture coordinates?

 

Thanks, as always.

-MIGI0027


I read that post a while back, and what he is doing is storing the end points of a segment that would be created by passing a ray through the volume, and then using those end points to do the ray marching through the volume.  So what you need to do is look at his comparison function and figure out how he performs the iterative step.

 

In my volume rendering implementation in Hieroglyph 3, I use the texture space coordinates and step through the texture that way.  If you wanted to do that, you would find the 3D texture coordinate at the pixel location for the front and back faces, and store them accordingly.  Then you can simply step from the front value to the back value and do a texture lookup at each step, looking for the intersection with the isosurface.
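
Roughly, the stepping looks something like this (just a sketch of the idea, not my Hieroglyph 3 code; the resource names are placeholders, and it assumes the front/back textures already store 3D texture coordinates in [0, 1]):

// Sketch only: placeholder names, not the Hieroglyph 3 source.
Texture3D    volumeTex : register(t0);
Texture2D    frontTex  : register(t1);
Texture2D    backTex   : register(t2);
SamplerState linearSS  : register(s0);

float3 FindIsoSurface(float2 screenUV, float isoValue, int numSteps)
{
    float3 start   = frontTex.Sample(linearSS, screenUV).xyz; // texcoord where the ray enters the cube
    float3 end     = backTex.Sample(linearSS, screenUV).xyz;  // texcoord where the ray leaves it
    float3 stepVec = (end - start) / numSteps;

    float3 pos = start;
    for (int i = 0; i < numSteps; i++)
    {
        // SampleLevel is safe inside a dynamic loop (no derivatives needed)
        if (volumeTex.SampleLevel(linearSS, pos, 0).r >= isoValue)
            return pos; // first crossing of the isosurface
        pos += stepVec;
    }
    return end; // ray never crossed the isosurface
}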


Still testing and changing.

 

If useful, here is the shader:

cbuffer ConstantObjectBuffer : register (b0)
{
	matrix worldMatrix;

	float3 StepSize;
	float Iterations;

	float4 ScaleFactor;
};

#define Side 2

cbuffer ConstantFrameBuffer : register (b1)
{
	
	matrix viewMatrix;
	matrix projectionMatrix;

	float3 eyepos;
	float cppad;

	float4 lightvec;
	float4 lightcol;

	float FogStart;
	float FogEnd;
	float2 __space;

	float3 FogColor;
	float shadows;

	float SpecularIntensity;
	float3 pad3;
	float4 SpecularColor;
}

//***************************************************//
//                 VERTEX SHADER                     //
//***************************************************//

struct VOut
{
    float4 position : SV_POSITION;
    float3 texC		: TEXCOORD0;
    float4 pos		: TEXCOORD1;
    float2 texcoord : TEXCOORD2;
    float3 normal   : NORM;
};

struct GlobalIn
{
	float4 position : POSITION;
	float4 normal : NORMAL;
	float2 texcoord : TEXCOORD;
	float4 tangent : TANGENT;
};
	
**CE_RESERVED_SHADER[INPUTS]**

Texture3D t_VolData : register(t0);
Texture2D t_TransFront : register(t1);
Texture2D t_TransBack : register(t2);

SamplerState ss;

VOut VShader(GlobalIn input)
{
    VOut output;

    input.position.w = 1.0f;
	output.texcoord = input.texcoord;

	// Calculate the position of the vertex against the world, view, and projection matrices.
    output.position = mul(input.position, worldMatrix);
    output.position = mul(output.position, viewMatrix);
    output.position = mul(output.position, projectionMatrix);

	output.texC = input.position;
    output.pos = output.position;
    output.normal = mul(float4(input.normal.xyz,0), worldMatrix);
	
    return output;
}

//***************************************************//
//                 PIXEL SHADER                      //
//***************************************************//

struct POut
{
	float4 Diffuse  : SV_Target0;
	float4 Position : SV_Target1;
	float4 Depth    : SV_Target2;
	float4 Normals  : SV_Target3;
	float4 Lighting : SV_Target4;
};

// Functions
float4 GetVRaycast(VOut input)
{
	//calculate projective texture coordinates
    //used to project the front and back position textures onto the cube
    float2 texC = input.pos.xy /= input.pos.w;
	texC.x =  0.5f*texC.x + 0.5f; 
	texC.y = -0.5f*texC.y + 0.5f;  
 
    float3 front = t_TransFront.Sample(ss, texC).xyz;
    float3 back = t_TransBack.Sample(ss, texC).xyz;
 
    float3 dir = normalize(back - front);
    float4 pos = float4(front, 0);
 
    float4 dst = float4(0, 0, 0, 0);
    float4 src = 0;
 
    float value = 0;
 
    float3 Step = dir * StepSize;
 
    for(int i = 0; i < 32; i++)
    {
        pos.w = 0;
        value = t_VolData.Sample(ss, pos).r;
             
        src = (float4)value;
        src.a *= .5f; //reduce the alpha to have a more transparent result 
         
        //Front to back blending
        // dst.rgb = dst.rgb + (1 - dst.a) * src.a * src.rgb
        // dst.a   = dst.a   + (1 - dst.a) * src.a     
        src.rgb *= src.a;
        dst = (1.0f - dst.a)*src + dst;     
     
        //break from the loop when alpha gets high enough
        if(dst.a >= .95f)
            break; 
     
        //advance the current position
        pos.xyz += Step;
     
        //break if the position is greater than <1, 1, 1>
        if(pos.x > 1.0f || pos.y > 1.0f || pos.z > 1.0f)
            break;
    }
 
    return dst;
}

POut PShader(VOut input)
{
	POut output;

	// Depth
	output.Depth = float4(0, 0, 0, 1.0f);

	// Normals
	output.Normals = float4(normalize(input.normal), 1);
	output.Position = float4(0, 0, 0, 1);
	output.Lighting = float4(1, 1, 1, 1);

	output.Diffuse = GetVRaycast(input);

	return output;
}

Thanks!

-MIGI0027


Can somebody confirm that this is an OK way of loading a volume texture:

// Will be filled and returned
	ID3D11ShaderResourceView* pSRV = NULL;
 
	// Build the texture header descriptor
	D3D11_TEXTURE3D_DESC descTex;
	descTex.Width = width;
	descTex.Height = height;
	descTex.Depth = depth;
	descTex.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
	descTex.Usage = D3D11_USAGE_DEFAULT;
	descTex.BindFlags = D3D11_BIND_SHADER_RESOURCE | D3D11_BIND_RENDER_TARGET;
	descTex.CPUAccessFlags = 0;
	descTex.MipLevels = 1;
	descTex.MiscFlags = D3D10_RESOURCE_MISC_GENERATE_MIPS;

	// Load Data into Memory
	const int size = height*width*depth;

	// Initialize memory
    unsigned int* pVolume = new unsigned int[size];

	// Load into memory
	FILE* pFile = fopen ( (const char*)filePath.c_str() , (const char*)"rb" );
	fread(pVolume,sizeof(unsigned int), size, pFile);
    fclose(pFile);
 
	// Resource data descriptor, with depth
	D3D11_SUBRESOURCE_DATA data ;
	data.pSysMem = pVolume;
	data.SysMemPitch = 1.0f * width;
	data.SysMemSlicePitch = width * height * 1;

	/*_until(r, depth)
	{
		// Fetch Data

		memset( &data[r], 0, sizeof(D3D11_SUBRESOURCE_DATA));
		data[r].pSysMem = pData;
		data[r].SysMemPitch = 4 * width;
	}*/
 
	// Create the 3d texture from data
	ID3D11Texture3D * pTexture = NULL;
	HV( pDevice->CreateTexture3D( &descTex, &data, &pTexture ));
 
	// Create resource view descriptor
	D3D11_SHADER_RESOURCE_VIEW_DESC srvDesc;
	srvDesc.Format = descTex.Format;
	srvDesc.ViewDimension = D3D11_SRV_DIMENSION_TEXTURE3D;

	srvDesc.Texture3D.MostDetailedMip = 0;
	srvDesc.Texture3D.MipLevels = D3D11_RESOURCE_MISC_GENERATE_MIPS;

	// Create the shader resource view
	HV( pDevice->CreateShaderResourceView( pTexture, &srvDesc, &pSRV ));
 
	return pSRV;

One thing: Your pitches are wrong. For RGBA8 a texel is 4 bytes in size, so
data.SysMemPitch = width * 4;
data.SysMemSlicePitch = width * height * 4;
And the pitches are uints, not floats (the compiler likely warns you here).
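
In context it would look something like this (a sketch only, reusing the variable names from your snippet):

// 4 bytes per R8G8B8A8 texel; both pitches are UINTs
D3D11_SUBRESOURCE_DATA data = {};
data.pSysMem          = pVolume;             // width*height*depth texels in one contiguous block
data.SysMemPitch      = width * 4;           // bytes per row
data.SysMemSlicePitch = width * height * 4;  // bytes per depth slice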

I'm probably sounding like a broken record now but I recommend using PIX/graphics debugger to see if resources are loaded/initialized fine.

PS: Oh, and thanks for the first link. That's quite a resourceful site. And it's mostly C#.


OK, so I'm just going to post all the code, in case I did something stupid as usual.

 

Back, Front map rendering:

cbuffer ConstantObjectBuffer : register (b0)
{
	matrix worldMatrix;

	float pad_f;
	float instance;
	float2 pad_v2;
};

cbuffer ConstantFrameBuffer : register (b1)
{
	matrix viewMatrix;
	matrix projectionMatrix;
};


struct VOut
{
    float4 position : SV_POSITION;
    float3 texC		: TEXCOORD0;
    float4 pos		: TEXCOORD1;
};

Texture2D t_alphamap : register(t0);
Texture2D t_dffalpha : register(t1);
SamplerState ss;

VOut VShader(float4 position : POSITION, float4 normal : NORMAL, float2 texcoord : TEXCOORD, float3 instancePosition : INSTANCEPOS)
{
    VOut output;

	if (instance == 1)
	{
		position.x += instancePosition.x;
		position.y += instancePosition.y;
		position.z += instancePosition.z;
	}

    position.w = 1.0f;
	position = position * float4(1, 1, 1, 1);

	// Calculate the position of the vertex against the world, view, and projection matrices.
    output.position = mul(position, worldMatrix);
    output.position = mul(output.position, viewMatrix);
    output.position = mul(output.position, projectionMatrix);

	output.texC = position;
    output.pos = output.position;

    return output;
}

float4 PShader(VOut input) : SV_TARGET
{
	return float4(input.texC, 1.0f);
}

Code for volume rendering:

//*******CONFIGURE OBJECT BUFFER
	bff_PrObject.worldMatrix = world;
	bff_PrObject.Iterations = 24;

	//*******CALCULATE PROPERTIES

	float mStepScale = 1.0f;

	float maxSize = (float)max(volume->m_pVolumeData.m_Width, max(volume->m_pVolumeData.m_Height, volume->m_pVolumeData.m_Depth));
	bff_PrObject.StepSize = D3DXVECTOR3(1.0f / volume->m_pVolumeData.m_Width, 1.0f / volume->m_pVolumeData.m_Height, 1.0f / volume->m_pVolumeData.m_Depth);
	bff_PrObject.Iterations = (int)maxSize * (1.0f / mStepScale);

	D3DXVECTOR3 sizes = D3DXVECTOR3(volume->m_pVolumeData.m_Width, volume->m_pVolumeData.m_Height, volume->m_pVolumeData.m_Depth);
	D3DXVECTOR3 ratios = D3DXVECTOR3(1, 1, 1);

	D3DXVECTOR3 r(sizes.x * ratios.x, sizes.y * ratios.y, sizes.z * ratios.z);
	
	D3DXVECTOR3 c1 = (D3DXVECTOR3(1, 1, 1) * maxSize);
	D3DXVECTOR3 d(c1.x / r.x, c1.y / r.y, c1.z / r.z);

	bff_PrObject.ScaleFactor = D3DXVECTOR4( 1.0f / d.x, 1.0f / d.y, 1.0f / d.z, 1.0f );

	//*******SMART SEND ALL BUFFERS
	devcon->UpdateSubresource(pCBuffer[0], 0, 0, &bff_PrObject, NULL, NULL);
	devcon->UpdateSubresource(pCBuffer[1], 0, 0, &bff_PrFrame, NULL, NULL);

	devcon->VSSetConstantBuffers(0, 2, pCBuffer);
	devcon->PSSetConstantBuffers(0, 2, pCBuffer);

	//*******SEND RESOURCES
	ID3D11ShaderResourceView 
		*front = volume->transHolder.m_pFront.GetShaderResourceView(), 
		*back = volume->transHolder.m_pBack.GetShaderResourceView();

	devcon->PSSetShaderResources(0, 1, &volume->m_pVolumeData.pTexture);
	devcon->PSSetShaderResources(1, 1, &front);
	devcon->PSSetShaderResources(2, 1, &back);

	FOREACH (volume->Parameters.Parameters.size())
	{
		devcon->PSSetShaderResources(volume->Parameters.Parameters[i].slot, 1, &volume->Parameters.Parameters[i].resource);
	}

	//*******PREPARE PASS
	Pass.Prepare(devcon, rastManager);

	devcon->DrawIndexed(bf.IndexCount, 0, 0);

	//*******CLEAN UP
	ID3D11ShaderResourceView *n = NULL;
	FOREACH (volume->Parameters.Parameters.size())
	{
		devcon->PSSetShaderResources(volume->Parameters.Parameters[i].slot, 1, &n);
	}
	
	devcon->PSSetShaderResources(0, 1, &n);
	devcon->DSSetShaderResources(1, 1, &n);
	devcon->DSSetShaderResources(2, 1, &n);

Shader code for volume:

cbuffer ConstantObjectBuffer : register (b0)
{
	matrix worldMatrix;

	float3 StepSize;
	float Iterations;

	float4 ScaleFactor;
};

#define Side 2

cbuffer ConstantFrameBuffer : register (b1)
{
	
	matrix viewMatrix;
	matrix projectionMatrix;

	float3 eyepos;
	float cppad;

	float4 lightvec;
	float4 lightcol;

	float FogStart;
	float FogEnd;
	float2 __space;

	float3 FogColor;
	float shadows;

	float SpecularIntensity;
	float3 pad3;
	float4 SpecularColor;
}

//***************************************************//
//                 VERTEX SHADER                     //
//***************************************************//

struct VOut
{
    float4 position : SV_POSITION;
	float3 texC		: TEXCOORD0;
    float4 pos		: TEXCOORD1;
	float2 texcoord : TEXCOORD2;
	float3 normal   : NORM;

	**CE_RESERVED_SHADER[LINKAGE]**
};

struct GlobalIn
{
	float4 position : POSITION;
	float4 normal : NORMAL;
	float2 texcoord : TEXCOORD;
	float4 tangent : TANGENT;
};
	
**CE_RESERVED_SHADER[INPUTS]**

Texture3D t_VolData : register(t0);
Texture2D t_TransFront : register(t1);
Texture2D t_TransBack : register(t2);

sampler3D VolumeS = sampler_state
{
	Texture = <t_VolData>;
	MinFilter = LINEAR;
	MagFilter = LINEAR;
	MipFilter = LINEAR;
	
	AddressU = Border;				// border sampling in U
    AddressV = Border;				// border sampling in V
    AddressW = Border;
    BorderColor = float4(0,0,0,0);	// outside of border should be black
};

SamplerState ss;

VOut VShader(GlobalIn input)
{
    VOut output;

    input.position.w = 1.0f;
	output.texcoord = input.texcoord;

	**CE_RESERVED_SHADER[PREVERTEX]**

	// Calculate the position of the vertex against the world, view, and projection matrices.
    output.position = mul(input.position, worldMatrix);
    output.position = mul(output.position, viewMatrix);
    output.position = mul(output.position, projectionMatrix);

	output.texC = input.position;
    output.pos = output.position;
    output.normal = mul(float4(input.normal.xyz,0), worldMatrix);

	**CE_RESERVED_SHADER[VERTEX]**
	
    return output;
}

//***************************************************//
//                 PIXEL SHADER                      //
//***************************************************//

struct POut
{
	float4 Diffuse  : SV_Target0;
	float4 Position : SV_Target1;
	float4 Depth    : SV_Target2;
	float4 Normals  : SV_Target3;
	float4 Lighting : SV_Target4;
};

// Functions
float4 GetVRaycast(VOut input)
{
	//calculate projective texture coordinates
    //used to project the front and back position textures onto the cube
    float2 texC = input.pos.xy /= input.pos.w;
	texC.x =  0.5f*texC.x + 0.5f; 
	texC.y = -0.5f*texC.y + 0.5f;  
 
    float3 front = t_TransFront.Sample(ss, texC).xyz;
    float3 back = t_TransBack.Sample(ss, texC).xyz;
 
    float3 dir = normalize(back - front);
    float4 pos = float4(front, 0);
 
    float4 dst = float4(0, 0, 0, 0);
    float4 src = 0;
 
    float value = 0;
 
    float3 Step = dir * StepSize;
 
    for(int i = 0; i < 32; i++)
    {
        pos.w = 0;
        value = t_VolData.Sample(ss, pos).r;
             
        src = (float4)value;
        src.a *= .5f; //reduce the alpha to have a more transparent result 
         
        //Front to back blending
        // dst.rgb = dst.rgb + (1 - dst.a) * src.a * src.rgb
        // dst.a   = dst.a   + (1 - dst.a) * src.a     
        src.rgb *= src.a;
        dst = (1.0f - dst.a)*src + dst;     
     
        //break from the loop when alpha gets high enough
        if(dst.a >= .95f)
            break; 
     
        //advance the current position
        pos.xyz += Step;
     
        //break if the position is greater than <1, 1, 1>
        if(pos.x > 1.0f || pos.y > 1.0f || pos.z > 1.0f)
            break;
    }
 
    return dst;
}

POut PShader(VOut input)
{
	POut output;

	// Depth
	output.Depth = float4(0, 0, 0, 1.0f);

	// Normals
	output.Normals = float4(normalize(input.normal), 1);
	output.Position = float4(0, 0, 0, 1);
	output.Lighting = float4(1, 1, 1, 1);

	**CE_RESERVED_SHADER[PIXEL]**

	output.Diffuse = GetVRaycast(input);

	return output;
}

PS. Don't mind the weird things like **CE...


I usually don't work with for loops in shaders, but this one requires it.

 

But when I don't use a constant as the loop bound, compilation takes a very long time; it still hadn't finished, so I stopped the program.

That is, when I change it to this:

for(int i = 0; i < Iterations; i++)

Why is that so?

 

EDIT: Well, it seems the compiler is trying to fully unroll the loop, and with a non-constant bound it can't, so it effectively never finishes...
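
For reference, the two workarounds I'm looking at (just sketches, not verified in my code yet):

// Option 1: keep the dynamic bound but stop the unroll; this also means
// switching Sample (which needs derivatives) to SampleLevel.
[loop]
for(int i = 0; i < Iterations; i++)
{
    value = t_VolData.SampleLevel(ss, pos.xyz, 0).r;
    // ...
}

// Option 2: keep Sample and give the compiler a constant bound to unroll.
[unroll(128)]
for(int i = 0; i < 128; i++)
{
    value = t_VolData.Sample(ss, pos.xyz).r;
    // ...
}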

-MIGI0027


Nah, stuff is still wrong; the reason the foot looks better is that I cheated a bit:

for(int i = 0; i < Iterations; i++)
    {
        pos.w = 0;
        value = t_VolData.SampleLevel(ss, pos/4.1f, 1).r; // See that /4.1f? That's the naughty stuff

It is supposed to look like this:

 

[Image: reference render of the foot volume dataset from the article]

 

 

Any help on this while I battle on?

-MIGI0027


I see that the repeating pattern disappears when scaling the vertices (in a 3rd-party program) to 1/4 of the original size, but then the resolution is terrible.


A few things:

 

  • What format are your back/front textures? If they're 8-bit you should scale to the 0-1 range for output (if you're using HDR values then you should be fine).
  • Your position should be in texture space. That is, bias and scale it from [-1, 1] to [0, 1] (pos * 0.5 + 0.5) when you read your texture (see the sketch below).
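
One place to do the remap is in the position pass itself (a sketch; assumes the cube's object-space positions run from -1 to 1):

float4 PShader(VOut input) : SV_TARGET
{
    // remap object-space position from [-1, 1] to [0, 1] so it can be used
    // directly as a 3D texture coordinate in the ray-march pass
    return float4(input.texC * 0.5f + 0.5f, 1.0f);
}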

