nVidia Ocean Sample: unknown error on code-port from DX10 to DX11

7 comments, last by mynameisnafe 11 years ago

Hi Guys,

So I've been rewriting an nVidia HLSL ocean example, porting it from a DirectX 10 example app with one .fx file to

a DirectX11 app with two files; .vs and .ps.

I've got a class that wraps up shader functionality, and it's composed into a DXMesh class, a DXTerrain class, and other objects (terrain, a skysphere, etc.) whose shaders are all in .vs and .ps form: these all show up on render and work fine. It's also composed into the DXWater class, an instance of which I'm trying to render (and apparently succeeding).


However, the water doesn't show up when I render it. I've tried rendering it on its own (no terrain/skysphere) and it's still not visible. I've also tried scaling it up by massive factors.

I'm not getting any shader errors from DX11, and if anything were wrong in setup or render my app would just bail, but it runs.

What's the most likely explanation for this kind of thing? Does anyone have any advice to help me chew through and investigate?

I'd post some code, but there's rather a lot, so if anyone wants to see it just post and tell me which bits.

Please help!

Thanks, N


Screw it. Here are the shaders: my two, followed by the original .fx:

ocean.vs:


////////////////////////////////////////////////////////////////////////////////
// Filename: Ocean.vs
//
// Disclaimer: 
// This shader file and the corresponding ocean.ps file are based
// on the nVidia example which was in turn based partly on "Effective Water 
// Simulation From Physical Models", GPU Gems
//
//
////////////////////////////////////////////////////////////////////////////////

////////////////////////////////////////////////////////////////////////////////
//			GLOBALS															  //
////////////////////////////////////////////////////////////////////////////////

cbuffer MatrixBuffer
{
    matrix worldMatrix;
    matrix viewMatrix;
    matrix projectionMatrix;
	float4 eyePos;
};

cbuffer LightBuffer
{
	float4 ambientColour;
	float4 diffuseColour;
   	float3 lightDirection;
	float timeElapsed;         // we only want timeElapsed LOL -> Making use of the API I've built so far
};							   // and being stringent with my memory.. it's an optimisation, I promise!

//////////////// TEXTURES ///////////////////

Texture2D	NormalTexture : register(t0);
TextureCube EnvTexture    : register(t1);

SamplerState NormalSampler 
{
    Filter = MIN_MAG_MIP_LINEAR;
    AddressV = Wrap;
};

SamplerState EnvSampler 
{
    Filter = MIN_MAG_MIP_LINEAR;
    AddressU = Clamp;
    AddressV = Clamp;
    AddressW = Clamp;
};

////////////////////////////////////////////////////////////////////////////////
//		 TWEAKABLE PARAMETERS												  //
////////////////////////////////////////////////////////////////////////////////
float BumpScale = 0.9;
float TexReptX  = 0.20;
float TexReptY  = 0.10;
float BumpSpeedX  = -0.05;
float BumpSpeedY  = 0.0;

static float2 TextureScale = float2(TexReptX,TexReptY);
static float2 BumpSpeed = float2(BumpSpeedX,BumpSpeedY);

float FresnelBias    = 0.1;
float FresnelExp     = 2.0;
float HDRMultiplier  = 2.0;
float3 DeepColor     = {0.0f, 0.0f, 0.1f};
float3 ShallowColor  = {0.0f, 0.5f, 0.6f};
float3 ReflTint      = {0.0f, 0.7f, 1.0f};

// these are redundant, but makes the ui easier:
float Kr  = 1.0f;
float KWater  = 1.0f;
float WaveAmp  = 0.05;
float WaveFreq  = 3.0;

////////////////////////////////////////////////////////////////////////////////
//				TYPEDEFS													  //
////////////////////////////////////////////////////////////////////////////////
struct VertexInputType
{
   	float4 position : POSITION; //in object space
   	float3 normal	: NORMAL;
	float4 colour	: DIFFUSE;
	float4 spec		: SPECULAR;
	float2 uv		: TEXCOORD0;
};

struct PixelInputType // OceanVertOut in the nVidia sample
{
 	float4 position : SV_POSITION; // actual position
	float2 uv		: TEXCOORD0;   // texture coord ?
 	float3 T2WXf1	: TEXCOORD1;   // 1st row of the 3x3 transform from tangent to cube space
    float3 T2WXf2	: TEXCOORD2;   // 2nd row of the 3x3 transform from tangent to cube space
    float3 T2WXf3	: TEXCOORD3;   // 3rd row of the 3x3 transform from tangent to cube space
    float2 bumpUV0	: TEXCOORD4;   // 1st bump map coord
    float2 bumpUV1	: TEXCOORD5;   // 2nd bump map coord
    float2 bumpUV2	: TEXCOORD6;   // 3rd bump map coord
    float3 WorldView: TEXCOORD7;   // world-space view vector (eye - position)
};

////////////////////////////////////////////////////////////////////////////////
//				WAVE FUNCTIONS												  //
////////////////////////////////////////////////////////////////////////////////

struct Wave {
  float freq;	// 2*PI / wavelength
  float amp;	// amplitude
  float phase;	// speed * 2*PI / wavelength
  float2 dir;
};

#define NWAVES 2

float evaluateWave(Wave w, float2 pos, float t)
{
  return w.amp * sin( dot(w.dir, pos)*w.freq + t*w.phase);
}

// derivative of wave function
float evaluateWaveDeriv(Wave w, float2 pos, float t)
{
  return w.freq*w.amp * cos( dot(w.dir, pos)*w.freq + t*w.phase);
}


////////////////////////////////////////////////////////////////////////////////
// Vertex Shader
////////////////////////////////////////////////////////////////////////////////

PixelInputType OceanVSTechnique(VertexInputType input)
{
   	PixelInputType output = (PixelInputType)0;
	
	float4x4 wvp = mul( mul( worldMatrix, viewMatrix ), projectionMatrix );

	Wave wave [NWAVES] = 
	{	
		{WaveFreq,   WaveAmp,   0.5, float2(-1, 0)}, 
		{WaveFreq*2, WaveAmp/2, 1.3, float2(-0.7, 0.7)} 
	};

	input.position.w = 1.0f;

	input.position.y = 0;

	// Sum waves
	float ddx = 0.0;
	float ddy = 0.0;

	for(int i = 0; i < NWAVES; i++){
		input.position.y += evaluateWave( wave[i], input.position.xz, timeElapsed );
		float d = evaluateWaveDeriv( wave[i], input.position.xz, timeElapsed );
		ddx += d * wave[i].dir.x;
		ddy += d * wave[i].dir.y;
	}

	// compute tangent basis
    float3 B = float3(1, ddx, 0);
    float3 T = float3(0, ddy, 1);
    float3 N = float3(-ddx, 1, -ddy);

	float cycle = fmod( timeElapsed, 100.0 );

	// compute the 3x3 transform from tangent space to object space
    
	float3x3 objToTangentSpace;
    
	// first rows are the tangent and binormal scaled by the bump scale
    
	objToTangentSpace[0] = BumpScale * normalize(T);
    objToTangentSpace[1] = BumpScale * normalize(B);
    objToTangentSpace[2] = normalize(N);
	
	float3 Pw = mul(input.position, worldMatrix).xyz;

	output.position = mul( input.position, wvp );
	output.uv = input.uv.xy;
	output.bumpUV0.xy = input.uv.xy * TextureScale       + cycle * BumpSpeed;
    output.bumpUV1.xy = input.uv.xy * TextureScale * 2.0 + cycle * BumpSpeed * 4.0;
    output.bumpUV2.xy = input.uv.xy * TextureScale * 4.0 + cycle * BumpSpeed * 8.0;
	output.T2WXf1.xyz = mul(objToTangentSpace, worldMatrix[0].xyz);
    output.T2WXf2.xyz = mul(objToTangentSpace, worldMatrix[1].xyz);
    output.T2WXf3.xyz = mul(objToTangentSpace, worldMatrix[2].xyz);    
	output.WorldView  = eyePos.xyz - Pw; // view inv. transpose contains eye position in world space in last row

    return output;
}

ocean.ps:


////////////////////////////////////////////////////////////////////////////////
// Filename: Ocean.ps
//
// Disclaimer: 
// This shader file and the corresponding ocean.vs file are based
// on the nVidia example which was in turn based partly on "Effective Water 
// Simulation From Physical Models", GPU Gems
//
//
////////////////////////////////////////////////////////////////////////////////


//////////////////////////////////////
//		TWEAKABLES					//
//////////////////////////////////////
float BumpScale = 0.9;
float TexReptX  = 0.20;
float TexReptY  = 0.10;
float BumpSpeedX  = -0.05;
float BumpSpeedY  = 0.0;

static float2 TextureScale = float2(TexReptX,TexReptY);
static float2 BumpSpeed = float2(BumpSpeedX,BumpSpeedY);

float FresnelBias    = 0.1;
float FresnelExp     = 2.0;
float HDRMultiplier  = 2.0;
float3 DeepColour     = {0.0f, 0.0f, 0.1f};
float3 ShallowColour  = {0.0f, 0.5f, 0.6f};
float3 ReflTint      = {0.0f, 0.7f, 1.0f};

// these are redundant, but makes the ui easier:
float Kr  = 1.0f;
float KWater  = 1.0f;
float WaveAmp  = 0.05;
float WaveFreq  = 3.0;

//////////////////////////////////////
//		TEXTURE SAMPLER STATES		//
//////////////////////////////////////
Texture2D	NormalTexture : register(t0);
TextureCube EnvTexture    : register(t1);

SamplerState NormalSampler 
{
    Filter = MIN_MAG_MIP_LINEAR;
    AddressV = Wrap;
};

SamplerState EnvSampler 
{
    Filter = MIN_MAG_MIP_LINEAR;
    AddressU = Clamp;
    AddressV = Clamp;
    AddressW = Clamp;
};

//////////////////////////////////////
//		CONSTANT BUFFERS			//
//////////////////////////////////////
cbuffer LightBuffer
{
	float4 ambientColour;
	float4 diffuseColour;
   	float3 lightDirection;
	float  tDelta;
};


//////////////
// TYPEDEFS //
//////////////
struct PixelInputType // OceanVertOut in the nVidia sample
{
 	float4 position : SV_POSITION;
	float2 uv		: TEXCOORD0;
 	float3 T2WXf1	: TEXCOORD1; // first row of the 3x3 transform from tangent to cube space
    float3 T2WXf2	: TEXCOORD2; // second row of the 3x3 transform from tangent to cube space
    float3 T2WXf3	: TEXCOORD3; // third row of the 3x3 transform from tangent to cube space
    float2 bumpUV0	: TEXCOORD4;
    float2 bumpUV1	: TEXCOORD5;
    float2 bumpUV2	: TEXCOORD6;
    float3 WorldView: TEXCOORD7;
};


////////////////////////////////////////////////////////////////////////////////
// Pixel Shader
////////////////////////////////////////////////////////////////////////////////
float4 OceanPSTechnique( PixelInputType input ) : SV_TARGET
{
	float4 colour = (float4)0;

	// sum our normal maps
	float4 t0 = NormalTexture.Sample( NormalSampler, input.bumpUV0 ) * 2.0 - 1.0;
	float4 t1 = NormalTexture.Sample( NormalSampler, input.bumpUV1 ) * 2.0 - 1.0;
	float4 t2 = NormalTexture.Sample( NormalSampler, input.bumpUV2 ) * 2.0 - 1.0;
	
	float3 nt = t0.xyz + t1.xyz + t2.xyz;
	
	//
	// tangent to world matrix
	float3x3 tangentToWorld;

	tangentToWorld[0] = input.T2WXf1;
	tangentToWorld[1] = input.T2WXf2;
	tangentToWorld[2] = input.T2WXf3;

	float3 Nw = mul( tangentToWorld, nt );

	float3 Nn = normalize( Nw );

	//
	// reflection

	float3 Vn = normalize( input.WorldView );
	float3 R = reflect( -Vn, Nn );

	// use the multiplier in the alpha channel of the cube map to create the High Dynamic Range FX
	
	float4 reflection = EnvTexture.Sample( EnvSampler, R );

	reflection.rgb *= ( 1.0 * reflection.a * HDRMultiplier );

	float facing = 1.0 - max( dot( Vn, Nn ) , 0 );

	float fres = 1.0;  // Kr*(FresnelBias+(1.0-FresnelBias)*pow(abs(facing),FresnelExp)); ?!

	float3 waterColour = KWater * lerp( DeepColour, ShallowColour, facing );

	colour = float4( waterColour + ( fres * reflection.rgb * ReflTint ), 1.0 );

	return colour;
}

ocean.fx from nVidia (*quotes Turvold*):


/*********************************************************************NVMH3****
$Revision: #3 $

Copyright NVIDIA Corporation 2007
TO THE MAXIMUM EXTENT PERMITTED BY APPLICABLE LAW, THIS SOFTWARE IS PROVIDED
*AS IS* AND NVIDIA AND ITS SUPPLIERS DISCLAIM ALL WARRANTIES, EITHER EXPRESS
OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, IMPLIED WARRANTIES OF MERCHANTABILITY
AND FITNESS FOR A PARTICULAR PURPOSE.  IN NO EVENT SHALL NVIDIA OR ITS SUPPLIERS
BE LIABLE FOR ANY SPECIAL, INCIDENTAL, INDIRECT, OR CONSEQUENTIAL DAMAGES
WHATSOEVER (INCLUDING, WITHOUT LIMITATION, DAMAGES FOR LOSS OF BUSINESS PROFITS,
BUSINESS INTERRUPTION, LOSS OF BUSINESS INFORMATION, OR ANY OTHER PECUNIARY
LOSS) ARISING OUT OF THE USE OF OR INABILITY TO USE THIS SOFTWARE, EVEN IF
NVIDIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.

% Simple ocean shader with animated bump map and geometric waves
% Based partly on "Effective Water Simulation From Physical Models", GPU Gems

keywords: material animation environment bumpmap

These parameters have been updated so as to look appropriate when applied to an
FX Composer2 "plane" object


To learn more about shading, shaders, and to bounce ideas off other shader
    authors and users, visit the NVIDIA Shader Library Forums at:

    http://developer.nvidia.com/forums/

******************************************************************************/

//// UN-TWEAKABLES - AUTOMATICALLY-TRACKED TRANSFORMS ////////////////


float Timer : Time ;

//////////////// TEXTURES ///////////////////

Texture2D NormalTexture;

SamplerState NormalSampler 
{
    Filter = MIN_MAG_MIP_LINEAR;
    AddressV = Wrap;
};

TextureCube EnvTexture ;

SamplerState  EnvSampler 
{
    Filter = MIN_MAG_MIP_LINEAR;
    AddressU = Clamp;
    AddressV = Clamp;
    AddressW = Clamp;
};

///////// TWEAKABLE PARAMETERS //////////////////

float BumpScale = 0.9;
float TexReptX  = 0.20;
float TexReptY  = 0.10;
float BumpSpeedX  = -0.05;
float BumpSpeedY  = 0.0;

static float2 TextureScale = float2(TexReptX,TexReptY);
static float2 BumpSpeed = float2(BumpSpeedX,BumpSpeedY);

float FresnelBias    = 0.1;
float FresnelExp     = 2.0;
float HDRMultiplier  = 2.0;
float3 DeepColor     = {0.0f, 0.0f, 0.1f};
float3 ShallowColor  = {0.0f, 0.5f, 0.6f};
float3 ReflTint      = {0.0f, 0.7f, 1.0f};

// these are redundant, but makes the ui easier:
float Kr  = 1.0f;
float KWater  = 1.0f;
float WaveAmp  = 0.05;
float WaveFreq  = 3.0;

//////////// CONNECTOR STRUCTS //////////////////

struct AppData {
    float3 Position : POSITION;   // in object space
	float3 normal : NORMAL;
	float4 colour : DIFFUSE;
	float4 spec : SPECULAR;
    float2 UV : TEXCOORD;
};

struct OceanVertOut {
    float4 HPosition  : SV_POSITION;  // in clip space
    float2 UV  : TEXCOORD;
    float3 T2WXf1 : TEXCOORD1; // first row of the 3x3 transform from tangent to cube space
    float3 T2WXf2 : TEXCOORD2; // second row of the 3x3 transform from tangent to cube space
    float3 T2WXf3 : TEXCOORD3; // third row of the 3x3 transform from tangent to cube space
    float2 bumpUV0 : TEXCOORD4;
    float2 bumpUV1 : TEXCOORD5;
    float2 bumpUV2 : TEXCOORD6;
    float3 WorldView  : TEXCOORD7;
};

// wave functions ///////////////////////

struct Wave {
  float freq;  // 2*PI / wavelength
  float amp;   // amplitude
  float phase; // speed * 2*PI / wavelength
  float2 dir;
};

#define NWAVES 2

float evaluateWave(Wave w, float2 pos, float t)
{
  return w.amp * sin( dot(w.dir, pos)*w.freq + t*w.phase);
}

// derivative of wave function
float evaluateWaveDeriv(Wave w, float2 pos, float t)
{
  return w.freq*w.amp * cos( dot(w.dir, pos)*w.freq + t*w.phase);
}


///////// SHADER FUNCTIONS ///////////////

OceanVertOut OceanVS(AppData IN)//(AppData vertexInput)
{
	float4x4 wvp = mul(mul(gWorldMatrix, gViewMatrix), gProjectionMatrix);

    OceanVertOut OUT = (OceanVertOut)0;
    
	Wave wave[NWAVES] = {
		{ WaveFreq, WaveAmp, 0.5, float2(-1, 0) },
		{ WaveFreq*2, WaveAmp*0.5, 1.3, float2(-0.7, 0.7) }	
	};

    float4 Po = float4(IN.Position.xyz,1.0);
   
    // sum waves	
    Po.y = 0.0;
    float ddx = 0.0, ddy = 0.0;
    
	for(int i=0; i<NWAVES; i++) {
		Po.y += evaluateWave(wave[i], Po.xz, Timer);
		float deriv = evaluateWaveDeriv(wave[i], Po.xz, Timer);
		ddx += deriv * wave[i].dir.x;
		ddy += deriv * wave[i].dir.y;
    }

    // compute tangent basis
    float3 B = float3(1, ddx, 0);
    float3 T = float3(0, ddy, 1);
    float3 N = float3(-ddx, 1, -ddy);
    
    OUT.HPosition = mul(Po,wvp);
    
    // pass texture coordinates for fetching the normal map
    
	OUT.UV = IN.UV.xy;//*TextureScale;
   
    float cycle = fmod(Timer, 100.0);
    
	OUT.bumpUV0.xy = IN.UV.xy * TextureScale + cycle*BumpSpeed;
    OUT.bumpUV1.xy = IN.UV.xy * TextureScale * 2.0 + cycle * BumpSpeed * 4.0;
    OUT.bumpUV2.xy = IN.UV.xy * TextureScale * 4.0 + cycle * BumpSpeed * 8.0;

    // compute the 3x3 transform from tangent space to object space
    
	float3x3 objToTangentSpace;
    
	// first rows are the tangent and binormal scaled by the bump scale
    
	objToTangentSpace[0] = BumpScale * normalize(T);
    objToTangentSpace[1] = BumpScale * normalize(B);
    objToTangentSpace[2] = normalize(N);

    OUT.T2WXf1.xyz = mul(objToTangentSpace,gWorldMatrix[0].xyz);
    OUT.T2WXf2.xyz = mul(objToTangentSpace,gWorldMatrix[1].xyz);
    OUT.T2WXf3.xyz = mul(objToTangentSpace,gWorldMatrix[2].xyz);

    // compute the eye vector (going from shaded point to eye) in cube space
    
	float3 Pw = mul(Po,gWorldMatrix).xyz;
    
	OUT.WorldView = gEye - Pw; // view inv. transpose contains eye position in world space in last row
   
    return OUT; 
 }


// Pixel Shaders

float4 OceanPS(OceanVertOut IN) : SV_TARGET//(ps_params IN)
{
    // sum normal maps
    float4 t0 = NormalTexture.Sample(NormalSampler, IN.bumpUV0)*2.0-1.0;
    float4 t1 = NormalTexture.Sample(NormalSampler, IN.bumpUV1)*2.0-1.0;
    float4 t2 = NormalTexture.Sample(NormalSampler, IN.bumpUV2)*2.0-1.0;
    float3 Nt = t0.xyz + t1.xyz + t2.xyz;//normalize((t0.xyz + t1.xyz + t2.xyz));
    
	//    float3 Nt = t1.xyz;

    float3x3 m; // tangent to world matrix
    m[0] = IN.T2WXf1;
    m[1] = IN.T2WXf2;
    m[2] = IN.T2WXf3;
    float3 Nw = mul(m,Nt);
    float3 Nn = normalize(Nw);

	// reflection
    float3 Vn = normalize(IN.WorldView);
    float3 R = reflect(-Vn, Nn);

    float4 reflection = EnvTexture.Sample(EnvSampler, R);			// texCUBE(EnvSampler, R);
    // hdr effect (multiplier in alpha channel)
    reflection.rgb *= (1.0 + reflection.a*HDRMultiplier);

    float facing = 1.0 - max(dot(Vn, Nn), 0);
    float fres =1.0;// Kr*(FresnelBias+(1.0-FresnelBias)*pow(abs(facing),FresnelExp));

    float3 waterColor = KWater * lerp(DeepColor, ShallowColor, facing);
    float3 result = waterColor + (fres * reflection.rgb * ReflTint);  //
    
     return float4(result.rgb,1.0);
}

//////////////////// TECHNIQUE ////////////////


technique10 Ocean  {
    pass p0  {
    
    	SetVertexShader(CompileShader(vs_4_0, OceanVS()));
		SetGeometryShader(NULL);
		SetPixelShader(CompileShader(ps_4_0, OceanPS()));
    }
}

///////////////////////////////// eof ///


You're setting sampler states within your shader files; those state blocks are an Effects-framework feature and do nothing outside an .fx file, so the samplers must be created and bound from your code. Assuming you're using C++, that'll be CreateSamplerState and PSSetSamplers.
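Roughly, something like this (just a sketch; device/context and the m_Sampler name are placeholders for whatever your framework already has):


// at init: describe and create the sampler state object
D3D11_SAMPLER_DESC sd;
ZeroMemory( &sd, sizeof(sd) );
sd.Filter         = D3D11_FILTER_MIN_MAG_MIP_LINEAR;
sd.AddressU       = D3D11_TEXTURE_ADDRESS_WRAP;
sd.AddressV       = D3D11_TEXTURE_ADDRESS_WRAP;
sd.AddressW       = D3D11_TEXTURE_ADDRESS_WRAP;
sd.ComparisonFunc = D3D11_COMPARISON_NEVER;
sd.MinLOD         = 0.0f;
sd.MaxLOD         = D3D11_FLOAT32_MAX;

ID3D11SamplerState* m_Sampler = NULL;
if( FAILED( device->CreateSamplerState( &sd, &m_Sampler ) ) )
    return false;

// at render: bind it to the sampler slot(s) the pixel shader reads from
context->PSSetSamplers( 0, 1, &m_Sampler );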

This is indeed being done by the shader class that wraps up shading in my app:


void Shader::SetMatricesBuffer( ID3D11DeviceContext* deviceContext, D3DXVECTOR4 eye, D3DXMATRIX world, D3DXMATRIX view, D3DXMATRIX proj )
{
	HRESULT						result;

	D3D11_MAPPED_SUBRESOURCE	map;
	unsigned int				bufferNumber;

	MatricesBuffer	* ptr;

	D3DXMatrixTranspose( &world, &world );
	D3DXMatrixTranspose( &view, &view );
	D3DXMatrixTranspose( &proj, &proj );
	
	//
	// lock
	result = deviceContext->Map( m_matrixBuffer, 0, D3D11_MAP_WRITE_DISCARD, 0, &map );

	if( FAILED(result) ) return;

	//write
	ptr = (MatricesBuffer*) map.pData;

	ptr->world			= world;
	ptr->view			= view;
	ptr->projection		= proj;
	ptr->eyePosition	= eye;

	//unlock
	deviceContext->Unmap( m_matrixBuffer, 0 );

	bufferNumber = 0;

	//set
	deviceContext->VSSetConstantBuffers( bufferNumber, 1, &m_matrixBuffer );
}

//---------------------------------------------------------------------------------------------------------------

void Shader::SetLightBuffer( ID3D11DeviceContext* deviceContext, D3DXVECTOR4 ambient, D3DXVECTOR4 diffuse, D3DXVECTOR3 direction, float timeElapsed )
{
	HRESULT						result;

	D3D11_MAPPED_SUBRESOURCE	map;
	unsigned int				bufferNumber;

	LightBuffer		* ptr;

	//
	//lock..
	result = deviceContext->Map( m_lightBuffer, 0, D3D11_MAP_WRITE_DISCARD, 0, &map );

	if(FAILED( result )) return;

	//write..
		//grab a ptr to the data
	ptr = (LightBuffer*)map.pData;

	ptr->ambient	= ambient;
	ptr->diffuse	= diffuse;
	ptr->direction  = direction;
	ptr->tdelta		= timeElapsed;


	//unlock
	deviceContext->Unmap( m_lightBuffer, 0 );

	bufferNumber = 0;

	//set
	deviceContext->PSSetConstantBuffers( bufferNumber, 1, &m_lightBuffer );
}

//---------------------------------------------------------------------------------------------------------------

void Shader::SetTextureData( ID3D11DeviceContext* deviceContext, ID3D11ShaderResourceView* t, int t_index )
{
	deviceContext->PSSetShaderResources( t_index, 1, &t );
}

And then my water / ocean object in C++ land:


bool DXWater::Render( ID3D11DeviceContext *context, D3DXVECTOR4 eye, D3DXMATRIX view, D3DXMATRIX proj,
						D3DXVECTOR4 amb, D3DXVECTOR4 dif, D3DXVECTOR3 dir, float tDelta )
{
	m_Shader->SetMatricesBuffer( context, eye, m_modelMatrix, view, proj );
	m_Shader->SetLightBuffer( context, amb, dif, dir, tDelta );
	m_Shader->SetTextureData( context, m_texNormal->GetTexture(),  0 );
	m_Shader->SetTextureData( context, m_texCubeMap->GetTexture(), 1 );
	
	if( RenderBuffers( context ) )	//put our data on the gpu
		if(m_Shader->Render( context, m_numIndices ) )	//tell the shader to render
			return true;

	return false;
}

As I said, shading works for other objects, just not for this one. I think it may have something to do with the TextureCube declaration in the shaders, but I'm not sure..


Wait, PSSetSamplers?

I see! I will have a look into this.

Edit: [thinking]

Ah okay.. I've set up a sampler state in C++ and forgotten about it, and I doubt it matches the ones in ocean.fx.

I need to re-jig this shader class a bit so I can pass it a sampler state (or two, or three..) to set up, rather than hardcoding one sampler.
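Something like this, maybe (just sketching it out; the extra sampler parameter isn't in my class yet):


void Shader::SetTextureData( ID3D11DeviceContext* deviceContext, ID3D11ShaderResourceView* t, ID3D11SamplerState* s, int t_index )
{
	// bind the texture and a matching sampler to the same slot,
	// so t0 pairs with s0, t1 with s1, and so on
	deviceContext->PSSetShaderResources( t_index, 1, &t );
	deviceContext->PSSetSamplers( t_index, 1, &s );
}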

Bloody good call sir! Thank you muchly

Is there a tutorial/example you know of where I can see how to set up a sampler state for a cubemap? All the stuff my tutor has given us is DX10 and I don't see any sampler set up anywhere 'cos he's always using the


effect->GetVariableByName("g_cubeMap");

-type API.


What would I change in this?


    // Create a texture sampler state description.
    samplerDesc.Filter = D3D11_FILTER_MIN_MAG_MIP_LINEAR;
    samplerDesc.AddressU = D3D11_TEXTURE_ADDRESS_WRAP;
    samplerDesc.AddressV = D3D11_TEXTURE_ADDRESS_WRAP;
    samplerDesc.AddressW = D3D11_TEXTURE_ADDRESS_WRAP;
    samplerDesc.MipLODBias = 0.0f;
    samplerDesc.MaxAnisotropy = 1;
    samplerDesc.ComparisonFunc = D3D11_COMPARISON_ALWAYS;
    samplerDesc.BorderColor[0] = 0;
    samplerDesc.BorderColor[1] = 0;
    samplerDesc.BorderColor[2] = 0;
    samplerDesc.BorderColor[3] = 0;
    samplerDesc.MinLOD = 0;
    samplerDesc.MaxLOD = D3D11_FLOAT32_MAX;

    // Create the texture sampler state.
    result = device->CreateSamplerState( &samplerDesc, &m_Sampler );
    if( FAILED(result) )        return false;
 

Thanks again :)

There's nothing to change in the sampler, it'll work right away for all texture dimensions.

So what I've got should work then? (I thought maybe I couldn't see my water because only the normal map was being sampled and the cube map wasn't; hence no colour, hence invisible?)

Or do I still need to create a second sampler state on the C++ side and bind it to the shader at init time?

i.e. I could use the sampler I set up for the normal map for the cubemap in the HLSL, but I should really use the cubemap sampler, and that doesn't exist because I've not created it in C++ with device->CreateSamplerState()?

You're setting sampler states within your effect file, these don't exist in shaders and must be done through your code. Assuming you're using C++ that'll be CreateSamplerState, PSSetSamplers.

So does one .fx file 'do more' than two or three (or four or five) shader files?

What's the difference if it's all HLSL?

I figured it was just the same thing in one file.. can you not do multiple passes with shaders?

..That's probably a bit off topic to be fair.

Okay so thinking out loud:

HLSL:


Texture2D    NormalTexture : register(t0); // 
TextureCube  EnvTexture    : register(t1); // these are set on render call

SamplerState NormalSampler                 // this is set up at init
{ Filter = MIN_MAG_MIP_LINEAR; AddressV = Wrap; }; 

SamplerState EnvSampler                    // this doesn't exist on the C++ side?
{ Filter = MIN_MAG_MIP_LINEAR; AddressU = Clamp; AddressV = Clamp; AddressW = Clamp; };

C++


// Create texture sampler state.

result = device->CreateSamplerState( &samplerDesc, &m_Sampler ); // so this would be "NormalSampler" in the shader
                                                                 // so EnvSampler is... ?! o_O

// call this once for the normal map (reg 0) and once for the cubemap (reg 1) at render time
void Shader::SetTextureData( ID3D11DeviceContext* deviceContext, /* samplerID, */ ID3D11ShaderResourceView* t, int t_index  ) {             
        deviceContext->PSSetShaderResources( t_index, 1, &t ); // so this has nowt to do with the sampler
        /* deviceContext->PSSetSamplers( samplerID ... ) ;   */
}

How would I dictate from the C++ which sampler is for which texture?

Thanks for your patience with me, I know I've got a lot of questions :)

If you know of a bit of code you could point me at, tutorial/example/etc, that would be sweet

You can use the same sampler for both NormalSampler and EnvSampler (C++):


context->PSSetSamplers(0, 1, &m_Sampler);
context->PSSetSamplers(1, 1, &m_Sampler);

then in HLSL you need:


SamplerState NormalSampler : register(s0);
SamplerState EnvSampler : register(s1);

A vertex or pixel shader is basically a single function that accesses textures, samplers and other resources through registers; if you want multiple passes, you need multiple shaders:


context->PSSetShader(shader1, ...);
context->Draw(...);
context->PSSetShader(shader2, ...);
context->Draw(...);

Effect files wrap all of this and make things easier by letting you specify techniques inside the file; the effect compiler also automatically creates the sampler states and assigns registers for everything.
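For example, what a technique pass does for you becomes roughly this in raw D3D11 (sketch; vsBlob/psBlob, m_Sampler and indexCount stand in for whatever you already have):


// at init: create shader objects from bytecode you compiled yourself
ID3D11VertexShader* vs = NULL;
ID3D11PixelShader*  ps = NULL;
device->CreateVertexShader( vsBlob->GetBufferPointer(), vsBlob->GetBufferSize(), NULL, &vs );
device->CreatePixelShader ( psBlob->GetBufferPointer(), psBlob->GetBufferSize(), NULL, &ps );

// at draw: what SetVertexShader/SetPixelShader in the .fx pass were doing,
// plus the sampler binding the effect framework used to handle for you
context->VSSetShader( vs, NULL, 0 );
context->PSSetShader( ps, NULL, 0 );
context->PSSetSamplers( 0, 1, &m_Sampler );
context->DrawIndexed( indexCount, 0, 0 );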

I see.

That's beautiful! Thank you kindly :)

Sorry it took so long to get back to this, I'm juggling way too many things right now

Edit:

Are there any efficiency differences between the two?

This topic is closed to new replies.
