Texture looks pixelated at close view

7 comments, last by Norman Barrows 11 years ago
Good day,
I have a problem with the textures in my DirectX 11 app.
In the distance the texture filtering seems to work as it should, but the textures look very pixelated when I am very close to them.
See this picture:
98555950.jpg
And this one:
70372621.jpg
I am using "D3D11_FILTER_MIN_MAG_MIP_LINEAR" to create the texture sampler.

//Code for sampler creation
D3D11_SAMPLER_DESC SamDesc;
ZeroMemory(&SamDesc, sizeof(D3D11_SAMPLER_DESC));
SamDesc.Filter		= D3D11_FILTER_MIN_MAG_MIP_LINEAR;
SamDesc.AddressU	= D3D11_TEXTURE_ADDRESS_WRAP;
SamDesc.AddressV	= D3D11_TEXTURE_ADDRESS_WRAP;
SamDesc.AddressW	= D3D11_TEXTURE_ADDRESS_WRAP;
SamDesc.MipLODBias	= 0.0f;
SamDesc.MaxAnisotropy	= 1;
SamDesc.ComparisonFunc	= D3D11_COMPARISON_NEVER;
SamDesc.BorderColor[0]	= SamDesc.BorderColor[1] = SamDesc.BorderColor[2] = SamDesc.BorderColor[3] = 0;
SamDesc.MinLOD		= 0;
SamDesc.MaxLOD		= D3D11_FLOAT32_MAX;

I also tested the filter "D3D11_FILTER_ANISOTROPIC" with a MaxAnisotropy of 4 or 8, and the texture still looks pixelated at very close view.
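A minimal sketch of that anisotropic variant (only Filter and MaxAnisotropy change compared to the SamDesc above; the member names m_pDevice and m_pDeviceContext are assumed to match the rest of this post):

//Code sketch: anisotropic sampler variant (illustrative, not the original code)
SamDesc.Filter        = D3D11_FILTER_ANISOTROPIC;
SamDesc.MaxAnisotropy = 8; // 4 was also tried

ID3D11SamplerState* pSampler = NULL;
HRESULT hr = m_pDevice->CreateSamplerState(&SamDesc, &pSampler);
if (SUCCEEDED(hr))
{
    // Bind to slot s0, which the pixel shader below declares as samLinear2D_1
    m_pDeviceContext->PSSetSamplers(0, 1, &pSampler);
}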
The texture loading is done with "D3DX11CreateShaderResourceViewFromFile".

//Code for texture loading
D3DX11_IMAGE_LOAD_INFO imageInfo;
imageInfo.Width		= D3DX11_DEFAULT;
imageInfo.Height	= D3DX11_DEFAULT;
imageInfo.Depth		= D3DX11_DEFAULT;
imageInfo.FirstMipLevel	= D3DX11_DEFAULT;
imageInfo.MipLevels	= D3DX11_DEFAULT;
imageInfo.Usage		= D3D11_USAGE_DEFAULT;
imageInfo.BindFlags	= D3D11_BIND_SHADER_RESOURCE;
imageInfo.Format	= DXGI_FORMAT_R8G8B8A8_UNORM;
imageInfo.MipFilter	= D3DX11_FILTER_LINEAR;
imageInfo.Filter	= D3DX11_FILTER_LINEAR;

D3DX11CreateShaderResourceViewFromFile(..)
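The full call has roughly this shape (the file name is a placeholder and error handling is omitted; the actual arguments were elided above):

//Code sketch: full D3DX11CreateShaderResourceViewFromFile call (illustrative)
ID3D11ShaderResourceView* pSRV = NULL;
HRESULT hr = D3DX11CreateShaderResourceViewFromFile(
    m_pDevice,      // device pointer, assumed to match the rest of this post
    L"grass.dds",   // placeholder file name
    &imageInfo,     // the D3DX11_IMAGE_LOAD_INFO filled in above
    NULL,           // no thread pump, load synchronously
    &pSRV,          // resulting shader resource view
    NULL);          // optional HRESULT for async loading, unused here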

It doesn't matter what kind of filter I set, or whether I use default values for the 'D3DX11_IMAGE_LOAD_INFO' structure; it is still pixelated at close view.
First of all, I tested whether the swap chain, back buffer, and depth texture are the same size as at window creation.
They were all matching.
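A runtime check for that looks roughly like this (an illustrative sketch using the member names from the buffer creation code further down, not the original debugging code):

//Code sketch: compare buffer sizes against the window client rect (illustrative)
RECT rcClient;
GetClientRect(hWnd, &rcClient);
UINT clientW = rcClient.right - rcClient.left;
UINT clientH = rcClient.bottom - rcClient.top;

D3D11_TEXTURE2D_DESC bbDesc, dsDesc;
m_pBackBuffer->GetDesc(&bbDesc);          // back buffer obtained from the swap chain
m_pDepthStencilTexture->GetDesc(&dsDesc); // depth/stencil texture

if (bbDesc.Width != clientW || bbDesc.Height != clientH ||
    dsDesc.Width != clientW || dsDesc.Height != clientH)
{
    OutputDebugStringA("Buffer size does not match the client rect!\n");
}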

//Code window creation
RECT rc = { 0, 0, 1600, 960 };
AdjustWindowRect( &rc, WS_OVERLAPPEDWINDOW, FALSE );

hWnd = CreateWindow(L"myclass", L"myapp", WS_OVERLAPPEDWINDOW,
      CW_USEDEFAULT, CW_USEDEFAULT, rc.right - rc.left, rc.bottom - rc.top, NULL, NULL, hInstance, NULL);

if (!hWnd)
    return FALSE;

g_WindowHWND = hWnd;
ShowWindow(g_WindowHWND, nCmdShow);

//Code buffer creation after window is created
m_hWndMainRenderTarget = hwnd;

// Determine the render target size from the window client rect
RECT rcRenderTarget;
GetClientRect( hwnd, &rcRenderTarget);
m_uiRenderTargetWidth	= rcRenderTarget.right-rcRenderTarget.left;
m_uiRenderTargetHeight	= rcRenderTarget.bottom-rcRenderTarget.top;


// Create swapchain settings
DXGI_SWAP_CHAIN_DESC sSwapChainDesc;
ZeroMemory( &sSwapChainDesc, sizeof( sSwapChainDesc ) );

sSwapChainDesc.BufferCount		= 1;
sSwapChainDesc.BufferDesc.Width		= m_uiRenderTargetWidth;
sSwapChainDesc.BufferDesc.Height	= m_uiRenderTargetHeight;
sSwapChainDesc.BufferDesc.Format	= DXGI_FORMAT_R8G8B8A8_UNORM;
sSwapChainDesc.BufferUsage		= DXGI_USAGE_RENDER_TARGET_OUTPUT;
sSwapChainDesc.OutputWindow		= m_hWndMainRenderTarget;
sSwapChainDesc.SampleDesc.Count		= 1;
sSwapChainDesc.SampleDesc.Quality	= 0;
sSwapChainDesc.Windowed			= m_bWindowed;
sSwapChainDesc.SwapEffect		= DXGI_SWAP_EFFECT_DISCARD;

// Retrieve the device, adapter and factory that were created with the device
IDXGIDevice * pDXGIDevice;
hr = m_pDevice->QueryInterface(__uuidof(IDXGIDevice), (void **)&pDXGIDevice);

IDXGIAdapter * pDXGIAdapter;
hr = pDXGIDevice->GetParent(__uuidof(IDXGIAdapter), (void **)&pDXGIAdapter);

IDXGIFactory * pIDXGIFactory;
pDXGIAdapter->GetParent(__uuidof(IDXGIFactory), (void **)&pIDXGIFactory);

// Create the swap chain separately
if(FAILED( hr = pIDXGIFactory->CreateSwapChain( pDXGIDevice, &sSwapChainDesc, &m_pSwapChain ) ))
{
         return hr;
}
// Get a pointer to the back buffer
if(FAILED( hr = m_pSwapChain->GetBuffer( 0, __uuidof( ID3D11Texture2D ), ( LPVOID* )&m_pBackBuffer )))
{
	return hr;
}

// Create a render-target view
if(FAILED( hr = m_pDevice->CreateRenderTargetView( m_pBackBuffer, NULL, &m_pRenderTargetView )))
{
	return hr;
}

D3D11_TEXTURE2D_DESC	sDepthStencilTextureDesc;

// Create depth stencil texture etc..
sDepthStencilTextureDesc.Width			= m_uiRenderTargetWidth;
sDepthStencilTextureDesc.Height			= m_uiRenderTargetHeight;
sDepthStencilTextureDesc.MipLevels		= 1;
sDepthStencilTextureDesc.ArraySize		= 1;
sDepthStencilTextureDesc.Format			= DXGI_FORMAT_D24_UNORM_S8_UINT;
sDepthStencilTextureDesc.SampleDesc.Count	= 1;
sDepthStencilTextureDesc.SampleDesc.Quality	= 0;
sDepthStencilTextureDesc.Usage			= D3D11_USAGE_DEFAULT;
sDepthStencilTextureDesc.BindFlags		= D3D11_BIND_DEPTH_STENCIL;
sDepthStencilTextureDesc.CPUAccessFlags		= 0;
sDepthStencilTextureDesc.MiscFlags		= 0;

if(FAILED( hr = m_pDevice->CreateTexture2D( &sDepthStencilTextureDesc, NULL, &m_pDepthStencilTexture )))
{
	return hr;
}
	
D3D11_DEPTH_STENCIL_VIEW_DESC	sDepthStencilViewDesc;

// Depth stencil view desc...
ZeroMemory( &sDepthStencilViewDesc, sizeof( sDepthStencilViewDesc ) );
sDepthStencilViewDesc.Format			= DXGI_FORMAT_D24_UNORM_S8_UINT;
sDepthStencilViewDesc.ViewDimension		= D3D11_DSV_DIMENSION_TEXTURE2D;
sDepthStencilViewDesc.Texture2D.MipSlice	= 0;
sDepthStencilViewDesc.Flags			= 0;

if(FAILED( hr = m_pDevice->CreateDepthStencilView( m_pDepthStencilTexture, &sDepthStencilViewDesc,&m_pDepthStencilView )))
{
	return hr;
}

// Bind the view
m_pDeviceContext->OMSetRenderTargets( 1, &m_pRenderTargetView, m_pDepthStencilView );

D3D11_VIEWPORT sViewPort;

// Setup the viewport
sViewPort.Width		= (FLOAT)m_uiRenderTargetWidth;
sViewPort.Height	= (FLOAT)m_uiRenderTargetHeight;
sViewPort.MinDepth	= 0.0f;
sViewPort.MaxDepth	= 1.0f;
sViewPort.TopLeftX	= 0;
sViewPort.TopLeftY	= 0;

m_pDeviceContext->RSSetViewports( 1, &sViewPort );

During a debug session I checked whether the dimensions of all the created buffers match the window client rect.
Every created buffer has the same size as the client area of the created window.
The window itself is very basic: no menu, bars or anything, just a simple title bar.
Text displayed on the screen by my font engine is pixel perfect, so I think the buffer sizes match the window client area size.
At this point I have to say that other applications, like the samples from the Microsoft SDK, work as expected.
Since I am running out of ideas, here is some more code that could be relevant.

//Code projection matrix creation. zNear = 1.0f and zFar = 12000.0f
int width	= g_CDevice.GetRenderTargetWidth();
int height	= g_CDevice.GetRenderTargetHeight();
float fov	= 0.785398163f;
float aspectRatio = width / (float)height;
D3DXMatrixPerspectiveFovLH(&g_mProjection, fov, aspectRatio, g_fZNear, g_fZFar);

The pixelated effect normally starts when I come close to the zNear value, something like 2 units above the zNear setting.
Here is the pixel shader I am using.
It is a combination of detail textures with slope-based texturing and some blending of mixed UV values for the detail textures.
They are also combined via a noise blend map to mix the slope-based texture values and get rid of the repeating effect of the detail textures.
All textures used here are 512x512.
The textures are all loaded the same way as described at the beginning of this post and use the same texture sampler.

//Code pixelshader
Texture2D txColorMap_1 : register( t0 ); // grass
Texture2D txColorMap_2 : register( t1 ); // dirt
Texture2D txColorMap_3 : register( t2 ); // rock
Texture2D txColorMap_4 : register( t3 ); // rgba random blend map
Texture2D txColorMap_5 : register( t4 ); // noisy normal map

// Texture sampler - D3D11_FILTER_MIN_MAG_MIP_LINEAR / ADDRESS: D3D11_TEXTURE_ADDRESS_WRAP
SamplerState samLinear2D_1 : register( s0 );

struct PixelInputType
{
    float4 position : SV_POSITION;
    float3 normal   : TEXCOORD0;
    float2 tex_1    : TEXCOORD1; // uv for detail
    float2 tex_2    : TEXCOORD2; // uv for noise map
};


float4 PSMain(PixelInputType input) : SV_Target
{
	const float uvDetail = 32.0f; // detail uv in the range of 0.125f - 0.25f
	const float4 vLightDir = float4(-0.5f, 1.0f, 1.0f, 1.0f); // Directional light for testing
	const float4 vLightColor = float4(0.8f, 0.8f, 0.8f, 1.0f);
	const float4 vAmbientColor = float4(0.2f, 0.2f, 0.2f, 1.0f);

	float4 finalColor;
	float blendAmount;
	
	float4 t_1 =  txColorMap_4.Sample( samLinear2D_1, input.tex_2 * 0.125f);// random map blend values
	float4 col;

	// Read detail texture
	float4 c1 = txColorMap_1.Sample( samLinear2D_1, input.tex_1 * uvDetail);
	float4 c2 = txColorMap_2.Sample( samLinear2D_1, input.tex_1 * uvDetail);
	float4 c3 = txColorMap_3.Sample( samLinear2D_1, input.tex_1 * uvDetail);
	
	// Read detail texture with lower uv values for mixing
	float4 c4 = txColorMap_1.Sample( samLinear2D_1, input.tex_1 * uvDetail * 0.25f);
	float4 c5 = txColorMap_2.Sample( samLinear2D_1, input.tex_1 * uvDetail * 0.125f);
	float4 c6 = txColorMap_3.Sample( samLinear2D_1, input.tex_1 * uvDetail * 0.125f);
	
	// lerp the detail values using the blend values from the random blend map
	c1 = lerp(c1*c4, c2, t_1.r);
	c2 = lerp(c2*c5, c3, t_1.g);
	c3 = lerp(c3,c6, t_1.b);

	//  Slope calculation based on rastertek tutorial
	float slope = 1.0f - input.normal.y;

    if(slope < 0.2)
    {
        blendAmount = slope / 0.2f;
        col = lerp(c1, c2, blendAmount);
    }
	
    if((slope < 0.7) && (slope >= 0.2f))
    {
        blendAmount = (slope - 0.2f) * (1.0f / (0.7f - 0.2f));
        col = lerp(c2, c3, blendAmount);
    }

    if(slope >= 0.7) 
    {
        col = c3;
    }
	
	// add normal map noise values for some cheap bump effect
	float3 n1 = txColorMap_5.Sample(samLinear2D_1, input.tex_1 * uvDetail).xyz;
	float3 n2 = txColorMap_5.Sample(samLinear2D_1, input.tex_1 * uvDetail * 0.25f).xyz;
	n1 = lerp(n1, n2, t_1.r);
	float  d = dot((float3)vLightDir,n1);
	col *= d;
	
	// calculate directional lighting
	finalColor = saturate( dot( (float3)vLightDir,input.normal) * vLightColor) * col;
	finalColor += col * vAmbientColor;

	return finalColor;
}

That's all the code that could be relevant to guessing what kind of bug I have.
Thanks in advance to anyone here.

The one thing I can't see mentioned here is the UV coordinates you are using, which may mean you are trying to debug code that doesn't actually need to be debugged. Have a look (if you haven't already) to see if the UV coords you are using use the full resolution of the texture.

Aimee


Thank you for the answer, AmzBee.

I can't test any code here at work at the moment, so I will do it when I am back home later.

The UVs are generated inside my vertex shader.

Can very low UV coordinates cause problems?

So when mapping from 0 to 1, the UVs for it are something like 0.0078125 - 0.01...?

But I'll check my vertex shader later.

Thank you very much.


Woot, I solved it now.

There was a bug in the UV generation in the vertex shader process.

Thank you very much.

And sorry for the big post :)

Can very low UV coordinates cause problems?
So when mapping from 0 to 1, the UVs for it are something like 0.0078125 - 0.01...?

I don't do shaders, but I follow pretty much everything you're doing.
It looks like you might have a classic case of what I call "5 foot texture, 10 foot rock", i.e. your texture is not high enough resolution for the size of the mesh it's mapped onto.
When this occurs, you can get very small UV coords for a given tri, such as you mention.
That, and your first image (a classic case of a low-res texture), are what make me suspect "5 foot texture, 10 foot rock", or "1 meter texture, 2 meter rock" if you prefer.
If that's the case, then the image in your texture is a picture of a piece of land that's not as big as the area it gets mapped onto in your world, so it gets stretched. Instead of vertex coords 0 to 1 mapping to UVs 0 to 1, you get UVs 0 to 0.1 or 0.01, etc.
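A quick back-of-the-envelope check of that, using the numbers already mentioned in this thread (512x512 textures, a UV value of 0.0078125):

// how many texels actually cover a face when the UV span is tiny
const float textureSize = 512.0f;     // texels along one axis
const float uvSpanFull  = 1.0f;       // UVs 0..1 -> 512 texels across the face
const float uvSpanTiny  = 0.0078125f; // UVs 0..0.0078125 -> only 4 texels across

float texelsFull = textureSize * uvSpanFull; // 512 texels: full texture resolution used
float texelsTiny = textureSize * uvSpanTiny; // 4 texels: each texel gets stretched about 128x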
So you can do one, or a combination, of the following:
1. Increase texture resolution. I run 256x256 for speed, but have tested up to 4096x4096 on the exact same case you're working on (ground textures). I saw that first image of yours and thought "yeah, been there, done that": walking up to rocks and ground that sticks up, and figuring out how to get high enough resolution with the lowest-res textures possible, while tweaking texture wrap, quad size, and seamless textures. Increasing texture size costs memory, and bigger textures run slower (at least in the fixed-function pipeline).
2. Decrease quad/triangle size. Half as big with the same mapping means twice the resolution from the same texture. Seamless textures may be required.
3. Repeat the texture more than once across a quad (see the sketch after this list) - this usually requires a seamless texture. It gets you the resolution of a high-res texture without the memory hit, and doesn't increase your triangle count. The downside is possible moiré patterns from repeating the same texture two or more times across a surface.
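A minimal sketch of option 3: tiling just means scaling the UVs past 1 and letting D3D11_TEXTURE_ADDRESS_WRAP (which the sampler in the first post already uses) repeat the texture. The quad size and tile count here are made-up example values:

// option 3 sketch: repeat a seamless texture N times across a quad by scaling the UVs.
// with ADDRESS_WRAP on the sampler, UVs outside 0..1 simply wrap around.
void ComputeTiledUV(float x, float z, float quadSize, float tileCount,
                    float& u, float& v)
{
    // x, z: vertex position inside the quad (0..quadSize on each axis)
    u = (x / quadSize) * tileCount; // e.g. 0..4 -> texture repeats 4 times in U
    v = (z / quadSize) * tileCount; // e.g. 0..4 -> texture repeats 4 times in V
}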
Here's the load code for the texture:
// load texture - creates mipmaps. no image filtering, box filtering for mipmaps.
void Zloadtex(char *s)
{
    HRESULT h;
    char s2[100];
    h = D3DXCreateTextureFromFileExA(Zd3d_device_ptr, s,
        D3DX_DEFAULT,      // width
        D3DX_DEFAULT,      // height
        D3DX_DEFAULT,      // mip levels (default = complete chain)
        0,                 // usage (0 = not render target & not dynamic)
        D3DFMT_A8R8G8B8,   // format
        mempool,           // memory pool of your choice - managed, most likely
        D3DX_FILTER_NONE,  // image filter (default = tri + dither)
        D3DX_DEFAULT,      // mip filter (default = box)
        0,                 // color key (0 = none)
        NULL, NULL, &(Ztex[numtextures].tex));
    if (h != D3D_OK)
    {
        strcpy_s(s2, 100, "Error loading ");
        strcat_s(s2, 100, s);
        Zmsg2(s2);
        exit(1);
    }
    strcpy_s(Ztex[numtextures].name, s);
    numtextures++;
}
And here are the states of the pipeline when drawing:
// no blending, turn on gouraud shading
Zd3d_device_ptr->SetTextureStageState( 0, D3DTSS_COLOROP, D3DTOP_MODULATE );
Zd3d_device_ptr->SetTextureStageState( 0, D3DTSS_COLORARG1, D3DTA_TEXTURE );
Zd3d_device_ptr->SetTextureStageState( 0, D3DTSS_COLORARG2, D3DTA_DIFFUSE );
Zd3d_device_ptr->SetRenderState(D3DRS_SHADEMODE,D3DSHADE_GOURAUD);
Zmipmaps(1); // Zd3d_device_ptr->SetSamplerState(0,D3DSAMP_MIPFILTER,D3DTEXF_LINEAR);
Zminmagfilter(2); // 0=point, 1=linear, 2=aniso min and magnification
Zambient(255,255,255); // i control ambient with materials
Znormalize(1); // normalize normals on. i do scaling of meshes on the fly.
Zspecular(1); // turn on specular
Zalphablend(0); // alpha blend off
And here are the results with a 256x256 texture on a 10x10 quad: when I get up close and personal like in your first image, I get about 4x the resolution you are getting (i.e. just slightly pixelated).
Well, I was going to post an image, but the image button doesn't work. I have a couple of screenshots in my gallery if you want to check them out; not sure how well they show the ground though.

It is a combination of detail textures with slope-based texturing and some blending of mixed UV values for the detail textures.
They are also combined via a noise blend map to mix the slope-based texture values and get rid of the repeating effect of the detail textures.

I believe this is the only place where we're doing things differently. I do simple anisotropic texture mapping of a seamless texture onto a height-mapped quad.
As I said, I'm using a 256x256 texture on a 10x10 quad. Based on your first image, if you're using a 512x512 texture, it looks like you're mapping it onto a quad size of about 80x80, whereas 512x512 mapped onto 20x20 would be the equivalent of what I'm doing (4 times the resolution).
From my testing, I found I couldn't go bigger than a 10x10 quad with a 256x256 texture without unacceptable levels of pixelation at the closest ranges. That would be the equivalent of a 20x20 quad using 512x512 textures.
Don't forget that the size of the image in the texture can be a factor too.
If you're mapping a 512x512 texture onto a 20x20 quad at 1 meter per D3D or OGL unit, and the image on your texture is only a section of land 5 meters across, you'll also get results similar to your first image.
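For what it's worth, that equivalence boils down to texels per world unit; a quick calculation with the sizes mentioned in this post:

// texel density = texture size / quad size (texels per world unit along one axis)
float density256on10 = 256.0f / 10.0f; // 25.6 texels/unit (256x256 on a 10x10 quad)
float density512on20 = 512.0f / 20.0f; // 25.6 texels/unit (512x512 on a 20x20 quad, the equivalent)
float density512on80 = 512.0f / 80.0f; //  6.4 texels/unit (512x512 on ~80x80, roughly what the first image suggests)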

Norm Barrows


There was a bug in the UV generation in the vertex shader process.

UV coords too small, eh?

Norm Barrows


Woot, I solved it now. [...] And sorry for the big post :)

No worries, sometimes the biggest problem can be solved with the smallest fix, lol :P

Aimee

There was a bug in the UV generation in the vertex shader process.

UV coords too small, eh?

Nice info, Norman.

They were not too small, but I made a small mistake.

Since I update a small constant buffer for each patch that gets rendered, I also supply sector data from the patch.

The sector data is used to generate continuous UV coordinates.

But I had a small bug in the update process for the constant buffer.

Nice info you supplied. Thank you very much.
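For anyone who hits the same thing, the pattern described above is roughly the sketch below. The struct layout, slot number, and names are illustrative only, not the actual code:

//Code sketch: per-patch constant buffer carrying sector data for continuous UVs (illustrative)
struct PerPatchCB
{
    float sectorOffsetX; // sector/world offset of this patch
    float sectorOffsetZ;
    float patchSize;     // size of one patch in world units
    float padding;       // keep the buffer a multiple of 16 bytes
};

void UpdatePatchCB(ID3D11DeviceContext* pContext, ID3D11Buffer* pCB,
                   float offsetX, float offsetZ, float patchSize)
{
    PerPatchCB cb = { offsetX, offsetZ, patchSize, 0.0f };

    // the vertex shader can then build continuous UVs across patches, e.g.
    // uv = (sectorOffset + localPosition.xz) / terrainSize
    pContext->UpdateSubresource(pCB, 0, NULL, &cb, 0, 0);
    pContext->VSSetConstantBuffers(1, 1, &pCB); // slot b1, as an example
}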

But I had a small bug in the update process for the constant buffer.

It's always the little stuff, isn't it?

I was working on blending height map edges the other day. It helps if you calculate the average height at an edge as (h1+h2)/2 as opposed to h1 + h2/2; I forgot the parentheses. That one took a few hours to find, tracing up and down the call stack (so to speak, purely by code inspection).
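In code form, with made-up heights:

// example heights for two patches meeting at an edge
float h1 = 4.0f, h2 = 6.0f;
float wrongAvg = h1 + h2 / 2.0f;   // parses as h1 + (h2 / 2.0f) = 7.0
float rightAvg = (h1 + h2) / 2.0f; // the intended average = 5.0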

Norm Barrows

