Jump to content

  • Log In with Google      Sign In   
  • Create Account

KaiserJohan

Member Since 08 Apr 2011
Online Last Active Today, 04:09 AM

Topics I've Started

Implementing SAO

01 October 2014 - 02:00 PM

I'm trying to implement SAO according to this paper: http://graphics.cs.williams.edu/papers/SAOHPG12/

 

The issue is that the SAO texture doesn't look as I would expect it to. Here are some screenshots, before blurring:

 

Attached File  sao_noblur1.png   810.84KB   0 downloads

 

Attached File  sao_noblur2.png   571.76KB   0 downloads

 

After horizontal + vertical blurring:

 

Attached File  sao_blur1.png   584.86KB   0 downloads

 

Attached File  sao_blur2.png   484.06KB   0 downloads

 

Look at the second screenshot, for example — it just doesn't look right. The thick, black lines in particular, and some areas are completely white where I expect them to be dark.

 

The SAO shader code is virtually the same as the one in the linked paper, with a few alterations:

#ifndef SSAO_PIXEL_HLSL
#define SSAO_PIXEL_HLSL

#include "Constants.h"
#include "Common.hlsl"

// SAO (Scalable Ambient Obscurance) tuning constants, following the HPG12 paper.
static const float gKernelSize = 11.0;            // NOTE(review): declared but not referenced in this file
static const float gRadius = 1.0;                 // world-space sampling radius
static const float gRadius2 = gRadius * gRadius;  // radius squared, used by the falloff term
static const float gProjScale = 500.0;            // presumably pixels-per-world-unit scale for the disk radius — verify against the projection setup
static const float gNumSpiralTurns = 7;           // turns of the sampling spiral per pixel
static const float gBias = 0.012;                 // offset to suppress self-occlusion on flat surfaces
static const float gIntensity = 1.0;              // overall obscurance strength


cbuffer SSAOCBuffer : register(CBUFFER_REGISTER_PIXEL)
{
    float4x4 gViewProjMatrix;   // NOTE(review): not referenced in this file
    float4x4 gProjMatrix;       // NOTE(review): not referenced in this file
    float4x4 gViewMatrix;       // world -> view; applied to G-buffer positions before AO math
    float2 gScreenSize;         // NOTE(review): not referenced in this file
};

// G-buffer target holding world-space positions, read per pixel below.
Texture2D gPositionTexture : register(TEXTURE_REGISTER_POSITION);
SamplerState gPointSampler : register(SAMPLER_REGISTER_POINT);


// Reconstructs a flat, per-face normal from screen-space derivatives of the
// position. Renamed the parameter: the original called it "positionWorldSpace",
// but the only call site (ps_main) passes a VIEW-space position, so the old
// name was misleading.
// NOTE(review): ddx/ddy normals are faceted (constant per triangle), and the
// sign of cross(ddx, ddy) depends on coordinate handedness — if the AO term
// looks inverted, swap the cross() operands. TODO confirm against the
// projection's handedness.
float3 reconstructNormal(float3 positionViewSpace)
{
    return normalize(cross(ddx(positionViewSpace), ddy(positionViewSpace)));
}

// Returns the view-space position of the G-buffer sample at pixel ssC,
// offset ssR pixels along the unit direction unitOffset.
//   ssC        - centre pixel coordinate
//   unitOffset - unit tap direction in screen space
//   ssR        - tap radius in pixels
float3 getOffsetPosition(int2 ssC, float2 unitOffset, float ssR) {
    // Derivation:
    //  mipLevel = floor(log(ssR / MAX_OFFSET));

    // TODO: mip levels
    int mipLevel = 0; //TODO: clamp((int)floor(log2(ssR)) - LOG_MAX_OFFSET, 0, MAX_MIP_LEVEL);

    // int2() truncates toward zero; matches the paper's tap snapping.
    int2 ssP = int2(ssR*unitOffset) + ssC;

    float3 P;

    // Divide coordinate by 2^mipLevel
    // NOTE(review): Load() does no edge clamping, so taps falling off-screen
    // read zero — may contribute to artifacts near the screen border.
    P = gPositionTexture.Load(int3(ssP >> mipLevel, mipLevel)).xyz;
    // The G-buffer stores world-space positions; bring the tap into view
    // space to match originPos in ps_main (mul order assumes column vectors).
    P = mul(gViewMatrix, float4(P, 1.0)).xyz;

    return P;
}

// Returns the unit direction of spiral tap #sampleNumber and writes its
// normalized radius (in [0,1)) to ssR. spinAngle is the per-pixel random
// rotation that decorrelates neighbouring pixels.
float2 tapLocation(int sampleNumber, float spinAngle, out float ssR)
{
    // Centre each tap within its spiral segment by shifting half a step.
    const float normalizedRadius = float(sampleNumber + 0.5) * (1.0 / gNumSamples);

    // Sweep gNumSpiralTurns full turns across the sample set (6.28 ~= 2*pi),
    // offset by the random per-pixel rotation.
    const float theta = normalizedRadius * (gNumSpiralTurns * 6.28) + spinAngle;

    ssR = normalizedRadius;
    return float2(cos(theta), sin(theta));
}

// Computes the (unnormalized) obscurance contribution of a single spiral tap,
// following the HPG12 SAO falloff. ps_main divides the accumulated sum by
// radius^6 afterwards.
float sampleAO(uint2 screenSpacePos, float3 originPos, float3 normal, float ssDiskRadius, int tapIndex, float randomPatternRotationAngle)
{
    // Pick a tap on the spiral and scale its normalized radius up to the
    // per-pixel screen-space disk radius.
    float tapRadius;
    const float2 tapDirection = tapLocation(tapIndex, randomPatternRotationAngle, tapRadius);
    tapRadius *= ssDiskRadius;

    // The occluding point in camera space
    const float3 occluderPos = getOffsetPosition(screenSpacePos, tapDirection, tapRadius);

    const float3 toOccluder = occluderPos - originPos;
    const float distSq = dot(toOccluder, toOccluder);
    const float alongNormal = dot(toOccluder, normal);

    // Cubic falloff window over the squared radius, times a biased,
    // distance-normalized visibility term (epsilon avoids divide-by-zero).
    const float epsilon = 0.01;
    const float falloff = max(gRadius2 - distSq, 0.0);

    return falloff * falloff * falloff * max((alongNormal - gBias) / (epsilon + distSq), 0.0);
}

// Full-screen SAO pass: accumulates obscurance over gNumSamples spiral taps
// and returns the visibility term A in [0,1]. The scalar return value is
// splatted to all four channels of SV_Target0 by HLSL's implicit conversion.
float4 ps_main(float4 position : SV_Position) : SV_Target0
{
    // SV_Position.xy is the pixel centre (x+0.5); the uint2 cast truncates
    // to integer pixel coordinates.
    uint2 screenSpacePos = position.xy;

    // Fetch the world-space G-buffer position and move it into view space.
    float3 originPos = gPositionTexture[screenSpacePos].xyz;
    originPos = mul(gViewMatrix, float4(originPos, 1.0)).xyz;
    float3 normal = reconstructNormal(originPos);

    // Hash function used in the HPG12 AlchemyAO paper
    // NOTE(review): ^ binds looser than + and *, so this groups as
    // (3*x) ^ (y + x*y) — the same grouping as the paper's C code.
    float randomPatternRotationAngle = (3 * screenSpacePos.x ^ screenSpacePos.y + screenSpacePos.x * screenSpacePos.y) * 10;
    // Screen-space disk radius in pixels; view-space z is negative in a
    // right-handed view space, hence the leading minus.
    float ssDiskRadius = -gProjScale * gRadius / originPos.z;

    float ao = 0.0;
    for (int i = 0; i < gNumSamples; i++)
    {
        ao += sampleAO(screenSpacePos, originPos, normal, ssDiskRadius, i, randomPatternRotationAngle);
    }

    // Normalize by radius^6 to cancel sampleAO's falloff^3 term
    // (falloff <= gRadius2, so falloff^3 <= radius^6).
    float temp = gRadius2 * gRadius;
    ao /= temp * temp;

    // Map obscurance to visibility; 5.0/gNumSamples matches the paper's
    // intensity normalization.
    float A = max(0.0, 1.0 - ao * gIntensity * (5.0 / gNumSamples));

    return A;
}

#endif

Any ideas what could cause it?


Exponential shadow mapping filtering

10 September 2014 - 08:37 AM

Got some issues making ESMs work. Here's my workflow:

  1. Render exp(depth) into shadow maps
  2. Horizontal+Vertical blur said shadow maps
  3. Test in lighting pass

The problem is the shadow maps are just pure white, when looking at them in the debugger, making the comparison garbage. If I render only depth (no exp(depth)) it works just fine (of course I have to change step 3 though)

 

The shaders used:

 

Step 1:

#ifndef EXP_DEPTH_PIXEL_HLSL
#define EXP_DEPTH_PIXEL_HLSL

// Exponential warp factor — must match ESM_CONSTANT in the lighting pass,
// which divides by exp(ESM_CONSTANT * receiverDepth).
#define ESM_CONSTANT 10

// Writes an exponentially-warped depth for Exponential Shadow Mapping.
//
// Fixes vs. the original:
//  * In a pixel shader, SV_Position.z is ALREADY the post-perspective-divide
//    depth in [0,1]; dividing by position.w again double-divides and skews
//    the stored value.
//  * The lighting pass tests occluder / exp(ESM_CONSTANT * depth), so the
//    same exponent scale must be baked in here; the original stored a plain
//    exp(depth), making the ratio meaningless.
//
// NOTE(review): SV_Depth output is clamped to [0,1], and exp(c*z) >= 1 for
// any z >= 0 — written through a depth target this saturates to pure white
// (exactly the symptom described above). Either store the rescaled
// exp(ESM_CONSTANT * (z - 1)) and fold the rescale into the lighting pass,
// or render to an R32_FLOAT color target instead of SV_Depth.
float ps_main(float4 position : SV_Position) : SV_Depth
{
    return exp(ESM_CONSTANT * position.z);
}

#endif

Step 2:

#ifndef BOX_BLUR_PIXEL_HLSL
#define BOX_BLUR_PIXEL_HLSL

#include "Constants.hlsl"

cbuffer BoxBlurConstants : register(CBUFFER_REGISTER_PIXEL)
{
    // (1 / TEXTURE_WIDTH, 0) for horizontal pass, (0, 1 / TEXTURE_HEIGHT) for vertical pass
    float2 gTexelSize;
    // Texture-array slice to blur (stored as float in the cbuffer).
    float gTextureIndex;
};

SamplerState gSampler : register(SAMPLER_REGISTER_DEPTH_NO_COMPARE);   // NOTE(review): not referenced in this file
Texture2DArray gDepthTexture : register(TEXTURE_REGISTER_DEPTH);

// 7-tap box kernel: offsets -3..+3 along the pass axis.
static const uint NUM_SAMPLES = 7;

// Separable 7-tap box blur over one slice of the depth-texture array.
//
// BUGFIX: the original added the scalar loop counter to BOTH components of
// the pixel coordinate ((uint2)position.xy + i), which blurs diagonally in
// both passes — and never used gTexelSize, so the horizontal and vertical
// passes were identical. The pass axis is now derived from gTexelSize, whose
// non-zero component selects the blur direction.
//
// NOTE(review): taps are not clamped at the texture edge (out-of-bounds loads
// read zero, slightly darkening the border), and writing the blurred result
// through SV_Depth requires the depth test to be configured to pass — confirm
// the pipeline state.
float ps_main(float4 position : SV_Position) : SV_Depth
{
    // (1,0) for the horizontal pass, (0,1) for the vertical pass, in whole
    // pixels: sign() of the per-pass texel-size vector picks the axis.
    const int2 stepDir = int2(sign(gTexelSize));
    const int2 center = int2(position.xy);

    float sum = 0.0;

    for (int i = -3; i <= 3; i++)
    {
        const int2 tap = center + i * stepDir;
        // Load(int4(x, y, arraySlice, mip)) — direct texel fetch, mip 0.
        sum += gDepthTexture.Load(int4(tap, (int)gTextureIndex, 0)).r;
    }

    return sum / NUM_SAMPLES;
}

#endif

Step 3:

#ifndef DIRECTIONAL_LIGHT_PIXEL_HLSL
#define DIRECTIONAL_LIGHT_PIXEL_HLSL

#include "FullscreenTriangleVertex.hlsl"
#include "Constants.hlsl"

#define DEPTH_BIAS 0.005   // bias subtracted from the receiver depth to fight shadow acne
#define NUM_CASCADES 4     // CSM cascade count == shadow-map array slices
#define ESM_CONSTANT 10    // ESM warp factor; must match the shadow-map generation pass

cbuffer DirectionalLightConstants : register(CBUFFER_REGISTER_PIXEL)
{
    float4x4 gSplitVPMatrices[NUM_CASCADES];  // world -> shadow-map space, one per cascade
    float4x4 gCameraViewMatrix;               // world -> camera view, used for cascade selection
    float4 gSplitDistances;                   // view-space z thresholds of the cascade splits
    float4 gLightColor;
    float4 gLightDirection;
};

// G-buffer inputs, indexed by integer pixel coordinate.
Texture2D gPositionTexture : register(TEXTURE_REGISTER_POSITION);
Texture2D gDiffuseTexture : register(TEXTURE_REGISTER_DIFFUSE);
Texture2D gNormalTexture : register(TEXTURE_REGISTER_NORMAL);
Texture2DArray gShadowmap : register(TEXTURE_REGISTER_DEPTH);
SamplerState gShadowmapSampler : register(SAMPLER_REGISTER_DEPTH_NO_COMPARE);

// Deferred directional-light pass with cascaded exponential shadow maps.
// Returns lit diffuse color modulated by ESM visibility and N.L.
float4 ps_main(float4 position : SV_Position) : SV_Target0
{
    float4 worldPos = gPositionTexture[uint2(position.xy)];
    float4 diffuse = gDiffuseTexture[uint2(position.xy)];
    float4 normal = gNormalTexture[uint2(position.xy)];

    // Cascade selection by view-space depth. The > comparisons imply the
    // split distances decrease (negative z in a right-handed view space).
    float4 camPos = mul(gCameraViewMatrix, worldPos);

    uint index = 3;
    if (camPos.z > gSplitDistances.x)
        index = 0;
    else if (camPos.z > gSplitDistances.y)
        index = 1;
    else if (camPos.z > gSplitDistances.z)
        index = 2;

    // Project into the cascade's shadow map.
    // NOTE(review): this assumes gSplitVPMatrices already folds in the
    // NDC->[0,1] bias/scale; a raw view-projection would need it — confirm.
    float3 projCoords = (float3)mul(gSplitVPMatrices[index], worldPos);
    // Snap the lookup to the shadow-map texel grid to reduce swimming.
    const float2 texelSize = 1.0 / float2(1024.0, 1024.0);
    projCoords.xy = (floor(projCoords.xy / texelSize)) * texelSize;

    float viewDepth = projCoords.z - DEPTH_BIAS;
    projCoords.z = float(index); // z selects the Texture2DArray slice
    //float visibilty = gShadowmap.SampleCmpLevelZero(gShadowmapSampler, projCoords, viewDepth);

    // ESM test: the map stores exp(ESM_CONSTANT * occluderDepth), so the
    // ratio approximates exp(ESM_CONSTANT * (occluder - receiver)).
    float occluder = gShadowmap.Sample(gShadowmapSampler, projCoords).r;
    // BUGFIX: the ratio exceeds 1 whenever the stored occluder depth is
    // behind the receiver, over-brightening the output — clamp to [0,1].
    float visibility = saturate(occluder / exp(ESM_CONSTANT * viewDepth));

    // BUGFIX: the original dotted two float4s, letting the w components
    // (position w / padding) contaminate the N.L term — use xyz only.
    float angleNormal = clamp(dot(normal.xyz, gLightDirection.xyz), 0, 1);

    return visibility * diffuse * angleNormal * gLightColor;
}

#endif

It strikes me though, maybe the shadow map needs to be cleared with a different default depth value... but what?


Blur shader

09 September 2014 - 05:08 AM

I'm working on exponential shadow map filtering, and after outputting exp(depth) to my depth texture rather than the normal depth, I need to run another pass to blur it before the lighting pass. Since it's just blurring a depth texture, the fastest, least accurate algorithm is fine, I think. All I get from Google is Gaussian blur, but as far as I know that is unnecessarily heavy?

 

What are the cheapest blur algorithms out there?


CSM do these depth textures look correct?

18 August 2014 - 12:39 AM

I'm troubleshooting cascaded shadow mapping for DX11. I need some input, if the depth textures I generated for the different cascades are correct.

 

Here's the scene, where you see shadow artifacts from the directional light (looking (-1, -1, -1) - using right-handed coordinate system). Ignore the point light which is casting shadows near the cube.

 

Attached File  scene.png   468.86KB   1 downloads

 

And here are the various cascade depth textures, in the following ranges: [0.1, 6], [6, 12], [12, 18], [18, 100]

 

Attached File  csm1.png   199.47KB   1 downloads

 

Attached File  csm2.png   195.95KB   1 downloads

 

Attached File  csm3.png   197.52KB   1 downloads

 

Attached File  csm4.png   219.04KB   1 downloads

 

But that can't be right, now that I think about it — for example, the alien is 6 units into the scene from the camera's POV, so shouldn't it be clipped in the third image?

 

The way I am creating the view-Projection matrix for this is using the code I used in OpenGL, and I am using the same math library so the matrices should be 1-to-1.

    // Computes the eight world-space corners of the camera frustum for the
    // depth range [minDist, maxDist], by unprojecting the NDC cube corners
    // through the inverse of projection * view.
    //   fovDegrees, aspectRatio - perspective parameters
    //   minDist, maxDist        - near/far distances of this (sub-)frustum
    //   cameraViewMatrix        - world -> camera view transform
    CameraFrustrum CalculateCameraFrustrum(const float fovDegrees, const float aspectRatio, const float minDist, const float maxDist, const Mat4& cameraViewMatrix)
    {
        // NDC cube corners: near plane first (z = -1), then far plane (z = +1).
        CameraFrustrum corners = { Vec4(1.0f, 1.0f, -1.0f, 1.0f), Vec4(1.0f, -1.0f, -1.0f, 1.0f), Vec4(-1.0f, -1.0f, -1.0f, 1.0f), Vec4(-1.0f, 1.0f, -1.0f, 1.0f),
                                   Vec4(1.0f, -1.0f, 1.0f, 1.0f), Vec4(1.0f, 1.0f, 1.0f, 1.0f), Vec4(-1.0f, 1.0f, 1.0f, 1.0f), Vec4(-1.0f, -1.0f, 1.0f, 1.0f), };

        const Mat4 projection = PerspectiveMatrixFov(fovDegrees, aspectRatio, minDist, maxDist);
        const Mat4 clipToWorld = glm::inverse(projection * cameraViewMatrix);

        for (Vec4& corner : corners)
        {
            corner = clipToWorld * corner;
            corner /= corner.w;   // perspective divide back to cartesian coordinates
        }

        return corners;
    }

    // Builds an orthographic view-projection matrix for a directional light
    // that tightly bounds the given camera-frustum corners.
    //   cameraFrustrum - 8 world-space frustum corners
    //   lightDir       - the light's direction (normalized inside)
    Mat4 CreateDirLightVPMatrix(const CameraFrustrum& cameraFrustrum, const Vec3& lightDir)
    {
        // Rotation-only light view: eye at the origin looking along -lightDir.
        // NOTE(review): up = (0,-1,0) is unusual — presumably compensating for
        // an inverted Y elsewhere; confirm, and beware lightDir near-parallel
        // to the up vector (lookAt degenerates there).
        Mat4 lightViewMatrix = glm::lookAt(Vec3(0.0f), -glm::normalize(lightDir), Vec3(0.0f, -1.0f, 0.0f));

        // Axis-aligned bounds of the frustum corners in light space.
        Vec4 transf = lightViewMatrix * cameraFrustrum[0];
        float maxZ = transf.z, minZ = transf.z;
        float maxX = transf.x, minX = transf.x;
        float maxY = transf.y, minY = transf.y;
        for (uint32_t i = 1; i < 8; i++)
        {
            transf = lightViewMatrix * cameraFrustrum[i];

            if (transf.z > maxZ) maxZ = transf.z;
            if (transf.z < minZ) minZ = transf.z;
            if (transf.x > maxX) maxX = transf.x;
            if (transf.x < minX) minX = transf.x;
            if (transf.y > maxY) maxY = transf.y;
            if (transf.y < minY) minY = transf.y;
        }

        // Re-centre the light view on the AABB midpoint by overwriting the
        // translation column (glm is column-major: [3] is the translation
        // column; the [i][3] writes force an affine bottom row). Since lookAt
        // with eye at the origin has zero translation, this is equivalent to
        // composing translate(-centre) * lightViewMatrix.
        Mat4 viewMatrix(lightViewMatrix);
        viewMatrix[3][0] = -(minX + maxX) * 0.5f;
        viewMatrix[3][1] = -(minY + maxY) * 0.5f;
        viewMatrix[3][2] = -(minZ + maxZ) * 0.5f;
        viewMatrix[0][3] = 0.0f;
        viewMatrix[1][3] = 0.0f;
        viewMatrix[2][3] = 0.0f;
        viewMatrix[3][3] = 1.0f;

        Vec3 halfExtents((maxX - minX) * 0.5, (maxY - minY) * 0.5, (maxZ - minZ) * 0.5);

        // NOTE(review): near/far passed as (+halfZ, -halfZ) — reversed relative
        // to the usual ordering; verify OrthographicMatrix's parameter
        // convention matches a right-handed, looking-down-negative-z setup.
        return OrthographicMatrix(-halfExtents.x, halfExtents.x, -halfExtents.y, halfExtents.y, halfExtents.z, -halfExtents.z) * viewMatrix;
    }

Unbinding resources?

12 August 2014 - 02:08 PM

Is it good practice (readability, performance, ...) to unbind resources after usage?

 

For example:

  • In my first pass, I use 5 vertex buffers, in my second pass, I use 2 vertex buffers. Is it wise to unbind 3 now unused vertex buffer slots after the first pass, even though the InputLayout does not use them?
  • Shader resources, such as textures and constant buffers. Is there any reason to unbind them after usage, if their slots are not used in the following pass?

There are exceptions of course, such as drawing to render targets and then reading them, but more like the situations I described above. Any opinions?


PARTNERS