
JackShannon

Member Since 20 Nov 2012

Topics I've Started

Idea for a demo of my 3D engine

16 October 2013 - 06:26 PM

What I'm having trouble with is how best to show off my engine, and I'm going to have to start sending my portfolio off to companies in under a month, as I'm doing an industry year out from university!

 

So I've finally implemented all the techniques I want in my engine: models, terrain, sky, atmosphere, shadows, SSAO, physics, and so on. Now I just need content!

 

I'm a capable artist when it comes to copying things (if I have references), and am competent in the necessary tools in the content pipeline. I just can't come up with ideas.

 

I was thinking of going to a local park or something, taking loads of photographs, and recreating that scene. Would that be a good piece to send to companies, or is there anywhere I can find cool concept art for levels that I can use for free? I'd just like some advice from anyone who has created a portfolio piece for their engine!

 

Thanks

 

Jack


Shadow mapping in the deferred pipeline

21 June 2013 - 11:28 AM

I'm attempting to implement shadow mapping in my deferred pipeline. I've verified that my shadow map creation is correct but am now struggling to implement shadow occlusion.

 

Here is the fragment shader:

in vec2 f_uv;
in vec3 frustumCornerVS;
out vec4 fragment;

uniform mat4 shadowMatrix;

uniform sampler2D gbuffer_3_tex;
uniform sampler2D shadow_map_tex;

void main()
{
    // reconstruct the pixel position into view space from the depth buffer
    float pixelDepthCS = texture(gbuffer_3_tex, f_uv).r;
    vec3 pixelVS = pixelDepthCS * frustumCornerVS;

    // get pixel position in Light clip space
    vec4 pixelLightCS = shadowMatrix * vec4(pixelVS, 1);
    pixelLightCS /= pixelLightCS.w;

    // sample shadow map
    float litDepth = texture(shadow_map_tex, pixelLightCS.xy).r;

    // test for occlusion
    float occlusion = 0;
    if (pixelLightCS.z < litDepth) {
        occlusion += 1;
    }

    fragment = vec4(occlusion, occlusion, occlusion, 1);
}

Now, I know that pixelVS (pixel in view space) is correct because I've debugged it.

 

I think the problem is with shadowMatrix.

 

The code for building the shadow matrix is:

    // scale and bias
    glm::mat4 bias = glm::scale(0.5f, 0.5f, 0.5f);
    bias = glm::translate(bias, glm::vec3(0.5, 0.5, 0.5));

    glm::mat4 view = scene->camera->getView();
    glm::mat4 shadowMatrix = bias * (shadowCaster->projection * shadowCaster->view) * glm::inverse(view);

I'm not sure it's doing exactly what I want it to: transforming from view space into world space, then into the shadow caster's clip space, then applying a scale and bias into texture coordinates.
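
For reference, here is a minimal sketch of that chain written out term by term, using the mat4-plus-vec3 overloads of glm::translate and glm::scale (just a restatement of the intent above, not necessarily where the bug is):

    // maps clip space [-1, 1] to texture space [0, 1]: p' = 0.5 * p + 0.5
    glm::mat4 bias = glm::translate(glm::mat4(1.0f), glm::vec3(0.5f)) *
                     glm::scale(glm::mat4(1.0f), glm::vec3(0.5f));

    glm::mat4 view = scene->camera->getView();
    glm::mat4 shadowMatrix = bias                     // light clip space   -> texture space
                           * shadowCaster->projection // light view space   -> light clip space
                           * shadowCaster->view       // world space        -> light view space
                           * glm::inverse(view);      // camera view space  -> world space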

 

What could I be doing wrong?

 

 

Edit: This was a poor question, and there were many things wrong that had nothing to do with it. Feel free to delete.


Calculating frustum corners from a projection matrix

20 June 2013 - 02:50 PM

I'm trying to multiply the NDC frustum points by the inverse of the projection matrix to give me the frustum corners in view space. Here is my code:

#include <iostream>
#include <vector>

#include <glm/glm.hpp>
#include <glm/gtc/matrix_transform.hpp>

void getFrustumCorners(std::vector<glm::vec3>& corners, glm::mat4 projection)
{
    corners.clear();

    // homogeneous corner coords
    glm::vec4 hcorners[8];
    // near
    hcorners[0] = glm::vec4(-1, 1, 1, 1);
    hcorners[1] = glm::vec4(1, 1, 1, 1);
    hcorners[2] = glm::vec4(1, -1, 1, 1);
    hcorners[3] = glm::vec4(-1, -1, 1, 1);
    // far
    hcorners[4] = glm::vec4(-1, 1, -1, 1);
    hcorners[5] = glm::vec4(1, 1, -1, 1);
    hcorners[6] = glm::vec4(1, -1, -1, 1);
    hcorners[7] = glm::vec4(-1, -1, -1, 1);

    glm::mat4 inverseProj = glm::inverse(projection);
    for (int i = 0; i < 8; i++) {
        hcorners[i] = hcorners[i] * inverseProj;
        hcorners[i] /= hcorners[i].w;

        corners.push_back(glm::vec3(hcorners[i]));
    }
}

int main()
{
    auto proj = glm::perspective(56.25f, 720.0f / 450.0f, 0.1f, 100.0f);
    
    std::vector<glm::vec3> corners;
    getFrustumCorners(corners, proj);
    
    for (auto c : corners) {
        std::cout << c.x << " " << c.y << " " << c.z << std::endl;
    }
    return 0;
}

The output this gives me is:

-0.213538 0.133461 -1.24719
0.213538 0.133461 -1.24719
0.213538 -0.133461 -1.24719
-0.213538 -0.133461 -1.24719
-0.142418 0.089011 -0.831807
0.142418 0.089011 -0.831807
0.142418 -0.089011 -0.831807
-0.142418 -0.089011 -0.831807

This can't be correct, can it? Surely, with a zNear of 0.1 and a zFar of 100.0, the difference between the near- and far-plane z coordinates should be just under 100.0?

 

What am I doing wrong?
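
For comparison, the extents I'd expect can be computed directly from the perspective parameters (assuming the 56.25 I pass to glm::perspective is the full vertical field of view in degrees):

#include <cmath>
#include <iostream>

int main()
{
    const float fovyDegrees = 56.25f;
    const float aspect      = 720.0f / 450.0f;
    const float zNear       = 0.1f;
    const float zFar        = 100.0f;

    // Half extents of the frustum cross-section at distance z from the eye:
    // halfHeight = z * tan(fovy / 2), halfWidth = halfHeight * aspect.
    const float tanHalfFovy = std::tan(fovyDegrees * 0.5f * 3.14159265f / 180.0f);

    std::cout << "near: +/-" << zNear * tanHalfFovy * aspect << " x, +/-"
              << zNear * tanHalfFovy << " y, z = -" << zNear << std::endl;
    std::cout << "far:  +/-" << zFar * tanHalfFovy * aspect << " x, +/-"
              << zFar * tanHalfFovy << " y, z = -" << zFar << std::endl;
    return 0;
}

That gives roughly (±0.086, ±0.053, -0.1) for the near corners and (±85.5, ±53.5, -100) for the far corners, which is nowhere near what the code above prints.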


Strange specular with normal map (BlinnPhong)

25 April 2013 - 08:21 AM

SOLVED

 

Please can someone take a look at my normal-map code? The diffuse is fine.

 

Without normal map:

[screenshot: without normal map]

 

With normal map:

[screenshot: with normal map]

 

Vertex shader:

/* The following can be defined:
 *      TEXTURED
 *      NORMALMAPPED
 */

in vec3 v_position;
#if defined(TEXTURED) || defined(NORMALMAPPED)
in vec2 v_uv;
#endif
in vec3 v_normal;
#ifdef NORMALMAPPED
in vec4 v_tangent;
#endif

layout(std140) uniform Transform
{
	mat4 t_model_view_proj;
	mat4 t_model_view;
	mat3 t_normal;
};

#if defined(TEXTURED) || defined(NORMALMAPPED)
out vec2 f_uv;
#endif

#ifdef NORMALMAPPED
layout(std140) uniform Light
{
    int l_type;
    vec3 l_orientation;
    float l_attenuation;
    vec3 l_ambient;
    vec3 l_diffuse;
    vec3 l_specular;
};
out vec3 tbn_lightDirection;
out vec3 tbn_viewDirection;
#else
out vec3 f_position;
out vec3 f_normal;
out vec3 f_viewDirection;
#endif

void main()
{
    gl_Position = t_model_view_proj * vec4(v_position, 1.0);

    #if defined(TEXTURED) || defined(NORMALMAPPED)
    f_uv = v_uv;
    #endif

    #ifdef NORMALMAPPED
    vec3 v_bitangent = v_tangent.z * cross(v_normal, v_tangent.xyz); // tangent.z stores m value which is the determinant of the object space to tangent space matrix

    mat3 TBN = transpose(mat3(v_tangent.xyz,
                              v_bitangent,
                              v_normal));

    tbn_lightDirection = normalize(TBN * l_orientation);
    tbn_viewDirection = -normalize(TBN * v_position);
    #else
    f_position = (t_model_view * vec4(v_position, 1.0)).xyz;
    f_normal = normalize(t_normal * v_normal);
    f_viewDirection = -normalize(t_model_view * vec4(v_position, 1.0)).xyz;
    #endif
}
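
As an aside, the convention I've seen most often stores the handedness sign in the tangent's w component rather than z; in GLM terms the bitangent reconstruction would look like the sketch below. This is only an assumption about the exporter in use, since the shader above reads the sign from v_tangent.z:

#include <glm/glm.hpp>

// Common convention (assumed, not necessarily what my exporter does):
// tangent.w holds the handedness sign (+1 or -1), and the bitangent is
// reconstructed as cross(N, T.xyz) * T.w, mirroring the shader's cross order.
glm::vec3 reconstructBitangent(const glm::vec3& normal, const glm::vec4& tangent)
{
    return glm::cross(normal, glm::vec3(tangent)) * tangent.w;
}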

 

Fragment shader:

/* The following can be defined:
 *      TEXTURED
 *      NORMALMAPPED
 */

#define LTYPE_DIRECTIONAL 0
#define LTYPE_POINT 1

#if defined(TEXTURED) || defined(NORMALMAPPED)
in vec2 f_uv;
#endif
#ifdef TEXTURED
uniform sampler2D diffuseTexture;
#endif

#ifdef NORMALMAPPED
uniform sampler2D normalTexture;
in vec3 tbn_lightDirection;
in vec3 tbn_viewDirection;
#else
in vec3 f_position;
in vec3 f_normal;
in vec3 f_viewDirection;
#endif

layout(std140) uniform Light
{
    int l_type;
    vec3 l_orientation;
    float l_attenuation;
    vec3 l_ambient;
    vec3 l_diffuse;
    vec3 l_specular;
};

layout(std140) uniform Material
{
    vec3 m_ambient;
    vec3 m_diffuse;
    vec3 m_specular; // scale specular by shininess strength in external tool
    float m_shininess;
};

out vec4 fragment;

float lambert(vec3 lightDirection, vec3 normal)
{
    float lambertTerm = dot(lightDirection, normal);
    lambertTerm = clamp(lambertTerm, 0, 1);
    return lambertTerm;
}

float blinnPhong(vec3 lightDirection, vec3 normal, vec3 viewDirection)
{
    vec3 halfwayDirection = normalize(lightDirection + viewDirection);
    float blinnTerm = dot(normal, halfwayDirection);
    blinnTerm = clamp(blinnTerm, 0, 1);
    blinnTerm = pow(blinnTerm, m_shininess);
    return blinnTerm;
}

// not normalized
vec3 getL()
{
    #ifdef NORMALMAPPED
        return normalize(tbn_lightDirection);
    #else
    if (l_type == LTYPE_DIRECTIONAL) {
        return l_orientation;
    }
    else if (l_type == LTYPE_POINT) {
        return l_orientation - f_position;
    }
    #endif
}

vec3 getN()
{
    #ifdef NORMALMAPPED
    vec3 tbnNormal = texture(normalTexture, f_uv).rgb * 2.0 - 1.0;
    return normalize(tbnNormal);
    #else
    return normalize(f_normal);
    #endif
}

vec3 getV()
{
    #ifdef NORMALMAPPED
        return normalize(tbn_viewDirection);
    #else
    return normalize(f_viewDirection);
    #endif
}

void main()
{
    vec3 lightDiffuse, lightSpecular = vec3(0);

    vec3 lightVector = getL();
    vec3 l = normalize(lightVector);
    vec3 n = getN();

    float lambertTerm = lambert(l, n);

    lightDiffuse = l_diffuse * lambertTerm;
    if (lambertTerm > 0) {
        vec3 v = getV();
        lightSpecular = l_specular * blinnPhong(l, n, v);
    }

    #ifdef TEXTURED
    vec3 diffuse = texture(diffuseTexture, f_uv).xyz * lightDiffuse;
    #else
    vec3 diffuse = m_diffuse * lightDiffuse;
    #endif
    vec3 specular = m_specular * lightSpecular;

    float attenuation;
    if (l_type == LTYPE_DIRECTIONAL) {
        attenuation = 1;
    }
    else if (l_type == LTYPE_POINT) {
        float distanceToLight = length(lightVector);
        float attenuation = 1.0 / (1.0 + l_attenuation * pow(distanceToLight, 2));
    }

    fragment = vec4(m_ambient + attenuation * (diffuse + specular), 1.0);
    //fragment = vec4(diffuse + specular, 1.0);
}

Good design of a VertexBuffer class

28 January 2013 - 06:52 PM

OK, so I have written my vertex buffer class the way I thought it should be, but it seems very messy. For instance, it needs the shader program ID to know where the attributes are (this can only be a bad thing, right? A VBO should be able to work with multiple shaders easily). Please can someone have a look and tell me what should be there and what shouldn't?

 

It also doesn't seem right that I have to come back in and call disableAttributes after the draw call; there must be a cleaner way of doing things.

 

I think it's a case of not having clear responsibilities.

 

.h

#ifndef KVERTEXBUFFER_H
#define KVERTEXBUFFER_H

#include "KGraphicsLib.h"
#include "KShaderManager.h"

enum EFormatBitMask
{
    FORMAT_POSITION = (1u << 0),
    FORMAT_UV       = (1u << 1),
    FORMAT_NORMAL   = (1u << 2),
    FORMAT_TANGENT  = (1u << 3)
};

class CVertexBuffer
{
public:
    struct CAttrib
    {
        CAttrib(GLuint i, GLint s, int o)
        {
            index = i;
            size = s;
            offset = (GLvoid*)o;
        }
        GLuint index; // location that it's bound to
        GLint size; // component count (float[2] has size 2)
        GLvoid* offset;
    };

    
                CVertexBuffer();
    
    virtual     ~CVertexBuffer();

    void        init();
    void        destroy();

    void        uploadData(GLint programID, unsigned int format, float* data, int vertexCount);
    void        pointToData();
    void        disableAttributes();
private:
    GLuint identifier;
    GLsizei stride;
    vector<CAttrib*> attribs;
};

#endif  // KVERTEXBUFFER_H

 

.cpp

#include "KVertexBuffer.h"

CVertexBuffer::CVertexBuffer()
{
    init();
}

CVertexBuffer::~CVertexBuffer()
{
    destroy();
}

void CVertexBuffer::init()
{
    glGenBuffers(1, &identifier);
    GetError();
}

void CVertexBuffer::destroy()
{
    glDeleteBuffers(1, &identifier);
}

void CVertexBuffer::pointToData()
{
    CAttrib* attrib;
    vector<CAttrib*>::iterator it = attribs.begin();
    for (; it != attribs.end(); ++it) {
        attrib = (*it);
        glEnableVertexAttribArray(attrib->index);
        glBindBuffer(GL_ARRAY_BUFFER, identifier);
        glVertexAttribPointer(
                              attrib->index,                  // index
                              attrib->size,      // component count
                              GL_FLOAT,
                              GL_FALSE,           // normalized?
                              stride,
                              attrib->offset
                              );
        GetError();
    }
}

void CVertexBuffer::disableAttributes()
{
    vector<CAttrib*>::iterator it = attribs.begin();
    for (; it != attribs.end(); ++it) {
        glDisableVertexAttribArray((*it)->index);
    }
}


void CVertexBuffer::uploadData(GLint programID, unsigned int format, float* data, int vertexCount)
{
    stride = 0;
    CAttrib* attr;
    if ((format & FORMAT_POSITION) == FORMAT_POSITION) {
        attr = new CAttrib(glGetAttribLocation(programID, "v_position"), 3, stride);
        stride += attr->size * sizeof(GLfloat);
        attribs.push_back(attr);
    }
    if ((format & FORMAT_UV) == FORMAT_UV) {
        attr = new CAttrib(glGetAttribLocation(programID, "v_uv"), 2, stride);
        stride += attr->size * sizeof(GLfloat);
        attribs.push_back(attr);
    }
    if ((format & FORMAT_NORMAL) == FORMAT_NORMAL) {
        attr = new CAttrib(glGetAttribLocation(programID, "v_normal"), 3, stride);
        stride += attr->size * sizeof(GLfloat);
        attribs.push_back(attr);
    }
    if ((format & FORMAT_TANGENT) == FORMAT_TANGENT) {
        attr = new CAttrib(glGetAttribLocation(programID, "v_tangent"), 4, stride);
        stride += attr->size * sizeof(GLfloat);
        attribs.push_back(attr);
    }

    glBindBuffer(GL_ARRAY_BUFFER, identifier);
    glBufferData(GL_ARRAY_BUFFER, stride * vertexCount, data, GL_STATIC_DRAW);
}
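
For reference, here is a rough sketch of one alternative: fix the attribute locations up front (with glBindAttribLocation before linking, or layout(location = N) qualifiers in the shader) so the buffer never needs a program ID, and record the attribute pointers in a vertex array object so nothing has to be disabled after the draw call. The names and the position/uv/normal layout are made up for illustration, not a drop-in replacement for the class above:

#include "KGraphicsLib.h" // pulls in the GL headers, as in KVertexBuffer.h

// Fixed attribute locations shared by every shader and every mesh.
enum EAttribLocation
{
    ATTRIB_POSITION = 0,
    ATTRIB_UV       = 1,
    ATTRIB_NORMAL   = 2
};

class CMesh
{
public:
    CMesh() : vao(0), vbo(0), count(0) {}

    void upload(const float* data, int vertexCount)
    {
        const GLsizei stride = (3 + 2 + 3) * sizeof(GLfloat); // position, uv, normal

        glGenVertexArrays(1, &vao);
        glGenBuffers(1, &vbo);

        glBindVertexArray(vao);
        glBindBuffer(GL_ARRAY_BUFFER, vbo);
        glBufferData(GL_ARRAY_BUFFER, stride * vertexCount, data, GL_STATIC_DRAW);

        // The VAO records these enables and pointers, so nothing needs
        // disabling after a draw call.
        glEnableVertexAttribArray(ATTRIB_POSITION);
        glVertexAttribPointer(ATTRIB_POSITION, 3, GL_FLOAT, GL_FALSE, stride, (GLvoid*)0);
        glEnableVertexAttribArray(ATTRIB_UV);
        glVertexAttribPointer(ATTRIB_UV, 2, GL_FLOAT, GL_FALSE, stride, (GLvoid*)(3 * sizeof(GLfloat)));
        glEnableVertexAttribArray(ATTRIB_NORMAL);
        glVertexAttribPointer(ATTRIB_NORMAL, 3, GL_FLOAT, GL_FALSE, stride, (GLvoid*)(5 * sizeof(GLfloat)));

        glBindVertexArray(0);
        count = vertexCount;
    }

    void draw()
    {
        glBindVertexArray(vao);
        glDrawArrays(GL_TRIANGLES, 0, count);
        glBindVertexArray(0);
    }

private:
    GLuint  vao;
    GLuint  vbo;
    GLsizei count;
};

Any shader program that follows the same location convention can then draw the buffer, and the draw path is just "bind the VAO and draw".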

 

Thank you

 

Jack

