• Content count

  • Joined

  • Last visited

Community Reputation

173 Neutral

About devdber

  • Rank
  1. I get no errors on that piece.   I tried setting different output colors.   The framebuffer appears to be valid according to glCheckFramebufferStatus.   Please, any more suggestions  :(
  2. Framebuffer and related texture initialization: glGenTextures(1, &depth_map_tex); glBindTexture(GL_TEXTURE_2D, depth_map_tex); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); glTexParameteri(GL_TEXTURE_2D, GL_DEPTH_TEXTURE_MODE, GL_INTENSITY); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_COMPARE_MODE, GL_COMPARE_R_TO_TEXTURE); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_COMPARE_FUNC, GL_LEQUAL); glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT32, WIDTH, HEIGHT, 0, GL_DEPTH_COMPONENT, GL_UNSIGNED_INT, NULL); glBindTexture(GL_TEXTURE_2D, 0); glGenFramebuffers(1, &depth_map_fbo); glBindFramebuffer(GL_FRAMEBUFFER, depth_map_fbo); glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D, depth_map_tex, 0); glDrawBuffer(GL_NONE); glReadBuffer(GL_NONE); glBindFramebuffer(GL_FRAMEBUFFER, 0); Drawing some scene to it: glViewport(0, 0, WIDTH, HEIGHT); glBindFramebuffer(GL_FRAMEBUFFER, depth_map_fbo); glClearDepth(1.0); glClear(GL_DEPTH_BUFFER_BIT); glUseProgram(program); { mat4 model(1.0f); glUniformMatrix4fv(ul_model, 1, GL_FALSE, glm::value_ptr(model)); glUniformMatrix4fv(ul_view, 1, GL_FALSE, glm::value_ptr(view)); glUniformMatrix4fv(ul_proj, 1, GL_FALSE, glm::value_ptr(proj)); glBindVertexArray(cube_vao); glDrawArrays(GL_TRIANGLES, 0, CUBE_VERTICES_NUM); } glBindFramebuffer(GL_FRAMEBUFFER, 0); Vertex shader: layout (location = 0) in vec3 position; uniform mat4 model; uniform mat4 view; uniform mat4 proj; void main() { gl_Position = proj * view * model * vec4(position, 1.0f); } Fragment shader: void main(void) { gl_FragColor = vec4(0, 0, 0, 1.0f); } The result I get via AMD codeXL is totally black texture attached to framebuffer.   So, how could I fix this?
  3. My generic normal generation function looks like this: static void calculate_vertex_normals(vector<vec3>& vx, vector<int>& faces_tri_idx, vector<vec3>& out_normals) { vector<vector<int>> adj_faces(vx.size()); for (int i = 0; i < vx.size(); i++) { for (int j = 0; j < faces_tri_idx.size() / 3; j++) { for (int k = 0; k < 3; k++) { if (faces_tri_idx[j * 3 + k] == i) adj_faces[i].push_back(j); } } } vector<vec3> face_n(faces_tri_idx.size() / 3); for (int i = 0; i < faces_tri_idx.size() / 3; i++) { const vec3& v1 = vx[faces_tri_idx[i * 3 + 0]]; const vec3& v2 = vx[faces_tri_idx[i * 3 + 1]]; const vec3& v3 = vx[faces_tri_idx[i * 3 + 2]]; vec3 vv1 = v2 - v1; vec3 vv2 = v3 - v1; vec3 norm = normalize(cross(vv1, vv2)); face_n[i] = norm; } for (int i = 0; i < adj_faces.size(); i++) { auto& vf = adj_faces[i]; vec3 sum(0.0f); for (int j = 0; j < vf.size(); j++) { sum += face_n[vf[j]]; } if (abs(length2(sum))>1e-9) { out_normals.push_back(normalize(sum)); } else { out_normals.push_back(vec3(0.0f, 1.0f, 0.0f)); } } } Box generation function: static void box(vector<vec3>& out_vertices, vector<vec3>& out_normals, vec3& sz) { static vec3 cube_v[] = { //top face view //v3----v2 //| | //v4----v1 //y=1 { 1.0f, 1.0f, 1.0f }, { 1.0f, 1.0f, -1.0f }, { -1.0f, 1.0f, -1.0f }, { -1.0f, 1.0f, 1.0f }, //y=-1 { 1.0f, -1.0f, 1.0f }, { 1.0f, -1.0f, -1.0f }, { -1.0f, -1.0f, -1.0f }, { -1.0f, -1.0f, 1.0f }, }; static int cube_i[] = { //y=-1 0,1,3,3,1,2, //y=-1 7,6,4,4,6,5, //x=1 4,5,0,0,5,1, //x=-1 3,2,7,7,2,6, //z=1 4,0,7,7,0,3, //z=-1 1,5,2,2,5,6 }; vector<vec3> cube_vertices(cube_v, cube_v + 8); vec3 szd2 = sz / 2.0f; for (auto& v : cube_vertices) v *= szd2; for (int i = 0; i < 36; i += 3) { int i1 = cube_i[i]; int i2 = cube_i[i + 1]; int i3 = cube_i[i + 2]; vec3& v1 = cube_vertices[i1]; vec3& v2 = cube_vertices[i2]; vec3& v3 = cube_vertices[i3]; out_vertices.push_back(v1); out_vertices.push_back(v2); out_vertices.push_back(v3); } vector<vec3> v_n; v_n.reserve(8); vector<vec3> 
vertices(cube_v, cube_v + 8); vector<int> faces_idx(cube_i, cube_i + 36); calculate_vertex_normals(vertices, faces_idx, v_n); for (int i = 0; i < faces_idx.size(); i++) { out_normals.push_back(v_n[faces_idx[i]]); } } Vertex shader: #version 330 core layout(location = 0) in vec3 pos; layout(location = 1) in vec3 n; uniform vec3 lightPosition; uniform mat4 model; uniform mat4 view; uniform mat4 proj; out vec3 f_vertexPosition; out vec3 f_lightPosition; out vec3 f_normal; void main() { mat4 modelView = view * model; mat4 modelViewProj = proj * modelView; gl_Position = modelViewProj * vec4(pos, 1.0); mat3 normalMatrix = mat3(modelView); normalMatrix = inverse(normalMatrix); normalMatrix = transpose(normalMatrix); vec4 lpt = vec4(lightPosition,1.0f); lpt = lpt*modelView; f_vertexPosition = pos; f_normal = normalize(normalMatrix*n); f_lightPosition = vec3(lpt); } Fragment shader: #version 330 core in vec3 f_vertexPosition; in vec3 f_lightPosition; in vec3 f_normal; void main() { vec3 faceColor = vec3(1.0, 0.0, 0.0); vec3 pl = f_vertexPosition-f_lightPosition; float dist = length(pl); vec3 lightDirection = normalize(pl); float magnitude = max(0.0, dot(f_normal, -lightDirection)) /(dist*dist); gl_FragColor = vec4(faceColor*magnitude + faceColor/3.0f, 1.0f); } The results I get at light position = (0.0f, 0.0f, 3.0f) by rotating box attached.   [attachment=33825:ex2.PNG] [attachment=33826:ex3.PNG] [attachment=33824:ex1.PNG]   I think the problem is at normal calculation step. But I cant figure out what's wrong...
  4. Need EPA assistance

    Shape *s1,*s2; void epa() { static std::vector<Vec2D> s; s.clear(); //a,b,c are points from gjk simplex s.push_back(a); s.push_back(b); s.push_back(c); for (int i = 0; i < 32; i++) { Vec2D norm = Vec2D::Zero(); float dist = 0; int index = -1; epaClosestEdge(s, index, norm, dist); Vec2D p = support(norm); float d = p * norm; if (d - dist < 1e-6) { collisionNormal = norm; penetration = d; return; } else { s.insert(s.begin() + index, p); } } } void epaClosestEdge(std::vector<Vec2D>& s, int& edgeIndex, Vec2D& normal, float& dist) { dist = INFINITY; for (int i = 0; i < s.size(); i++) { int j = i + 1 == s.size()? 0 : i + 1; Vec2D a = s[i]; Vec2D b = s[j]; Vec2D e = b - a; Vec2D oa = a; Vec2D n = tripleProduct(e, oa, e); n.normalize(); float d = n * a; if (d < dist) { dist = d; normal = n; edgeIndex = j; } } } Vec2D support(Vec2D& n) { Vec2D a = getfar(s1, n); Vec2D b = getfar(s2, -n); return a-b; } Vec2D getfar(Shape* s, Vec2D& n) { float maxDot = -INFINITY; Vec2D best; for (int i = 0; i < s->vsCount(); i++) { Vec2D v = s->vs(i); float d = v * n; if (d > maxDot) { maxDot = d; best = v; } } return best; } Plain copy-paste-adaptation to cpp from dyn4j.   1. What about simplex' winding? Should it be CW, CCW or arbitrary given? 2. Any mistakes in my implementation? Any situations when this implementation fails?  3. How can I get contatct points? Why is it said that EPA gives only one contact point? All i get are penetration and normal.  
  5. Working on D3D11/OGL wrapper

    Thank you for replies.   Im trying to come up with common interface for stream output. This is the problem that raised as i decided to go deeper into buffer implementation.   Common way to get such info out of shader in d3d11 (please correct me if i got it wrong):   1. Create buffer with STREAM_OUTPUT flag 2. Fill in D3D11_SO_DECLARATION_ENTRY's and strides. 3. Create geometry shader with CreateGeometryShaderWithStreamOutput.   -----   4. Set geometry shader 5. Set SO target 6. Draw to buffer 7. Unset SO target   Via OGL we do this:   1. Create transform feedback object 2. Bind buffers with TRANSFORM_FEEDBACK binding point to it (or do it at rendering) 3. Create program with geometry shader (optional?) 4. Set transform feedback varyings   --------   5. Bind transform feedback object 6. Use program with GS 7. Begin TF 8. Draw to buffer 9. End TF   Problem is that ways of d3d11 and gl retrieve info for further feedback are different:   typedef struct D3D11_SO_DECLARATION_ENTRY { UINT   Stream; LPCSTR SemanticName; UINT   SemanticIndex; BYTE   StartComponent; BYTE   ComponentCount; BYTE   OutputSlot; } D3D11_SO_DECLARATION_ENTRY;   and OGL:   void glTransformFeedbackVaryings?(GLuint program?, GLsizei count?, const char **varyings?, GLenum bufferMode?);   With OGL we can easily set vars to be retrieved by name. With D3D11 we have to state semantic name and semantic slot. Is there way to do as with OGL without D3DReflect?   As i learned, with OGL we can bind some buffer to particular "output slot" with BindBufferBase. But how we can do the same with d3d11, where we "bind" to "slots" not buffers (but this we do implicitly by SOSetTargets still ) but retrieved info?   What's Stream and which equivalent of this have OGL?
  6. Working on D3D11/OGL wrapper

    Well, no problem. OGL is more flexible in this case.   I thought D3D11's "SSBO" was RWBuffer/StructuredBuffer. What about them?   What if I decide to use VERTEX_BUFFER |  INDEX_BUFFER | STREAM_OUTPUT | ... anything possible | ... ? Will there be horrible performance issues?   But I learned from MSDN that an immutable buffer's content can't be changed, while articles on OGL say that "immutability" fixes only the persistent buffer's size and (?) location, without preventing content modification.
  7. Working on D3D11/OGL wrapper

    To deepen my knowledge of these APIs.   Here I will post some misc questions on "cross-API" wrapper. They are supposed to be associated with each other, so I decided unite all in single topic (if moderators will accept this).    My main purpose is to get as much as possible of the features of both APIs using single aproach in my wrapper architecture.   At this moment im trying to find common things in OGL and D3D11 buffer representation.   First issue is binding point. In OGL, parameter "target" may be set as:   GL_ARRAY_BUFFER GL_ELEMENT_ARRAY_BUFFER GL_UNIFORM_BUFFER GL_COPY_READ_BUFFER - what's proper equivalent in d3d11? GL_COPY_WRITE_BUFFER - what's proper equivalent in d3d11? GL_PIXEL_PACK_BUFFER - what's proper equivalent in d3d11? GL_PIXEL_UNPACK_BUFFER - what's proper equivalent in d3d11? GL_TRANSFORM_FEEDBACK_BUFFER - what's proper equivalent in d3d11? To simplify task I could use special transform feedback object in OGL 4.1+. GL_TEXTURE_BUFFER - this we omit, it's used with special function glTexBuffer - am I right???   In D3D11 we have D3D11_BUFFER_DESC which has two elements with similar meaning - D3D11_BIND_FLAG and D3D11_RESOURCE_MISC_FLAG (I will get to this much much later, but still)   possible values of D3D11_BIND_FLAG are:   D3D11_BIND_VERTEX_BUFFER     D3D11_BIND_INDEX_BUFFER       D3D11_BIND_CONSTANT_BUFFER    D3D11_BIND_SHADER_RESOURCE    D3D11_BIND_STREAM_OUTPUT     D3D11_BIND_RENDER_TARGET    D3D11_BIND_DEPTH_STENCIL     D3D11_BIND_UNORDERED_ACCESS D3D11_BIND_DECODER D3D11_BIND_VIDEO_ENCODER        I ignore last three. They are seemed to be quite tricky and I dont know well how they are working.   Problem is that these flags can be combined, except D3D11_BIND_CONSTANT_BUFFER. But if we do this, we disallow GPU to do smart managing with this buffer. Can I afford this? Are there so many cases when someone prefer to use several flags?   Next thing about buffers is "usage".   
D3D11_USAGE_DEFAULT    -  no problem with this, glBufferData D3D11_USAGE_IMMUTABLE - this buffer cant be changed after creation. Does glBufferStorage with proper flags do the same? And do these flags really add to performance? D3D11_USAGE_DYNAMIC - in OGL i would use GL_DYNAMIC_DRAW, however it can be ignored by OGL. Anything elese? D3D11_USAGE_STAGING - could i use this for transform feedback?   To reach the same things in both APIs we need to find proper combination of binding ponts, buffer usage in OGL and appropriate combination of binding flags, and usage in D3D11. These combinations can be reached by different ways (because buffer binding in OGL is "dynamic", while with D3D11 we have to set it strictly at creation. Am right with this point?). The only way i see is to make single classes for uniform buffers, vertex buffers, index buffers and transform feedback buffers. In special cases we have immutable buffer, which cannot be transform feedback buffers as i suppose.   Please, answer questions in previous sections and suggest your approaches.
  8. Generic buffer class

      Well, nothing special. Likely I will need buffers for vertices, indices, some per-instance data and transform feedback manipulation.  I just want to hide mechanisms such as glGenBuffers, glBindBuffer, glBufferData and others. In fact, I don't need OpenGL's full functionality in this thing either. I'm just wondering whether it is possible to make such operations simpler and safer. 
  9. Generic buffer class

    I'm making an XNA-like OpenGL wrapper. And I have some problems with its design. Should I come up with a generic buffer class? What fields and methods should it have?    My first approach: class Buffer { public: Buffer(/* possible parameters */); ~Buffer(); unsigned char* map(); void unmap(); //possible getters (...) private: GLuint bindingPoint; GLuint capacity; GLuint actualSize; GLuint id; GLuint usage; } So, which fields would be preferable? Should I make one generic buffer, or would it be better to make separate classes for vertex buffers/other possible buffers? Do I really need this overhead?
  10. I'm trying to make text writing function. Here: struct character { float advancex, advancey, bitmapw, bitmaph, bitmapl, bitmapt, texx; }; character* c; struct point { GLfloat x; GLfloat y; GLfloat z; GLfloat s; GLfloat t; }; point coords[255*6]; void renderText(const char*text,float x,float y,float sizex,float sizey) { // ScreenWidth = 640; // ScreenHeight = 480; auto l = -ScreenWidth / 2 + x; auto t = ScreenHeight / 2 + y; int n = 0; for (const char *p = text; *p; p++) { if (!w || !h)continue; auto r = l + c[*p].bitmapw*sizex; auto b = t - c[*p].bitmaph*sizey; coords[n++] = { l, t, 0, c[*p].texx, c[*p].bitmaph / atlas_height }; coords[n++] = { r, b, 0, c[*p].texx + c[*p].bitmapw / atlas_width, 0 }; coords[n++] = { l, b, 0, c[*p].texx, 0 }; coords[n++] = { l, t, 0, c[*p].texx, c[*p].bitmaph / atlas_height }; coords[n++] = { r, t, 0, c[*p].texx + c[*p].bitmapw / atlas_width, c[*p].bitmaph / atlas_height }; coords[n++] = { r, b, 0, c[*p].texx + c[*p].bitmapw / atlas_width, 0 }; l += c[*p].advancex*sizex; t += c[*p].advancey*sizey; } glBindTexture(GL_TEXTURE_2D, texture2D_ID0); glBindVertexArray(vertex_array_ID); glBindBuffer(GL_ARRAY_BUFFER, vertex_buffer_ID); glBufferSubData(GL_ARRAY_BUFFER,0,n*sizeof(point),coords); glDrawArrays(GL_TRIANGLES, 0, n); } How can I get aligned text? I use orthographic projection given as glm::ortho(-320.0f, +320.0f, 240.0f, -240.0f, 0.1f, 100.f);   and I get this result:
  11. How to get motivated to learn Programming?

    So, since you are at a gamedev site, your motivation should be video games. I have dreamed about game development since my early childhood, while I was playing NES/Sega (I was about 5-8 years old). It was great, yep. But how stupid I was. They were just dreams, no real activity (I had some experience with BASIC though). I've lost MUCH time, and set off only a year ago. That's my great mistake.
  12. perpixel operations with direct2d

    Well, there are some difficulties I couldn't predict. I mean the special GPU access and architecture  
  13. dos memory allocation (ASM)

    Could you give some links to DOS custom memory allocation samples? The few I found are full of Linux/Windows/DOS internal API calls...
  14. dos memory allocation (ASM)

    I think it can be useful even so. Since I don't know asm well yet, it would be nice to start with something not so complicated. Another reason is that I'll have to use API calls for some of my needs, since Windows runs in protected mode. Could I be wrong?   Hmm, actually I haven't ever thought about it. Maybe it's just my interest?   Thanks for your answer; I hope I'll make it to x64 mode eventually, but I want to know the basics of asm.   Well, but how does DOS search for a free memory block? Does it allocate a whole segment to the memory block? I mean, why is the memory block located at AX:0000?