• Content count

  • Joined

  • Last visited

Community Reputation

1499 Excellent

About Aks9

  • Rank
  1. Let me add my comment on the topic.   Although the fixed functionality is deprecated, it is still very useful for an introduction to computer graphics. There are several reasons for this:   1. The learning curve is much steeper with new APIs. For legacy OpenGL you should know only the API. For modern OpenGL you should also know math and physics, while for Vulkan you should also know operating systems and how HW works.   2. The number of code lines increases exponentially. For triangle drawing in legacy OpenGL one needs a dozen lines of code, about 400 lines in modern OpenGL and about 1000 in Vulkan.   3. Being overwhelmed with non-graphics topics, the beginner usually misses the main concepts like transformations, lighting, texturing etc.   That's why I'm still teaching legacy OpenGL in the first course of Computer Graphics, while the programmable pipeline is left for the advanced course (next year, after passing the first one). So far it works well.
  2. OpenGL Vulkan Win32 WMI

    Huh!  It is not easy to post a reasonable amount of code when you code using Vulkan... I didn't expected that it needs about a thousand lines of code just to initialize... Well, here is a code "fragment" that initializes swap chain: char* APP_SHORT_NAME = "VkRenderer"; instance_extension_names.push_back(VK_KHR_SURFACE_EXTENSION_NAME); instance_extension_names.push_back(VK_KHR_WIN32_SURFACE_EXTENSION_NAME);   VkApplicationInfo app_info = {}; app_info.sType = VK_STRUCTURE_TYPE_APPLICATION_INFO; app_info.pNext = NULL; app_info.pApplicationName = APP_SHORT_NAME; app_info.applicationVersion = 1; app_info.pEngineName = APP_SHORT_NAME; app_info.engineVersion = 1; app_info.apiVersion = VK_API_VERSION;   VkInstanceCreateInfo inst_info = {}; inst_info.sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO; inst_info.pNext = NULL; inst_info.flags = 0; inst_info.pApplicationInfo = &app_info; inst_info.enabledExtensionCount = instance_extension_names.size(); inst_info.ppEnabledExtensionNames = inst_info.enabledExtensionCount ? instance_extension_names.data() : NULL; inst_info.enabledLayerCount = 0; inst_info.ppEnabledLayerNames = NULL;   VkResult res;   res = vkCreateInstance(&inst_info, NULL, &m_vkInst);   //...   VkWin32SurfaceCreateInfoKHR createInfo = {}; createInfo.sType = VK_STRUCTURE_TYPE_WIN32_SURFACE_CREATE_INFO_KHR; createInfo.pNext = NULL; createInfo.hinstance = GetModuleHandle(NULL); //info.connection; createInfo.hwnd = wnd; VkResult res = vkCreateWin32SurfaceKHR(m_vkInst, &createInfo, NULL, &m_surface);   VkBool32 *supportsPresent = (VkBool32 *)malloc(queue_count * sizeof(VkBool32)); for (uint32_t i = 0; i < queue_count; i++) { vkGetPhysicalDeviceSurfaceSupportKHR(m_vGPU[0], i, m_surface, &supportsPresent[i]); }   uint32_t graphicsQueueNodeIndex = UINT32_MAX; for (uint32_t i = 0; i < queue_count; i++) { if ((m_queue_props[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) != 0) { if (supportsPresent[i] == VK_TRUE) { graphicsQueueNodeIndex = i; break; } } } //... 
float queue_priorities[1] = { 0.0 }; device_extension_names.push_back(VK_KHR_SWAPCHAIN_EXTENSION_NAME);   VkDeviceQueueCreateInfo queue_info = {}; queue_info.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO; queue_info.pNext = NULL; queue_info.queueCount = 1; queue_info.pQueuePriorities = queue_priorities;   VkDeviceCreateInfo device_info = {}; device_info.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO; device_info.pNext = NULL; device_info.queueCreateInfoCount = 1; device_info.pQueueCreateInfos = &queue_info; device_info.enabledExtensionCount = device_extension_names.size(); device_info.ppEnabledExtensionNames = device_info.enabledExtensionCount ? device_extension_names.data() : NULL; device_info.enabledLayerCount = 0; device_info.ppEnabledLayerNames = NULL; device_info.pEnabledFeatures = NULL;   res = vkCreateDevice(m_vGPU[0], &device_info, NULL, &m_vkDevice);   //...   uint32_t formatCount; VkResult res = vkGetPhysicalDeviceSurfaceFormatsKHR(m_vGPU[0], m_surface, &formatCount, NULL); assert(res == VK_SUCCESS); VkSurfaceFormatKHR *surfFormats = (VkSurfaceFormatKHR *)malloc(formatCount * sizeof(VkSurfaceFormatKHR)); res = vkGetPhysicalDeviceSurfaceFormatsKHR(m_vGPU[0], m_surface, &formatCount, surfFormats); assert(res == VK_SUCCESS); // If the format list includes just one entry of VK_FORMAT_UNDEFINED, // the surface has no preferred format.  Otherwise, at least one // supported format will be returned. 
VkFormat format; if (formatCount == 1 && surfFormats[0].format == VK_FORMAT_UNDEFINED) { format = VK_FORMAT_B8G8R8A8_UNORM; } else { assert(formatCount >= 1); format = surfFormats[0].format; }   VkSurfaceCapabilitiesKHR surfCapabilities;   res = vkGetPhysicalDeviceSurfaceCapabilitiesKHR(m_vGPU[0], m_surface, &surfCapabilities);   uint32_t presentModeCount; res = vkGetPhysicalDeviceSurfacePresentModesKHR(m_vGPU[0], m_surface, &presentModeCount, NULL);   VkPresentModeKHR *presentModes = (VkPresentModeKHR *)malloc(presentModeCount * sizeof(VkPresentModeKHR));   res = vkGetPhysicalDeviceSurfacePresentModesKHR(m_vGPU[0], m_surface, &presentModeCount, presentModes);   VkExtent2D swapChainExtent;   if (surfCapabilities.currentExtent.width == (uint32_t)-1) { swapChainExtent.width = width; swapChainExtent.height = height; } else { swapChainExtent = surfCapabilities.currentExtent; }   VkPresentModeKHR swapchainPresentMode = VK_PRESENT_MODE_FIFO_KHR; for (size_t i = 0; i < presentModeCount; i++) { if (presentModes[i] == VK_PRESENT_MODE_MAILBOX_KHR) { swapchainPresentMode = VK_PRESENT_MODE_MAILBOX_KHR; break; } if ((swapchainPresentMode != VK_PRESENT_MODE_MAILBOX_KHR) && (presentModes[i] == VK_PRESENT_MODE_IMMEDIATE_KHR)) { swapchainPresentMode = VK_PRESENT_MODE_IMMEDIATE_KHR; } }   uint32_t desiredNumberOfSwapChainImages = surfCapabilities.minImageCount + 1; if ((surfCapabilities.maxImageCount > 0) && (desiredNumberOfSwapChainImages > surfCapabilities.maxImageCount)) { desiredNumberOfSwapChainImages = surfCapabilities.maxImageCount; }   VkSurfaceTransformFlagBitsKHR preTransform; if (surfCapabilities.supportedTransforms & VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR) { preTransform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR; } else { preTransform = surfCapabilities.currentTransform; }   VkSwapchainCreateInfoKHR swap_chain = {}; swap_chain.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR; swap_chain.pNext = NULL; swap_chain.surface = m_surface; swap_chain.minImageCount = 
desiredNumberOfSwapChainImages; swap_chain.imageFormat = format; swap_chain.imageExtent.width = swapChainExtent.width; swap_chain.imageExtent.height = swapChainExtent.height; swap_chain.preTransform = preTransform; swap_chain.compositeAlpha = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR; swap_chain.imageArrayLayers = 1; swap_chain.presentMode = swapchainPresentMode; swap_chain.oldSwapchain = NULL; swap_chain.clipped = true; swap_chain.imageColorSpace = VK_COLORSPACE_SRGB_NONLINEAR_KHR; swap_chain.imageUsage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT; swap_chain.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE; swap_chain.queueFamilyIndexCount = 0; swap_chain.pQueueFamilyIndices = NULL;   res = vkCreateSwapchainKHR(m_vkDevice, &swap_chain, NULL, &m_swap_chain);   res = vkGetSwapchainImagesKHR(m_vkDevice, m_swap_chain, &m_swapchainImageCount, NULL);   VkImage *swapchainImages = (VkImage *)malloc(m_swapchainImageCount * sizeof(VkImage));   res = vkGetSwapchainImagesKHR(m_vkDevice, m_swap_chain, &m_swapchainImageCount, swapchainImages);   m_buffers.resize(m_swapchainImageCount);   CreateCommandBuffer(); ExecuteBeginCommandBuffer();   vkGetDeviceQueue(m_vkDevice, m_graphicsQueueFamilyIndex, 0, &m_queue);   for (uint32_t i = 0; i < m_swapchainImageCount; i++) { VkImageViewCreateInfo color_image_view = {}; color_image_view.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; color_image_view.pNext = NULL; color_image_view.format = format; color_image_view.components.r = VK_COMPONENT_SWIZZLE_R; color_image_view.components.g = VK_COMPONENT_SWIZZLE_G; color_image_view.components.b = VK_COMPONENT_SWIZZLE_B; color_image_view.components.a = VK_COMPONENT_SWIZZLE_A; color_image_view.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; color_image_view.subresourceRange.baseMipLevel = 0; color_image_view.subresourceRange.levelCount = 1; color_image_view.subresourceRange.baseArrayLayer = 0; color_image_view.subresourceRange.layerCount = 1; color_image_view.viewType = VK_IMAGE_VIEW_TYPE_2D; 
color_image_view.flags = 0;   m_buffers[i].image = swapchainImages[i];   SetImageLayout(m_buffers[i].image, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);   color_image_view.image = m_buffers[i].image;   res = vkCreateImageView(m_vkDevice, &color_image_view, NULL, &m_buffers[i].view);   }   ExecuteEndCommandBuffer(); ExecuteQueueCommandBuffer();   This is not the whole code. All handles are valid and functions always return VK_SUCCESS. The dealocation code is like following: for (uint32_t i = 0; i < m_swapchainImageCount; i++) { vkDestroyImageView(m_vkDevice, m_buffers[i].view, NULL); } vkDestroySwapchainKHR(m_vkDevice, m_swap_chain, NULL);   I'm sorry for so much code, but that is Vulkan! 
  3. Has anyone encountered a problem with deallocating a Win32 surface using NVIDIA drivers?   Unfortunately, there is no appropriate tutorial on how to deal with the window system integration (WSI). I followed the samples from the LunarG SDK and modified them a bit in order to incorporate them into my code (framework). All functions return VK_SUCCESS; however, with 356.39-vkonly_geforce_win8 drivers, the application crashes at Vulkan instance destroy. With 356.45-vkonly_geforce_win8 drivers, the application doesn't crash, but there is a memory leak (pretty much the same as if I prevent the instance destroying with 356.39).       Also, are there any requirements for the window pixel format (like in OpenGL) in order to draw correctly on the surface?   So far it seems WSI is the most complicated part of Vulkan.  I hope that after correctly finishing some interface functions for presentation on windows I won't have to change that code ever again.   Thanks in advance for any assistance or useful hints! Aleksandar
  4. OpenGL Vulkan is Next-Gen OpenGL

      The latest AMD drivers (17.02.) work fine with my R9 280. Please, take a look at http://vulkan.gpuinfo.org.
  5. OpenGL Vulkan is Next-Gen OpenGL

    I just want to report that the Vulkan SDK works fine on NVIDIA hardware and with NV drivers. Even on the Optimus platform (I've got a laptop with a GTX 850M). Congratulations!   AMD drivers don't work, as all previous posts said. I've tried them on an R280. Although AMD is better "on paper", a side-by-side comparison with NV reveals a quite different state of affairs. Maybe it is different in the DX world, but I have had bad experiences with AMD OpenGL drivers. It is sad that, although AMD actually has a very advanced architecture and some brilliant ideas, the state of its software (especially its drivers) spoils everything. :(   Do Intel's Linux drivers work?
  6. OpenGL Vulkan is Next-Gen OpenGL

    He's just saying that because Khronos has actually done that exact thing before... They were supposed to be working on a new, modern, clean API called Longs Peak, which would break backwards compatibility with GL. And, after setting the hype train in motion, they released a new GL instead (and handed API war victory to D3D on a silver platter). If they did the same thing this time, my bet would be on Valve taking the initiative to finish Mantle/Vulkan/Valven on their own, and form a new committee making Khronos redundant. So, failure is not an option for them this time.     Well, the story about Longs Peak is a bit different. That was a threat to cease support for legacy OpenGL in the upcoming drivers. That's why OpenGL 3.0 didn't break backward compatibility. This time, Vulkan will coexist with OpenGL. Furthermore, OpenGL will be a more popular API for many years to come, because of the higher level of abstraction, the ease of use and less responsibility for the programmers. Many of them are not aware of what the drivers do for them. Vulkan would change that a lot. Also, Vulkan is both a graphics and a computation API. Valve is deeply involved, but the support of big players and hardware vendors is even more important. NVIDIA already has support for Vulkan in its drivers.
  7. OpenGL Vulkan is Next-Gen OpenGL

      It is very unlikely. Vulkan and OpenGL are two different paths. They will continue to coexist for many years to come.     I'm betting on February 9th, 2016. ;)
  8. AFAIK, you cannot disable Optimus. Intel's GPU is the only way NVIDIA's GPU can communicate with a display. Nsight (and PerfKit, as Nsight relies on it) really had a problem with Optimus, and probably it still has (I haven't tried the latest version yet). Btw, you should know how to activate NVIDIA's GPU in your application. :) By default, Intel's GPU is used. Fortunately, it is so easy with Optimus.
  9.   What does glGetString(GL_VERSION) say? That should return the highest GL version supported by the driver.   The specification is clear:         So, if you require a GL 2.0 context, you could legitimately get a GL 4.4 compatibility profile, since it is backward compatible with 2.0.   P.S. My browser or the engine that powers up this site, or both in combination, are "lucid". All I typed down was in the same font and size, but the outcome is ridiculous.  
  10.   I know. ;) I'm sorry if my previous post caused any confusion. I skipped them in the previous counting because of general performance, not because of functionality.
  11. "Father, forgive them, for they do not know what they are doing."         This is a typical agnostic claim. Everything is a source of knowledge. Rendering context creation is the first thing one should learn when starting with computer graphics. But OK, I don't have the time or will to argue about that.   Could you post a link, example or whatever to illustrate "the nightmare"? I've been creating GL contexts by myself for about 18 years already and never had a problem. Problems could arise if you create a GL 3.0+ context and hope everyone supports it. Well, that is not a problem of drivers. Older drivers cannot assume what might happen in the future.   If the drivers are buggy, there is no workaround for the problem!      I really don't understand this. What kind of workaround? The way a GL context is created is defined by the specification. Why risky?       I want to have control in my hands, so no intermediary stuff is welcome. It is harder at the start, but the feeling of freedom is priceless.       This link is totally out of context. The guy is frustrated by something, but gives no arguments for his claims. Considering platform-specific APIs for porting OpenGL, there was an initiative to make them unique. Khronos started development of EGL, but it is not adopted for desktop OpenGL yet.       Don't be sorry. That was your opinion and you have the right to express it through (down)voting. Points really mean nothing to me. Forums should be the way to share knowledge and opinions. Some of them are true, some not. I hope the right advice still prevails on behalf of the users.
  12.   I'm horrified with suggestions to use any of the library/wrapper for OpenGL. :( It is not easy, but it is always better to understand what's happening under the hood than to be helplessly dependent on others. Despite of its imperfections, OpenGL is still the best 3D graphics API for me, since I can do whatever I want having to install just the latest drivers and nothing more. Of course, I need also a developing environment (read Visual Studio). Nobody wants to code in notepad and compile in command prompt.   Let's back to your problem. Before going any further revise your pixel format. It is not correct. The consequence is turning off HW acceleration and switching to OpenGL 1.1. Tho following code snippet shows how to create valid GL context: PIXELFORMATDESCRIPTOR pfd ;     memset(&pfd, 0, sizeof(PIXELFORMATDESCRIPTOR));     pfd.nSize  = sizeof(PIXELFORMATDESCRIPTOR);     pfd.nVersion   = 1;      pfd.dwFlags    = PFD_DOUBLEBUFFER | PFD_SUPPORT_OPENGL | PFD_DRAW_TO_WINDOW;        pfd.iPixelType = PFD_TYPE_RGBA;      pfd.cColorBits = 32;     pfd.cDepthBits = 24;      pfd.iLayerType = PFD_MAIN_PLANE;   nPixelFormat = ChoosePixelFormat(hDC, &pfd);   if (nPixelFormat == 0)     { strcat_s(m_sErrorLog, LOGSIZE, "ChoosePixelFormat failed.\n"); return false;     }    DWORD error = GetLastError(); BOOL bResult = SetPixelFormat(hDC, nPixelFormat, &pfd); if (!bResult)     { error = GetLastError(); strcat_s(m_sErrorLog, LOGSIZE, "SetPixelFormat failed.\n"); return false;     }   HGLRC tempContext = wglCreateContext(hDC);  wglMakeCurrent(hDC,tempContext);   int attribs[] = { WGL_CONTEXT_MAJOR_VERSION_ARB, major, WGL_CONTEXT_MINOR_VERSION_ARB, minor,  WGL_CONTEXT_FLAGS_ARB, WGL_CONTEXT_DEBUG_BIT_ARB, // I suggest using debug context in order to know whats really happening and easily catch bugs WGL_CONTEXT_PROFILE_MASK_ARB, nProfile, // nProfile = WGL_CONTEXT_CORE_PROFILE_BIT_ARB or WGL_CONTEXT_COMPATIBILITY_PROFILE_BIT_ARB 0 };   PFNWGLCREATECONTEXTATTRIBSARBPROC 
wglCreateContextAttribsARB = NULL; wglCreateContextAttribsARB = (PFNWGLCREATECONTEXTATTRIBSARBPROC) wglGetProcAddress("wglCreateContextAttribsARB"); if(wglCreateContextAttribsARB != NULL) { context = wglCreateContextAttribsARB(hDC, 0, attribs); } wglMakeCurrent(NULL,NULL);  wglDeleteContext(tempContext);
  13. Of course that it is better to use standard vertex/tessellation/fragment shader whenever you can. I have deliberately skipped geometry shader for the reason. On the other hand, there are tasks that require GPU based computation and cannot easily be ported to shaders. They usually are programmed in CUDA or OpenCL. Both APIs require separate contexts and introduce unwanted delay in the interoperability with OpenGL. That's why compute shaders are introduced. They are not as powerful as CUDA/OpenCL but can serve the purpose most of the time allowing much lower overhead.
  14. I hope you have coupled it with glEnable(GL_CLIP_DISTANCE0); in the host application, and that the shader is actually executing.   Maybe you should read a little bit about the topic. There is a lot of material in all OpenGL related books: OpenGL SuperBible 5th Ed. – pg.528 OpenGL SuperBible 6th Ed. – pg.276-281. OpenGL Programming Guide 8th Ed. – pg.238 OpenGL Shading Language 3rd Ed – pg.112, 286  
  15. Then your code is wrong, as I presumed. There is no need for multiple clip-distances. Set just gl_ClipDistance[0]. And, in order to prove that it works, set gl_ClipDistance[0] = -1.0; If your model disappears, clipping works. :)