GeForce4 MX 440 seems to support shaders but nothing shows up!

Posted by Deliverance

I know I'm dealing with ancient hardware, but I'm working on a hardware-accelerated YUV to RGB conversion that should work on as many cards as possible. I'm trying to get shaders working on a GeForce4 MX 440, but with no luck. Here are the test shaders, which compile successfully but don't seem to contribute to the scene:
// Vertex shader
void main()
{
    gl_Position = ftransform();
}

// Fragment shader
void main()
{
    // gl_FragColor is a vec4
    gl_FragColor = vec4(1.0, 0.0, 0.0, 1.0);
}



If this card does not support shaders, how can I find that out?

I find the site KluDX useful for enumerating GPU capabilities for GPUs I don't have.

Here are the capabilities of your card.

You can also use glGetString(GL_SHADING_LANGUAGE_VERSION) to get the GLSL version supported. You may also want to check which GL version the card supports; that site says 1.2, which is fairly old, so you may be relying on extensions it does not support.

I would check that anything you create (VBOs, FBOs, shaders, whatever) actually succeeds when you create it.
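
For example, a quick startup probe along these lines can tell you up front whether the GLSL path is even worth attempting. This is only a rough sketch, assuming GLEW is already initialized (as in the code later in this thread); checkShaderSupport is just an illustrative helper name:

#include <GL/glew.h>
#include <cstdio>

// Rough capability probe; call after the GL context and GLEW are initialized.
// Returns false if the ARB GLSL extensions the Shader class relies on are missing.
bool checkShaderSupport()
{
    printf("GL_VENDOR:   %s\n", (const char*)glGetString(GL_VENDOR));
    printf("GL_RENDERER: %s\n", (const char*)glGetString(GL_RENDERER));
    printf("GL_VERSION:  %s\n", (const char*)glGetString(GL_VERSION));

    // GL_SHADING_LANGUAGE_VERSION only exists with GL 2.0 /
    // ARB_shading_language_100, so it may come back NULL on old drivers.
    const GLubyte* glsl = glGetString(GL_SHADING_LANGUAGE_VERSION);
    printf("GLSL:        %s\n", glsl ? (const char*)glsl : "(none)");

    return glewIsSupported("GL_ARB_shader_objects "
                           "GL_ARB_vertex_shader "
                           "GL_ARB_fragment_shader") == GL_TRUE;
}

The extension check is the part worth trusting; as the discussion below notes, GLEW can hand back non-NULL function pointers even when the hardware cannot actually run the shaders.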

Quote:
Original post by apatriarca
Can you show the parts of the code where you set up the shaders and render to the screen?


Sure:


#include "Shader.h"
#include "WindowSFML.h"

using namespace Endeavour;

// Reads an entire file into a null-terminated buffer; returns NULL on failure.
char* Shader::loadFile(std::string fileName)
{
    char *text = NULL;
    FILE *f = fopen(fileName.c_str(), "rb");
    if (f)
    {
        fseek(f, 0, SEEK_END);
        int size = ftell(f);
        rewind(f);

        text = new char[size + 1];
        fread(text, sizeof(char), size, f);
        text[size] = '\0';

        fclose(f);
    }

    return text;
}

void Shader::setUniform1i(std::string name, GLuint value)
{
    GLint loc = glGetUniformLocationARB(program, name.c_str());
    glUniform1iARB(loc, value);
}

void Shader::setUniform1f(std::string name, float value)
{
    GLint loc = glGetUniformLocationARB(program, name.c_str());
    glUniform1fARB(loc, value);
}

void Shader::setUniform3f(std::string name, float* value)
{
    GLint loc = glGetUniformLocationARB(program, name.c_str());
    // The count is the number of vec3s being set, not the number of floats.
    glUniform3fvARB(loc, 1, value);
}

GLint Shader::getAttributeLocation(std::string attribName)
{
    return glGetAttribLocationARB(program, attribName.c_str());
}

bool Shader::load(std::string vertexProgram, std::string fragmentProgram)
{
    // Bail out if the driver did not provide the ARB shader entry points.
    if (!glCreateShaderObjectARB ||
        !glShaderSourceARB ||
        !glCompileShaderARB)
    {
        return false;
    }

    vertexShader = glCreateShaderObjectARB(GL_VERTEX_SHADER_ARB);
    fragmentShader = glCreateShaderObjectARB(GL_FRAGMENT_SHADER_ARB);

    char *vs = loadFile(vertexProgram);
    char *fs = loadFile(fragmentProgram);

    if (!vs || !fs)
    {
        printf("Error - Cannot load shader file(s).\n");
        return false;
    }

    const char *v = vs;
    glShaderSourceARB(vertexShader, 1, &v, NULL);

    const char *f = fs;
    glShaderSourceARB(fragmentShader, 1, &f, NULL);

    glCompileShaderARB(vertexShader);
    glCompileShaderARB(fragmentShader);

    delete[] vs;
    delete[] fs;

    bool compiled = true;
    int param;

    glGetObjectParameterivARB(vertexShader, GL_OBJECT_COMPILE_STATUS_ARB, &param);
    compiled = compiled && param;
    if (param == GL_FALSE)
    {
        printf("VERTEX SHADER\n");
        char loginfo[4096];
        int len;
        glGetInfoLogARB(vertexShader, 4096, &len, loginfo);
        printf("%s\n", loginfo);
    }

    glGetObjectParameterivARB(fragmentShader, GL_OBJECT_COMPILE_STATUS_ARB, &param);
    compiled = compiled && param;
    if (param == GL_FALSE)
    {
        printf("FRAGMENT SHADER\n");
        char loginfo[4096];
        int len;
        glGetInfoLogARB(fragmentShader, 4096, &len, loginfo);
        printf("%s\n", loginfo);
    }

    if (!compiled)
        return false;

    program = glCreateProgramObjectARB();
    glAttachObjectARB(program, vertexShader);
    glAttachObjectARB(program, fragmentShader);
    glLinkProgramARB(program);

    glUseProgramObjectARB(0);

    return true;
}

void Shader::bind()
{
    glUseProgramObjectARB(program);
}

void Shader::unBind()
{
    glUseProgramObjectARB(0);
}

void Shader::free()
{
    if (!program)
        return;

    glDetachObjectARB(program, vertexShader);
    glDetachObjectARB(program, fragmentShader);
    glDeleteObjectARB(vertexShader);
    glDeleteObjectARB(fragmentShader);
    glDeleteObjectARB(program);

    program = 0;
    vertexShader = 0;
    fragmentShader = 0;
}





And when I render:


EnterOrtho();
{
    shader.bind();

    float w = movieImage[currentLogo].blitWidth, h = movieImage[currentLogo].blitHeight;
    glEnable(GL_TEXTURE_2D);

    unsigned int yuv[3];
    video->getYUVTextures(yuv);

    // Sampler uniform names for the luma and chroma textures.
    const char* name[2] = {"Ytex", "UVtex"};

    int i;
    for (i = 0; i < 2; i++)
    {
        glActiveTextureARB(GL_TEXTURE0 + i);
        glEnable(GL_TEXTURE_2D);
        glBindTexture(GL_TEXTURE_2D, yuv[i]);
        shader.setUniform1i(name[i], i);

        //printf("%d\n", yuv[i]);
    }

    float tfw = float(video->getFrameWidth()) / float(video->getTexWidth());
    float tfh = float(video->getFrameHeight()) / float(video->getTexHeight());

    int w2[2], h2[2];
    video->getYUVDim(w2, h2);
    shader.setUniform1f("yUpLimit",
        float(video->getFrameHeight()) / 2.0f / float(h2[1]));

    float tfw2 = float(video->getFrameWidth() / 2) / float(w2[1]);
    float tfh2 = float(video->getFrameHeight() / 2) / float(h2[1]);

    float texCoords[4][2] = {{0.0f, 0.0f},
                             {tfw,  0.0f},
                             {tfw,  tfh},
                             {0.0f, tfh}};

    float texCoords2[4][2] = {{0.0f, 0.0f},
                              {tfw2, 0.0f},
                              {tfw2, tfh2},
                              {0.0f, tfh2}};

    glActiveTextureARB(GL_TEXTURE0);
    glPushMatrix();
    glTranslatef(400.0f, 300.0f, 0.0f);
    glScalef(1.0f, -1.0f, 1.0f);
    glBegin(GL_QUADS);
        glMultiTexCoord2fvARB(GL_TEXTURE0, texCoords[0]);
        glMultiTexCoord2fvARB(GL_TEXTURE1, texCoords2[0]);
        glVertex2f(-w/2, -h/2);

        glMultiTexCoord2fvARB(GL_TEXTURE0, texCoords[1]);
        glMultiTexCoord2fvARB(GL_TEXTURE1, texCoords2[1]);
        glVertex2f(w/2, -h/2);

        glMultiTexCoord2fvARB(GL_TEXTURE0, texCoords[2]);
        glMultiTexCoord2fvARB(GL_TEXTURE1, texCoords2[2]);
        glVertex2f(w/2, h/2);

        glMultiTexCoord2fvARB(GL_TEXTURE0, texCoords[3]);
        glMultiTexCoord2fvARB(GL_TEXTURE1, texCoords2[3]);
        glVertex2f(-w/2, h/2);
    glEnd();
    glPopMatrix();

    for (i = 0; i < 2; i++)
    {
        glActiveTextureARB(GL_TEXTURE0_ARB + i);
        glBindTexture(GL_TEXTURE_2D, 0);
        glDisable(GL_TEXTURE_2D);
    }
    glActiveTextureARB(GL_TEXTURE0_ARB);

    shader.unBind();
}

LeaveOrtho();




Quote:

A GeForce4 MX 440 is actually GeForce2 tech, which predates NVIDIA's shader support, introduced with the GeForce 3.


Good to know!

Quote:

The 440 MX doesn't have any shader support. The driver can usually run vertex shaders in software, but don't expect fragment shaders to work.


Then how am I supposed to know that no shader support is available if I get function pointers that "work" from GLEW?

Quote:
Original post by Deliverance
Then how am I supposed to know that no shader support is available if I get function pointers that "work" from GLEW?
You should get an error when trying to compile or link your shader/program, or when you call glGetError after attempting to bind and render with the shader.

However, even if your shader compiles and runs without an error, there is no guarantee that it is running in hardware - OpenGL implementations are free to fall back to software emulation where necessary.
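
In code, the checks being suggested might look roughly like this. It is only a sketch using the same ARB entry points as the Shader class above; checkProgramAndErrors is a hypothetical helper, and the draw call itself is elided:

#include <GL/glew.h>
#include <cstdio>

// Sketch of the suggested checks: verify the program linked, then poll
// glGetError after binding and drawing with it.
bool checkProgramAndErrors(GLhandleARB program)
{
    GLint linked = 0;
    glGetObjectParameterivARB(program, GL_OBJECT_LINK_STATUS_ARB, &linked);
    if (!linked)
    {
        char log[4096];
        GLsizei len = 0;
        glGetInfoLogARB(program, (GLsizei)sizeof(log), &len, log);
        printf("Link failed:\n%s\n", log);
        return false;
    }

    glUseProgramObjectARB(program);
    // ... issue the draw call here ...
    glUseProgramObjectARB(0);

    GLenum err = glGetError();
    if (err != GL_NO_ERROR)
    {
        printf("GL error after rendering with the program: 0x%x\n", err);
        return false;
    }
    return true;
}

Note that the Shader::load posted above never checks GL_OBJECT_LINK_STATUS_ARB, so a failed link would currently go unnoticed.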

Quote:
Original post by swiftcoder
You should get an error when trying to compile or link your shader/program, or when you call glGetError after attempting to bind and render with the shader.

However, even if your shader compiles and runs without an error, there is no guarantee that it is running in hardware - OpenGL implementations are free to fall back to software emulation where necessary.


Okay, then how can I find out for sure whether the shader will run in hardware? Is there a way to do that?

Quote:
Original post by Deliverance
Okay, then how can I find out for sure whether the shader will run in hardware? Is there a way to do that?
By comparing the name of the card to a list of known capabilities.

In GL, things are either supported or not.

It's 'none of your business' if it's supported in hardware, because for all you know, on a certain platform, the software solution might be good enough to get the job done. Just like you don't know if the GL device is doing hardware transform and lighting, or hardware anything else.
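
A crude sketch of that idea, using nothing but the GL_RENDERER string and a hard-coded list of the pre-GeForce 3 parts mentioned in this thread. The list and the helper name are purely illustrative, not a real capability database:

#include <GL/glew.h>
#include <cstring>

// Crude renderer-string check against pre-GeForce 3 parts that have no
// fragment shader hardware. Illustrative only.
bool rendererLacksFragmentShaders()
{
    const char* renderer = (const char*)glGetString(GL_RENDERER);
    if (!renderer)
        return true;

    const char* noFragmentShaderParts[] =
        { "GeForce4 MX", "GeForce2", "GeForce 256", "RIVA TNT" };

    for (size_t i = 0; i < sizeof(noFragmentShaderParts) / sizeof(noFragmentShaderParts[0]); ++i)
    {
        if (strstr(renderer, noFragmentShaderParts[i]))
            return true;
    }
    return false;
}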

Quote:
Original post by Daaark
By comparing the name of the card to a list of known capabilities.


That seems like a lot of work!

Quote:

It's 'none of your business' if it's supported in hardware, because for all you know, on a certain platform, the software solution might be good enough to get the job done. Just like you don't know if the GL device is doing hardware transform and lighting, or hardware anything else.

Fair enough.

Quote:
Original post by BlackSeeds
You can also use glGetString(GL_SHADING_LANGUAGE_VERSION) to get the GLSL version supported. [...]


Thanks!
glGetString(GL_SHADING_LANGUAGE_VERSION) returns "1.10 NVIDIA via Cg 1.3 compiler". Why is that?!

Quote:
Original post by Deliverance
glGetString(GL_SHADING_LANGUAGE_VERSION) returns "1.10 NVIDIA via Cg 1.3 compiler". Why is that?!


Probably because your drivers emulate GLSL 1.10 in software. I also looked through your code and noticed that you're not checking to make sure that the shader object IDs you get back from glCreateShaderObjectARB() are valid. I suspect that they're actually not.
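
A minimal sketch of that check (validHandle is a hypothetical helper; zero is the documented failure value for glCreateShaderObjectARB and glCreateProgramObjectARB):

#include <GL/glew.h>
#include <cstdio>

// Returns true if an ARB shader/program handle looks valid.
// glCreateShaderObjectARB and glCreateProgramObjectARB return 0 on failure.
bool validHandle(GLhandleARB handle, const char* what)
{
    if (handle == 0)
    {
        printf("%s creation failed (glGetError = 0x%x)\n", what, glGetError());
        return false;
    }
    return true;
}

Shader::load could call this right after each glCreateShaderObjectARB / glCreateProgramObjectARB call and return false before going any further.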

What do you need the conversion for exactly and just how desperate are you to get performance out of ancient hardware?

Almost any graphics card ought to support some basic YUV rendering in hardware, though making use of those capabilities is a bit tricky (through DirectDraw overlays on Windows).

I know for certain that vertex shaders run fine on a GeForce 2, albeit somewhat slowly. It might be possible to achieve the YUV conversion using register combiners or some smart combination of multitexturing and blending modes. With those features you can even target cards like the RIVA TNT, which really is considered "ancient" :P

The NV15 (GF2) and NV17 (GF4 MX) do not support full-profile programmable vertex shaders in hardware; the driver emulates this functionality. The hardware is capable of accelerating some related operations, though.

---

While YUV->RGB conversion can be achieved by ping-ponging and blending render targets, hardware of that era has poor fill rate anyway, so the performance would be orders of magnitude worse than on modern hardware with pixel shaders.

[Edited by - Nik02 on February 9, 2010 6:08:51 AM]

Quote:
Original post by implicit
What do you need the conversion for exactly and just how desperate are you to get performance out of ancient hardware?

Almost any graphics card ought to support some basic YUV rendering in hardware, though making use of those capabilities is a bit tricky (through DirectDraw overlays on Windows).


I need it for displaying a movie using Theora. And well, I'm not that desperate, but I thought I'd look for a good solution. I already implemented a SIMD solution (well, ripped it off from liboggplay) that takes care of the YUV to RGB conversion... It works fine, but I have to upload a full RGB texture (sometimes 2048x1024) to graphics memory.
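
For completeness, the shader path being attempted would boil down to something like the following on hardware that actually supports GLSL fragment shaders. This is only a sketch: it assumes the Ytex/UVtex sampler names from the rendering code above, a vertex shader that copies gl_MultiTexCoord0/1 into gl_TexCoord[0]/[1], U and V packed into a two-channel (luminance-alpha) texture, and plain full-range BT.601 coefficients - all of which are assumptions here.

// GLSL fragment shader source embedded as a C string, for illustration.
static const char* yuvToRgbFragSrc =
    "uniform sampler2D Ytex;\n"
    "uniform sampler2D UVtex;\n"
    "void main()\n"
    "{\n"
    "    float y = texture2D(Ytex,  gl_TexCoord[0].st).r;\n"
    "    float u = texture2D(UVtex, gl_TexCoord[1].st).r - 0.5;\n"
    "    float v = texture2D(UVtex, gl_TexCoord[1].st).a - 0.5;\n"
    "    gl_FragColor = vec4(y + 1.402 * v,\n"
    "                        y - 0.344 * u - 0.714 * v,\n"
    "                        y + 1.772 * u,\n"
    "                        1.0);\n"
    "}\n";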

Quote:
Original post by GhostDogStudio
Most confusing marketing spin ever for a video card. I remember nVidia even coming out and saying we made a mistake with that name.


That implies NVIDIA learned something NOT to do with names... when the opposite is true: they learned that by reusing current-gen names they can continue to sell the same parts to the uninformed for more money than if they used the older, more accurate naming. For instance, the 8800, 9800, and 250 are all the same chip design, just renamed so variations of the part keep selling as time passes and other segments of their market move on.

Quote:
Original post by Xai
they learned that by reusing current-gen names they can continue to sell the same parts to the uninformed for more money [...]
I remember buying a low-end GeForce FX to get better FPS in one game I was playing on the cheap, and I actually got worse FPS than with my GeForce2 MX branded card (which was GeForce 256 / GeForce 1 tech, I believe). They made it seem like such a badass card. Shaders ran so slowly on it you'd think your PC had hung.

