Question

Problem:

  • I have the same OpenGL code that works fine on one machine but only partially on a different machine.

  • It is hard to debug because the problem only happens on the machine I need to deploy to. On that machine there is no crash, no warning, no error... nothing.

What I've tried so far:

  • On my machine, I installed exactly the same OS and the same libraries (same versions) as on the second machine. Still, the code works fine on my machine and not on the machine where I need to deploy the application. Note that my machine has an NVIDIA card and the other uses AMD.

  • I am checking for OpenGL errors everywhere (using glGetError) and didn't find anything. A sketch of the helper I use is shown after this list.

  • My application has two parts: one renders using the fixed pipeline, and the other uses shaders. The shader-based rendering is the part that doesn't work on one of the machines. The shaders compile successfully on both machines and their logs don't show anything (using glGetShaderInfoLog).

  • I don't have other machines (besides those two) to run further tests to see whether the problem is really the hardware.

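For reference, glCheckError is my own wrapper around glGetError, roughly like this sketch (it takes a tag string so the log shows where the check happened; this matches the glCheckError("initWindow@MapViewer") call in the main loop below):

#include <GL/glew.h>
#include <iostream>

void glCheckError(const char * where)
{
    // glGetError reports one error at a time, so drain the whole queue.
    for (GLenum err = glGetError(); err != GL_NO_ERROR; err = glGetError())
        std::cout << "GL error 0x" << std::hex << err << std::dec
                  << " at " << where << std::endl;
}
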
Video Cards:

  • My Machine: NVIDIA Quadro 600 (Driver Version: 319.17)

  • Deploy Machine: AMD FirePro W8000

Duplicated?

  • I've checked this thread, but in my app the shaders compile correctly on the AMD machine.

Code:

I'm using GLFW/GLEW.

GLFW and GLEW init

if (!glfwInit())
    return;
GLFWwindow * window = glfwCreateWindow(wndWidth, wndHeight, "Window", NULL, NULL);
if (!window){
    glfwTerminate();
    return;
}
glfwMakeContextCurrent(window);
if (glewInit() != GLEW_OK){
    std::cout << "ERROR: glewInit failed!" << std::endl;
    return;
}
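
Aside: glewInit is known to sometimes leave a stale GL_INVALID_ENUM in the GL error queue, so draining the queue once right after initialization keeps later glGetError checks trustworthy; a minimal sketch:

// Clear any error glewInit may have left behind (a known GLEW quirk).
while (glGetError() != GL_NO_ERROR)
    ;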

Main loop: render() first performs the fixed-pipeline rendering and then the shader-based rendering.

while (!glfwWindowShouldClose(window))
{
    render();
    glfwSwapBuffers(window);
    glfwPollEvents();
    parseEvents();
    glCheckError("initWindow@MapViewer");
}

Initializing VBO

glGenBuffers(3, buffers);
//Vertices (glBufferData sizes are in bytes, hence the sizeof(GLfloat))
glBindBuffer(GL_ARRAY_BUFFER, buffers[0]);
glBufferData(GL_ARRAY_BUFFER, num_vertices*3*sizeof(GLfloat), attr_vertices.data(), GL_DYNAMIC_DRAW);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, 0);
//Color
glBindBuffer(GL_ARRAY_BUFFER, buffers[1]);
glBufferData(GL_ARRAY_BUFFER, num_vertices*4*sizeof(GLfloat), attr_colors.data(), GL_DYNAMIC_DRAW);
glEnableVertexAttribArray(1);
glVertexAttribPointer(1, 4, GL_FLOAT, GL_FALSE, 0, 0);
//Size
glBindBuffer(GL_ARRAY_BUFFER, buffers[2]);
glBufferData(GL_ARRAY_BUFFER, num_vertices*1*sizeof(GLfloat), attr_size.data(), GL_DYNAMIC_DRAW);
glEnableVertexAttribArray(2);
glVertexAttribPointer(2, 1, GL_FLOAT, GL_FALSE, 0, 0);

Shaders

std::string vertexSRC = "#version 120\n"
    "attribute vec3 vertex;\n"
    "attribute vec4 color;\n"
    "attribute float size;\n"
    "uniform mat4 matrix;\n"
    "uniform vec4 bb;\n"
    "uniform float size_mod;\n"
    "varying vec4 vColor;\n"        
    "void main(void)\n"
    "{\n"
    "    vec3 w = vec3((vertex.x-bb.x)/bb.z*2-1, (vertex.y-bb.y)/bb.w*2-1, vertex.z);\n"
    "    gl_Position = vec4(w, 1.0);\n"
    "    vColor = color;\n"
    "    gl_PointSize = size*size_mod;\n"
    "}\n";
std::string fragmentSRC = "#version 120\n"
    "varying vec4 vColor;\n"
    "uniform sampler2D texture;\n"
    "void main()\n"
    "{\n"
    "    vec4 w = texture2D(texture, vec2(gl_PointCoord.x, 1.0-gl_PointCoord.y)) * vColor;\n"
    "    gl_FragColor = w;\n"
    "}\n";

Initializing Shaders

vertexShader = glCreateShader(GL_VERTEX_SHADER);
fragmentShader = glCreateShader(GL_FRAGMENT_SHADER);
const GLchar * vtx = vertexSRC.c_str();
const GLchar * frg = fragmentSRC.c_str();
glShaderSource(vertexShader, 1, &vtx, NULL);
glShaderSource(fragmentShader, 1, &frg, NULL);
GLint vst, fst;
glCompileShader(vertexShader);
glGetShaderiv(vertexShader, GL_COMPILE_STATUS, &vst);
glCompileShader(fragmentShader);
glGetShaderiv(fragmentShader, GL_COMPILE_STATUS, &fst);
if (vst != GL_TRUE || fst != GL_TRUE){
    std::cout << "ERROR compiling shaders: " << vst << " " << fst << std::endl;
    return;
}
shaderProgram = glCreateProgram();
glAttachShader(shaderProgram, vertexShader);
glAttachShader(shaderProgram, fragmentShader);
glLinkProgram(shaderProgram);
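
The snippet above checks the compile status but never the link status; a minimal link check using the standard glGetProgramiv/glGetProgramInfoLog calls would look like:

GLint linked;
glGetProgramiv(shaderProgram, GL_LINK_STATUS, &linked);
if (linked != GL_TRUE){
    GLchar log[1024];
    glGetProgramInfoLog(shaderProgram, sizeof(log), NULL, log);
    std::cout << "ERROR linking shader program: " << log << std::endl;
}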

Getting Uniform Handlers

glUseProgram(shaderProgram);
l_matrix = glGetUniformLocation(shaderProgram, "matrix");
l_bbLocation = glGetUniformLocation(shaderProgram, "bb");
l_texture = glGetUniformLocation(shaderProgram, "texture");
l_sizeMod = glGetUniformLocation(shaderProgram, "size_mod");
l_vertex = glGetAttribLocation(shaderProgram, "vertex");
l_color = glGetAttribLocation(shaderProgram, "color");
l_size = glGetAttribLocation(shaderProgram, "size");
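
Aside: since GLSL compilers are free to optimize away unused variables, any of these lookups can return -1; a minimal sanity check (assuming the handles above are declared GLint) makes that failure mode visible:

// -1 means the name was not found in the linked program (possibly optimized out).
if (l_matrix == -1 || l_bbLocation == -1 || l_texture == -1 || l_sizeMod == -1 ||
    l_vertex == -1 || l_color == -1 || l_size == -1)
    std::cout << "WARNING: a uniform/attribute location came back as -1" << std::endl;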

Render Call

glEnable(GL_POINT_SPRITE);
glEnable(GL_PROGRAM_POINT_SIZE);
glEnable(GL_TEXTURE_2D);
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
glUseProgram(shaderProgram);

//Vertices
glEnableVertexAttribArray(0);
glBindBuffer(GL_ARRAY_BUFFER, buffers[0]);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, 0);
//Color
glEnableVertexAttribArray(1);
glBindBuffer(GL_ARRAY_BUFFER, buffers[1]);
glVertexAttribPointer(1, 4, GL_FLOAT, GL_FALSE, 0, 0);
//Size
glEnableVertexAttribArray(2);
glBindBuffer(GL_ARRAY_BUFFER, buffers[2]);
glVertexAttribPointer(2, 1, GL_FLOAT, GL_FALSE, 0, 0);

glUniformMatrix4fv(l_matrix, 1, GL_TRUE, m);
glUniform4fv(l_bbLocation, 1, latlng_bb);
glUniform1f(l_sizeMod, GLfloat(zoom*size_mod/9.0));

//RENDER
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, textures[face]);
glUniform1i(l_texture, 0);

glDrawArrays(GL_POINTS, 0, num_vertices);
glUseProgram(0);

//DISABLE
glDisableVertexAttribArray(0);
glDisableVertexAttribArray(1);
glDisableVertexAttribArray(2);

glDisable(GL_POINT_SPRITE);
glDisable(GL_PROGRAM_POINT_SIZE);
glDisable(GL_TEXTURE_2D);
glDisable(GL_BLEND);
glBindTexture(GL_TEXTURE_2D, 0);

Any idea, tip, or anything else that could help me trace where the problem is would be welcome.

Edit 0

Added more information about the Video Cards.

Edit 1

I just commented out the line #version 120 in the vertex shader and got some warnings for this line:

0(10) : warning C7011: implicit cast from "int" to "float"

I changed the line to the following. The warnings are gone, but I still have the same problem.

vec3 w = vec3((vertex.x-bb.x)/bb.z*2.0-1.0, (vertex.y-bb.y)/bb.w*2.0-1.0, vertex.z);


Solution

I had to do two things to solve the problem.

First, to make it work with the AMD card as well, I had to query and use the attribute locations (glGetAttribLocation) instead of hard-coded indices.

Changed from:

glEnableVertexAttribArray(0);
glBindBuffer(GL_ARRAY_BUFFER, buffers[0]);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, 0);

To:

l_vertex = glGetAttribLocation(shaderProgram, "vertex");
glEnableVertexAttribArray(l_vertex);
glBindBuffer(GL_ARRAY_BUFFER, buffers[0]);
glVertexAttribPointer(l_vertex, 3, GL_FLOAT, GL_FALSE, 0, 0);

It wasn't showing the data because the size/vertex/color attributes were getting mixed up: without an explicit binding, the driver is free to assign attribute locations in any order, and the AMD driver did not use the 0/1/2 layout my code was hard-coding.
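
The same change applies to the other two attributes; a sketch using the l_color and l_size handles queried in the question:

l_color = glGetAttribLocation(shaderProgram, "color");
glEnableVertexAttribArray(l_color);
glBindBuffer(GL_ARRAY_BUFFER, buffers[1]);
glVertexAttribPointer(l_color, 4, GL_FLOAT, GL_FALSE, 0, 0);

l_size = glGetAttribLocation(shaderProgram, "size");
glEnableVertexAttribArray(l_size);
glBindBuffer(GL_ARRAY_BUFFER, buffers[2]);
glVertexAttribPointer(l_size, 1, GL_FLOAT, GL_FALSE, 0, 0);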

After that, it stopped working on my NVIDIA card. I didn't get any error when compiling the shaders, but I did get an error (via glGetError) every time I tried to use them. I went back and realized that I had to remove all the unused variables from the shaders; presumably the compiler optimized them away, so their queried locations came back as -1, and feeding -1 into the attribute setup calls raises a GL error.
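
For reference, an alternative that avoids querying locations at all is to force them before linking with glBindAttribLocation (standard GL 2.0 API); the 0/1/2 layout then becomes explicit rather than driver-chosen:

shaderProgram = glCreateProgram();
glAttachShader(shaderProgram, vertexShader);
glAttachShader(shaderProgram, fragmentShader);
// glBindAttribLocation must be called before glLinkProgram to take effect.
glBindAttribLocation(shaderProgram, 0, "vertex");
glBindAttribLocation(shaderProgram, 1, "color");
glBindAttribLocation(shaderProgram, 2, "size");
glLinkProgram(shaderProgram);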
