OpenGL, Java background color rendering problem


Hello,

I have what I think is a simple problem for a professional, but I have asked every available AI about it and none was able to solve it, so nothing has been able to help me with this.

I have a simple app that shows 3 colored axes and a camera that moves around the scene on mouse click-and-hold. The problem is that when the app starts, the frame first appears with a white background for a moment, and only then does the normal black background show up. I don't want this white flash at the beginning.
Can you tell me what to change in the code to get rid of it? I have already tried plenty of things.

The project is IntelliJ, Java, Maven, with the additional libraries LWJGL (3.3.1) and JOML (1.10.5). This is the whole code of the 3D OpenGL app, two classes, Main and Camera:

package org.example;

import org.joml.Matrix4f;
import org.lwjgl.glfw.GLFWVidMode;
import org.lwjgl.glfw.GLFWErrorCallback;
import org.lwjgl.opengl.GL;
import org.lwjgl.system.MemoryStack;

import java.nio.FloatBuffer;
import java.nio.IntBuffer;
import java.util.Objects;

import static org.lwjgl.glfw.Callbacks.glfwFreeCallbacks;
import static org.lwjgl.glfw.GLFW.*;
import static org.lwjgl.opengl.GL11.*;
import static org.lwjgl.system.MemoryStack.stackPush;
import static org.lwjgl.system.MemoryUtil.NULL;

public class Main {

    private long window;
    private Camera camera;
    private float lastMouseX, lastMouseY;
    private boolean firstMouse = true;
    private boolean mousePressed = false;

    public void run() {
        init();
        loop();

        glfwFreeCallbacks(window);
        glfwDestroyWindow(window);

        glfwTerminate();
        Objects.requireNonNull(glfwSetErrorCallback(null)).free();
    }

    private void init() {
        GLFWErrorCallback.createPrint(System.err).set();

        if (!glfwInit()) {
            throw new IllegalStateException("Unable to initialize GLFW");
        }

        glfwDefaultWindowHints();
        glfwWindowHint(GLFW_VISIBLE, GLFW_FALSE);
        glfwWindowHint(GLFW_RESIZABLE, GLFW_TRUE);
        // Request a double-buffered window (use the GLFW constant, not GL_TRUE)
        glfwWindowHint(GLFW_DOUBLEBUFFER, GLFW_TRUE);

        window = glfwCreateWindow(800, 600, "3D Scene", NULL, NULL);
        if (window == NULL) {
            throw new RuntimeException("Failed to create the GLFW window");
        }

        glfwSetCursorPosCallback(window, (window, xpos, ypos) -> {
            if (firstMouse) {
                lastMouseX = (float) xpos;
                lastMouseY = (float) ypos;
                firstMouse = false;
            }

            if (mousePressed) {
                float dx = (float) xpos - lastMouseX;
                float dy = (float) ypos - lastMouseY;
                lastMouseX = (float) xpos;
                lastMouseY = (float) ypos;

                camera.rotate(dy * 0.1f, dx * 0.1f);
            }
        });

        glfwSetMouseButtonCallback(window, (window, button, action, mods) -> {
            if (button == GLFW_MOUSE_BUTTON_LEFT) {
                if (action == GLFW_PRESS) {
                    mousePressed = true;
                    firstMouse = true; // Reset firstMouse when the button is pressed
                } else if (action == GLFW_RELEASE) {
                    mousePressed = false;
                }
            }
        });

        glfwSetScrollCallback(window, (window, xoffset, yoffset) -> {
            camera.zoom((float) yoffset * -0.5f);
        });

        glfwSetKeyCallback(window, (window, key, scancode, action, mods) -> {
            if (key == GLFW_KEY_ESCAPE && action == GLFW_RELEASE) {
                glfwSetWindowShouldClose(window, true);
            }
        });

        try (MemoryStack stack = stackPush()) {
            IntBuffer pWidth = stack.mallocInt(1);
            IntBuffer pHeight = stack.mallocInt(1);

            glfwGetWindowSize(window, pWidth, pHeight);

            GLFWVidMode vidmode = glfwGetVideoMode(glfwGetPrimaryMonitor());

            glfwSetWindowPos(
                    window,
                    (vidmode.width() - pWidth.get(0)) / 2,
                    (vidmode.height() - pHeight.get(0)) / 2
            );
        }

        glfwMakeContextCurrent(window);
        glfwSwapInterval(1);
        glfwShowWindow(window);

        GL.createCapabilities();

        glEnable(GL_DEPTH_TEST);

        // Set the projection matrix
        glMatrixMode(GL_PROJECTION);
        glLoadIdentity();
        float aspectRatio = 800.0f / 600.0f;
        glFrustum(-aspectRatio, aspectRatio, -1, 1, 1, 100);

        camera = new Camera(20, 30, 45);
    }

    private void loop() {
        // Set the clear color explicitly
        glClearColor(0.0f, 0.0f, 0.0f, 1.0f);

        while (!glfwWindowShouldClose(window)) {
            // Clear the color and depth buffers
            glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

            Matrix4f viewMatrix = camera.getViewMatrix();
            FloatBuffer fb = org.lwjgl.BufferUtils.createFloatBuffer(16);
            viewMatrix.get(fb);

            glMatrixMode(GL_MODELVIEW);
            glLoadMatrixf(fb);

            drawAxes();

            glfwSwapBuffers(window);
            glfwPollEvents();
        }
    }


    private void drawAxes() {
        glBegin(GL_LINES);

        // X axis in red
        glColor3f(1.0f, 0.0f, 0.0f);
        glVertex3f(-20.0f, 0.0f, 0.0f);
        glVertex3f(20.0f, 0.0f, 0.0f);

        // Y axis in green
        glColor3f(0.0f, 1.0f, 0.0f);
        glVertex3f(0.0f, -20.0f, 0.0f);
        glVertex3f(0.0f, 20.0f, 0.0f);

        // Z axis in yellow
        glColor3f(1.0f, 1.0f, 0.0f);
        glVertex3f(0.0f, 0.0f, -20.0f);
        glVertex3f(0.0f, 0.0f, 20.0f);

        glEnd();
    }

    public static void main(String[] args) {
        new Main().run();
    }
}

and the Camera code:

package org.example;
import org.joml.Matrix4f;
import org.joml.Vector3f;

public class Camera {

    private Vector3f position;
    private float pitch;
    private float yaw;
    private float distance;

    public Camera(float distance, float pitch, float yaw) {
        this.position = new Vector3f(0, 0, 0);
        this.pitch = pitch;
        this.yaw = yaw;
        this.distance = distance;
    }

    public Matrix4f getViewMatrix() {
        Matrix4f viewMatrix = new Matrix4f();
        viewMatrix.identity();

        // Calculate the camera position based on spherical coordinates
        float x = (float) (distance * Math.sin(Math.toRadians(pitch)) * Math.cos(Math.toRadians(yaw)));
        float y = (float) (distance * Math.cos(Math.toRadians(pitch)));
        float z = (float) (distance * Math.sin(Math.toRadians(pitch)) * Math.sin(Math.toRadians(yaw)));

        position.set(x, y, z);

        viewMatrix.lookAt(position, new Vector3f(0, 0, 0), new Vector3f(0, 1, 0));
        return viewMatrix;
    }

    public void rotate(float pitchDelta, float yawDelta) {
        this.pitch += pitchDelta;
        this.yaw += yawDelta;
    }

    public void zoom(float distanceDelta) {
        this.distance += distanceDelta;
        if (this.distance < 1.0f) {
            this.distance = 1.0f; // Prevent camera from getting too close
        }
    }

    public Vector3f getPosition() {
        return position;
    }

    public float getPitch() {
        return pitch;
    }

    public float getYaw() {
        return yaw;
    }

    public float getDistance() {
        return distance;
    }
}

and the pom.xml file is:

<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>

<groupId>org.example</groupId>
<artifactId>3dscene</artifactId>
<version>1.0-SNAPSHOT</version>

<properties>
    <maven.compiler.source>17</maven.compiler.source>
    <maven.compiler.target>17</maven.compiler.target>
</properties>

<dependencies>
    <!-- LWJGL dependencies -->
    <dependency>
        <groupId>org.lwjgl</groupId>
        <artifactId>lwjgl</artifactId>
        <version>3.3.1</version>
    </dependency>
    <dependency>
        <groupId>org.lwjgl</groupId>
        <artifactId>lwjgl-glfw</artifactId>
        <version>3.3.1</version>
    </dependency>
    <dependency>
        <groupId>org.lwjgl</groupId>
        <artifactId>lwjgl-opengl</artifactId>
        <version>3.3.1</version>
    </dependency>
    <dependency>
        <groupId>org.lwjgl</groupId>
        <artifactId>lwjgl-stb</artifactId>
        <version>3.3.1</version>
    </dependency>
    <dependency>
        <groupId>org.lwjgl</groupId>
        <artifactId>lwjgl</artifactId>
        <version>3.3.1</version>
        <classifier>natives-windows</classifier>
    </dependency>
    <dependency>
        <groupId>org.lwjgl</groupId>
        <artifactId>lwjgl-glfw</artifactId>
        <version>3.3.1</version>
        <classifier>natives-windows</classifier>
    </dependency>
    <dependency>
        <groupId>org.lwjgl</groupId>
        <artifactId>lwjgl-opengl</artifactId>
        <version>3.3.1</version>
        <classifier>natives-windows</classifier>
    </dependency>
    <dependency>
        <groupId>org.lwjgl</groupId>
        <artifactId>lwjgl-stb</artifactId>
        <version>3.3.1</version>
        <classifier>natives-windows</classifier>
    </dependency>

    <!-- JOML dependency -->
    <dependency>
        <groupId>org.joml</groupId>
        <artifactId>joml</artifactId>
        <version>1.10.5</version>
    </dependency>
</dependencies>

<repositories>
    <repository>
        <id>sonatype</id>
        <url>https://oss.sonatype.org/content/repositories/snapshots/</url>
    </repository>
</repositories>
</project>

Comparing GPT-4o vs Claude 3.5 Sonnet for Zero Shot Text Classification


On June 20, 2024, Anthropic released the Claude 3.5 Sonnet large language model. Anthropic claims it is the state-of-the-art model for many natural language processing tasks, surpassing the OpenAI GPT-4o model.

My first test for comparing two large language models is their zero-shot text classification ability. In this article, I will compare the Anthropic Claude 3.5 Sonnet with the OpenAI GPT-4o model for zero-shot tweet sentiment classification.

So, let's begin without further ado.

Importing and Installing Required Libraries

The following script installs the Anthropic and OpenAI libraries to access the corresponding APIs.


!pip install anthropic
!pip install openai
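
Both APIs authenticate with an API key, and the scripts later in this article read the keys from environment variables. A minimal sketch of exposing them from within Python (the placeholder strings are assumptions; substitute your own keys, or set the variables in your shell instead):


import os

# placeholders - replace with your actual keys
os.environ["OPENAI_API_KEY"] = "your-openai-api-key"
os.environ["ANTHROPIC_API_KEY"] = "your-anthropic-api-key"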

The script below imports the required libraries into your Python application.


import os
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
import anthropic
from openai import OpenAI

Importing and Preprocessing the Dataset

We will use the Twitter US Airline Sentiment dataset to perform zero-shot classification. You can download the dataset from Kaggle.

The following script imports the dataset into a Pandas DataFrame.

## Dataset download link
## https://www.kaggle.com/datasets/crowdflower/twitter-airline-sentiment?select=Tweets.csv

dataset = pd.read_csv(r"D:\Datasets\tweets.csv")
print(dataset.shape)
dataset.head()

Output:

(image: dataset shape and the first five rows of the DataFrame)

Tweet sentiment falls into three categories: neutral, positive, and negative. For the comparison, we will filter the dataset down to 100 tweets: 34 neutral, 33 positive, and 33 negative.

# Remove rows where 'airline_sentiment' or 'text' are NaN
dataset = dataset.dropna(subset=['airline_sentiment', 'text'])

# Remove rows where 'airline_sentiment' or 'text' are empty strings
dataset = dataset[(dataset['airline_sentiment'].str.strip() != '') & (dataset['text'].str.strip() != '')]

# Filter the DataFrame for each sentiment
neutral_df = dataset[dataset['airline_sentiment'] == 'neutral']
positive_df = dataset[dataset['airline_sentiment'] == 'positive']
negative_df = dataset[dataset['airline_sentiment'] == 'negative']

# Randomly sample records from each sentiment
neutral_sample = neutral_df.sample(n=34)
positive_sample = positive_df.sample(n=33)
negative_sample = negative_df.sample(n=33)

# Concatenate the samples into one DataFrame
dataset = pd.concat([neutral_sample, positive_sample, negative_sample])

# Reset index if needed
dataset.reset_index(drop=True, inplace=True)

# print value counts
print(dataset["airline_sentiment"].value_counts())

Output:

(image: value counts showing 34 neutral, 33 positive, and 33 negative tweets)
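
Note that sample() draws a different random subset on every run, so the two models below are only evaluated on the same tweets because both are run against the same in-memory DataFrame. If you want the sample itself to be reproducible across sessions, you can pass a fixed seed (the value 42 here is an arbitrary choice of mine, not part of the original script):


# fix the random seed so the same 100 tweets are drawn on every run
neutral_sample = neutral_df.sample(n=34, random_state=42)
positive_sample = positive_df.sample(n=33, random_state=42)
negative_sample = negative_df.sample(n=33, random_state=42)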

Zero Shot Text Classification with GPT-4o

We will first perform zero-shot text classification with GPT-4o. Remember to get your OpenAI API key before executing the following script.

The script below defines a function find_sentiment() that accepts client and model parameters. The client parameter is the API client object, i.e., Anthropic or OpenAI, while the model parameter is the name of the model, i.e., Claude 3.5 Sonnet or GPT-4o.

The find_sentiment() function iterates through all the tweets from the filtered dataset and predicts sentiment for each tweet. The predicted sentiments are then compared with the actual sentiment values to determine the model's accuracy.


def find_sentiment(client, model):

    tweets_list = dataset["text"].tolist()


    all_sentiments = []

    i = 0
    exceptions = 0
    while i < len(tweets_list):

        try:
            tweet = tweets_list[i]
            content = """What is the sentiment expressed in the following tweet about an airline?
            Select sentiment value from positive, negative, or neutral. Return only the sentiment value in small letters.
            tweet: {}""".format(tweet)

            sentiment_value = client.chat.completions.create(
                                  model= model,
                                  temperature = 0,
                                  max_tokens = 10,
                                  messages=[
                                        {"role": "user", "content": content}
                                    ]
                                ).choices[0].message.content

            all_sentiments.append(sentiment_value)
            i = i + 1
            print(i, sentiment_value)

        except Exception as e:
            print("===================")
            print("Exception occurred:", e)
            exceptions += 1
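            # i is not incremented on failure, so the same tweet is retried on the next pass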

    print("Total exception count:", exceptions)
    accuracy = accuracy_score(all_sentiments, dataset["airline_sentiment"])
    print("Accuracy:", accuracy)

Next, we create an OpenAI client and pass the client object along with the gpt-4o model to predict sentiments.


%%time
client = OpenAI(
    # This is the default and can be omitted
    api_key = os.environ.get('OPENAI_API_KEY'),
)
model = "gpt-4o"

find_sentiment(client, model)

Output:

(image: per-tweet predictions and a final accuracy of 0.76)

The above output shows that GPT-4o achieved an accuracy of 76%.

Zero Shot Text Classification with Claude 3.5 Sonnet

Let's now predict the sentiment of the same set of tweets using the Claude 3.5 Sonnet model. We define a find_sentiment_claude() function, which is similar to the find_sentiment() function but predicts sentiments using the Claude 3.5 Sonnet model.


def find_sentiment_claude(client, model):

    tweets_list = dataset["text"].tolist()


    all_sentiments = []

    i = 0
    exceptions = 0
    while i < len(tweets_list):

        try:
            tweet = tweets_list[i]
            content = """What is the sentiment expressed in the following tweet about an airline?
            Select sentiment value from positive, negative, or neutral. Return only the sentiment value in small letters.
            tweet: {}""".format(tweet)

            sentiment_value = client.messages.create(
                                model= model,
                                max_tokens=10,
                                temperature=0.0,
                                messages=[
                                    {"role": "user", "content": content}
                                ]
                            ).content[0].text

            all_sentiments.append(sentiment_value)
            i = i + 1
            print(i, sentiment_value)

        except Exception as e:
            print("===================")
            print("Exception occurred:", e)
            exceptions += 1
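            # as before, i is not incremented on failure, so the same tweet is retried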

    print("Total exception count:", exceptions)
    accuracy = accuracy_score(all_sentiments, dataset["airline_sentiment"])
    print("Accuracy:", accuracy)

The script below calls the find_sentiment_claude() function with the Anthropic client and the claude-3-5-sonnet-20240620 model (the model ID for Claude 3.5 Sonnet).


%%time
client = anthropic.Anthropic(
    # defaults to os.environ.get("ANTHROPIC_API_KEY")
    api_key = os.environ.get('ANTHROPIC_API_KEY')
)

model = "claude-3-5-sonnet-20240620"

find_sentiment_claude(client, model)

Output:

(image: per-tweet predictions and a final accuracy of 0.83)

The above output shows that the Claude 3.5 Sonnet model achieved an accuracy of 83%, significantly better than the 76% achieved by GPT-4o.

Final Verdict

Here is an overall comparison between Claude 3.5 Sonnet and GPT-4o.

(image: overall comparison table covering performance, price, and context window size for the two models)

Given the price, performance, and context window size, I prefer the Claude 3.5 Sonnet over other proprietary models such as GPT-4o.

Feel free to share your opinions. I would be very interested in any results you obtain using the two models.