Getting ChatGPT to read a DataFrame using LangChain

Hello,

My main goal is to have ChatGPT read a pandas DataFrame.

Can I do this with LangChain? If so, any code or documentation you can point me to would be much appreciated.

I have just started from these two resources:
Quickstart | 🦜️🔗 LangChain
Chat with pandas DataFrames using LLMs
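
From those pages, my understanding is that the basic pattern looks roughly like this (untested on my side; the file name and model are just placeholders):

import pandas as pd
from langchain.chat_models import ChatOpenAI
from langchain_experimental.agents.agent_toolkits import create_pandas_dataframe_agent

# Load the data to chat with (placeholder path)
df = pd.read_excel("titanic.xlsx")

# Chat model; assumes OPENAI_API_KEY is set in the environment
llm = ChatOpenAI(temperature=0, model="gpt-3.5-turbo")

# Agent that writes and executes pandas code against df to answer questions
agent = create_pandas_dataframe_agent(llm, df, verbose=True)

print(agent.run("How many rows does the DataFrame have?"))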

This is the piece of code ChatGPT wrote for me; I couldn't check whether it works:

import openai
import streamlit as st
import pandas as pd

from langchain_experimental.agents.agent_toolkits import create_pandas_dataframe_agent
from langchain.chat_models import ChatOpenAI
from langchain.agents.agent_types import AgentType

from langchain.chains import ConversationChain
from langchain.chains.conversation.memory import ConversationEntityMemory
from langchain.chains.conversation.prompt import ENTITY_MEMORY_CONVERSATION_TEMPLATE
from langchain.llms import OpenAI
from langchain.callbacks import get_openai_callback

import os

# Set your OpenAI API key here
api_key = os.getenv("OPENAI_API_KEY")
openai.api_key = api_key

# Load the DataFrame
yol = 'C:\\Users\\Ozgur\\AppData\\Local\\Programs\\Python\\Python39\\titanic.xlsx'
df = pd.read_excel(yol)

# Conversation history, kept in session_state so it survives Streamlit reruns
if "conversation" not in st.session_state:
    st.session_state.conversation = []
conversation = st.session_state.conversation

# Create a Streamlit app
st.title("DataFrame Chatbot")
st.write("You can chat with the DataFrame chatbot. Type 'exit' to end the conversation.")

user_input = st.text_input("You: ")

if user_input:
    if user_input.lower() == "exit":
        st.write("Bot: Goodbye!")
    else:
        # Extend the conversation history with the user's input
        conversation.append({"role": "user", "content": user_input})

        # Initialize the chat model (gpt-3.5-turbo is a chat model, so ChatOpenAI is the right wrapper, not OpenAI)
        llm = ChatOpenAI(temperature=0, openai_api_key=api_key, model_name="gpt-3.5-turbo", verbose=True)

        # Build a pandas DataFrame agent around the chat model
        agent = create_pandas_dataframe_agent(
            ChatOpenAI(temperature=0, model="gpt-3.5-turbo", openai_api_key=api_key),
            df,
            verbose=True,
            agent_type=AgentType.OPENAI_FUNCTIONS,
        )

        # agent.run expects a single question string, not the whole message list
        bot_response = agent.run(user_input)

        st.write("Bot:", bot_response)

        if 'entity_memory' not in st.session_state:
            st.session_state.entity_memory = ConversationEntityMemory(llm=llm, k=8)

        # ConversationChain with entity memory (set up here but not yet used to generate the replies above)
        conversation_chain = ConversationChain(
            llm=llm,
            prompt=ENTITY_MEMORY_CONVERSATION_TEMPLATE,
            memory=st.session_state.entity_memory
        )
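
In case it helps anyone trying to reproduce this: I save the script as app.py (my own naming) and start it with

streamlit run app.py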

This is a better version of the code:

import streamlit as st
import pandas as pd
from langchain_experimental.agents.agent_toolkits import create_pandas_dataframe_agent
from langchain.agents.agent_types import AgentType
from langchain.chains import ConversationChain
from langchain.chat_models import ChatOpenAI
from langchain.memory import ConversationBufferMemory

if 'messages' not in st.session_state:
   st.session_state['messages'] = []

# Display chat history
for message in st.session_state['messages']:
   with st.chat_message(message.get("role")):
       st.write(message.get("content"))

yol = 'C:\\Users\\ozgur.dugmeci\\AppData\\Local\\Programs\\Python\\Python39\\titanic.xlsx'
df = pd.read_excel(yol)

prompt = st.chat_input('Say something')

if prompt:
   # Add to the storage
   st.session_state['messages'].append({"role": "user", "content": prompt})

   # Display conversation
   with st.chat_message("user"):
       st.write(prompt)

   # Bot answering
   api_key = 'xx'  # placeholder; put your real OpenAI API key here (or read it from an env var)
   llm = ChatOpenAI(temperature=0.6, model="gpt-3.5-turbo", openai_api_key=api_key)

   # Conversational memory (note: created here but not yet passed to the agent below)
   if 'conversation_memory' not in st.session_state:
       st.session_state['conversation_memory'] = ConversationBufferMemory(memory_key="chat_history")

   agent = create_pandas_dataframe_agent(llm, df, handle_parsing_errors=True,
                                         verbose=True, agent_type=AgentType.OPENAI_FUNCTIONS)
   response = agent.run(prompt)

   st.session_state['messages'].append({"role": "assistant", "content": response})
   with st.chat_message("assistant"):
       st.write(response)
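
One thing I noticed: the ConversationBufferMemory above is created but never handed to the agent, so every question is answered in isolation. A workaround I am thinking about (just a sketch, not tested) is to prepend the recent chat history from st.session_state to the question, i.e. replace the response = agent.run(prompt) line with something like:

   # Sketch: give the agent context by prepending the last few stored turns to the question
   recent = st.session_state['messages'][:-1][-6:]  # previous turns, excluding the question just appended
   history_text = "\n".join(f"{m['role']}: {m['content']}" for m in recent)
   question_with_context = (
       "Previous conversation:\n" + history_text +
       "\n\nCurrent question: " + prompt
   )
   response = agent.run(question_with_context)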