Trouble Initializing st.session_state.chat_history

If you’re creating a debugging post, please include the following info:

  1. Are you running your app locally or is it deployed? Locally.
  2. If your app is deployed:
    a. Is it deployed on Community Cloud or another hosting platform?
    b. Share the link to the public deployed app.
  3. Share the link to your app’s public GitHub repository (including a requirements file).
  4. Share the full text of the error message (not a screenshot).

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "/Users/neilmcdevitt/VSCode Projects/app.py", line 90, in <module>
    for message in st.session_state.chat_history:
                   ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/neilmcdevitt/anaconda3/lib/python3.11/site-packages/streamlit/runtime/state/session_state_proxy.py", line 121, in __getattr__
    raise AttributeError(_missing_attr_error_message(key))
AttributeError: st.session_state has no attribute "chat_history". Did you forget to initialize it? More info: Add statefulness to apps - Streamlit Docs
(base) neilmcdevitt@MacBook-Pro-2 VSCode Projects %
  5. Share the Streamlit and Python versions.

Streamlit version: 1.32.1
Python version: 3.11.7


Try defining your session state variable before using it:

if 'chat_history' not in st.session_state:
    st.session_state.chat_history = []
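If I remember right, st.session_state also supports the usual dict methods, so a setdefault one-liner should be equivalent (worth verifying on your Streamlit version):

# same effect in one line; st.session_state behaves like a dict
st.session_state.setdefault("chat_history", [])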

This is exactly what I’ve done.

Here is my code, not sure where I went wrong here:

import os
import streamlit as st
from langchain_community.document_loaders import PyPDFLoader
from langchain_community.vectorstores import FAISS
from langchain_openai import OpenAIEmbeddings
from langchain.prompts import ChatPromptTemplate
from langchain_community.chat_models import ChatOpenAI
from langchain.chains import LLMChain
from langchain_core.output_parsers import StrOutputParser
from langchain_experimental.text_splitter import SemanticChunker
from langchain.memory import ConversationBufferWindowMemory
from langchain.chains import ConversationChain
from langchain_core.messages import HumanMessage, AIMessage
from apikey import openai_api_key

os.environ['OPENAI_API_KEY'] = openai_api_key

# Load data
loader = PyPDFLoader("/Users/neilmcdevitt/VSCode Projects/Cashvertising-Free-PDF-Book.pdf")
pages = loader.load_and_split()

text_splitter = SemanticChunker(
    OpenAIEmbeddings(), breakpoint_threshold_type="percentile"
)

# Embeddings
embeddings = OpenAIEmbeddings()
db = FAISS.from_documents(pages, embeddings)

def retrieve_info(query):
    similar_response = db.similarity_search(query, k=3)
    page_contents_array = [doc.page_content for doc in similar_response]
    print(page_contents_array)
    return page_contents_array

# LLM model and memory
llm = ChatOpenAI(temperature=.2, model="gpt-4-turbo-preview", max_tokens=650)
memory = ConversationBufferWindowMemory(k=5)
conversation = ConversationChain(
    llm=llm, verbose=True, memory=memory
)

# Display
if "chat_history" not in st.session_state:
    st.session_state.chat_history = []

st.title("Cashvertising")

# Function to format chat history for the template
def format_chat_history(chat_history):
    formatted_history = ""
    for message in chat_history:
        if isinstance(message, HumanMessage):
            formatted_history += f"You: {message.content}\n"
        elif isinstance(message, AIMessage):
            formatted_history += f"AI: {message.content}\n"
    return formatted_history

# Function to get response
def get_response(query, chat_history):
    formatted_chat_history = format_chat_history(chat_history)
    template = f"""
    Your specialized prompt template here...

    Chat history: {formatted_chat_history}

    User question: {query}
    """
    
    prompt = ChatPromptTemplate.from_template(template)
    chain = prompt | llm | StrOutputParser()
    
    return chain.invoke({
        "chat_history": formatted_chat_history,
        "user_question": query
    })

# Conversation display
for message in st.session_state['chat_history']:
    if isinstance(message, HumanMessage):
        with st.chat_message("Human"):
            st.markdown(message.content)
    else:
        with st.chat_message("AI"):
            st.markdown(message.content)

# User input
user_query = st.chat_input("your message")
if user_query is not None and user_query != "":
    human_message = HumanMessage(user_query)
    st.session_state['chat_history'].append(human_message)
    
    ai_response = get_response(user_query, st.session_state['chat_history'])
    ai_message = AIMessage(ai_response.content if ai_response else "I'm not sure, could you rephrase?")
    
    st.session_state['chat_history'].append(ai_message)

    with st.chat_message("AI"):
        st.markdown(ai_message.content)

Thanks for sharing formatted code! I’m not seeing anything in your code that would explain why that error is raised (the way you initialize chat_history should work fine), and the line numbers differ slightly from those in your error message, so perhaps the code you pasted isn’t quite the code that raised the error.

Could you please simplify your code down to the bare minimal code which:

  1. Is testable by others on the forum (meaning doesn’t actually need secrets or local files)
  2. Still shows the error

For example, you could remove all the langchain stuff and just try to get user input and then print out the chat history; a minimal sketch of that idea follows. If that no longer shows the error, then you can add things back in one piece at a time until the error pops up again, which should make it much easier for you or others to diagnose.
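Something along these lines (the title and placeholder handling are just illustrative) is about as small as it gets while still exercising chat_history:

import streamlit as st

# initialize chat history once per session
if "chat_history" not in st.session_state:
    st.session_state.chat_history = []

st.title("Minimal repro")

user_query = st.chat_input("your message")
if user_query:
    st.session_state.chat_history.append(user_query)

# if this loop raises the AttributeError, the bug reproduces here
for message in st.session_state.chat_history:
    st.write(message)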


It’s working like this.

import streamlit as st
from dotenv import load_dotenv



load_dotenv()

if "chat_history" not in st.session_state:
    st.session_state.chat_history = []

st.set_page_config(page_title="bot")

st.title("bot")

user_query = st.chat_input("message here")
if user_query is not None and user_query != "":
    st.session_state.chat_history.append(user_query)
    
    with st.chat_message("Human"):
        st.markdown(user_query)
        
    with st.chat_message("AI"):
        ai_response = "i don't know"
        st.markdown(ai_response)
import streamlit as st
import os
from langchain_core.messages import HumanMessage, AIMessage
from langchain_core.output_parsers import StrOutputParser
from apikey import openai_api_key
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI

os.environ['OPENAI_API_KEY']=openai_api_key

if "chat_history" not in st.session_state:
    st.session_state.chat_history = []

st.set_page_config(page_title="bot")

st.title("bot")

# get response
def get_response(query, chat_history):
  template = """
        You're a marketing and business startup genius. You specialize in helping others grow
        their business by teaching with systems and frameworks. Let's have a back-and-forth conversation going over how
        to grow a niche community subscription revenue business using organic social media marketing and paid advertising.
        The goal is to grow my revenue while keeping membership churn as low
        as possible.

        Chat history {chat_history}
        User question: {message}
        """
  prompt = ChatPromptTemplate.from_template(template)
    
  llm = ChatOpenAI()
  
  chain = prompt | llm | StrOutputParser
  
  return chain.invoke({
      "chat_history": chat_history,
      "message": query
    })


#conversation
for message in st.session_state.chat_history:
    if isinstance(message, HumanMessage):
        with st.chat_message("Human"):
            st.markdown(message.content)
    else:
        with st.chat_message("AI"):
            st.markdown(message.content)
            
user_query = st.chat_input("message here")
if user_query is not None and user_query != "":
     st.session_state.chat_history.append(HumanMessage(user_query))
    
     with st.chat_message("Human"):
          st.markdown(user_query)
        
     with st.chat_message("AI"):
         ai_response = get_response(user_query, st.session_state.chat_history)
         st.markdown(ai_response)
        
     st.session_state.chat_history.append(AIMessage(ai_response))

Now I’m getting this error with this code.


The error is at get_response(query, chat_history).

First, fix your template: use the f-string properly and use the query parameter.

def get_response(query, chat_history):
    template = f"""
        You're a marketing and business startup genius. You specialize in helping others grow
        their business by teaching with systems and frameworks. Let's have a back-and-forth conversation going over how
        to grow a niche community subscription revenue business using organic social media marketing and paid advertising.
        The goal is to grow my revenue while keeping membership churn as low
        as possible.

        Chat history {chat_history}
        User question: {query}
        """

Second, instantiate the StrOutputParser class; without the parentheses you pipe the class itself into the chain instead of an instance.

parser = StrOutputParser()

Third, define the chain with the parser instance.

chain = prompt | llm | parser

That worked in my test.

Debugged function

def get_response(query, chat_history):
    template = f"""
        You're a marketing and business startup genius. You specialize in helping others grow
        their business by teaching with systems and frameworks. Let's have a back-and-forth conversation going over how
        to grow a niche community subscription revenue business using organic social media marketing and paid advertising.
        The goal is to grow my revenue while keeping membership churn as low
        as possible.

        Chat history {chat_history}
        User question: {query}
        """
    
    try:
        prompt = ChatPromptTemplate.from_template(template)
    except Exception as err:
        raise Exception(f'1: {err}')

    try:
        llm = ChatOpenAI()
    except Exception as err:
        raise Exception(f'2: {err}')
    
    parser = StrOutputParser()  
    chain = prompt | llm | parser

    try:
        gr = chain.invoke({
            "chat_history": chat_history,
            "message": query
        })
    except Exception as err:
        raise Exception(f'3: {err}')

    return gr

Thank you so much!


Now I’m running into a new error. I’ve tried a few different solutions and asked GPT-4 for help but can’t find an answer.
I’ll provide the code below. Thank you!

import streamlit as st
import os
from langchain_core.messages import HumanMessage, AIMessage
from langchain_core.output_parsers import StrOutputParser
from apikey import openai_api_key
from langchain_core.prompts import ChatPromptTemplate
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_openai import OpenAIEmbeddings
from langchain.chains import LLMChain
from langchain_openai import ChatOpenAI
from langchain_core.documents import Document
from langchain_community.document_loaders import PyPDFLoader
from langchain.chains import create_retrieval_chain 
from langchain.memory import ConversationBufferWindowMemory
from langchain.chains.combine_documents import create_stuff_documents_chain
from langchain.chains import ConversationChain
from langchain_community.vectorstores import FAISS

os.environ['OPENAI_API_KEY']=openai_api_key

if "chat_history" not in st.session_state:
    st.session_state.chat_history = []

st.set_page_config(page_title="bot")

st.title("bot")

# Doc loader
loader = PyPDFLoader("/Users/neilmcdevitt/VSCode Projects/Cashvertising-Free-PDF-Book.pdf")
docs = loader.load_and_split()

# Embeddings
embeddings = OpenAIEmbeddings()
db = FAISS.from_documents(docs, embeddings)

text_splitter = RecursiveCharacterTextSplitter()
documents = text_splitter.split_documents(docs)
vectorstore = FAISS.from_documents(documents, embeddings)



def retrieve_info(query):
   similar_response = db.similarity_search(query, k=3)
   page_contents_array = [doc.page_content for doc in similar_response]
   print(page_contents_array)
   return page_contents_array



# LLM model and memory
llm = ChatOpenAI(temperature=0, model="gpt-4-turbo-preview", max_tokens=450)
window_memory = ConversationBufferWindowMemory(k=5)
conversation_with_summary = ConversationChain(
   llm=llm, verbose=True, memory=window_memory
)

# get response
def get_response(user_question, chat_history):
  template = f"""
        You're a marketing and business expert that helps users grow revenue for their subscription
        revenue community business.
        Your specialties are paid advertising and organic marketing on social media, integrating 
        feedback loops to improve product and improve marketing efficiencies, and reducing membership churn.
        Based on the User question, the Chat history, and the PDF document provided give your best response 
        in natural language.

        Chat history: {chat_history}
        User question: {user_question}
        
        """
  try:
        prompt = ChatPromptTemplate.from_template(template)
        document_chain = create_stuff_documents_chain(llm, prompt)
  except Exception as err:
        raise Exception(f'1: {err}')

  try:
        llm = ChatOpenAI()
  except Exception as err:
        raise Exception(f'2: {err}')
    
  parser = StrOutputParser()  
  chain = prompt | llm | parser

  retriever = vectorstore.as_retriever()
  retrieval_chain = create_retrieval_chain(retriever, document_chain)

  try:
        gr = retrieval_chain.invoke({
            "chat_history": chat_history,
            "user_question": user_question
        })
  except Exception as err:
        raise Exception(f'3: {err}')

  return gr



#conversation
for message in st.session_state.chat_history:
    if isinstance(message, HumanMessage):
        with st.chat_message("Human"):
            st.markdown(message.content)
    else:
        with st.chat_message("AI"):
            st.markdown(message.content)
            
user_query = st.chat_input("message here")
if user_query is not None and user_query != "":
     st.session_state.chat_history.append(HumanMessage(user_query))
    
     with st.chat_message("Human"):
          st.markdown(user_query)
        
     with st.chat_message("AI"):
         ai_response = get_response(user_query, st.session_state.chat_history)
         st.markdown(ai_response)
        
     st.session_state.chat_history.append(AIMessage(ai_response))


Call llm first, because document_chain needs llm. Since you assign llm inside get_response, Python treats llm as a local variable for the whole function, so the earlier create_stuff_documents_chain(llm, prompt) call fails even though a module-level llm exists.

try:
    llm = ChatOpenAI()
except Exception as err:
    raise Exception(f'2: {err}')

try:
    prompt = ChatPromptTemplate.from_template(template)
    document_chain = create_stuff_documents_chain(llm, prompt)
except Exception as err:
    raise Exception(f'1: {err}')
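For what it’s worth, the reordering matters because of standard Python scoping rather than anything LangChain-specific: assigning to a name anywhere in a function makes it local throughout that function. A minimal sketch of the same failure, with made-up values:

llm = "module-level value"

def get_response():
    print(llm)           # raises UnboundLocalError: llm is assigned below,
    llm = "local value"  # so Python treats it as local for the entire function

get_response()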

That gave me this error.

This topic was automatically closed 2 days after the last reply. New replies are no longer allowed.