Hi guys,
I’m building a chatbot app, and I wanted to ask: when I submit a second query through Athena to generate a new plot, why does the chatbot first render an obscured, duplicated image? After that, if I repeat the request, the problem disappears.
Thanks for the answer, tony!
Yes, the main issue is the ghost visualization when the first request for a new image loads. If I repeat the request two or three times afterwards, the problem no longer appears.
Here is the piece of code:
import boto3
import streamlit as st
import streamlit.components.v1 as components

s3 = boto3.client("s3")  # S3 client (assumed; its setup wasn't shown in the snippet)

# session state, to initialize the chatbot giving a starter message at the first app run
if "messages" not in st.session_state:
    st.session_state.messages = [{"role": "assistant", "content": "Hello 👋🏻"}]

for message in st.session_state.messages:  # Display the prior chat messages
    with st.chat_message(message["role"]):
        st.write(message["content"])
        if "image" in message:
            components.html(message["image"], height=500, width=600)

# React to user input
if prompt := st.chat_input("Message here.."):
    # Add user message to chat history
    st.session_state.messages.append({"role": "user", "content": prompt})
    # Display user message in chat message container
    st.chat_message("user").write(prompt)

if st.session_state.messages[-1]["role"] != "assistant":
    with st.chat_message("assistant"):
        with st.spinner("I'm generating the response..."):
            response_payload = generate_response_llm(prompt)
            if response_payload["plot_flg"]:
                description = response_payload["res_chain"]
                msg = f"{description}"
                # AWS settings
                bucket_name = "example-of-bucket-name"
                key = "imagegenerate.html"
                local_file_path = "imagegenerate.html"
                # Download the generated HTML plot from S3
                try:
                    s3.download_file(bucket_name, key, local_file_path)
                except Exception as e:
                    print(f"Error {e}")
                st.write(msg)
                with open(local_file_path) as f:
                    file_open = f.read()
                components.html(file_open, height=500, width=600)
                st.session_state.messages.append(
                    {"role": "assistant", "content": msg, "prompt": prompt, "image": file_open}
                )
            else:
                # Text response
                response = f"{response_payload['res_chain']}"
                # Display assistant response in chat message container
                st.write(response)
                # Add assistant response to chat history
                st.session_state.messages.append({"role": "assistant", "content": response})
The response is generated by some LLMs in Bedrock, and it returns both text and an image.
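For context, generate_response_llm wraps the Bedrock calls; the payload it returns is shaped roughly like this (just a sketch of the contract the code above relies on, not the real implementation):

def generate_response_llm(prompt: str) -> dict:
    # ... invoke the Bedrock model(s), decide whether the answer needs a plot ...
    return {
        "plot_flg": True,            # True when a plot was generated and uploaded to S3
        "res_chain": "text answer",  # the textual part of the response
    }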
Honestly, I think it's a Streamlit problem with the connection to AWS, but I'm not sure.
Thank you for the extra details. The issue might be related to how the app handles the session state and the dynamic HTML content from AWS. One potential solution you could try is creating an empty placeholder initially and then populating it with content. Here's an example using the code you provided:
# Initialize an empty placeholder for the image
# outside the loop
placeholder = st.empty()

for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.write(message["content"])
        if "image" in message:
            # Use the placeholder to display the image
            placeholder.html(message['image'], height=500, width=600)
I see that the main problem is this one, but I cannot use .html() because it is not supported, as you can see in the previous message. I also can't use .markdown() because it reproduces the raw HTML tags of the image, so I'm not sure what to do.
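To show what I mean: st.markdown escapes HTML by default, and even with unsafe_allow_html=True the <script> blocks that an HTML plot export needs generally won't execute, so components.html looks like the only viable option. A hypothetical snippet just to illustrate:

snippet = "<b>plot markup</b><script>/* plot-rendering JS */</script>"
st.markdown(snippet)                          # default: tags are escaped and shown as text
st.markdown(snippet, unsafe_allow_html=True)  # tags render, but the script still doesn't run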
EDIT 12:26pm:
I changed it this way:
placeholder = st.empty()
with placeholder.container():
    for message in st.session_state.messages:  # Display the prior chat messages
        with st.chat_message(message["role"]):
            st.write(message["content"])
            if "image" in message:
                components.html(message['image'], height=500, width=600)
and left the handling of the next input as the default, with components, but nothing changed: we have the same problem, just on the opposite side:
import boto3
import streamlit as st
import streamlit.components.v1 as components

s3 = boto3.client("s3")  # S3 client (assumed; its setup wasn't shown in the snippet)

# session state, to initialize the chatbot giving a starter message at the first app run
if "messages" not in st.session_state:
    st.session_state.messages = [{"role": "assistant", "content": "Hello 👋🏻"}]

placeholder = st.empty()
with placeholder.container():
    for message in st.session_state.messages:  # Display the prior chat messages
        with st.chat_message(message["role"]):
            st.write(message["content"])
            if "image" in message:
                components.html(message["image"], height=500, width=600)

# React to user input
if prompt := st.chat_input("Message here.."):
    # Add user message to chat history
    st.session_state.messages.append({"role": "user", "content": prompt})

if st.session_state.messages[-1]["role"] != "assistant":
    with st.chat_message("assistant"):
        with st.spinner("I'm generating the response..."):
            response_payload = generate_response_llm(prompt)
            if response_payload["plot_flg"]:
                description = response_payload["res_chain"]
                msg = f"{description}"
                # AWS settings
                bucket_name = "example-of-bucket-name"
                key = "imagegenerate.html"
                local_file_path = "imagegenerate.html"
                # Download the generated HTML plot from S3
                try:
                    s3.download_file(bucket_name, key, local_file_path)
                except Exception as e:
                    print(f"Error {e}")
                st.write(msg)
                with open(local_file_path) as f:
                    file_open = f.read()
                components.html(file_open, height=500, width=600)
                st.session_state.messages.append(
                    {"role": "assistant", "content": msg, "prompt": prompt, "image": file_open}
                )
            else:
                # Text response
                response = f"{response_payload['res_chain']}"
                # Display assistant response in chat message container
                st.write(response)
                # Add assistant response to chat history
                st.session_state.messages.append({"role": "assistant", "content": response})
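One more idea I want to test: stop rendering the new image inline at all, and instead append the assistant message and force a rerun, so the history loop at the top is the only place the HTML is ever drawn. A minimal sketch (this assumes Streamlit ≥ 1.27, where st.rerun is available):

# Inside the plot branch, instead of calling components.html(file_open, ...) inline:
st.session_state.messages.append(
    {"role": "assistant", "content": msg, "prompt": prompt, "image": file_open}
)
st.rerun()  # rerun the script so the loop at the top renders the image exactly once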