In the code below, I want the styled_paragraph
to stay, while I Q and A using the chat elements. But it is not staying. As soon as I ask a question, the paragraph disappears.
if key_validated:
# Initialize the documents in memory
with st.spinner(text="Initializing the bot. Hang tight!."):
document_db = load_data()
# instantiate the database retriever
retriever = document_db.as_retriever(search_type="similarity", search_kwargs={"k": 3})
# instantiate the large language model
llm = AzureChatOpenAI(
openai_api_type=OPENAI_API_TYPE,
deployment_name=DEPLOYMENT_NAME,
model_name=DEPLOYMENT_NAME,
temperature=0.0
)
template = """Use the following pieces of context to answer the question at the end.
If you don't know the answer, just say that you don't know, don't try to make up an answer.
Give detailed answers.
{context}
Question: {question}
Helpful Answer:"""
QA_CHAIN_PROMPT = PromptTemplate.from_template(template)
qa_chain = RetrievalQA.from_chain_type(
llm=llm,
chain_type="stuff",
retriever=retriever,
chain_type_kwargs={"prompt": QA_CHAIN_PROMPT}
)
police_report_prompt = """
Summarize my report in plain English
"""
if st.button("Police Report Summary"):
with st.spinner("Thinking..."):
response = qa_chain({"query": report_prompt})
# Using HTML tags to style the paragraph
styled_paragraph = f"""
<div style="background-color: #f5f5f5; padding: 20px; border-radius: 10px; box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1);">
<h2 style="color: #333;">My Report</h2>
<p style="font-size: 18px; line-height: 1.6; color: #666;">{response["result"]}</p>
</div>
"""
# Display the styled paragraph using Streamlit
st.markdown(styled_paragraph, unsafe_allow_html=True)
# Prompt for user input and display message history
if prompt := st.chat_input(
"Your question here ..",
disabled=not openai.api_key): # Prompt for user input and save to chat history
st.session_state.messages.append({"role": "user", "content": prompt})
for message in st.session_state.messages: # Display the prior chat messages
with st.chat_message(message["role"]):
st.markdown(message["content"])
# Pass query to chat engine and display response
# If last message is not from assistant, generate a new response
if st.session_state.messages[-1]["role"] != "assistant":
with st.chat_message("assistant"):
with st.spinner("Thinking..."):
response = qa_chain({"query": prompt})
st.markdown(response["result"])
message = {"role": "assistant", "content": response["result"]}
st.session_state.messages.append(message) # Add response to message history