# User-provided prompt
if prompt := st.chat_input():
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        # Track how many prompts the user has sent this session
        st.session_state.user_prompt_count += 1
        st.write(prompt)
# Add a button to allow the user to stop generation
if st.button("Stop Generation"):
    st.session_state.generation_enabled = False
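# The session-state keys used in this script (messages, user_prompt_count,
# generation_enabled) must exist before the first rerun reaches the code
# above. A minimal initialization sketch, normally placed near the top of
# the file after `import json` and `import streamlit as st`; the greeting
# text is illustrative, not taken from the original:
if "messages" not in st.session_state:
    st.session_state.messages = [
        {"role": "assistant", "content": "How may I assist you today?"}
    ]
if "user_prompt_count" not in st.session_state:
    st.session_state.user_prompt_count = 0
if "generation_enabled" not in st.session_state:
    st.session_state.generation_enabled = True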
# Generate a new response if the last message is not from the assistant
if st.session_state.messages[-1]["role"] != "assistant" and st.session_state.generation_enabled:
    with st.chat_message("assistant"):
        with st.spinner("Thinking..."):
            response = generate_llama2_response(prompt)
            placeholder = st.empty()
            full_response = ""
            if "error" in response:
                # Handle the error case
                error_message = response["error"]
                # Display the error message on a red background
                st.markdown(f'<div style="background-color: #ff0000; color: #ffffff; padding: 10px;">Error: {error_message}</div>', unsafe_allow_html=True)
            else:
                # Collect the streamed items in a list; rendering the list
                # keeps the output valid JSON, whereas concatenating the
                # individual JSON strings would not be parseable
                items = []
                for item in response:
                    items.append(item)
                    # Use st.json to display the JSON data received so far
                    placeholder.json(items)
                # Store the full response as a JSON-formatted string
                full_response = json.dumps(items, indent=2)
    message = {"role": "assistant", "content": full_response}
    st.session_state.messages.append(message)
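# `generate_llama2_response` is defined elsewhere in the script. The loop
# above expects it to return either a dict with an "error" key or a list of
# JSON-serializable items. A hypothetical stand-in with that shape, useful
# for exercising the UI without a model backend (the name, fields, and body
# here are illustrative assumptions, not the real implementation):
def generate_llama2_response_stub(prompt_text):
    """Return a list of dict chunks mimicking a streamed LLM response."""
    if not prompt_text:
        # Same shape the error branch above expects
        return {"error": "Empty prompt"}
    words = f"Echoing back: {prompt_text}".split()
    return [{"index": i, "token": word} for i, word in enumerate(words)]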