I would like the backend responses to be displayed immediately below my frontend input entries. For example, if I enter two commands in the frontend and the backend returns two responses, the first response should appear directly below the first command. I wasn't able to display a response on screen immediately because my backend processing time is quite long: I entered the second command before the backend had finished processing the first, so the script jumped back to if prompt := st.chat_input(): and executed again. To work around this, I later used a queue to store all backend replies and then output the contents of the queue one by one. However, I don't want the replies in plain arrival order. Instead, I want the reply to the first command inserted after the first command, and the reply to the second command inserted after the second command, giving a QAQA pattern rather than QQAA.
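Conceptually, the pairing I'm after looks like this (a minimal sketch; the id field and the insert_reply helper are names I made up for illustration, they are not part of my real code):

def insert_reply(messages, prompt_id, reply):
    # Insert the assistant reply directly after the user prompt it answers,
    # so replies interleave with prompts (QAQA) instead of piling up at the end (QQAA).
    for i, m in enumerate(messages):
        if m.get("id") == prompt_id:
            messages.insert(i + 1, {"role": "assistant", "content": reply})
            return

messages = [
    {"role": "user", "content": "Q1", "id": "q1"},
    {"role": "user", "content": "Q2", "id": "q2"},
]
insert_reply(messages, "q2", "A2")  # reply to Q2 happens to arrive first
insert_reply(messages, "q1", "A1")  # reply to Q1 arrives later
# messages is now Q1, A1, Q2, A2 -- the QAQA order I want, regardless of arrival order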
Here is my current code:
from datetime import datetime
from openai import OpenAI
import streamlit as st
from backend import process_response
from concurrent.futures import ThreadPoolExecutor, as_completed
import queue
import asyncio
import json
import threading
import time
import requests
import aiohttp
result = None
# Create a queue to store backend responses
if "response_queue" not in st.session_state:
    st.session_state.response_queue = queue.Queue()
if "executor" not in st.session_state:
    st.session_state.executor = ThreadPoolExecutor(max_workers=16)  # maximum number of worker threads in the pool
def stream_data(response):
    for char in response:
        yield char
        time.sleep(0.05)  # a small delay makes the streamed output smoother
def call_your_backend_api(user_input, response_queue):
    try:
        url = "http://localhost:5000/api/agent"
        headers = {"Content-Type": "application/json"}
        data = json.dumps({"user_input": user_input})
        response = requests.post(url, headers=headers, data=data)
        response = json.loads(response.text)
        response_queue.put({"response": response, "time": datetime.now()})
        return response
    except requests.RequestException as e:
        st.error(f"Error while communicating with the backend: {str(e)}")
        response_queue.put({"response": "Sorry, I can't answer right now. Please try again later.", "time": datetime.now()})
with st.sidebar:
    openai_api_key = st.text_input("OpenAI API Key", key="chatbot_api_key", type="password")
    "[Get an OpenAI API key](https://platform.openai.com/account/api-keys)"
    "[View the source code](https://github.com/streamlit/llm-examples/blob/main/Chatbot.py)"
    "[](https://codespaces.new/streamlit/llm-examples?quickstart=1)"
st.title("🤖 Kaos Assistant")
st.caption("🚀 Powered by Kaos")
if "messages" not in st.session_state:
    st.session_state["messages"] = [{"role": "assistant", "content": "I'm the Kaos assistant, happy to help!"}]
for msg in st.session_state.messages:
    st.chat_message(msg["role"]).markdown(msg["content"])
def main():
    # Frontend input section
    if prompt := st.chat_input():
        with st.container():
            str_temp = {"role": "user", "content": prompt, "time": str(datetime.now())}
            st.session_state.messages.append(str_temp)
            st.chat_message("user").markdown(prompt)
            # user_prompt_index = st.session_state.messages.index(str_temp)
            # with st.spinner("The manager agent is assigning tasks..."):
            with st.status(label="Kaos is running...", state="running") as status:
                thread = threading.Thread(target=call_your_backend_api, args=(prompt, st.session_state.response_queue))
                thread.start()
                thread.join()  # joining here blocks the script until the backend finishes
                status.update(label="Task executed!", state="complete")
    while not st.session_state.response_queue.empty():
        response_data = st.session_state.response_queue.get()
        response_data = response_data["response"]  # unwrap the backend payload
        agent = response_data["agent"]
        response = response_data["response"]
        msg = process_response(response)
        if agent in ("meeting_agent", "application_agent", "ppt_agent", "writing_agent"):
            st.session_state.messages.append({"role": "assistant", "avatar": f"icon/{agent}.png", "content": msg})
            with st.status(f"{agent} is running...") as status:
                st.chat_message("assistant", avatar=f"icon/{agent}.png").write_stream(stream_data(msg))
                status.update(label=f"{agent} finished!")
            time.sleep(5)
if __name__ == "__main__":
    main()
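For reference, the direction I've been considering to get the QAQA ordering is to submit each request to the executor without joining, remember which prompt it belongs to, and insert each finished reply directly after its own prompt on the next rerun. This is only a rough sketch under my own assumptions (the pending dict and the id field on messages are things I invented, not part of my real backend):

import uuid

if "pending" not in st.session_state:
    st.session_state.pending = {}  # maps prompt id -> Future

if prompt := st.chat_input():
    prompt_id = str(uuid.uuid4())
    st.session_state.messages.append({"role": "user", "content": prompt, "id": prompt_id})
    st.chat_message("user").markdown(prompt)
    # Submit without joining, so a second command can be entered immediately.
    future = st.session_state.executor.submit(call_your_backend_api, prompt, st.session_state.response_queue)
    st.session_state.pending[prompt_id] = future

# On every rerun, insert each finished reply directly after its own prompt (QAQA).
for prompt_id, future in list(st.session_state.pending.items()):
    if future.done():
        reply = future.result()  # the dict returned by call_your_backend_api
        for i, m in enumerate(st.session_state.messages):
            if m.get("id") == prompt_id:
                st.session_state.messages.insert(i + 1, {"role": "assistant", "content": process_response(reply["response"])})
                break
        del st.session_state.pending[prompt_id]

Would something along these lines be the right way to achieve the QAQA pattern, or is there a more idiomatic Streamlit approach?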