How can I change the position of server responses instead of only displaying them in sequence?

I would like the backend responses to be displayed immediately below my frontend input entries. For example, if I enter two commands in the frontend and the backend provides two responses, I want the first response to appear below the first command. I didn't manage to get an immediate response displayed on the screen in the frontend because my backend processing time is quite long. I entered the second command before the backend had finished processing the first one, so the code would jump back to `if prompt := st.chat_input():` and execute again. Therefore, I later used a queue to store all backend replies and then output the contents of the queue one by one. However, I don't want them to be in sequential order. Instead, I want the first reply — corresponding to the first command entered in the frontend — to be inserted after the first command, and the reply to the second command to be inserted after the second command, achieving a QAQA pattern rather than a QQAA pattern.

Here is my code:

from datetime import datetime
from openai import OpenAI
import streamlit as st
from backend import process_response
from concurrent.futures import ThreadPoolExecutor, as_completed
import queue
import asyncio
import json
import threading
import time
import requests
import aiohttp
result = None  # NOTE(review): module-level placeholder; never read in this chunk — confirm it is used elsewhere

# Create a queue (once per session) to hand backend replies from the worker
# thread back to the Streamlit script run.
if "response_queue" not in st.session_state:
    st.session_state.response_queue = queue.Queue()

# Lazily create a shared thread pool for background backend calls.
if "executor" not in st.session_state:
    st.session_state.executor = ThreadPoolExecutor(max_workers=16)  # maximum number of worker threads in the pool

def stream_data(response):
    """Yield *response* one character at a time.

    A short pause between characters makes Streamlit's ``write_stream``
    render the text with a smooth typing effect.
    """
    for ch in response:
        yield ch
        time.sleep(0.05)  # small delay between characters for a smoother stream

        
        
def call_your_backend_api(user_input, response_queue):
    """POST *user_input* to the local agent backend and enqueue the reply.

    This runs on a worker thread (see ``main``), so the result is handed
    back through *response_queue* instead of being returned to the UI.

    Args:
        user_input: The user's prompt text.
        response_queue: ``queue.Queue`` that receives
            ``{"response": <decoded JSON or fallback str>, "time": datetime}``.

    Returns:
        The decoded JSON response on success, ``None`` on failure.
    """
    try:
        url = "http://localhost:5000/api/agent"
        headers = {"Content-Type": "application/json"}

        # Let requests serialize the payload; a timeout keeps a hung backend
        # from blocking the worker thread forever (the original had none).
        response = requests.post(
            url,
            headers=headers,
            json={"user_input": user_input},
            timeout=300,
        )
        # Surface HTTP errors as RequestException instead of letting a
        # non-JSON error body crash the JSON decode below.
        response.raise_for_status()

        payload = response.json()  # same as json.loads(response.text), with correct charset handling
        response_queue.put({"response": payload, "time": datetime.now()})
        return payload
    except (requests.RequestException, ValueError) as e:
        # ValueError covers a syntactically invalid JSON body.
        # NOTE(review): st.error from a background thread has no Streamlit
        # script-run context and may not render — TODO confirm.
        st.error(f"与后端通信时发生错误: {str(e)}")
        response_queue.put({"response": "抱歉,我现在无法回答。请稍后再试。", "time": datetime.now()})
        return None




# Sidebar: API-key input plus helper links. The bare string literals below
# are displayed via Streamlit "magic" (top-level expressions are rendered),
# so they are runtime output, not dead code.
with st.sidebar:
    openai_api_key = st.text_input("OpenAI API Key", key="chatbot_api_key", type="password")
    "[Get an OpenAI API key](https://platform.openai.com/account/api-keys)"
    "[View the source code](https://github.com/streamlit/llm-examples/blob/main/Chatbot.py)"
    "[![Open in GitHub Codespaces](https://github.com/codespaces/badge.svg)](https://codespaces.new/streamlit/llm-examples?quickstart=1)"


st.title("🤖 Kaos小助手")
st.caption("🚀 Powered by Kaos")
# Seed the conversation with an assistant greeting on the first script run.
if "messages" not in st.session_state:

    st.session_state["messages"] = [{"role": "assistant", "content": "我是Kaos小助手,可以给你提供帮助喔!"}]

# Replay the stored conversation on every rerun (Streamlit re-executes the
# whole script on each interaction).
for msg in st.session_state.messages:
    st.chat_message(msg["role"]).markdown(msg["content"])



def main():
    """Read a chat prompt, call the backend on a worker thread, then drain
    the response queue and append each reply to the conversation."""
    # --- frontend input ---
    if prompt := st.chat_input():
        with st.container():
            str_temp = {"role": "user", "content": prompt, "time": str(datetime.now())}
            st.session_state.messages.append(str_temp)
            st.chat_message("user").markdown(prompt)

        # NOTE(review): start() immediately followed by join() blocks this
        # script run until the backend replies, so the call is effectively
        # synchronous despite using a thread.
        with st.status(label="Kaos正在执行中...", state='running') as status:
            thread = threading.Thread(
                target=call_your_backend_api,
                args=(prompt, st.session_state.response_queue),
            )
            thread.start()
            thread.join()
            status.update(label="正在执行任务!", state='complete')

        # Drain every queued backend reply and render it.
        while not st.session_state.response_queue.empty():
            response_data = st.session_state.response_queue.get()
            response_data = response_data["response"]
            agent = response_data["agent"]
            response = response_data["response"]

            msg = process_response(response)
            if agent in ("meeting_agent", "application_agent", "ppt_agent", "writing_agent"):
                # BUG FIX: the avatar path was a plain literal
                # "icon/{agent}.png" — missing the f prefix — so the agent
                # name was never interpolated into the stored message.
                st.session_state.messages.append(
                    {"role": "assistant", "avatar": f"icon/{agent}.png", "content": msg}
                )
                with st.status(f"{agent}正在执行中...") as status:
                    st.chat_message("assistant", avatar=f"icon/{agent}.png").write_stream(stream_data(msg))
                    status.update(label=f"{agent}执行完毕!")

            time.sleep(5)  # NOTE(review): 5s pause after each reply — confirm this delay is intentional

if __name__ == "__main__":
    main()

Hey @kai_mao, it sounds like you have started to use Streamlit :raised_hands:
Based on the title, it sounds like what you want is the following:

import streamlit as st

# Each input/echo pair renders together, so every answer appears directly
# below its own input — the QAQA layout the question asks for.
input1 = st.text_input("enter something")
st.write(input1)

input2 = st.text_input("enter something else")
st.write(input2)

May I ask you to update the title of your post which is pretty long and unreadable (and cutoff in the end) to a shorter one and move the title’s content to the actual post, please? :slightly_smiling_face:

OK, thank you. I have modified my question — please take a look at it when you have some spare time. Thanks very much.

Blockquote

It sounds like you might need to have a counter or something like this, which you use to associate the prompt with and also pass to your call_your_backend_api to associate the response with. So, perhaps the way would be to change st.session_state.messages to be a dict instead of a list.
The following shows in a shortened manner what I mean:

# Group messages by a per-prompt counter so that replaying the dict in
# insertion order yields the Q-A-Q-A layout.
if "messages" not in st.session_state:
  # map of prompt_id (counter) to list of messages belonging to this prompt; usually one user prompt and X agent response messages.
  st.session_state["messages"]: dict[int, list[dict]] = {}
  st.session_state.prompt_counter = 0

# Replay the grouped conversation in prompt order.
for _, messages in st.session_state.messages.items():
  for msg in messages:
    st.chat_message(msg["role"]).markdown(msg["content"])

if prompt := st.chat_input():
  str_temp = {"role": "user", "content": prompt, "time": str(datetime.now())}
  st.session_state.messages[st.session_state.prompt_counter] = [str_temp]

  # NOTE(review): shortened sketch, not runnable as-is — the thread is
  # created but never started, so the blocking queue.get() below would
  # wait forever; presumably thread.start() is implied.
  thread = threading.Thread(target=call_your_backend_api, args=(prompt, st.session_state.response_queue, st.session_state.prompt_counter), )
  response_data = st.session_state.response_queue.get()
  msg = process_response(response_data)

  # msg.prompt_counter is the value of 'st.session_state.prompt_counter' passed to the thread above
  st.session_state.messages[msg.prompt_counter].append({"role": "assistant","avatar":"icon/{agent}.png" ,"content": msg})

  # next prompt gets a new counter
  st.session_state.prompt_counter = st.session_state.prompt_counter + 1

I think something like this should work :thinking:

This topic was automatically closed 180 days after the last reply. New replies are no longer allowed.