- My app is deployed on a remote server.
- http://27.18.114.16:23023/
- The code is as below:
import os
import re
import uuid
import time
import pytz
import streamlit as st
import mysql.connector
from openai import OpenAI
from trubrics_utils import trubrics_config
from trubrics.integrations.streamlit import FeedbackCollector
import json
import requests
from apig_sdk import signer
import pandas as pd
import urllib3
import sseclient
from datetime import datetime
urllib3.disable_warnings()
st.set_page_config(
page_title="Chip Expert",
page_icon='⚛️'
# layout="wide"
)
st.title("👨🔬 **Chip Expert**")
with st.sidebar:
name, email, password, feedback_component, feedback_type = trubrics_config(False)
st.divider()
MODELS = {
'Chip-Expert-Model': "chip-expert",
'GPT-3.5': "gpt-3.5-turbo",
'GPT-4': "gpt-4-1106-preview",
}
model = st.selectbox(
label='**Choose your model:**',
options=list(MODELS.keys()),
key='model'
)
if model == 'GPT-3.5':
selected_model = "gpt-3.5-turbo"
elif model == 'GPT-4':
selected_model = "gpt-4-1106-preview"
elif model == 'Chip-Expert-Model':
selected_model = "chip-expert"
DEFAULT_TEMPERATURE = 0.2
DEFAULT_TOP_K = 5
DEFAULT_TOP_P = 0.9
DEFAULT_MAX_SEQ_LEN = 512
DEFAULT_MAX_NEW_TOKENS = 1000
DEFAULT_REPETITION_PENALTY = 1.1
temperature = st.slider(
'**Temperature:**',
min_value=0.0,
max_value=1.0,
value=DEFAULT_TEMPERATURE,
step=0.05
)
top_n_tokens = st.slider(
'**Top K:**',
min_value=1,
max_value=10,
value=DEFAULT_TOP_K,
step=1
)
top_p = st.slider(
'**Top P:**',
min_value=0.01,
max_value=1.0,
value=DEFAULT_TOP_P,
step=0.05
)
# max_seq_len = st.slider(
# '**Max Sequence Length:**',
# min_value=64,
# max_value=4096,
# value=DEFAULT_MAX_SEQ_LEN,
# step=8
# )
max_new_tokens = st.slider(
'**Max New Tokens:**',
min_value=64,
max_value=4096,
value=DEFAULT_MAX_NEW_TOKENS,
step=8
)
repetition_penalty = st.slider(
'**Repetition Penalty:**',
min_value=0.1,
max_value=10.0,
value=DEFAULT_REPETITION_PENALTY,
step=0.1
)
do_sample = st.toggle("**Do Sample**", value=True)
stream = st.toggle("**Stream LLM response**", value=True)
# @st.cache_data
# def init_trubrics(email, password):
# try:
# collector = FeedbackCollector(email=email, password=password, project="chip-expert")
# return collector
# except Exception:
# st.error(f"Error authenticating '{email}' with [Trubrics](https://trubrics.streamlit.app/). Please try again.")
# st.stop()
# collector = init_trubrics(email, password)
if "messages" not in st.session_state:
st.session_state["messages"] = [{"role": "assistant", "content": "How can I help you? I'd be happy to answer your questions about integrated circuit."}]
if "prompt_ids" not in st.session_state:
st.session_state["prompt_ids"] = []
if "session_id" not in st.session_state:
st.session_state["session_id"] = str(uuid.uuid4())
tags = [f"llm_chatbot{'_stream' if stream else ''}.py"]
messages = st.session_state.messages
for n, msg in enumerate(messages):
st.chat_message(msg["role"]).write(msg["content"])
if msg["role"] == "assistant" and n > 1:
feedback_key = f"feedback_{int(n / 2)}"
if feedback_key not in st.session_state:
st.session_state[feedback_key] = None
# feedback = collector.st_feedback(
# component=feedback_component,
# feedback_type=feedback_type,
# open_feedback_label="[Optional] Provide additional feedback",
# model=model,
# tags=tags,
# key=feedback_key,
# prompt_id=st.session_state.prompt_ids[int(n / 2) - 1],
# user_id=name,
# )
# if feedback:
# with st.sidebar:
# st.write(":orange[Here's the raw feedback you sent to [Trubrics](https://trubrics.streamlit.app/):]")
# st.write(feedback)
mydb = mysql.connector.connect(
host="27.18.114.16",
port="23090",
user="root",
passwd="******",
database="qa"
)
mycursor = mydb.cursor()
st.session_state['time'] = None
st.session_state['option1'] = None
st.session_state['option2'] = None
st.session_state['suggestion'] = None
st.session_state['prompt'] = None
st.session_state['generation'] = None
if prompt := st.chat_input(placeholder="Ask your questions"):
st.session_state['prompt'] = prompt
messages.append({"role": "user", "content": "**" + prompt + "**"})
st.chat_message("user").write("**" + prompt + "**")
openai_client = OpenAI(
base_url='https://api.openai-proxy.org/v1',
api_key='sk-******',
)
with st.chat_message("assistant"):
if model == "GPT-3.5" or model == 'GPT-4':
if stream:
message_placeholder = st.empty()
generation = ""
for part in openai_client.chat.completions.create(model=selected_model, messages=messages, temperature=temperature, top_p=top_p, max_tokens=DEFAULT_MAX_SEQ_LEN, stream=True):
generation += part.choices[0].delta.content or ""
message_placeholder.markdown(generation + "▌")
message_placeholder.markdown(generation)
else:
with st.spinner("Thinking..."):
message_placeholder = st.empty()
generation = ""
response = openai_client.chat.completions.create(model=selected_model, messages=messages)
generation = response.choices[0].message.content
message_placeholder.markdown(generation)
messages.append({"role": "assistant", "content": generation})
else:
system_template = "Below is an instruction that describes a task, paired with an input that provides further " \
"context. Write a response that appropriately completes the request. " \
"Please note that you need to think through your response logically and step by step.\n\n"
dialog_template = "### Instruction:\n{instruction}\n\n### Response:"
template = system_template + dialog_template
url = "http://27.18.114.16:23100/generate_stream"
headers = {
"Accept": "application/json",
"Content-type": "application/json"
}
template = template.format(instruction=prompt)
data = {
"inputs": template,
"parameters": {
"details": False,
"do_sample": do_sample,
"repetition_penalty": repetition_penalty,
"return_full_text": False,
"seed": None,
"temperature": temperature,
"top_n_tokens": top_n_tokens,
"top_p": top_p,
"max_new_tokens": max_new_tokens,
},
}
response = requests.post(url, headers=headers, data=json.dumps(data), timeout=None, stream=True)
message_placeholder = st.empty()
generation = ""
client = sseclient.SSEClient(response)
for event in client.events():
generation += json.loads(event.data)['token']['text']
message_placeholder.markdown(generation + "▌")
message_placeholder.markdown(generation)
# logged_prompt = collector.log_prompt(
# config_model={"model": model},
# prompt=prompt,
# generation=generation,
# session_id=st.session_state.session_id,
# tags=tags,
# user_id=email,
# )
# st.session_state.prompt_ids.append(logged_prompt.id)
messages.append({"role": "assistant", "content": generation})
st.session_state['generation'] = generation
beijing_tz = pytz.timezone('Asia/Shanghai')
current_time = datetime.now(beijing_tz)
# formatted_time = current_time.strftime("%Y-%m-%d %H:%M:%S")
st.session_state['time'] = current_time.strftime("%Y-%m-%d %H:%M:%S")
st.session_state['option1'] = "未知"
st.session_state['option2'] = "others"
st.session_state['suggestion'] = "null"
# option1 = "未知"
# option2 = "others"
# suggestion = "null"
default_new_sample = {
"Question": st.session_state['prompt'],
"Original Answer": st.session_state['generation'],
"Modified Answer": "null",
"Category": st.session_state['option1'],
"Judge": st.session_state['option2'],
"Suggestion": st.session_state['suggestion'],
"Timestamp": st.session_state['time']
}
sql = "INSERT INTO sheet1 (Timestamp, Question, Original_Answer, Modified_Answer, Category, Judge, Suggestion) VALUES (%s, %s, %s, %s, %s, %s, %s)"
val = (default_new_sample["Timestamp"], default_new_sample["Question"], default_new_sample["Original Answer"], default_new_sample["Modified Answer"], default_new_sample["Category"], default_new_sample["Judge"], default_new_sample["Suggestion"])
mycursor.execute(sql, val)
mydb.commit()
col1, col2, col3, col4, col5 = st.columns(5)
with col5:
with st.popover(":red[Feedback]"):
with st.form("my_form"):
col1, col2 = st.columns(2)
with col1:
st.session_state['option1'] = st.selectbox(
"Which subdirection does your problem belong to?",
("存内计算", "模拟电路", "射频", "数字电路", "SOC", "EDA", "功率半导体", "未知")
)
with col2:
st.session_state['option2'] = st.selectbox(
"What do you think of the answer to your question?",
("Good", "Information Loss", "Factual Error", "Repetition", 'Others')
)
st.session_state['suggestion'] = st.text_area("I think...")
def callback():
sql = "DELETE FROM sheet1 WHERE Timestamp = %s"
val = (st.session_state['time'],)
mycursor.execute(sql, val)
mydb.commit()
beijing_tz = pytz.timezone('Asia/Shanghai')
current_time = datetime.now(beijing_tz)
st.session_state['time'] = current_time.strftime("%Y-%m-%d %H:%M:%S")
feedback_new_sample = {
"Question": st.session_state['prompt'],
"Original Answer": st.session_state['generation'],
"Modified Answer": "null",
"Category": st.session_state['option1'],
"Judge": st.session_state['option2'],
"Suggestion": st.session_state['suggestion'],
"Timestamp": st.session_state['time']
}
sql = "INSERT INTO sheet1 (Timestamp, Question, Original_Answer, Modified_Answer, Category, Judge, Suggestion) VALUES (%s, %s, %s, %s, %s, %s, %s)"
val = (feedback_new_sample["Timestamp"], feedback_new_sample["Question"], feedback_new_sample["Original Answer"], feedback_new_sample["Modified Answer"], feedback_new_sample["Category"], feedback_new_sample["Judge"], feedback_new_sample["Suggestion"])
mycursor.execute(sql, val)
mydb.commit()
st.toast('Upload feedback successfully!', icon='✅')
st.form_submit_button("Submit", on_click=callback)
# st.rerun() # force rerun of app, to load last feedback componentdockdock
When I finish the operations in the form and click the form button, session_state[‘option1’], session_state[‘option2’] and session_state[‘suggestion’] are assigned with default value.
5. streamlit>=1.28 and python=3.9