LangChain chatbot in Streamlit is not sending feedback to LangSmith

Hi,
I have a Streamlit app with a chatbot that traces the conversation to LangSmith. However, I can't get the code to send the user feedback back to LangSmith.
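Tracing itself shows up in LangSmith, so I assume the client is picking up the usual environment variables from my .env. For reference, a quick check along these lines (variable names as in the LangSmith docs, values not shown) is how I would confirm they are loaded:

import os
from dotenv import load_dotenv

load_dotenv()
# Report which of the variables used for tracing/feedback and OpenAI are present
for var in ["LANGCHAIN_TRACING_V2", "LANGCHAIN_API_KEY", "LANGCHAIN_PROJECT",
            "OPENAI_API_KEY", "OPENAI_ORGANIZATION"]:
    print(var, "set" if os.getenv(var) else "MISSING")

Below is the full app code: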

import os
import streamlit as st
from langchain.text_splitter import CharacterTextSplitter
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import Chroma
from langchain.chat_models import ChatOpenAI
from langchain.chains import ConversationalRetrievalChain
from langchain.memory import ConversationBufferMemory
from langchain.document_loaders import PyPDFDirectoryLoader
from langchain.callbacks import collect_runs
from langsmith import Client
from streamlit_feedback import streamlit_feedback
from dotenv import load_dotenv
import uuid

# Load environment variables
load_dotenv()

@st.cache_resource
def create_chain():
    llm = ChatOpenAI(
        model='gpt-4o',
        temperature=0,
        openai_api_key=os.environ['OPENAI_API_KEY'],
        openai_organization=os.environ['OPENAI_ORGANIZATION'],
    )
    # load documents and split them into chunks
    loader = PyPDFDirectoryLoader('x/')
    documents = loader.load()
    #text_splitter = RecursiveCharacterTextSplitter(chunk_size=2500, chunk_overlap=200)
    text_splitter = CharacterTextSplitter(chunk_size=1500, chunk_overlap=200)
    all_splits = text_splitter.split_documents(documents)
    # create vector DB
    vectorstore = Chroma.from_documents(documents=all_splits, embedding=OpenAIEmbeddings(chunk_size=1500))

    # Setup memory for conversation
    memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)

    # Create the RAG chain
    qa_chain = ConversationalRetrievalChain.from_llm(
        llm=llm,
        retriever=vectorstore.as_retriever(),
        memory=memory,
    )
    return qa_chain

# Optional on_submit callback for streamlit_feedback (not wired up below)
def _submit_feedback(user_response, emoji=None):
    st.toast(f"Feedback submitted: {user_response}", icon=emoji)
    return user_response.update({"metadata": 123})

def handle_feedback_submission():
    feedback = st.session_state.get("feedback_data")
    run_id = st.session_state.run_id
    st.write(run_id)    # debug: show which run the feedback will be attached to
    st.write(feedback)  # debug: show the raw payload from streamlit_feedback
    score_mappings = {
        "😀": 1,
        "🙂": 0.75,
        "😐": 0.5,
        "🙁": 0.25,
        "😞": 0,
    }

    # Get the score mapping based on the selected feedback option
    score = score_mappings.get(feedback.get("score"))

    if score is not None:
        feedback_type_str = f"faces {feedback.get('score')}"
        try:
            feedback_record = client.create_feedback(
                run_id=run_id,  # same run ID stored in st.session_state.run_id above
                key=feedback_type_str,  # the LangSmith client expects `key`, not `feedback_type`
                score=score,
                # comment=feedback.get("text", "")
            )
            st.session_state.feedback = {
                "feedback_id": str(feedback_record.id),
                "score": score,
            }
            st.write(f"Feedback recorded with ID: {feedback_record.id}")
        except Exception as e:
            st.error(f"Failed to record feedback: {e}")
    else:
        st.warning("Invalid feedback score.")

client = Client()
chain = create_chain()

# Initialize chat history in session state
if "messages" not in st.session_state:
    st.session_state["messages"] = []

# Display chat history
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

user_input = st.chat_input("Ask a question...")

if user_input:
    st.session_state.messages.append({"role": "user", "content": user_input})
    with st.chat_message("user"):
        st.markdown(user_input)

    # Get the response from the chain
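    # collect_runs records the traced runs so the top-level run ID can be attached to feedback later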
    with collect_runs() as cb:
        response = chain({"question": user_input})
        if cb.traced_runs:
            run_id = cb.traced_runs[0].id
            st.session_state.run_id = run_id
            st.write(f"Run ID: {run_id}")
        else:
            st.error("No runs collected")
            run_id = None

    answer = response['answer']

    st.session_state.messages.append({"role": "assistant", "content": answer})
    with st.chat_message("assistant"):
        st.markdown(answer)
    
    # Collect feedback for the response
    if answer is not None:
        feedback = streamlit_feedback(
            feedback_type="faces",
            key=f"feedback_{run_id}",
            #on_submit=lambda: handle_feedback_submission(run_id)
        )
        if feedback:
            st.session_state["feedback_data"] = feedback
            handle_feedback_submission()

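For reference, this is the minimal standalone call I am trying to reproduce from inside the app, based on my reading of the LangSmith client docs (the run ID below is only a placeholder; in practice I would paste one from an existing trace):

from langsmith import Client

client = Client()  # reads LANGCHAIN_API_KEY from the environment

# Placeholder run ID, not a real one
client.create_feedback(
    run_id="00000000-0000-0000-0000-000000000000",
    key="faces 🙂",
    score=0.75,
    comment="optional free-text comment",
)

If someone can confirm this is the right way to attach feedback to a run, that would already help me narrow down whether the problem is in my Streamlit code or in how I call the client.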
Thanks in advance!