Unable to incorporate GPT4All with Streamlit conversational elements

Error:
AttributeError: 'dict' object has no attribute 'encode'

Traceback:

File "C:\Users\Samraat\AppData\Local\Programs\Python\Python311\Lib\site-packages\streamlit\runtime\scriptrunner\script_runner.py", line 552, in _run_script
    exec(code, module.__dict__)
File "C:\Users\Samraat\Downloads\docs1\mentalapp.py", line 21, in <module>
    result=llm.generate([{"role": "assistant", "content":prompt}])
File "C:\Users\Samraat\AppData\Local\Programs\Python\Python311\Lib\site-packages\langchain\llms\base.py", line 225, in generate
    output = self._generate_helper(
File "C:\Users\Samraat\AppData\Local\Programs\Python\Python311\Lib\site-packages\langchain\llms\base.py", line 176, in _generate_helper
    raise e
File "C:\Users\Samraat\AppData\Local\Programs\Python\Python311\Lib\site-packages\langchain\llms\base.py", line 163, in _generate_helper
    self._generate(
File "C:\Users\Samraat\AppData\Local\Programs\Python\Python311\Lib\site-packages\langchain\llms\base.py", line 523, in _generate
    self._call(prompt, stop=stop, run_manager=run_manager, **kwargs)
File "C:\Users\Samraat\AppData\Local\Programs\Python\Python311\Lib\site-packages\langchain\llms\gpt4all.py", line 208, in _call
    for token in self.client.generate(prompt, **params):
File "C:\Users\Samraat\AppData\Local\Programs\Python\Python311\Lib\site-packages\gpt4all\gpt4all.py", line 182, in generate
    return self.model.prompt_model(prompt, streaming=streaming, **generate_kwargs)
File "C:\Users\Samraat\AppData\Local\Programs\Python\Python311\Lib\site-packages\gpt4all\pyllmodel.py", line 203, in prompt_model
    prompt = prompt.encode('utf-8')
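The traceback shows the message dict travelling untouched through LangChain down to gpt4all's prompt_model, which calls prompt.encode('utf-8') and fails because the prompt is a dict rather than a string. LangChain's GPT4All wrapper is a completion-style LLM: its generate() expects a list of plain prompt strings and returns an LLMResult, not an OpenAI-style chat response. A minimal sketch of the expected call shape, reusing the model path from the code below:

from langchain.llms import GPT4All

PATH = 'C:/Users/Samraat/AppData/Local/nomic.ai/GPT4All/orca-mini-3b.ggmlv3.q4_0.bin'
llm = GPT4All(model=PATH, verbose=False)

# generate() takes a list of prompt strings, not chat-message dicts
result = llm.generate(["Hello, who are you?"])

# the return value is an LLMResult; the text lives in its generations
print(result.generations[0][0].text)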

Code:

import streamlit as st
from langchain.llms import GPT4All

PATH = 'C:/Users/Samraat/AppData/Local/nomic.ai/GPT4All/orca-mini-3b.ggmlv3.q4_0.bin'
llm = GPT4All(model=PATH, verbose=False, streaming=True, n_batch=8, temp=0.8)

if "messages" not in st.session_state:
    st.session_state.messages = []

for message in st.session_state.messages:
    with st.chat_message(message.get("role")):
        st.write(message.get("content"))

prompt = st.chat_input("Enter your concern")

if prompt:
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.write(prompt)

    result = llm.generate([{"role": "assistant", "content": prompt}])
    response = result["choices"][0]["message"]["content"]

    st.session_state.messages.append({"role": "assistant", "content": response})

    with st.chat_message("assistant"):
        st.markdown(response)

Versions:
Python 3.11
Streamlit 1.24

Hi @samraat_sapehia, try this:

if prompt:
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.write(prompt)

    # generate() expects a list of prompt strings, not chat-message dicts
    result = llm.generate([prompt])
    # generate() returns an LLMResult; read the text out of its generations
    response = result.generations[0][0].text

    st.session_state.messages.append({"role": "assistant", "content": response})

    with st.chat_message("assistant"):
        st.markdown(response)
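The important change is that generate() now receives a list of prompt strings and its LLMResult is unpacked, instead of indexing an OpenAI-style dict. For a single reply, calling the wrapper directly is shorter still; a minimal sketch of that variant:

# calling the LLM directly takes one prompt string and returns the completion text
response = llm(prompt)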
