Using Langchain, document retrieval and custom prompt

I have developed a module that uses langchain, a set of documents and a custom prompt for an AI chatbot. Below is the code for the ChatBot Class, and I am facing an error

ValueError: One output key expected, got dict_keys(['answer', 'source_documents'])

Can someone please help me resolve this error.

My Code:

class DocLLMBot():
    def __init__(self, document_dir) -> None:
        self.embeddings_model = HuggingFaceInstructEmbeddings(model_name="BAAI/bge-large-en-v1.5")
        self.llm_model = Ollama(model="llama2")
        self.documents = document_dir
        self.prompt_template = """You are given the following extracted parts of a long document and a question. Provide a conversational answer. If you don't know the answer, just say "Hmm, I'm not sure." Don't try to make up an answer. If the question is not about the faculty, politely inform them that you are tuned to only answer questions about the faculty.
        Use the context for the answer: {context}
        Use chat history for the answer: {chat_history}
        Answer for the following question:
        Question: {question}"""
        self.qa_prompt = PromptTemplate(template=self.prompt_template, input_variables=["question", "context", "chat_history"])
    def load_documents(self):
        documents = []
        for file in os.listdir(self.documents):
            if file.endswith(".pdf"):
                pdf_path = self.documents + "/" + file
                loader = PyPDFLoader(pdf_path)
                # print(pdf_path)
                # print(loader.load())
            elif file.endswith('.docx') or file.endswith('.doc'):
                doc_path = self.documents + "/" + file
                loader = Docx2txtLoader(doc_path)
                # print(loader.load())
            elif file.endswith('.txt'):
                text_path = self.documents + "/" + file
                loader = TextLoader(text_path)
        self.vectorstore = Chroma.from_documents(documents, embedding=self.embeddings_model, persist_directory="data_mul_doc")
        self.vectorstore.persist() = ConversationalRetrievalChain.from_llm(
            self.vectorstore.as_retriever(search_kwargs={'k': 6}),
            memory=ConversationBufferMemory(memory_key="chat_history", return_messages=True),
            combine_docs_chain_kwargs={"prompt": self.qa_prompt}
    def get_answer(self, query, chat_history):
        result =
            {"question": query, "chat_history": chat_history})
        return result["answer"]