Here is the entire code that works:
from langchain.chat_models import ChatOpenAI
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
from dotenv import load_dotenv
from pytesseract import image_to_string
from PIL import Image
from io import BytesIO
import fitz
import streamlit as st
import multiprocessing
from tempfile import NamedTemporaryFile
import pandas as pd
import json
load_dotenv()
# 1. Convert PDF file into images via PyMuPDF
def convert_pdf_to_images(pdf_path):
    pdf_document = fitz.open(pdf_path)
    images = []
    for page_num in range(pdf_document.page_count):
        page = pdf_document[page_num]
        image_list = page.get_images(full=True)
        for img_index, img in enumerate(image_list):
            xref = img[0]
            base_image = pdf_document.extract_image(xref)
            image = Image.open(BytesIO(base_image["image"]))
            images.append(image)
    pdf_document.close()
    return images
# 2. Extract text from images via pytesseract
def extract_text_from_img(image_list):
    image_content = []
    for index, image in enumerate(image_list):
        raw_text = str(image_to_string(image))
        image_content.append(raw_text)
    return "\n".join(image_content)

def extract_content_from_url(url: str):
    images_list = convert_pdf_to_images(url)
    text_with_pytesseract = extract_text_from_img(images_list)
    return text_with_pytesseract
# 3. Extract structured info from text via LLM
def extract_structured_data(content: str, data_points):
    llm = ChatOpenAI(temperature=0, model="gpt-3.5-turbo-0613")
    template = """
    You are an expert admin who will extract core information from documents:

    {content}

    Above is the content; please try to extract all data points from the content above
    and export them in a JSON array format:
    {data_points}

    Now please extract details from the content and export them in a JSON array format,
    return ONLY the JSON array:
    """
    prompt = PromptTemplate(
        input_variables=["content", "data_points"],
        template=template,
    )
    chain = LLMChain(llm=llm, prompt=prompt)
    results = chain.run(content=content, data_points=data_points)
    return results
# 4. Send data to make.com via webhook
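# The webhook code itself isn't shown in this post. A minimal sketch of what it
# could look like, assuming you create a custom webhook in make.com and keep its
# URL in a MAKE_WEBHOOK_URL environment variable (the variable name and the
# helper below are my own, not part of the original code):
import os
import requests

def send_to_make_webhook(json_data):
    # POST the extracted rows as a JSON body to the make.com webhook;
    # custom webhooks in make.com accept arbitrary JSON payloads.
    webhook_url = os.getenv("MAKE_WEBHOOK_URL")
    response = requests.post(webhook_url, json=json_data, timeout=30)
    response.raise_for_status()
    return response.status_code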
# 5. Streamlit app
def main():
    default_data_points = """{
        "invoice_item": "what is the item that was charged",
        "Amount": "how much does the invoice item cost in total",
        "Company_name": "company that issued the invoice",
        "invoice_date": "when was the invoice issued",
    }"""
    st.set_page_config(page_title="Doc extraction", page_icon=":bird:")
    st.header("Doc extraction :bird:")

    data_points = st.text_area(
        "Data points", value=default_data_points, height=170)

    uploaded_files = st.file_uploader(
        "upload PDFs", accept_multiple_files=True)
    if uploaded_files is not None and data_points is not None:
        results = []
        for file in uploaded_files:
            with NamedTemporaryFile(delete=False, suffix='.pdf') as f:
                f.write(file.getbuffer())
                content = extract_content_from_url(f.name)
                print(content)
                data = extract_structured_data(content, data_points)
                json_data = json.loads(data)
                if isinstance(json_data, list):
                    results.extend(json_data)  # use extend() for lists
                else:
                    results.append(json_data)  # append the single dict
        if len(results) > 0:
            try:
                df = pd.DataFrame(results)
                st.subheader("Results")
                st.data_editor(df)
            except Exception as e:
                st.error(
                    f"An error occurred while creating the DataFrame: {e}")
                st.write(results)  # print the data to see its content
if __name__ == '__main__':
    multiprocessing.freeze_support()
    main()
What I did was use PyMuPDF instead of pdfium, plus some other changes. Try it out. I have removed the entire Xero integration section, since I do not need that.
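One caveat on the PyMuPDF version: convert_pdf_to_images() extracts the image objects embedded in each page, which works for scanned PDFs (each page is usually one full-page image) but can return nothing for digitally generated PDFs that contain only text. If you hit that, here is a sketch of a page-rendering variant, assuming a reasonably recent PyMuPDF (the dpi keyword and Pixmap.tobytes need 1.19.2 or later; the function name is my own):

import fitz
from io import BytesIO
from PIL import Image

def render_pdf_pages_to_images(pdf_path, dpi=200):
    # Render every page to a full-page bitmap so text-only pages are not
    # skipped, then feed the bitmaps to pytesseract exactly as before.
    pdf_document = fitz.open(pdf_path)
    images = []
    for page in pdf_document:
        pix = page.get_pixmap(dpi=dpi)
        images.append(Image.open(BytesIO(pix.tobytes("png"))))
    pdf_document.close()
    return images

You can then swap this in for convert_pdf_to_images inside extract_content_from_url.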