I built a POC. While testing it with a test case in the terminal, I am getting this:
import streamlit as st
import pandas as pd
import google.generativeai as genai
import os
import ssl
import certifi
# SSL context using certifi's CA bundle (created here but not passed to any call below)
context = ssl.create_default_context(cafile=certifi.where())

# Load API key securely
API_KEY = os.getenv("GEMINI_API_KEY")  # Ensure the API key is set in environment variables
# Function to generate prompts using LLM (Google Gemini)
def generate_prompts(text):
    if not API_KEY:
        return ["Error: API Key is missing. Set the GEMINI_API_KEY environment variable."]

    # Configure Generative AI with API Key
    genai.configure(api_key=API_KEY)
    model = genai.GenerativeModel("gemini-2.0-flash")

    try:
        # Generate response
        response = model.generate_content(text)
        if response and hasattr(response, "text"):
            return [response.text]
        else:
            return ["Error: No response from LLM."]
    except Exception as e:
        return [f"LLM Error: {str(e)}"]
# Function to analyze Excel and generate LLM-based prompts
def analyze_excel_with_llm(file):
    try:
        df = pd.read_excel(file)
        if df.empty:
            return ["Error: Uploaded Excel file is empty."]

        column_names = df.columns.tolist()  # Extract column names
        # df.info() prints to stdout and returns None, so use describe() for a string summary
        summary = df.describe(include="all").to_string()

        # Construct prompt for LLM
        llm_prompt = f"""
        Given the following dataset structure, generate 5-7 insightful data analysis questions.
        The dataset contains the following columns:
        {column_names}

        Summary Statistics:
        {summary}
        """
        return generate_prompts(llm_prompt)
    except Exception as e:
        return [f"Error processing file: {str(e)}"]
# Streamlit UI
st.title("Prompt Generator")
uploaded_file = st.file_uploader("Upload your Excel file", type=["xlsx"])

if uploaded_file:
    prompts = analyze_excel_with_llm(uploaded_file)
    if prompts:
        st.write("### Generated Prompts:")
        for prompt in prompts:
            st.write(f"- {prompt}")
    else:
        st.write("No prompts generated. Please upload a valid Excel file.")