I have an app that I just deployed. It mostly works, but one part of it doesn't. In short, it's a custom AI-connected SQL agent: based on user input, the AI classifies the request and routes it to trigger the SQL agent.
In the cloud-deployed app, when I run a specific input that triggers the SQL agent, it loads for 15+ minutes with no response.
However, when I run the same input locally, I get the final response very quickly and everything works as it should. I'm confused.
When it finally crashed after 30+ minutes, I got this:
TransportError: Failed to retrieve
http://metadata.google.internal/computeMetadata/v1/universe/universe-domain from
the Google Compute Engine metadata service. Compute Engine Metadata server
unavailable due to HTTPConnectionPool(host='metadata.google.internal', port=80):
Max retries exceeded with url: /computeMetadata/v1/universe/universe-domain
(Caused by ConnectTimeoutError(<urllib3.connection.HTTPConnection object at
0x7f5892d88140>, 'Connection to metadata.google.internal timed out. (connect
timeout=120)'))
here is the part of the code that calls the SQL agent
@tool
def execute_sql_query(query: str) -> str:
    """Run a user query through the BigQuery SQL agent and return a formatted answer.

    Loads service-account credentials from Streamlit secrets, verifies the
    required table exists, runs the LangChain SQL agent against BigQuery, and
    asks the LLM to reformat the raw result for display.

    Args:
        query: Natural-language (or SQL) request to pass to the SQL agent.

    Returns:
        A user-friendly, LLM-formatted string, or an error/empty-result message.
    """
    # Load credentials from Streamlit Secrets. Keep the raw dict around: the
    # SQLAlchemy BigQuery dialect accepts it directly via `credentials_info`.
    credentials_info = dict(st.secrets["GOOGLE_CREDENTIALS"])
    credentials = service_account.Credentials.from_service_account_info(credentials_info)

    PROJECT_ID = "mogulmedia"
    DATASET_ID = "mm"
    database_uri = f"bigquery://{PROJECT_ID}/{DATASET_ID}?dialect=standard"

    # Restrict the agent to the tables it is allowed to touch.
    allowed_tables = ["rpt_tweets", "raw_profiles"]

    # Initialize the BigQuery client with EXPLICIT credentials.
    client = bigquery.Client(credentials=credentials, project=PROJECT_ID)
    dataset_ref = client.dataset(DATASET_ID, project=PROJECT_ID)
    available_tables = {table.table_id for table in client.list_tables(dataset_ref)}

    # Validate that the main table/view is visible before spinning up the agent.
    if "rpt_tweets" not in available_tables:
        return "❌ Error: `rpt_tweets` table/view not found in BigQuery!"

    # BUG FIX: credentials must also be passed to the SQLAlchemy engine.
    # Without `engine_args`, SQLDatabase.from_uri builds its own engine that
    # falls back to Application Default Credentials; on a host outside Google
    # Cloud, that probes http://metadata.google.internal with long connect
    # timeouts/retries — exactly the TransportError / ConnectTimeoutError
    # (connect timeout=120) hang observed only in the deployed app.
    sql_database = SQLDatabase.from_uri(
        database_uri,
        include_tables=allowed_tables,
        view_support=True,
        engine_args={"credentials_info": credentials_info},
    )

    llm = ChatOpenAI(model_name="gpt-4-turbo", temperature=0)
    sql_agent = initialize_sql_agent(llm, sql_database)

    # Run the query through the agent.
    result = sql_agent.invoke(query)
    if not result:
        return "⚠️ No results found."

    # Convert the SQL result into plain text for the formatting prompt.
    # NOTE(review): the tuple branch assumes rows are (tweet_id, text, likes)
    # in that order — confirm against the actual query/agent output.
    if isinstance(result, list) and all(isinstance(row, tuple) for row in result):
        raw_text = "\n".join(
            f"{idx + 1}. Tweet ID: {row[0]} | Text: {row[1]} | Likes: {row[2]}"
            for idx, row in enumerate(result)
        )
    elif isinstance(result, dict):
        raw_text = "\n".join(f"{key}: {value}" for key, value in result.items())
    else:
        raw_text = str(result)  # Fallback for unexpected result shapes.

    # Ask the LLM to reformat & present the results nicely.
    format_prompt = f"""
You are an AI that structures and presents SQL results cleanly.
**Raw SQL Output:**
{raw_text}
**Instructions:**
- Format the output in a user-friendly manner based on user input.
- Tweet text might come with grammar, unstructured mistakes, and some links. Format it and fix that for readability.
- Keep it concise but structured.
- Ensure readability.
**Final Output:**
"""
    return llm.invoke(format_prompt).content.strip()