import streamlit as st
from streamlit_webrtc import webrtc_streamer
import av
import cv2
import numpy as np
import mediapipe as mp
from keras.models import load_model
import ytm
import random
import time
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
from statistics import mode
import os
# Load the trained emotion classifier and its label set from the app folder
path = os.path.dirname(__file__)
model = load_model(os.path.join(path, "model_4.h5"))
label = np.load(os.path.join(path, "labels.npy"))

# MediaPipe Holistic gives face and hand landmarks per frame
holistic = mp.solutions.holistic
hands = mp.solutions.hands
holis = holistic.Holistic()
drawing = mp.solutions.drawing_utils

st.header("Emotion Based Music Recommender")

# Headless Chrome options for the Selenium-driven YouTube Music player
options = Options()
options.add_argument("--headless")
options.add_argument("--disable-gpu")
if "run" not in st.session_state:
st.session_state["run"] = "true"
try:
emotion = np.load(os.path.join(path,"emotion.npy"))[0]
except:
emotion=""
if not(emotion):
st.session_state["run"] = "true"
else:
st.session_state["run"] = "false"
class EmotionProcessor:
    def recv(self, frame):
        frm = frame.to_ndarray(format="bgr24")
        frm = cv2.flip(frm, 1)
        res = holis.process(cv2.cvtColor(frm, cv2.COLOR_BGR2RGB))

        lst = []
        if res.face_landmarks:
            # Face landmarks, normalised relative to landmark 1
            for i in res.face_landmarks.landmark:
                lst.append(i.x - res.face_landmarks.landmark[1].x)
                lst.append(i.y - res.face_landmarks.landmark[1].y)

            # Left-hand landmarks relative to landmark 8 (index fingertip),
            # zero-padded when the hand is not visible
            if res.left_hand_landmarks:
                for i in res.left_hand_landmarks.landmark:
                    lst.append(i.x - res.left_hand_landmarks.landmark[8].x)
                    lst.append(i.y - res.left_hand_landmarks.landmark[8].y)
            else:
                lst.extend([0.0] * 42)

            # Right-hand landmarks, same scheme
            if res.right_hand_landmarks:
                for i in res.right_hand_landmarks.landmark:
                    lst.append(i.x - res.right_hand_landmarks.landmark[8].x)
                    lst.append(i.y - res.right_hand_landmarks.landmark[8].y)
            else:
                lst.extend([0.0] * 42)

            lst = np.array(lst).reshape(1, -1)
            pred = label[np.argmax(model.predict(lst))]
            cv2.putText(frm, pred, (50, 50), cv2.FONT_ITALIC, 1, (255, 0, 0), 2)
            np.save(os.path.join(path, "emotion.npy"), np.array([pred]))

        return av.VideoFrame.from_ndarray(frm, format="bgr24")
webrtc_streamer(
    key="key",
    desired_playing_state=True,
    video_processor_factory=EmotionProcessor,
    media_stream_constraints={"video": True, "audio": False},
)
def most_common(values):
    return mode(values)

def song_url(song_id: str) -> str:
    return ytm.utils.url_ytm('watch', params={'v': song_id})

def search(query: str) -> str:
    # Pick one of the first few search results at random
    k = random.randint(0, 10)
    return song_url(ytm.YouTubeMusic().search_songs(query)['items'][k]['id'])
class Play:
    def __init__(self):
        self.driver = None

    def close(self):
        if self.driver is not None:
            self.driver.quit()
            self.driver = None

    def play_music(self, url):
        self.driver = webdriver.Chrome(options=options)
        self.driver.get(url)
        self.driver.implicitly_wait(3)
        try:
            element_present = EC.presence_of_element_located((By.ID, "video-title"))
            WebDriverWait(self.driver, 10).until(element_present)
        except TimeoutException:
            st.write("Timed out waiting for page to load")
            return
        # Start playback, let the song run, then skip to the next track once
        self.driver.find_element(By.XPATH, '//*[@id="play-pause-button"]').click()
        time.sleep(30)
        self.driver.find_element(
            By.XPATH, '//*[@id="left-controls"]/div/tp-yt-paper-icon-button[5]').click()
        time.sleep(150)
        self.close()
emo = emotion
btn1 = st.button("Start")
btn2 = st.button("Stop")
pl = Play()

if btn1 and emo != 'happy':
    # Keep recommending songs until the detected emotion is 'happy' or Stop is pressed
    while emo != 'happy' and not btn2:
        emo = ''
        l = []
        p = search('top random')
        print(p)
        pl.play_music(p)
        if btn2:
            pl.close()
            break
        # Sample the predictions that EmotionProcessor keeps writing to emotion.npy
        for _ in range(1, 20):
            emotion = np.load(os.path.join(path, "emotion.npy"))[0]
            l.append(emotion)
            time.sleep(1)  # brief pause so successive reads can pick up new predictions
        emo = most_common(l)
        if emo == 'happy':
            pl.close()
I am fairly new to Streamlit and this is the first app I am trying to launch.
Can anyone please help me publish this app?
I have also pasted a screenshot of the error log.
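For context, I understand that publishing on Streamlit Community Cloud needs a requirements.txt next to the script. The list below is only my guess at the dependencies based on the imports above; the exact package that provides the `ytm` module and the Keras backend may differ:

# requirements.txt (assumed, unpinned)
streamlit
streamlit-webrtc
av
opencv-python-headless   # provides cv2; plain opencv-python also works locally
numpy
mediapipe
tensorflow               # provides keras.models.load_model
selenium
# plus whichever package provides the `ytm` module (not sure of its PyPI name)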