I have created a sign-language-recognition CNN model and I want to deploy it to the web. It works perfectly on localhost. I tried uploading the model and weights files (.json and .h5) to GitHub, but my .h5 file is 49 MB and GitHub allows a maximum of 25 MB, so instead I uploaded the .json and .h5 files to a Google Cloud Storage bucket. I created a service account, created the bucket, and added the files to it. I don't know what to do next. Please help me deploy the website with Streamlit, and please also modify the code below so that I can run it as a web page.
"""Streamlit app: real-time sign-language recognition from the webcam.

Downloads the trained CNN (architecture .json + weights .h5) from a Google
Cloud Storage bucket, then classifies a cropped region of each webcam frame
and speaks newly recognised letters aloud.
"""

# Standard library
import os

# Third-party
import cv2
import mediapipe as mp  # imported by the original script; not used in this chunk
import numpy as np
import pyttsx3
import streamlit as st
from google.cloud import storage
from keras.models import model_from_json
from matplotlib import pyplot as plt  # imported by the original script; not used in this chunk

# Text-to-speech engine used to speak each newly recognised letter.
t2s = pyttsx3.init()
# --- Authenticate to GCP (Google Cloud Platform) ---
# NOTE(review): on Streamlit Community Cloud you cannot commit a service-account
# key file to the repo; store the key JSON in st.secrets and write it to a
# temporary file at startup instead — confirm against your deployment target.
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = "#########.json"

# Your GCP bucket name
bucket_name = "test_asl"


@st.cache_resource
def _load_sign_model():
    """Download the model architecture and weights from GCS and build the model.

    Decorated with ``st.cache_resource`` so the (49 MB) download and model
    construction happen once per server process, not on every Streamlit
    rerun of this script.

    Returns:
        The compiled Keras model with weights loaded.
    """
    client = storage.Client()
    bucket = client.get_bucket(bucket_name)

    # Download model and weight files from Google Cloud Storage.
    bucket.blob("signlanguagedetectionmodel4.json").download_to_filename(
        "signlanguagedetectionmodel4.json"
    )
    bucket.blob("signlanguagedetectionmodel4.h5").download_to_filename(
        "signlanguagedetectionmodel4.h5"
    )

    # Load the downloaded architecture and weights.
    with open("signlanguagedetectionmodel4.json", "r") as json_file:
        model_json = json_file.read()
    loaded = model_from_json(model_json)
    loaded.load_weights("signlanguagedetectionmodel4.h5")
    return loaded


model = _load_sign_model()
def extract_features(image):
    """Convert a 48x48 grayscale image into the CNN's input tensor.

    Args:
        image: array-like containing 48*48 pixel values (e.g. the resized
            grayscale crop produced by cv2).

    Returns:
        A float array of shape (1, 48, 48, 1) with values scaled to [0, 1].
    """
    feature = np.array(image)
    feature = feature.reshape(1, 48, 48, 1)
    return feature / 255.0
# --- Streamlit UI and webcam capture loop ---
previous_prediction = None  # last spoken label, so each letter is spoken once

st.title("test Sign Language ")
frame_placeholder = st.empty()
stop_button_pressed = st.button("Stop")

# NOTE(review): cv2.VideoCapture(0) opens the *server's* camera. On a hosted
# Streamlit deployment there is no server camera — use st.camera_input or the
# streamlit-webrtc component to capture the visitor's browser camera instead.
cap = cv2.VideoCapture(0)

label = ["A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "blank"]

while True:
    # Original code passed -1 to read(); VideoCapture.read takes no flag and
    # returns (success, frame) — always check the success flag.
    ret, frame = cap.read()
    if not ret:
        st.warning("Could not read a frame from the webcam.")
        break

    # Orange box marking the region of interest that is classified.
    cv2.rectangle(frame, (0, 40), (300, 300), (0, 165, 255), 1)
    cropframe = frame[40:300, 0:300]
    cropframe = cv2.cvtColor(cropframe, cv2.COLOR_BGR2GRAY)
    cropframe = cv2.resize(cropframe, (48, 48))
    cropframe = extract_features(cropframe)

    pred = model.predict(cropframe)
    prediction_label = label[pred.argmax()]

    # Filled header bar showing the prediction and its confidence.
    cv2.rectangle(frame, (0, 0), (300, 40), (0, 165, 255), -1)
    if prediction_label == 'blank':
        cv2.putText(frame, " ", (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1,
                    (255, 255, 255), 2, cv2.LINE_AA)
    else:
        accu = "{:.2f}".format(np.max(pred) * 100)
        cv2.putText(frame, f'{prediction_label} {accu}%', (10, 30),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2, cv2.LINE_AA)
        # Speak the letter only when it changes, so TTS is not re-triggered
        # on every frame.
        if prediction_label != previous_prediction:
            t2s.say(prediction_label)
            t2s.runAndWait()
            previous_prediction = prediction_label

    # Stream the annotated frame into the Streamlit page.
    frame_placeholder.image(frame, channels="BGR")

    if stop_button_pressed:
        break

# The desktop-only cv2.waitKey / 'q'-to-quit check was removed: a Streamlit
# app has no OpenCV window, so the Stop button is the only exit path.
cap.release()
cv2.destroyAllWindows()