Opencv with Streamlit issue on the output

Summary

Im trying to build app using streamlitt and open cv
the functions is totally fine and worked on my notebook but when i try to use it on the app.py (streamlit app)
don’t know what happened I traced the code and i found that one the opencv functions “bitwise_and” when the code reached it
no output and i cant print or show anything out of it

Steps to reproduce

Code snippet:

import base64
import re
import uuid
from pathlib import Path

import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import spacy
import streamlit as st
import streamlit.components.v1 as components
import streamlit.components.v1 as stc
from keras.optimizers import Adam
from PIL import Image
from st_click_detector import click_detector

np.random.seed(2)
import itertools
import os
import random

import cv2
import cv2 as intpaint
import tensorflow as tf
from keras.models import Sequential
from keras.utils.np_utils import to_categorical
from PIL import Image, ImageChops, ImageEnhance
from pylab import *
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split

sns.set(style='white', context='notebook', palette='deep')
user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36'
#All Functions
@st.experimental_memo
def get_img_as_base64(file):
    with open(file, "rb") as f:
        data = f.read()
    return base64.b64encode(data).decode()
def make_grid(cols,rows):
    grid = [0]*cols
    for i in range(cols):
        with st.container():
            grid[i] = st.sidebar.columns(rows)
    return grid
@st.cache
def convert_to_ela_image(path, quality):
    temp_filename = 'temp_file_name.jpg'
    ela_filename = 'temp_ela.png'
    
    image = Image.open(path).convert('RGB')
    image.save(temp_filename, 'JPEG', quality = quality)
    temp_image = Image.open(temp_filename)
    
    ela_image = ImageChops.difference(image, temp_image)
    
    extrema = ela_image.getextrema()
    max_diff = max([ex[1] for ex in extrema])
    if max_diff == 0:
        max_diff = 1
    scale = 255.0 / max_diff
    
    ela_image = ImageEnhance.Brightness(ela_image).enhance(scale)
    
    return ela_image
def prepare_image(image_path):
    image_size = (128, 128)
    return np.array(convert_to_ela_image(image_path, 90).resize(image_size)).flatten() / 255.0
def read_imgs(path):
    path = real
    for dirname, _, filenames in os.walk(path):
        for filename in filenames:
            if filename.endswith(('jpg', 'png','tif')):
                full_path = os.path.join(dirname, filename)
                X.append(prepare_image(full_path))
                Y.append(1)
                if len(Y) % 500 == 0:
                    print(f'Processing {len(Y)} images')

    random.shuffle(X)
    return X, Y
def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.
    """
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)

    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]

    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, cm[i, j],
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")

    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')



def apply_mask_to_image(image_path, uploaded_files):
    # Find mask image in the same directory as the input image
    mask_filename = os.path.splitext(image_path)[0] + ".mask.png"
    # st.write("mask file name:", mask_filename)
    
    # Iterate through the uploaded files and search for the desired mask file
    matching_file = None
    for uploaded_file in uploaded_files:
        if mask_filename.endswith(uploaded_file.name):
            matching_file = uploaded_file
            # st.write("matching file:", matching_file)
            image = Image.open(uploaded_file)
            # st.image(image, caption='Mask Image', use_column_width=True)
            break

    if matching_file is not None:
        # Load mask image
        original_image = np.array(image)
        mask = original_image
        return mask, matching_file
    else:
        return None, None

def show_images(org_img_path, mask, inpainted_img):
    org_img = plt.imread(org_img_path)
    org_img_bgr = cv2.cvtColor(org_img, cv2.COLOR_RGB2BGR)
    mask, _ = apply_mask_to_image(org_img_path)
    masked_img = cv2.bitwise_and(org_img_bgr, org_img_bgr, mask=cv2.bitwise_not(mask))
    
    st.subheader("ORIGINAL")
    st.image(org_img_bgr, channels="BGR", use_column_width=True)
    
    st.subheader("MASKED")
    st.image(masked_img, channels="BGR", use_column_width=True)
    
    st.subheader("INPAINTED")
    st.image(inpainted_img, channels="BGR", use_column_width=True)


def Inpaint(filepath):
    img = Image.open(filepath)
    img = np.array(img)
    img = cv2.resize(img, None, fx=0.2, fy=0.2)
    mask, _ = apply_mask_to_image(filepath, file2)
    try:
        mask = cv2.resize(mask, (img.shape[1], img.shape[0]))
        _, mask1 = cv2.threshold(mask, 128, 255, cv2.THRESH_BINARY_INV)
        kernel = np.ones((10, 10), np.uint8)
        mask1=cv2.dilate(mask, kernel, iterations=1)
        mask1=cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
        mask1=cv2.medianBlur(mask, 5)
        mask1=cv2.bitwise_not(mask).astype(np.uint8)
        st.text("Displaying mask image for debugging")
        st.image(mask1, caption='Distorted Image', channels="RGB")
        distort = cv2.bitwise_and(img,img, mask=mask1)
        st.text("Displaying distort image for debugging")
        st.image(distort, caption='Distorted Image', channels="RGB")
        show_images(filepath, mask, img)
        distort = distort * np.expand_dims(mask1, axis=2)
        
        # st.text("Displaying distort image for debugging")
        # st.image(distort, caption='Distorted Image', channels="RGB")
        restored1 = img.copy()
        cv2.inpaint(distort, mask1, restored1, cv2.INPAINT_FSR_FAST)

        contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        for contour in contours:
            x, y, w, h = cv2.boundingRect(contour)
            color = (255, 0, 0)
            thickness = 5
            cv2.rectangle(restored1, (x, y), (x + w, y + h), color, thickness)
    except:
        st.error("No Mask Image found")

        
def load_model():
  model=tf.keras.models.load_model('model1.h5')
  return model
def Inference(Image, model):
    class_names = ['fake', 'real']
    image = prepare_image(Image)
    image = image.reshape(-1, 128, 128, 3)
    y_pred = model.predict(image)
    y_pred_class = np.argmax(y_pred, axis=1)[0]
    confidence = np.amax(y_pred) * 100
    ela_image = convert_to_ela_image(Image, 150)
    # Return the class name, confidence, and ELA image
    return class_names[y_pred_class], confidence, ela_image
footer="""
Used Tools for this Project: OpenCV, TensorFlow, Keras, Pillow, Streamlit 

The source code is available on [GitHub]().
"""
# img = get_img_as_base64("gedo.png")
page_bg_img = f"""
<style>
[data-testid="stAppViewContainer"] > .main {{ 
background-position: center;
# background-repeat: repeat;
background-attachment: local;
}}
[data-testid="stToolbar"] {{
right: 2rem;
}}
</style>
""" 
HTML_WRAPPER = """<div style="overflow-x: auto; border: 1px solid #e6e9ef; border-radius: 0.25rem; padding: 1rem">{}</div>"""
st.markdown(page_bg_img, unsafe_allow_html=True)
st.title("Fake Image Detector","\n")
st.sidebar.image("career/cs.png",use_column_width=True)
add_select = st.sidebar.radio(
        "Choose one of the following:",
        ("Detector", "Inpainting")
    )
st.sidebar.image("career/acu.png", use_column_width=True)
st.sidebar.caption(footer,unsafe_allow_html=False) 
mygrid_logos = make_grid(4,4)
mygrid_logos[1][0].image("tools/open.png",width=100,use_column_width=True)
mygrid_logos[1][1].image("Streamlit.png",width=50, use_column_width=True)
mygrid_logos[1][2].image("tools/tfp.png",width=160 )
file2=st.file_uploader("", type=["png"],accept_multiple_files=True)
def main0():    
    file = st.file_uploader("", type=["jpg", "png", "jpeg", "jfif", "bmp", "tiff", "tif"])
    if file is not None:
    # Save the uploaded file to a temporary location
        new=file.name
        with open(new, "wb") as f:
            f.write(file.read())
        # Get the file path of the temporary file
        file_path = new
    model=load_model()
    if add_select == "Detector":
        if file is None:
            st.text("Please upload an image file")
        else:
            # Load the selected image using Pillow
            image = Image.open(file)
            st.image(image, use_column_width=True)
            # Call the Inference function to classify the image and get the confidence score
            image_class, confidence, ela_image = Inference(file, model)
            # Print the class and confidence score
            st.write("The image is classified as", image_class)
            st.write("The Confidence score is approximately", np.round(confidence,2))
            st.write("path",file)
    elif add_select == "Inpainting":
            image = Image.open(file)
            st.image(image, use_column_width=True)
            # Call the Inference function to classify the image and get the confidence score
            image_class, confidence, ela_image = Inference(file, model)
            # Print the class and confidence score
            st.write("The image is classified as", image_class)
            st.write("The Confidence score is approximately", np.round(confidence,2))
            if image_class == 'fake':

                Inpaint(file_path)
            else:
                st.write("No Parts to be Inpainted The image is real")
if __name__ == '__main__':
    main0()


If applicable, please provide the steps we should take to reproduce the error or specified behavior.

Expected behavior:

the expected output should be the Image , masked image , inpainted image with bound box on the inpainted part

Actual behavior:

I dont know what happened when the code reach bitwise_and and i cant see the img

Debug info

  • Streamlit version:1.11.0
  • Python version: 3.9.7
  • Using Vs code
  • OS version: Windows 10 Pro ver: 22H2
    os build :19045.2965
  • Browser version:Microsoft Edge Version 114.0.1823.37 (Official build) (64-bit)

Requirements file

Using Conda? PipEnv? PyEnv? Pex? Share the contents of your requirements file here.
Not sure what a requirements file is? Check out this doc and add a requirements file to your app.

Links

  • Link to your GitHub repo:
  • Link to your deployed app:

Additional information

If needed, add any other context about the problem here.