How to stop a class-based callback from running in streamlit-webrtc

Hi,
I'm new to this amazing solution, and I'm building a face-recognition web app with Streamlit.
So I'm using Streamlit and its amazing component streamlit-webrtc.
I created a class that does the face recognition, but I want to automatically stop the callbacks to the recv() function once the person is recognized.
How can I do that?
Thank you.
Here is my code — please don't focus on the face detection and recognition parts, because they are only one piece of a larger project:

class VideoProcessor(VideoProcessorBase):
    """streamlit-webrtc video callback that runs a timed face-verification flow.

    The flow is driven by a per-frame countdown (``get_ready``):
      * 180 -> 51 : ask the user to remove glasses/mask (with icon overlays)
      * 50  -> 1  : ask the user to hold still
      * 0         : run DeepFace verification once against the ID-card image
      * afterwards: show the pass/fail verdict; on success, ``my_play_state``
        goes False and ``recv`` stops processing new frames.
    """

    def __init__(self):
        # self.model_lock = threading.Lock()
        self.camera_frame = ''      # last raw BGR frame (ndarray once streaming starts)
        self.returned_image = ''    # annotated frame sent back to the browser
        self.get_ready = 180        # frame countdown driving the UI phases (see class docstring)
        self.result_face = ''       # DeepFace verdict; falsy until verification has run
        self.glasses = cv2.resize(cv2.imread('images/glasses.png', cv2.IMREAD_UNCHANGED), (30, 30))  # IMREAD_UNCHANGED => keep the alpha channel
        self.mask = cv2.resize(cv2.imread('images/mask1.png', cv2.IMREAD_UNCHANGED), (30, 30))  # IMREAD_UNCHANGED => keep the alpha channel
        self.my_play_state = True   # False once verified; recv() then stops processing

    def add_rect(self, frame):
        """Blend a translucent white banner over the top ~12% of *frame*.

        NOTE: mutates *frame* in place (and also returns it).
        """
        x, y, w, h = 5, 5, frame.shape[1] - 10, int(frame.shape[0] * 12 / 100)
        sub_img = frame[y:y + h, x:x + w]
        white_rect = np.ones(sub_img.shape, dtype=np.uint8) * 255
        res = cv2.addWeighted(sub_img, 0.5, white_rect, 0.5, 1.0)
        # Put the blended strip back into its position.
        frame[y:y + h, x:x + w] = res
        return frame

    def write_on_image(self, im, text, color, thikness=2, zone=(10, 50), flip=True):
        """Resize *im* to 720px wide, optionally mirror it, and draw *text* at *zone*.

        Returns a new array (imutils.resize copies), so the input is not modified.
        """
        im = imutils.resize(im, width=720)
        if flip:
            im = cv2.flip(im, 1)
        cv2.putText(im, text, zone, cv2.FONT_HERSHEY_DUPLEX, 1, color, thikness)
        return im

    def insert_glasses_and_masks(self, background, over, emp1, emp2, emp3, emp4):
        """Alpha-composite the RGBA image *over* onto background[emp1:emp2, emp3:emp4].

        The region must match ``over``'s size. Mutates and returns *background*.
        """
        alpha_channel = over[:, :, 3] / 255  # convert from 0-255 to 0.0-1.0
        overlay_colors = over[:, :, :3]
        alpha_mask = np.dstack((alpha_channel, alpha_channel, alpha_channel))
        background_subsection = background[emp1:emp2, emp3:emp4]
        # Combine the background with the overlay image weighted by alpha.
        composite = background_subsection * (1 - alpha_mask) + overlay_colors * alpha_mask
        # Overwrite the section of the background image that has been updated.
        background[emp1:emp2, emp3:emp4] = composite
        return background

    def process(self, frame):
        """Annotate one incoming frame according to the countdown phase.

        At countdown 0 runs DeepFace verification of the live frame against
        the cropped ID-card face set by ``run()``; stores the boolean verdict
        in ``self.result_face``. Returns the annotated BGR ndarray.
        """
        self.camera_frame = frame.to_ndarray(format="bgr24")
        # BUGFIX: take a real copy. The original `copy = self.camera_frame`
        # was an alias, and add_rect() mutates the array in place, so the
        # frame later fed to DeepFace contained the white banner overlay.
        clean_frame = self.camera_frame.copy()
        self.camera_frame = self.add_rect(self.camera_frame)
        if 50 < self.get_ready <= 180:
            # Phase 1: ask the user to remove glasses and mask, with icons.
            self.get_ready -= 1
            self.returned_image = self.write_on_image(
                self.camera_frame, 'Retirer les lunettes et masques',
                color=(0, 0, 255), thikness=2, zone=(10, 40))
            self.returned_image = self.insert_glasses_and_masks(self.returned_image, self.mask, 15, 45, 530, 560)
            self.returned_image = self.insert_glasses_and_masks(self.returned_image, self.glasses, 15, 45, 590, 620)
        elif 0 < self.get_ready <= 50:
            # Phase 2: ask the user to hold still.
            self.get_ready -= 1
            self.returned_image = self.write_on_image(
                self.camera_frame, 'Rester stable',
                color=(238, 151, 30), thikness=2, zone=(10, 40))
        elif self.get_ready == 0:
            # Phase 3: run verification exactly once (counter then goes to -1).
            self.returned_image = self.write_on_image(
                self.camera_frame, 'Rester stable',
                color=(238, 151, 30), thikness=2, zone=(10, 40))
            try:
                # DeepFace input: grayscale converted back to 3 channels.
                self.gray_frame = cv2.cvtColor(clean_frame, cv2.COLOR_BGR2GRAY)
                self.gray_frame = cv2.cvtColor(self.gray_frame, cv2.COLOR_GRAY2RGB)
                self.gray_front = cv2.cvtColor(self.front_card_id_croped, cv2.COLOR_BGR2GRAY)
                self.gray_front = cv2.cvtColor(self.gray_front, cv2.COLOR_GRAY2RGB)
                self.result_face = DeepFace.verify(
                    img1_path=self.gray_frame,
                    img2_path=self.gray_front,
                    detector_backend='ssd',
                    distance_metric='euclidean_l2',
                    model_name='Facenet512')['verified']
            except Exception:
                # Was a bare `except:`; narrowed so SystemExit/KeyboardInterrupt
                # are not swallowed. Any failure (no face found, card image not
                # set via run(), ...) counts as "not the owner".
                self.result_face = False
                self.returned_image = self.write_on_image(
                    self.camera_frame, "vous n'etes pas le proprietaire de la carte",
                    color=(0, 0, 255), zone=(10, 40))
            finally:
                self.get_ready -= 1
        elif self.result_face:
            # Verified: show success and disable further processing in recv().
            self.returned_image = self.write_on_image(
                self.camera_frame, "Le proprietaire de la carte",
                color=(67, 181, 75), zone=(10, 40))
            self.my_play_state = False
        else:
            # Not verified: keep showing the rejection message.
            self.returned_image = self.write_on_image(
                self.camera_frame, "vous n'etes pas le proprietaire de la carte",
                color=(0, 0, 255), zone=(10, 40))
        return self.returned_image

    def run(self, img_front):
        """Store the cropped ID-card face image that process() verifies against."""
        self.front_card_id_croped = img_front

    def recv(self, frame):
        """streamlit-webrtc per-frame callback.

        Processes frames until the person is verified; afterwards it stops
        calling process() and keeps returning the last captured frame with a
        "you may close the window" message.
        """
        if self.my_play_state:
            return av.VideoFrame.from_ndarray(self.process(frame), format="bgr24")
        self.returned_image = self.write_on_image(
            self.camera_frame, "Vous pouvez fermer la fenetre",
            color=(0, 0, 255), zone=(10, 40))
        return av.VideoFrame.from_ndarray(self.returned_image, format="bgr24")
            

    # Start the WebRTC stream; each incoming video frame is routed to
    # VideoProcessor.recv(). (Removed the stray ``` markdown fence that was
    # fused onto the closing parenthesis and broke the syntax.)
    ctx = webrtc_streamer(
        key="Liveness",
        video_processor_factory=VideoProcessor,
        # rtc_configuration=RTC_CONFIGURATION,
        # desired_playing_state=True,
        media_stream_constraints={
            # Request a 720p-high stream; width may scale between 800 and 1920.
            "video": {"width": {"min": 800, "ideal": 1280, "max": 1920}, "height": {"min": 720, "ideal": 720, "max": 720}},
            "audio": False,
        },
    )

Hi @Abdelkrim_Nachef,

Thanks for sharing your question! If I understand correctly, you want to stop the callback function from being executed once a certain condition is satisfied.

One way to do this would be to pass a parameter to your callback function, e.g. is_recognized, and add an if statement to your callback function that checks whether is_recognized is False. If is_recognized is False, just have the function continue as normal (meaning that once is_recognized is True, the rest of the function won't execute).

Hope this helps – let me know if you have any questions!

This topic was automatically closed 180 days after the last reply. New replies are no longer allowed.