App restarting completely when clicking on a button

Summary

When I click on a button, the app restarts completely and redoes the transcription, etc…

Steps to reproduce

Code snippet:

import streamlit as strmlt
import whisper
from whisper.utils import get_writer
import pydub
import os 


def print_file(namefile):
    """Render a VTT transcript file in the Streamlit app, skipping the header.

    Opens *namefile* as UTF-8 text, discards the first line (the literal
    "WEBVTT" header) and writes the remaining contents to the page.
    """
    with open(namefile, 'r', encoding='utf-8') as transcript:
        transcript.readline()  # drop the "WEBVTT" header line
        body = transcript.read()
        strmlt.write(body)  # show the rest of the transcript

        
def ingestion():
    """Run the external ingestion script while showing a spinner.

    Blocks until the subprocess launched via ``os.system`` finishes.
    """
    spinner = strmlt.spinner('Ingestion du contenu...')
    with spinner:
        # NOTE(review): os.system with a fixed command string; consider
        # subprocess.run for error reporting — left unchanged here.
        os.system('python /workspace/localGPT-main/ingest.py')
 
# --- Page setup and main transcription flow ------------------------------
# NOTE(review): Streamlit re-executes this entire script on every widget
# interaction, so `click` is re-assigned True on each rerun; the
# `click=False` set inside the button handler below never survives a
# rerun. This is the "app restarts completely" bug described in the post.
click=True
strmlt.set_page_config(page_title="Transcription d'appel",page_icon="/workspace/ressources/icon.png")
#setting icon and title of the page
#hide_streamlit_style = """
#       <style>
#       [data-testid="stToolbar"] {visibility: hidden !important;}
#       </style>
#       """
#strmlt.markdown(hide_streamlit_style, unsafe_allow_html=True)
# Injected JS that rewrites the Streamlit footer text in the parent page.
html_string='''<script>
// To break out of iframe and access the parent window
const streamlitDoc = window.parent.document;

// Make the replacement
document.addEventListener("DOMContentLoaded", function(event){
streamlitDoc.getElementsByTagName("footer")[0].innerHTML = "Provided by Streamlit\</a>";
    });
</script>
'''
strmlt.components.v1.html(html_string)
strmlt.header("Transcription d'appel 🔊")
#main title 

strmlt.subheader("Fichier audio")
audio=strmlt.file_uploader("Télécharger un fichier audio (format wav)",type="wav")
#getting the audio file
# Runs the whole transcription pipeline whenever a file is present.
# Because `click` is always True at this point (reset above), the model
# is reloaded and the audio re-transcribed on every rerun.
if(audio is not None and click==True):
    strmlt.subheader("Transcription")
    audio_file=audio.read()
    #readable audio
    strmlt.caption("Écouter l'appel")
    strmlt.audio(audio_file,format='audio/wav', start_time=60)
    #audio player    
    
    #recreating file uploaded
    # Re-encode the uploaded file to disk so whisper can read it by path.
    file_var = pydub.AudioSegment.from_wav(audio) 
    file_var.export("/workspace/static/files/"+audio.name, format='wav')

    #loading model
    with strmlt.spinner('Chargement du modele...'):
        #model used (you can choose whatever you want)
        model = whisper.load_model("tiny", download_root="/workspace/cache")
    #loading transcription
    with strmlt.spinner('Transcription en cours...'):
        #retranscription
        result = model.transcribe("/workspace/static/files/" + audio.name, initial_prompt="ETHIAS, Ethias", language="nl")
    strmlt.success('Transcription effectuée')

    # Write the transcript as VTT, then rename it to .txt so the
    # downstream ingestion script picks it up as a text document.
    vtt_writer = get_writer("vtt", "/workspace/localGPT-main/SOURCE_DOCUMENTS")
    vtt_writer(result, audio.name)
    os.rename("/workspace/localGPT-main/SOURCE_DOCUMENTS/"+audio.name.split(".")[0]+".vtt","/workspace/localGPT-main/SOURCE_DOCUMENTS/"+audio.name.split(".")[0]+".txt")
        
    with strmlt.expander("Voir la transcription"):
        #print vtt file
        print_file("/workspace/localGPT-main/SOURCE_DOCUMENTS/"+audio.name.split(".")[0]+".txt")

    # NOTE(review): clicking this button triggers a full script rerun;
    # setting click=False here has no lasting effect (see top of block).
    if strmlt.button("Ingérer la transcription :drooling_face:"):
        click=False
        ingestion()

When clicking on the “Ingérer la transcription” button, it reloads the model and redoes the transcription

Expected behavior:

I just want the code to continue without repeating the previous steps.

Thanks in advance for the answers

Hey @RAPHCVR :wave:

Have you tried implementing session state into your app?

Streamlit Session State Documentation

This should allow for what you need. :slight_smile:

Best, Charly

Thanks, @Charly_Wargnier, I tried this and it worked. But I encountered a problem: only the widgets rendered after the condition is checked are present on the website. For example, I would like to keep the audio widget and the transcription expander. How can I save them too?

import streamlit as strmlt
import whisper
from whisper.utils import get_writer
import pydub
import os 
import gc
import torch

def print_file(namefile):
    """Display a VTT transcript in the app, omitting the WEBVTT header.

    Reads *namefile* as UTF-8, throws away the first line, and renders
    everything that follows with ``strmlt.write``.
    """
    with open(namefile, 'r', encoding='utf-8') as source:
        source.readline()  # first line is the "WEBVTT" marker — skip it
        strmlt.write(source.read())

        
def ingestion():
    """Button callback: mark the ingestion as done and run the script.

    Sets ``session_state.clicked`` so later reruns skip the transcription
    pipeline, then launches the external ingest script under a spinner.
    """
    strmlt.session_state.clicked = True
    message = 'Ingestion du contenu...'
    with strmlt.spinner(message):
        os.system('python /workspace/localGPT-main/ingest.py')
 
# --- Page setup and main transcription flow (session-state version) ------
strmlt.set_page_config(page_title="Transcription d'appel",page_icon="/workspace/ressources/icon.png")
# Initialise the session flag once per browser session; it survives reruns.
if 'clicked' not in strmlt.session_state:
    strmlt.session_state.clicked = False
#setting icon and title of the page
#hide_streamlit_style = """
#       <style>
#       [data-testid="stToolbar"] {visibility: hidden !important;}
#       </style>
#       """
#strmlt.markdown(hide_streamlit_style, unsafe_allow_html=True)
# Injected JS that rewrites the Streamlit footer text in the parent page.
html_string='''<script>
// To break out of iframe and access the parent window
const streamlitDoc = window.parent.document;

// Make the replacement
document.addEventListener("DOMContentLoaded", function(event){
streamlitDoc.getElementsByTagName("footer")[0].innerHTML = "Provided by RAPHCVR\</a>";
    });
</script>
'''
strmlt.components.v1.html(html_string)
strmlt.header("Transcription d'appel 🔊")
#main title 

strmlt.subheader("Fichier audio")
audio=strmlt.file_uploader("Télécharger un fichier audio (format wav)",type="wav")
#getting the audio file
# Pipeline only runs while `clicked` is False. NOTE(review): once the
# ingestion button flips `clicked` to True, this whole branch — including
# the audio player and the transcription expander — is skipped on the
# next rerun, which is the widget-disappearance issue described above.
if(audio is not None and strmlt.session_state.clicked==False):
    strmlt.subheader("Transcription")
    audio_file=audio.read()
    #readable audio
    strmlt.caption("Écouter l'appel")
    strmlt.audio(audio_file,format='audio/wav')
    #audio player    
    
    #recreating file uploaded
    # Re-encode the uploaded file to disk so whisper can read it by path.
    file_var = pydub.AudioSegment.from_wav(audio) 
    file_var.export("/workspace/static/files/"+audio.name, format='wav')

    #loading model
    with strmlt.spinner('Chargement du modele...'):
        #model used (you can choose whatever you want)
        model = whisper.load_model("tiny", download_root="/workspace/cache")
    #loading transcription
    with strmlt.spinner('Transcription en cours...'):
        #retranscription
        result = model.transcribe("/workspace/static/files/" + audio.name, initial_prompt="ETHIAS, Ethias", language="nl")
    strmlt.success('Transcription effectuée')
    # Write the transcript as VTT, then free the model and GPU memory
    # before the rename/ingestion steps.
    vtt_writer = get_writer("vtt", "/workspace/localGPT-main/SOURCE_DOCUMENTS")
    vtt_writer(result, audio.name)
    del model
    gc.collect()
    torch.cuda.empty_cache()
    # Rename .vtt -> .txt so the ingestion script treats it as text.
    os.rename("/workspace/localGPT-main/SOURCE_DOCUMENTS/"+audio.name.split(".")[0]+".vtt","/workspace/localGPT-main/SOURCE_DOCUMENTS/"+audio.name.split(".")[0]+".txt")
        
    with strmlt.expander("Voir la transcription"):
        #print vtt file
        print_file("/workspace/localGPT-main/SOURCE_DOCUMENTS/"+audio.name.split(".")[0]+".txt")

    # Callback form: `ingestion` runs before the rerun, setting the flag.
    strmlt.button("Ingérer la transcription", on_click=ingestion)