Hi @randyzwitch, thank you!
This is my full code:
# Two-column Streamlit layout: hyperparameter inputs go in col1,
# results are rendered in col2 further below.
# NOTE(review): st.beta_columns is the pre-1.0 Streamlit API; newer
# releases renamed it to st.columns.
col1, col2 = st.beta_columns(2)
with col1:
st.title("Uji Hyperparameter FastText")
from gensim.models import FastText
# Load the tokenized corpus; only the text and label columns are needed.
data = pd.read_csv("Hasil_tokenizing.csv",usecols=["content_tokenizing","label"])
sentences = data['content_tokenizing'].values
# Detect frequent bigrams so multi-word phrases are treated as single tokens.
phrases = Phrases(sentences, min_count=5, threshold=1)
bigram = Phraser(phrases)
# Document the FastText hyperparameters that are held constant in this experiment.
st.markdown('''
<p><b>Parameter yang dibuat konstan</b><p>
<ul>
<li>alpha = 0.0025</li>
<li>min_count = 3</li>
<li>workers = 4</li>
<li>iterasi = 10</li>
</ul>
''',unsafe_allow_html=True)
st.markdown('''
<p><b>Uji Hyperparameter</b></p>
''',unsafe_allow_html=True)
# User-tunable hyperparameters: embedding dimensionality and context window.
size = st.number_input("Size", 10,300)
window = st.number_input("Window", 1,15)
def create_FastText(sizes, windows):
    """Train a FastText skip-gram model on the bigram-phrased corpus and save it.

    Parameters
    ----------
    sizes : int
        Embedding dimensionality (gensim 3.x ``size`` argument).
    windows : int
        Context window size.

    Returns
    -------
    FastText
        The trained model (also persisted to ``fasttext.ft``).
    """
    # BUG FIX: the original body ignored its own parameters and read the
    # module-level widgets `size`/`window` instead, so calling this function
    # with different values had no effect. Use the arguments.
    model = FastText(
        bigram[sentences],  # phrased corpus built at module level
        size=sizes,
        window=windows,
        alpha=0.0025,
        min_count=3,
        workers=4,
        iter=10,  # gensim 3.x name; gensim 4 renamed it to `epochs`
        sg=1,     # skip-gram
        hs=1,     # hierarchical softmax
    )
    model.save("fasttext.ft")
    return model
# Train the model with the values chosen in the number_input widgets.
ftmodel = create_FastText(size,window)
# Load the stopword-filtered reviews used for classification and drop
# rows whose text is missing so tokenization below cannot fail on NaN.
review = pd.read_csv("Hasil_stopword.csv", usecols=["content_stopword","label"])
review = review[review['content_stopword'].notnull()]
def posneg(x):
    """Map a sentiment label to an integer.

    "Negatif" -> 0, "Positif" -> 1; any other value is returned unchanged.
    """
    label_codes = {"Negatif": 0, "Positif": 1}
    return label_codes.get(x, x)
# Encode the string labels as 0/1 integers for the classifier.
filtered_score = review["label"].map(posneg)
review["label"] = filtered_score
def norm_sent_vector(sentence, FastText_model):
    """Return the mean of the L2-normalised FastText vectors of a sentence.

    Parameters
    ----------
    sentence : str
        Raw sentence; tokenized with ``word_tokenize`` (imported elsewhere
        in this file).
    FastText_model : gensim FastText model
        Provides per-word vectors via ``.wv``.

    Returns
    -------
    numpy.ndarray
        1-D sentence embedding of length ``FastText_model.wv.vector_size``.
    """
    normalized = []
    for word in word_tokenize(sentence):
        vec = FastText_model.wv[word]
        # Compute the norm once (the original evaluated it twice per word).
        norm = np.linalg.norm(vec)
        if norm > 0:
            normalized.append(vec / norm)
    if not normalized:
        # BUG FIX: np.mean([]) returns NaN (with a warning); a NaN row would
        # later crash SMOTE. Fall back to a zero vector instead.
        return np.zeros(FastText_model.wv.vector_size)
    return np.mean(normalized, axis=0)
# Embed every review into a fixed-length vector.
vecs = [norm_sent_vector(sentence, ftmodel) for sentence in review.content_stopword]
vecs = np.array(vecs)
X = vecs
Y = review.label
# Oversample the minority class; k_neighbors=1 because the minority class
# may be very small. NOTE(review): fit_sample is the old imblearn API
# (renamed to fit_resample in imblearn >= 0.6).
smt = SMOTE(random_state=777, k_neighbors=1)
X_SMOTE, y_SMOTE = smt.fit_sample(X, Y)
def get_metric(Y_test, Y_pred_mlp):
    """Compute binary-classification metrics by direct counting.

    Parameters
    ----------
    Y_test, Y_pred_mlp : array-like
        True and predicted labels (1 = positive, 0 = negative); flattened
        with ``ravel`` before comparison.

    Returns
    -------
    tuple
        ``(precision, recall, f1_score, accuracy, conf_matrix)`` where
        ``conf_matrix`` is ``[[TP, FP], [FN, TN]]``.
    """
    Y_test, Y_pred_mlp = Y_test.ravel(), Y_pred_mlp.ravel()
    total = Y_test.shape[0]
    true_positive, true_negative, false_positive, false_negative = 0, 0, 0, 0
    for i in range(total):
        if Y_test[i] == 1 and Y_pred_mlp[i] == 1:
            true_positive += 1
        elif Y_test[i] == 0 and Y_pred_mlp[i] == 1:
            false_positive += 1
        elif Y_test[i] == 1 and Y_pred_mlp[i] == 0:
            false_negative += 1
        else:
            true_negative += 1
    # BUG FIX: the original guarded recall and f1 with the *precision*
    # denominator (TP+FP), so recall raised ZeroDivisionError whenever
    # TP+FN == 0 while TP+FP != 0. Each ratio now guards its own denominator.
    precision = true_positive / (true_positive + false_positive) if (true_positive + false_positive) != 0 else 0
    recall = true_positive / (true_positive + false_negative) if (true_positive + false_negative) != 0 else 0
    f1_score = 2 * (precision * recall) / (precision + recall) if (precision + recall) != 0 else 0
    conf_matrix = np.array([[true_positive, false_positive], [false_negative, true_negative]])
    accuracy = (true_positive + true_negative) / total
    return precision, recall, f1_score, accuracy, conf_matrix
# 5-fold cross-validated evaluation of an MLP on the SMOTE-balanced data.
clf = MLPClassifier(random_state=0)
Kf_pred = KFold(n_splits = 5, shuffle = True, random_state = 0)
Kf_pred.get_n_splits(X_SMOTE, y_SMOTE)
cvscores = []
akurasis = []   # per-fold accuracies
fscores = []    # per-fold F1 scores
matrixs = []    # per-fold confusion matrices
fold_var = 1
for train, test in Kf_pred.split(X_SMOTE, y_SMOTE):
X_train_MLP, X_test_MLP = X_SMOTE[train], X_SMOTE[test]
y_train_MLP, Y_test_MLP = y_SMOTE[train], y_SMOTE[test]
clf.fit(X_train_MLP,y_train_MLP)
y_pred = clf.predict(X_test_MLP)
y_pred = y_pred.round()
# Custom metric computation (see get_metric above).
precision, recall, f1_score, accuracy, conf_matrix = get_metric(Y_test_MLP, y_pred)
akurasi = accuracy
akurasis.append(akurasi)
fscore = f1_score
fscores.append(f1_score)
# sklearn's confusion_matrix is kept alongside the hand-rolled one;
# note its layout ([[TN, FP], [FN, TP]]) differs from get_metric's.
matrix = confusion_matrix(Y_test_MLP,y_pred.round())
matrixs.append(matrix)
fold_var += 1
# Right-hand column: report the cross-validation averages.
with col2:
st.title("Hasil Uji Hyperparameter FastText")
st.markdown('''
<hr style="border: 1px solid red;">
<h3>Akurasi dan F1-score</h3>
''',unsafe_allow_html=True)
# Mean accuracy and F1 over the 5 folds, formatted to 4 decimals.
st.write("Rata - Rata Akurasi :{:05.4f} " .format(np.mean(akurasis)))
st.write("Rata - Rata F1-score :{:05.4f} " .format(np.mean(fscores)))