import numpy as np
import pandas as pd
import streamlit as st
#user_input = st.text_input("please provide a query", "two times 4.")
###################################
with st.form(key='my_form'):
    user_input = st.text_input(label='Enter some text')
    submit_button = st.form_submit_button(label='Submit')
###################################
import os
import openai
openai.api_key = '###############################################'  # redacted; set your own API key here
start_sequence = "\nAI:"
restart_sequence = "\nHuman: "
response = openai.Completion.create(
model="text-davinci-002",
prompt=f"The following is a conversation with an AI assistant. The assistant is helpful, creative, clever, and very friendly.\n\nHuman: Hello, who are you?\nAI: I am an AI created by OpenAI. How can I help you today?\nHuman:{user_input} ",
temperature=0.9,
max_tokens=150,
top_p=1,
frequency_penalty=0,
presence_penalty=0.6,
stop=[" Human:", " AI:"]
)
# take the last whitespace-separated token of the completion and parse it as an integer label
content = response.choices[0].text.split(' ')[-1]
var = int(content)
# querys = 'number after 5.'
# response = gpt3(querys)
# var = response
print(var)
# type(var)
# z
# In[122]:
response['choices'][0]['text']
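# A defensive-parsing sketch (illustrative, not the original author's method): int(content)
# above raises ValueError whenever the completion's last token is not a bare number, and the
# cGAN below only accepts labels 0-9. The helper name extract_digit and the regex-based
# parsing are assumptions.
import re

def extract_digit(text, default=0):
    """Return the first digit (0-9) found in text, or `default` if none is present."""
    match = re.search(r'\d', text)
    return int(match.group()) if match else default

# e.g. extract_digit('The answer is 6.') == 6
# var = extract_digit(response.choices[0].text)   # possible drop-in for the parsing above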
# In[19]:
import numpy as np
import matplotlib.pyplot as plt
# In[2]:
from keras.datasets import mnist
from keras.models import Sequential, Model
from keras.layers import Dense, LeakyReLU, BatchNormalization
from keras.layers import Input, Flatten, Embedding, multiply, Dropout
from keras.optimizers import Adam
from keras import initializers
# In[3]:
model = Sequential()
model.add(Embedding(10, 2))
# the model will take as input an integer matrix of size (batch, input_length).
# the largest integer (i.e. word index) in the input should be no larger than 9 (vocabulary size 10).
# now model.output_shape == (None, 10, 2), where None is the batch dimension.
# input_array = np.random.randint(10, size=(1, 10))
input_array = np.arange(0, 10).reshape(1, -1)
model.compile('rmsprop', 'mse')
output_array = model.predict(input_array)
print(output_array)
print(output_array.shape)
plt.scatter(output_array[0, :, 0], output_array[0, :, 1])
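# The scatter shows the ten 2-D embedding vectors, one point per input index 0-9;
# the layer is untrained here, so their layout is just the random initialisation.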
# In[4]:
# load the MNIST dataset
(X_train, y_train), (X_test, y_test) = mnist.load_data()
# In[5]:
# show one example image per class
fig = plt.figure()
for i in range(10):
    plt.subplot(2, 5, i+1)
    x_y = X_train[y_train == i]
    plt.imshow(x_y[0], cmap='gray', interpolation='none')
    plt.title("Class %d" % (i))
    plt.xticks([])
    plt.yticks([])
plt.tight_layout()
# In[6]:
print('X_train.shape', X_train.shape)
print('y_train.shape', y_train.shape)
# reshaping the inputs
X_train = X_train.reshape(60000, 28*28)
# normalizing the inputs to the range (-1, 1)
X_train = (X_train.astype('float32') / 255 - 0.5) * 2
print('X_train reshape:', X_train.shape)
# In[7]:
# latent space dimension
latent_dim = 100
# image dimension 28x28 = 784
img_dim = 784
init = initializers.RandomNormal(stddev=0.02)
# Generator network
generator = Sequential()
# Input layer and hidden layer 1
generator.add(Dense(128, input_shape=(latent_dim,), kernel_initializer=init))
generator.add(LeakyReLU(alpha=0.2))
generator.add(BatchNormalization(momentum=0.8))
# Hidden layer 2
generator.add(Dense(256))
generator.add(LeakyReLU(alpha=0.2))
generator.add(BatchNormalization(momentum=0.8))
# Hidden layer 3
generator.add(Dense(512))
generator.add(LeakyReLU(alpha=0.2))
generator.add(BatchNormalization(momentum=0.8))
# Output layer
generator.add(Dense(img_dim, activation='tanh'))
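# The tanh output keeps generated pixels in (-1, 1), matching the normalisation applied to X_train above.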
# In[8]:
# prints a summary representation of your model
generator.summary()
# In[9]:
# Embedding condition in input layer
num_classes = 10
# Create label embeddings
label = Input(shape=(1,), dtype='int32')
label_embedding = Embedding(num_classes, latent_dim)(label)
label_embedding = Flatten()(label_embedding)
# latent space
z = Input(shape=(latent_dim,))
# Merge inputs (z x label)
input_generator = multiply([z, label_embedding])
# Output image
img = generator(input_generator)
# Generator with condition input
generator = Model([z, label], img)
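# A minimal usage sketch of the conditional generator, assuming untrained weights (so the
# output is noise); sample_z and sample_label are illustrative names, not from the original code.
sample_z = np.random.normal(loc=0, scale=1, size=(1, latent_dim))
sample_label = np.array([[7]])                      # condition on the digit 7
sample_img = generator.predict([sample_z, sample_label])
print(sample_img.shape)                             # (1, 784): one flattened 28x28 image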
# In[10]:
# prints a summary representation of your model
generator.summary()
# In[11]:
# Discriminator network
discriminator = Sequential()
# Input layer and hidden layer 1
discriminator.add(Dense(128, input_shape=(img_dim,), kernel_initializer=init))
discriminator.add(LeakyReLU(alpha=0.2))
# Hidden layer 2
discriminator.add(Dense(256))
discriminator.add(LeakyReLU(alpha=0.2))
# Hidden layer 3
discriminator.add(Dense(512))
discriminator.add(LeakyReLU(alpha=0.2))
# Output layer
discriminator.add(Dense(1, activation='sigmoid'))
# In[12]:
# prints a summary representation of your model
discriminator.summary()
# In[13]:
# Embedding condition in input layer
# Create label embeddings
label_d = Input(shape=(1,), dtype='int32')
label_embedding_d = Embedding(num_classes, img_dim)(label_d)
label_embedding_d = Flatten()(label_embedding_d)
# image input, 28x28 flattened to 784
img_d = Input(shape=(img_dim,))
# Merge inputs (img x label)
input_discriminator = multiply([img_d, label_embedding_d])
# Output validity score
validity = discriminator(input_discriminator)
# Discriminator with condition input
discriminator = Model([img_d, label_d], validity)
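# A quick sanity-check sketch (illustrative, not part of the original flow): score one random
# flattened image / label pair with the conditional discriminator; dummy_img and dummy_label
# are assumed names.
dummy_img = np.random.uniform(-1, 1, size=(1, img_dim))
dummy_label = np.array([[3]])
print(discriminator.predict([dummy_img, dummy_label]))  # one sigmoid validity score in (0, 1)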
# In[14]:
# prints a summary representation of your model
discriminator.summary()
# In[15]:
# Optimizer
optimizer = Adam(lr=0.0002, beta_1=0.5)
discriminator.compile(optimizer=optimizer, loss='binary_crossentropy', metrics=['binary_accuracy'])
# In[16]:
# Freeze the discriminator's weights while the generator is trained through the combined model
# (the already-compiled discriminator still updates when trained on its own batches)
discriminator.trainable = False
validity = discriminator([generator([z, label]), label])
d_g = Model([z, label], validity)
d_g.compile(optimizer=optimizer, loss='binary_crossentropy', metrics=['binary_accuracy'])
# In[17]:
# prints a summary representation of your model
d_g.summary()
# In[134]:
epochs = 100
batch_size = 64
smooth = 0.1  # one-sided label smoothing applied to the real targets
real = np.ones(shape=(batch_size, 1))
fake = np.zeros(shape=(batch_size, 1))
d_loss = []
d_g_loss = []
for e in range(epochs + 1):
    for i in range(len(X_train) // batch_size):
        # Train Discriminator weights
        discriminator.trainable = True
        # Real samples
        X_batch = X_train[i*batch_size:(i+1)*batch_size]
        real_labels = y_train[i*batch_size:(i+1)*batch_size].reshape(-1, 1)
        d_loss_real = discriminator.train_on_batch(x=[X_batch, real_labels], y=real * (1 - smooth))
        # Fake Samples
        z = np.random.normal(loc=0, scale=1, size=(batch_size, latent_dim))
        random_labels = np.random.randint(0, 10, batch_size).reshape(-1, 1)
        X_fake = generator.predict_on_batch([z, random_labels])
        d_loss_fake = discriminator.train_on_batch(x=[X_fake, random_labels], y=fake)
        # Discriminator loss
        d_loss_batch = 0.5 * (d_loss_real[0] + d_loss_fake[0])
        # Train Generator weights
        discriminator.trainable = False
        z = np.random.normal(loc=0, scale=1, size=(batch_size, latent_dim))
        random_labels = np.random.randint(0, 10, batch_size).reshape(-1, 1)
        d_g_loss_batch = d_g.train_on_batch(x=[z, random_labels], y=real)
        print(
            'epoch = %d/%d, batch = %d/%d, d_loss=%.3f, g_loss=%.3f' % (e + 1, epochs, i, len(X_train) // batch_size, d_loss_batch, d_g_loss_batch[0]),
            100*' ',
            end='\r'
        )
    d_loss.append(d_loss_batch)
    d_g_loss.append(d_g_loss_batch[0])
    print('epoch = %d/%d, d_loss=%.3f, g_loss=%.3f' % (e + 1, epochs, d_loss[-1], d_g_loss[-1]), 100*' ')
    # every 10 epochs, render a grid of samples conditioned on the digit parsed from the user query
    if e % 10 == 0:
        samples = 10
        z = np.random.normal(loc=0, scale=1, size=(samples, latent_dim))
        # labels = np.arange(0, 1).reshape(-1, 1)
        y = []
        yes = []
        yes.append(var)
        for i in range(10):
            y.append(yes)
        q = np.array(y)   # shape (10, 1): the same label, var, repeated for every sample
        x_fake = generator.predict([z, q])
        fig = plt.figure(figsize=(12, 6))
        for k in range(samples):
            plt.subplot(2, 5, k+1)
            plt.imshow(x_fake[k].reshape(28, 28), cmap='gray')
            plt.xticks([])
            plt.yticks([])
        plt.tight_layout()
        # plt.show()
        st.pyplot(fig)
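# Running the app (assuming this file is saved as app.py; the filename is an assumption):
#
#   streamlit run app.py
#
# Note that the full 100-epoch training loop re-runs on every form submission, so in practice
# the GAN would be trained offline and only its weights loaded here; that refactor is not shown.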