Initial commit
This commit is contained in:
5
Tensorflow/tutoriel35/README.md
Normal file
5
Tensorflow/tutoriel35/README.md
Normal file
@@ -0,0 +1,5 @@
|
||||
# Tutoriel Tensorflow
|
||||
## GAN conditionnel
|
||||
|
||||
La vidéo de ce tutoriel est disponible à l'adresse suivante: https://www.youtube.com/watch?v=9Je5f8UwQ98
|
||||
|
||||
102
Tensorflow/tutoriel35/gan.py
Normal file
102
Tensorflow/tutoriel35/gan.py
Normal file
@@ -0,0 +1,102 @@
|
||||
import tensorflow as tf
|
||||
import glob
|
||||
import numpy as np
|
||||
import os
|
||||
from tensorflow.keras import layers, models
|
||||
import time
|
||||
import cv2
|
||||
import model
|
||||
|
||||
# Hyper-parameters for unconditional GAN training on MNIST.
batch_size=256
epochs=500
noise_dim=100          # dimensionality of the generator's input noise vector
tab_size=5             # preview grid is tab_size x tab_size digits
num_examples_to_generate=tab_size*tab_size
dir_images='images_gan'                         # where preview PNGs are written
checkpoint_dir='./training_checkpoints_gan'
checkpoint_prefix=os.path.join(checkpoint_dir, "ckpt")

if not os.path.isdir(dir_images):
    os.mkdir(dir_images)

# MNIST digits; labels are unused by this unconditional GAN.
(train_images, train_labels), (test_images, test_labels)=tf.keras.datasets.mnist.load_data()

# NHWC layout; scale pixels from [0, 255] to [-1, 1] (tanh output range).
train_images=train_images.reshape(-1, 28, 28, 1).astype('float32')
train_images=(train_images-127.5)/127.5

train_dataset=tf.data.Dataset.from_tensor_slices(train_images).shuffle(len(train_images)).batch(batch_size)
|
||||
|
||||
def discriminator_loss(real_output, fake_output):
    """Total discriminator loss: real samples should score 1, fakes 0."""
    loss_on_real=cross_entropy(tf.ones_like(real_output), real_output)
    loss_on_fake=cross_entropy(tf.zeros_like(fake_output), fake_output)
    return loss_on_real+loss_on_fake
|
||||
|
||||
def generator_loss(fake_output):
    """Generator loss: fakes should be scored as real (1) by the critic."""
    target=tf.ones_like(fake_output)
    return cross_entropy(target, fake_output)
|
||||
|
||||
# Build the two networks from the companion module.
generator=model.generator_model()
discriminator=model.discriminator_model()

# The discriminator outputs a raw logit, hence from_logits=True.
cross_entropy=tf.keras.losses.BinaryCrossentropy(from_logits=True)

# Running means of the per-batch losses, reported once per epoch.
train_generator_loss=tf.keras.metrics.Mean()
train_discriminator_loss=tf.keras.metrics.Mean()

generator_optimizer=tf.keras.optimizers.Adam(1e-4)
discriminator_optimizer=tf.keras.optimizers.Adam(1e-4)

# Checkpoint both models together with their optimizer state.
checkpoint=tf.train.Checkpoint(generator_optimizer=generator_optimizer,
                               discriminator_optimizer=discriminator_optimizer,
                               generator=generator,
                               discriminator=discriminator)

# Fixed noise reused every epoch so preview images show training progress.
seed=tf.random.normal([num_examples_to_generate, noise_dim])
|
||||
|
||||
@tf.function
def train_step(images):
    """Run one adversarial update on a batch of real images.

    Args:
        images: batch of real images scaled to [-1, 1], shape (B, 28, 28, 1).

    Side effects: updates both networks' weights and the epoch loss metrics.
    """
    # Fix: size the noise from the actual batch. The final batch of an epoch
    # can be smaller than batch_size, so hard-coding [batch_size, noise_dim]
    # mismatched real/fake batch sizes; gan_cond.py already sizes dynamically.
    noise=tf.random.normal([tf.shape(images)[0], noise_dim])

    with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
        generated_images=generator(noise, training=True)

        real_output=discriminator(images, training=True)
        fake_output=discriminator(generated_images, training=True)

        gen_loss=generator_loss(fake_output)
        disc_loss=discriminator_loss(real_output, fake_output)

        # Accumulate the per-epoch loss averages.
        train_generator_loss(gen_loss)
        train_discriminator_loss(disc_loss)

    gradients_of_generator=gen_tape.gradient(gen_loss, generator.trainable_variables)
    gradients_of_discriminator=disc_tape.gradient(disc_loss, discriminator.trainable_variables)

    generator_optimizer.apply_gradients(zip(gradients_of_generator, generator.trainable_variables))
    discriminator_optimizer.apply_gradients(zip(gradients_of_discriminator, discriminator.trainable_variables))
|
||||
|
||||
def train(dataset, epochs):
    """Full training loop: one pass over `dataset` per epoch, a preview image
    every epoch, and a checkpoint every 15 epochs."""
    for epoch in range(epochs):
        start=time.time()
        for image_batch in dataset:
            train_step(image_batch)
        # Preview grid from the fixed `seed` so progress is comparable.
        generate_and_save_images(generator, epoch+1, seed)
        if (epoch+1)%15==0:
            checkpoint.save(file_prefix=checkpoint_prefix)
        print ('Epoch {}: loss generator: {:.4f} loss discriminator: {:.4f} {:.4f} sec'.format(epoch+1,
                train_generator_loss.result(),
                train_discriminator_loss.result(),
                time.time()-start))
        # Reset the running means so each epoch reports its own average.
        train_generator_loss.reset_states()
        train_discriminator_loss.reset_states()
|
||||
|
||||
def generate_and_save_images(model, epoch, test_input):
    """Render a tab_size x tab_size grid of generated digits to a PNG.

    Args:
        model: the generator network (single noise input).
        epoch: 1-based epoch number, used in the output filename.
        test_input: noise batch, shape (num_examples_to_generate, noise_dim).
    """
    # Bug fix: this generator is unconditional (see model.generator_model,
    # single Input), so it takes only the noise. The original passed
    # [test_input, one_hot_labels] -- copied from gan_cond.py -- which a
    # single-input functional model rejects at call time.
    predictions=model(test_input, training=False)
    img=np.empty(shape=(tab_size*28, tab_size*28), dtype=np.float32)
    for i in range(tab_size):
        for j in range(tab_size):
            # Map tanh output [-1, 1] back to pixel values [0, 255].
            img[j*28:(j+1)*28, i*28:(i+1)*28]=predictions[j*tab_size+i, :, :, 0]*127.5+127.5
    cv2.imwrite('{}/image_{:04d}.png'.format(dir_images, epoch), img)


train(train_dataset, epochs)
|
||||
95
Tensorflow/tutoriel35/gan_cond.py
Normal file
95
Tensorflow/tutoriel35/gan_cond.py
Normal file
@@ -0,0 +1,95 @@
|
||||
import tensorflow as tf
|
||||
import numpy as np
|
||||
import os
|
||||
from tensorflow.keras import layers, models
|
||||
import time
|
||||
import cv2
|
||||
import model_cond
|
||||
|
||||
# Hyper-parameters for conditional GAN training on MNIST.
batch_size=256
epochs=500
noise_dim=100          # dimensionality of the generator's input noise vector
tab_size=6             # preview grid is tab_size x tab_size digits
num_examples_to_generate=tab_size*tab_size
dir_images='images_gan_cond'
checkpoint_dir='./training_checkpoints_gan_cond'
checkpoint_prefix=os.path.join(checkpoint_dir, "ckpt")

if not os.path.isdir(dir_images):
    os.mkdir(dir_images)

(train_images, train_labels), (test_images, test_labels)=tf.keras.datasets.mnist.load_data()

# Class labels condition both networks, fed as one-hot vectors.
train_labels=tf.one_hot(train_labels, 10)

# NHWC layout; scale pixels from [0, 255] to [-1, 1] (tanh output range).
train_images=train_images.reshape(-1, 28, 28, 1).astype('float32')
train_images=(train_images-127.5)/127.5

train_dataset=tf.data.Dataset.from_tensor_slices((train_images, train_labels)).shuffle(len(train_images)).batch(batch_size)
|
||||
|
||||
def discriminator_loss(real_output, fake_output):
    """Sum of the loss on real samples (target 1) and fakes (target 0)."""
    on_real=cross_entropy(tf.ones_like(real_output), real_output)
    on_fake=cross_entropy(tf.zeros_like(fake_output), fake_output)
    return on_real+on_fake
|
||||
|
||||
def generator_loss(fake_output):
    """Generator wants its fakes classified as real (target 1)."""
    wanted=tf.ones_like(fake_output)
    return cross_entropy(wanted, fake_output)
|
||||
|
||||
# Build the conditional networks from the companion module.
generator=model_cond.generator_model()
discriminator=model_cond.discriminator_model()

# The discriminator outputs a raw logit, hence from_logits=True.
cross_entropy=tf.keras.losses.BinaryCrossentropy(from_logits=True)

generator_optimizer=tf.keras.optimizers.Adam(1E-4)
discriminator_optimizer=tf.keras.optimizers.Adam(1E-4)

# Checkpoint both models together with their optimizer state.
checkpoint=tf.train.Checkpoint(generator_optimizer=generator_optimizer,
                               discriminator_optimizer=discriminator_optimizer,
                               generator=generator,
                               discriminator=discriminator)

# Fixed noise reused every epoch so preview images show training progress.
seed=tf.random.normal([num_examples_to_generate, noise_dim])
|
||||
|
||||
@tf.function
def train_step(images, labels):
    """One conditional adversarial step: update generator and discriminator.

    Args:
        images: real images in [-1, 1], shape (B, 28, 28, 1).
        labels: one-hot class vectors for the real images, shape (B, 10).
    """
    n=len(labels)
    z=tf.random.normal([n, noise_dim])
    # Draw random target classes for the fake samples, as one-hot vectors.
    fake_classes=tf.random.uniform(shape=[n], minval=0, maxval=10, dtype=tf.dtypes.int32)
    fake_classes=tf.one_hot(fake_classes, 10)

    with tf.GradientTape() as g_tape, tf.GradientTape() as d_tape:
        fakes=generator([z, fake_classes], training=True)

        real_output=discriminator([images, labels], training=True)
        fake_output=discriminator([fakes, fake_classes], training=True)

        g_loss=generator_loss(fake_output)
        d_loss=discriminator_loss(real_output, fake_output)

    g_grads=g_tape.gradient(g_loss, generator.trainable_variables)
    d_grads=d_tape.gradient(d_loss, discriminator.trainable_variables)

    generator_optimizer.apply_gradients(zip(g_grads, generator.trainable_variables))
    discriminator_optimizer.apply_gradients(zip(d_grads, discriminator.trainable_variables))
|
||||
|
||||
def train(dataset, epochs):
    """Run the full conditional-GAN training loop over `dataset`."""
    for epoch_num in range(1, epochs+1):
        t0=time.time()
        for image_batch, label_batch in dataset:
            train_step(image_batch, label_batch)
        # Preview grid from the fixed seed so progress is comparable.
        generate_and_save_images(generator, epoch_num, seed)
        # Persist weights and optimizer state every 15 epochs.
        if epoch_num%15==0:
            checkpoint.save(file_prefix=checkpoint_prefix)
        print ('Time for epoch {} is {} sec'.format(epoch_num, time.time()-t0))
|
||||
|
||||
def generate_and_save_images(model, epoch, test_input):
    """Write a tab_size x tab_size grid of generated digits as a PNG.

    Args:
        model: the conditional generator ([noise, one_hot_labels] inputs).
        epoch: 1-based epoch number, used in the output filename.
        test_input: noise batch, shape (num_examples_to_generate, noise_dim).
    """
    # Cycle the digit classes 0..9 across the grid cells.
    labels=tf.one_hot(tf.range(0, num_examples_to_generate, 1)%10, 10)
    predictions=model([test_input, labels], training=False)
    canvas=np.empty(shape=(tab_size*28, tab_size*28), dtype=np.float32)
    for col in range(tab_size):
        for row in range(tab_size):
            # Map tanh output [-1, 1] back to pixel values [0, 255].
            tile=predictions[row*tab_size+col, :, :, 0]*127.5+127.5
            canvas[row*28:(row+1)*28, col*28:(col+1)*28]=tile
    cv2.imwrite('{}/image_{:04d}.png'.format(dir_images, epoch), canvas)


train(train_dataset, epochs)
|
||||
34
Tensorflow/tutoriel35/genere.py
Normal file
34
Tensorflow/tutoriel35/genere.py
Normal file
@@ -0,0 +1,34 @@
|
||||
import tensorflow as tf
|
||||
from tensorflow.keras import layers, models
|
||||
import time, threading
|
||||
import numpy as np
|
||||
import cv2
|
||||
import model_cond
|
||||
|
||||
noise_dim=100  # dimensionality of the generator's noise input

# Rebuild the conditional generator and load the latest trained weights.
generator=model_cond.generator_model()
checkpoint=tf.train.Checkpoint(generator=generator)
checkpoint.restore(tf.train.latest_checkpoint(checkpoint_dir='./training_checkpoints_gan_cond/'))

marge=20  # pixel margin around the rendered digits

while True:
    chiffres=input("Entrez une serie de chiffre:")
    try:
        chiffres_int=int(chiffres)
    except ValueError:
        # Fix: catch only the parse error (was a bare except) and re-prompt.
        continue
    # Fix: work on the absolute value. With a negative number the original
    # decomposition mixed Python's floored % with truncating int(x/10) and
    # produced wrong digits.
    chiffres_int=abs(chiffres_int)

    # Decompose into decimal digits, least-significant first.
    liste_chiffres=[]
    while chiffres_int:
        liste_chiffres.append(chiffres_int%10)
        chiffres_int//=10  # integer division (value is non-negative here)
    if not liste_chiffres:
        # Fix: an input of 0 used to yield an empty batch; render a single 0.
        liste_chiffres=[0]

    seed=tf.random.normal([len(liste_chiffres), noise_dim])
    labels=tf.one_hot(liste_chiffres, 10)
    image=np.zeros(shape=(28+2*marge, len(liste_chiffres)*28+2*marge), dtype=np.float32)
    prediction=generator([seed, labels], training=False)
    # Digits were collected least-significant first, so draw them reversed
    # (most-significant digit on the left), mapping tanh output to [0, 255].
    for i in range(len(prediction)):
        image[marge:marge+28, marge+i*28:marge+(i+1)*28]=prediction[len(liste_chiffres)-i-1, :, :, 0]*127.5+127.5
    cv2.imshow("Image", image.astype(np.uint8))
    key=cv2.waitKey(10)
|
||||
68
Tensorflow/tutoriel35/horloge.py
Normal file
68
Tensorflow/tutoriel35/horloge.py
Normal file
@@ -0,0 +1,68 @@
|
||||
import tensorflow as tf
|
||||
import time, threading
|
||||
import numpy as np
|
||||
import cv2
|
||||
import model_cond
|
||||
|
||||
noise_dim=100  # dimensionality of the generator's noise input

# Rebuild the conditional generator and load the latest trained weights.
generator=model_cond.generator_model()
checkpoint=tf.train.Checkpoint(generator=generator)
checkpoint.restore(tf.train.latest_checkpoint(checkpoint_dir='./training_checkpoints_gan_cond/'))

marge=20   # outer pixel margin around the clock face
marge2=5   # extra gap around the HH:MM and MM:SS separators
# Canvas for six 28px digits plus margins and separator gaps.
image=np.zeros(shape=(28+2*marge, 6*28+2*marge+4*marge2), dtype=np.float32)
# Previously drawn digit values; -1 forces a full first draw.
old_h1=old_h2=old_m1=old_m2=old_s1=old_s2=-1
# Flag polled by foo(); the main loop clears it to stop the timer chain.
cont=1
|
||||
|
||||
def foo():
    """Timer callback: once per second, regenerate and redraw any clock
    digit whose value changed since the previous tick."""
    global old_h1, old_h2, old_m1, old_m2, old_s1, old_s2

    if cont:
        # Re-arm the one-second timer while the main loop wants us running.
        threading.Timer(1, foo).start()
    seed=tf.random.normal([6, noise_dim])
    heure=time.strftime('%H:%M:%S')
    print(heure)
    # Split HH:MM:SS once and take tens/units of each field with divmod
    # (was six redundant str.split calls plus int(x/10) round trips).
    hh, mm, ss=heure.split(':')
    h1, h2=divmod(int(hh), 10)
    m1, m2=divmod(int(mm), 10)
    s1, s2=divmod(int(ss), 10)
    labels=tf.one_hot([h1, h2, m1, m2, s1, s2], 10)

    prediction=generator([seed, labels], training=False)
    # Repaint only the digits that changed, mapping tanh output to [0, 255].
    if h1!=old_h1:
        image[0+marge:28+marge, 0*28+marge:1*28+marge]=prediction[0, :, :, 0]*127.5+127.5
    if h2!=old_h2:
        image[0+marge:28+marge, 1*28+marge:2*28+marge]=prediction[1, :, :, 0]*127.5+127.5
    if m1!=old_m1:
        image[0+marge:28+marge, 2*28+marge+2*marge2:3*28+marge+2*marge2]=prediction[2, :, :, 0]*127.5+127.5
    if m2!=old_m2:
        image[0+marge:28+marge, 3*28+marge+2*marge2:4*28+marge+2*marge2]=prediction[3, :, :, 0]*127.5+127.5
    if s1!=old_s1:
        image[0+marge:28+marge, 4*28+marge+4*marge2:5*28+marge+4*marge2]=prediction[4, :, :, 0]*127.5+127.5
    if s2!=old_s2:
        image[0+marge:28+marge, 5*28+marge+4*marge2:6*28+marge+4*marge2]=prediction[5, :, :, 0]*127.5+127.5

    # Colon dots between HH:MM and MM:SS.
    cv2.circle(image, (marge+2*28+marge2, marge+8), 1, (255, 255, 255), 2)
    cv2.circle(image, (marge+2*28+marge2, marge+20), 1, (255, 255, 255), 2)
    cv2.circle(image, (marge+4*28+3*marge2, marge+8), 1, (255, 255, 255), 2)
    cv2.circle(image, (marge+4*28+3*marge2, marge+20), 1, (255, 255, 255), 2)

    old_h1=h1
    old_h2=h2
    old_m1=m1
    old_m2=m2
    old_s1=s1
    old_s2=s2
|
||||
|
||||
# Start the self-rearming timer chain, then run the display loop.
foo()
while True:
    cv2.imshow("Horloge", image.astype(np.uint8))
    key=cv2.waitKey(10)
    # Fix: the original tested `key==ord('q')&0xFF`. `&` binds tighter than
    # `==`, so the mask applied to the constant (a no-op) instead of the key
    # code; mask the key so high/modifier bits returned by some platforms
    # don't prevent quitting.
    if key&0xFF==ord('q'):
        cv2.destroyAllWindows()
        cont=0   # stop foo() from re-arming its timer
        quit()
|
||||
40
Tensorflow/tutoriel35/model.py
Normal file
40
Tensorflow/tutoriel35/model.py
Normal file
@@ -0,0 +1,40 @@
|
||||
from tensorflow.keras import layers, models
|
||||
|
||||
def generator_model():
    """Build the unconditional DCGAN generator.

    Maps a 100-dim noise vector to a 28x28x1 image in [-1, 1] (tanh) via a
    dense projection to 7x7x256 followed by three transposed convolutions.

    Returns:
        A Keras functional Model: (B, 100) -> (B, 28, 28, 1).
    """
    # Fix: shape must be a tuple; (100) is just the int 100, not a 1-tuple.
    entree=layers.Input(shape=(100,), dtype='float32')

    result=layers.Dense(7*7*256, use_bias=False)(entree)
    result=layers.BatchNormalization()(result)
    result=layers.LeakyReLU()(result)

    result=layers.Reshape((7, 7, 256))(result)

    # Upsampling path: 7x7 (stride 1), then 7x7 -> 14x14 -> 28x28 (stride 2).
    result=layers.Conv2DTranspose(128, (5, 5), strides=(1, 1), padding='same', use_bias=False)(result)
    result=layers.BatchNormalization()(result)
    result=layers.LeakyReLU()(result)

    result=layers.Conv2DTranspose(64, (5, 5), strides=(2, 2), padding='same', use_bias=False)(result)
    result=layers.BatchNormalization()(result)
    result=layers.LeakyReLU()(result)

    # tanh keeps pixel values in [-1, 1], matching the training data scaling.
    sortie=layers.Conv2DTranspose(1, (5, 5), strides=(2, 2), padding='same', use_bias=False, activation='tanh')(result)

    model=models.Model(inputs=entree, outputs=sortie)
    return model
|
||||
|
||||
def discriminator_model():
    """Build the DCGAN discriminator: 28x28x1 image -> one real/fake logit.

    Returns:
        A Keras functional Model: (B, 28, 28, 1) -> (B, 1) raw logit.
    """
    img_in=layers.Input(shape=(28, 28, 1), dtype='float32')

    x=layers.Conv2D(64, (5, 5), strides=(2, 2), padding='same')(img_in)
    x=layers.LeakyReLU()(x)
    x=layers.Dropout(0.3)(x)

    x=layers.Conv2D(128, (5, 5), strides=(2, 2), padding='same')(x)
    x=layers.LeakyReLU()(x)
    x=layers.Dropout(0.3)(x)

    x=layers.Flatten()(x)
    # Raw logit: the loss is constructed with from_logits=True.
    logit=layers.Dense(1)(x)

    return models.Model(inputs=img_in, outputs=logit)
|
||||
49
Tensorflow/tutoriel35/model_cond.py
Normal file
49
Tensorflow/tutoriel35/model_cond.py
Normal file
@@ -0,0 +1,49 @@
|
||||
from tensorflow.keras import layers, models
|
||||
|
||||
def generator_model():
    """Build the conditional DCGAN generator.

    Concatenates a 100-dim noise vector with a 10-dim one-hot class vector,
    then upsamples to a 28x28x1 image in [-1, 1] (tanh).

    Returns:
        A Keras functional Model: [(B, 100), (B, 10)] -> (B, 28, 28, 1).
    """
    # Fix: shapes must be tuples; (100) and (10) are plain ints.
    entree_bruit =layers.Input(shape=(100,), dtype='float32')
    entree_classe=layers.Input(shape=(10,), dtype='float32')

    # Condition the generator by concatenating noise and class vector.
    result=layers.concatenate([entree_bruit, entree_classe])

    result=layers.Dense(7*7*256, use_bias=False)(result)
    result=layers.BatchNormalization()(result)
    result=layers.LeakyReLU()(result)

    result=layers.Reshape((7, 7, 256))(result)

    # Upsampling path: 7x7 (stride 1), then 7x7 -> 14x14 -> 28x28 (stride 2).
    result=layers.Conv2DTranspose(128, (5, 5), strides=(1, 1), padding='same', use_bias=False)(result)
    result=layers.BatchNormalization()(result)
    result=layers.LeakyReLU()(result)

    result=layers.Conv2DTranspose(64, (5, 5), strides=(2, 2), padding='same', use_bias=False)(result)
    result=layers.BatchNormalization()(result)
    result=layers.LeakyReLU()(result)

    # tanh keeps pixel values in [-1, 1], matching the training data scaling.
    sortie=layers.Conv2DTranspose(1, (5, 5), strides=(2, 2), padding='same', use_bias=False, activation='tanh')(result)

    model=models.Model(inputs=[entree_bruit, entree_classe], outputs=sortie)
    return model
|
||||
|
||||
def discriminator_model():
    """Build the conditional DCGAN discriminator.

    The one-hot class vector is projected to a 28x28 map and stacked with the
    image as a second channel, so the critic judges (image, class) pairs.

    Returns:
        A Keras functional Model: [(B, 28, 28, 1), (B, 10)] -> (B, 1) logit.
    """
    entree_image =layers.Input(shape=(28, 28, 1), dtype='float32')
    # Fix: shape must be a tuple; (10) is the plain int 10.
    entree_classe=layers.Input(shape=(10,), dtype='float32')

    # Embed the class vector as an extra 28x28 image channel.
    result1=layers.Dense(28*28, use_bias=False)(entree_classe)
    result1=layers.Reshape((28, 28, 1))(result1)

    result=layers.concatenate([entree_image, result1])

    result=layers.Conv2D(64, (5, 5), strides=(2, 2), padding='same')(result)
    result=layers.LeakyReLU()(result)
    result=layers.Dropout(0.3)(result)

    result=layers.Conv2D(128, (5, 5), strides=(2, 2), padding='same')(result)
    result=layers.LeakyReLU()(result)
    result=layers.Dropout(0.3)(result)

    result=layers.Flatten()(result)
    # Raw logit: the loss is constructed with from_logits=True.
    sortie=layers.Dense(1)(result)

    model=models.Model(inputs=[entree_image, entree_classe], outputs=sortie)
    return model
|
||||
Reference in New Issue
Block a user