Initial commit
This commit is contained in:
5
Tensorflow/tutoriel38/README.md
Normal file
5
Tensorflow/tutoriel38/README.md
Normal file
@@ -0,0 +1,5 @@
|
||||
# Tutoriel tensorflow
|
||||
## Vecteur embedding, TripletLoss, FaceNet
|
||||
|
||||
La vidéo de ce tutoriel est disponible à l'adresse suivante: https://www.youtube.com/watch?v=x3PbnSNV5n0
|
||||
|
||||
27
Tensorflow/tutoriel38/common.py
Normal file
27
Tensorflow/tutoriel38/common.py
Normal file
@@ -0,0 +1,27 @@
|
||||
import numpy as np
|
||||
from tensorflow.keras import layers, models
|
||||
import tensorflow as tf
|
||||
import io
|
||||
|
||||
def write_labels_embs(model, ds, file_embeddings, file_labels):
    """Run *model* on *ds* and dump the embeddings (and optionally labels) as TSV.

    Parameters
    ----------
    model : Keras-like model; ``model.predict(ds)`` must return a 2-D array.
    ds : iterable yielding ``(images, labels)`` batches (e.g. a tf.data.Dataset).
    file_embeddings : path of the tab-separated embeddings file to write.
    file_labels : path of the labels file (one label per line), or None to skip.
    """
    embeddings = model.predict(ds)
    np.savetxt(file_embeddings, embeddings, delimiter='\t')
    if file_labels is not None:
        # 'with' guarantees the file is closed even if a write raises
        # (the original left the handle open on error).
        with open(file_labels, 'w', encoding='utf-8') as fh:
            for _images, labels in ds:
                for label in labels:
                    # int() so scalar tensors / numpy integers accept the ':d'
                    # format spec; plain ints are unaffected.
                    fh.write("{:d}\n".format(int(label)))
|
||||
|
||||
def model_embedding(nbr_cc, embeddings_size):
    """Build a small conv-net mapping 28x28x1 images to L2-normalized embeddings.

    Parameters
    ----------
    nbr_cc : number of feature maps used by each convolution layer.
    embeddings_size : dimension of the output embedding vector.

    Returns
    -------
    A Keras ``Model`` taking ``(28, 28, 1)`` float32 inputs and producing
    unit-norm embedding vectors of size ``embeddings_size``.
    """
    inputs = layers.Input(shape=(28, 28, 1), dtype=tf.float32)

    # Two identical conv + max-pool stages, then flatten and project.
    x = inputs
    for _ in range(2):
        x = layers.Conv2D(nbr_cc, 3, activation='relu', padding='same')(x)
        x = layers.MaxPool2D()(x)
    x = layers.Flatten()(x)
    x = layers.Dense(embeddings_size, activation=None)(x)

    # Normalize every embedding to unit L2 norm (as expected by triplet loss).
    outputs = layers.Lambda(lambda t: tf.math.l2_normalize(t, axis=1))(x)

    return models.Model(inputs=inputs, outputs=outputs)
|
||||
29
Tensorflow/tutoriel38/mnist.py
Normal file
29
Tensorflow/tutoriel38/mnist.py
Normal file
@@ -0,0 +1,29 @@
|
||||
import numpy as np
import tensorflow as tf
import tensorflow_addons as tfa

import common


# Hyper-parameters.
batch_size = 64
nbr_cc = 64             # number of feature maps in the model's conv layers
embeddings_size = 256   # size of the embedding vector (network output)


def _normalize(images):
    # Scale pixels to [0, 1] and add the channel axis expected by Conv2D.
    return (images.reshape(-1, 28, 28, 1) / 255).astype(np.float32)


(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()

train_dataset = _normalize(x_train)
test_dataset = _normalize(x_test)

train_ds = tf.data.Dataset.from_tensor_slices((train_dataset, y_train)).batch(batch_size)
test_ds = tf.data.Dataset.from_tensor_slices((test_dataset, y_test)).batch(batch_size)

model = common.model_embedding(nbr_cc, embeddings_size)

# Snapshot the embeddings BEFORE any training so they can be compared
# (e.g. in the TensorBoard projector) with the post-training embeddings.
common.write_labels_embs(model, test_ds, 'embeddings1.tsv', 'labels.tsv')

model.compile(
    optimizer=tf.keras.optimizers.Adam(1E-3),
    loss=tfa.losses.TripletSemiHardLoss())

model.fit(train_ds, epochs=5)

# Embeddings AFTER training; labels.tsv was already written above.
common.write_labels_embs(model, test_ds, 'embeddings2.tsv', None)
|
||||
51
Tensorflow/tutoriel38/mnist_avec_predictions.py
Normal file
51
Tensorflow/tutoriel38/mnist_avec_predictions.py
Normal file
@@ -0,0 +1,51 @@
|
||||
import numpy as np
import tensorflow as tf
import tensorflow_addons as tfa
from sklearn import cluster
# Import the submodule explicitly: a bare "import sklearn" does not
# guarantee that sklearn.metrics is importable as an attribute.
from sklearn.metrics import pairwise_distances

import common


# Hyper-parameters.
batch_size = 16
nbr_cc = 64             # number of feature maps in the model's conv layers
embeddings_size = 256   # size of the embedding vector (network output)

# Load MNIST; scale pixels to [0, 1] as float32 NHWC arrays.
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()

train_dataset = (x_train.reshape(-1, 28, 28, 1) / 255).astype(np.float32)
test_dataset = (x_test.reshape(-1, 28, 28, 1) / 255).astype(np.float32)

train_ds = tf.data.Dataset.from_tensor_slices((train_dataset, y_train)).batch(batch_size)
test_ds = tf.data.Dataset.from_tensor_slices((test_dataset, y_test)).batch(batch_size)

# Train the embedding network with the triplet loss.
model = common.model_embedding(nbr_cc, embeddings_size)

model.compile(
    optimizer=tf.keras.optimizers.Adam(1E-3),
    loss=tfa.losses.TripletSemiHardLoss())

model.fit(train_ds, epochs=5)

# Cluster the training embeddings to find one centroid per digit class.
embeddings = model.predict(train_dataset)
kmeans = cluster.KMeans(n_clusters=len(set(y_train)))
kmeans.fit(embeddings)
centroids = kmeans.cluster_centers_

# Label each centroid with the class of its nearest training embedding.
distances = pairwise_distances(embeddings, centroids)
# distances shape: (60000, 10) — one row per training image, one column
# per centroid (the original comment said 6000, which is wrong for MNIST).
lmin = np.argmin(distances, axis=0)  # index of the embedding closest to each centroid
labels_centroids = y_train[lmin]

# Training-set accuracy: classify each sample by its nearest centroid's label.
result = np.equal(y_train, labels_centroids[np.argmin(distances, axis=-1)]).astype(np.float32)
print("Train: précision {:4.2%}".format(np.mean(result)))

# Test-set accuracy with the same nearest-centroid rule.
embeddings = model.predict(test_dataset)
distances = pairwise_distances(embeddings, centroids)
result = np.equal(y_test.astype(np.int32), labels_centroids[np.argmin(distances, axis=-1)]).astype(np.float32)
print("Test : précision {:4.2%}".format(np.mean(result)))
|
||||
Reference in New Issue
Block a user