Initial commit
4
Tensorflow/concours_drive/README.md
Normal file
@@ -0,0 +1,4 @@
# Concours DRIVE: Digital Retinal Images for Vessel Extraction

La vidéo du tutoriel se trouve à l'adresse suivante:

https://www.youtube.com/watch?v=MQiHsZurr5k
75
Tensorflow/concours_drive/model.py
Normal file
@@ -0,0 +1,75 @@
|
||||
import tensorflow as tf
|
||||
from tensorflow.keras import layers, models
|
||||
|
||||
def model(nbr):
    """Build a U-Net style segmentation network for 576x560 RGB images.

    Args:
        nbr: base number of convolution filters; deeper stages use
            multiples of it.

    Returns:
        An uncompiled tf.keras Model mapping (576, 560, 3) float32 inputs
        to a (576, 560, 1) sigmoid vessel-probability map.
    """
    def double_conv(x, f1, f2):
        # Two 3x3 conv+BN stages; returns the batch-normalised output.
        x = layers.Conv2D(f1, 3, activation='relu', padding='same')(x)
        x = layers.BatchNormalization()(x)
        x = layers.Conv2D(f2, 3, activation='relu', padding='same')(x)
        return layers.BatchNormalization()(x)

    entree = layers.Input(shape=(576, 560, 3), dtype='float32')

    # Encoder: keep each stage's output for the skip connections.
    skips = []
    x = entree
    for f in (nbr, 2 * nbr, 4 * nbr, 4 * nbr):
        s = double_conv(x, f, f)
        skips.append(s)
        x = layers.MaxPool2D()(s)

    # Bottleneck.
    x = double_conv(x, 8 * nbr, 4 * nbr)

    # Decoder: upsample, concatenate the matching skip, then convolve.
    for f1, f2, skip in ((8 * nbr, 4 * nbr, skips[3]),
                         (4 * nbr, 2 * nbr, skips[2]),
                         (2 * nbr, nbr, skips[1]),
                         (nbr, nbr, skips[0])):
        x = layers.UpSampling2D()(x)
        x = tf.concat([x, skip], axis=3)
        x = double_conv(x, f1, f2)

    sortie = layers.Conv2D(1, 1, activation='sigmoid', padding='same')(x)
    return models.Model(inputs=entree, outputs=sortie)
|
||||
98
Tensorflow/concours_drive/train.py
Normal file
@@ -0,0 +1,98 @@
|
||||
import tensorflow as tf
|
||||
from sklearn.model_selection import train_test_split
|
||||
from PIL import Image
|
||||
import os
|
||||
import numpy as np
|
||||
import random
|
||||
import cv2
|
||||
import model
|
||||
import traitement_images as ti
|
||||
|
||||
# Train a U-Net on the DRIVE dataset, then write predicted vessel masks
# for the test images into ./predictions/.

dir_images = './training/images/'
dir_mask = './training/1st_manual/'

if not os.path.isdir(dir_images):
    quit("The directory {} doesn't exist !".format(dir_images))
if not os.path.isdir(dir_mask):
    quit("The directory {} doesn't exist !".format(dir_mask))

tab_images = []
tab_masks = []

# Fix: os.listdir() never returns None; an empty directory yields [].
list_file = os.listdir(dir_images)
if not list_file:
    quit("No file in {} !".format(dir_images))

for fichier in list_file:
    img_orig = cv2.imread(dir_images + fichier)
    tab_images.append(img_orig[:576, :560])
    num = fichier.split('_')[0]
    file_mask = dir_mask + num + '_manual1.gif'
    if not os.path.isfile(file_mask):
        quit("Mask of {} doesn't exist in {}".format(file_mask, dir_mask))
    img_mask_orig = np.array(Image.open(file_mask))
    tab_masks.append(img_mask_orig[:576, :560])

    # Augmentation: rotations every 30 degrees, plus the three flips of
    # each rotation, with random photometric changes on the images only.
    for angle in range(0, 360, 30):
        img_r = ti.rotateImage(img_orig, angle)
        img = ti.random_change(img_r.copy())
        tab_images.append(img[:576, :560])
        img_mask = ti.rotateImage(img_mask_orig, angle)
        tab_masks.append(img_mask[:576, :560])

        for flip in (0, 1, -1):
            img = ti.random_change(cv2.flip(img_r, flip))
            tab_images.append(img[:576, :560])
            tab_masks.append(cv2.flip(img_mask, flip)[:576, :560])

tab_images = np.array(tab_images, dtype=np.float32) / 255
# Fix: the GIF masks are single-channel 2-D arrays; the original `[:, :, :]`
# slice was a no-op.  Add the trailing channel axis so the label shape
# (N, 576, 560, 1) matches the model output.
tab_masks = np.array(tab_masks, dtype=np.float32)[..., np.newaxis] / 255

train_images, test_images, train_masks, test_masks = train_test_split(
    tab_images, tab_masks, test_size=0.05)

del tab_images
del tab_masks

my_model = model.model(64)

my_model.compile(optimizer='adam',
                 loss='binary_crossentropy',
                 metrics=['accuracy'])
my_model.fit(train_images,
             train_masks,
             epochs=20,
             batch_size=4,
             validation_data=(test_images, test_masks))

dir_test_images = './test/images/'

tab_test_images = []
tab_files = []
for fichier in os.listdir(dir_test_images):
    img = cv2.imread(dir_test_images + fichier)
    tab_test_images.append(img[:576, :560])
    tab_files.append(fichier.split('_')[0])

tab_test_images = np.array(tab_test_images, dtype=np.float32) / 255
tab_files = np.array(tab_files)

# Predictions are written back at the original DRIVE resolution (584x565),
# the cropped border staying black.
for idx in range(len(tab_test_images)):
    mask = np.zeros((584, 565, 1), dtype=np.float32)
    prediction = my_model.predict(np.array([tab_test_images[idx]]))
    mask[:576, :560] = prediction[0] * 255
    cv2.imwrite("./predictions/" + str(tab_files[idx]) + ".png", mask)
|
||||
|
||||
30
Tensorflow/concours_drive/traitement_images.py
Normal file
@@ -0,0 +1,30 @@
|
||||
import cv2
|
||||
import numpy as np
|
||||
import random
|
||||
|
||||
def rotateImage(image, angle):
    """Rotate *image* by *angle* degrees around its centre, same output size."""
    taille = image.shape[1::-1]                     # (width, height) for OpenCV
    centre = tuple(np.array(taille) / 2)
    matrice = cv2.getRotationMatrix2D(centre, angle, 1.0)
    return cv2.warpAffine(image, matrice, taille, flags=cv2.INTER_LINEAR)
|
||||
|
||||
def bruit(image):
    """Return a uint8 copy of *image* with additive Gaussian noise.

    The noise amplitude is drawn uniformly in [5, 30] on every call.
    """
    amplitude = random.randint(5, 30)
    gaussien = np.random.randn(*image.shape) * amplitude
    return np.clip(image + gaussien, 0, 255).astype(np.uint8)
|
||||
|
||||
def change_gamma(image, alpha=1.0, beta=0.0):
    """Return *image* scaled by *alpha*, shifted by *beta*, clipped to uint8."""
    ajuste = alpha * image + beta
    return np.clip(ajuste, 0, 255).astype(np.uint8)
|
||||
|
||||
def color(image, alpha=20):
    """Shift each colour channel by an independent random offset in [-alpha, alpha]."""
    decalage = [random.randint(-alpha, alpha) for _ in range(3)]
    return np.clip(image + decalage, 0, 255).astype(np.uint8)
|
||||
|
||||
def random_change(image):
    """Apply each photometric augmentation with probability 1/2 and return
    the transformed image.

    Fix: the original assigned every result to a local ``img`` and then
    returned the untouched ``image``, so no augmentation was ever applied.
    """
    if np.random.randint(2):
        image = change_gamma(image, random.uniform(0.8, 1.2),
                             np.random.randint(100) - 50)
    if np.random.randint(2):
        image = bruit(image)
    if np.random.randint(2):
        image = color(image)
    return image
|
||||
5
Tensorflow/concours_drive_2/README.md
Normal file
@@ -0,0 +1,5 @@
# Concours DRIVE: Digital Retinal Images for Vessel Extraction

Mesure de similarité: Dice et Jaccard à la rescousse !

La vidéo du tutoriel se trouve à l'adresse suivante:

https://www.youtube.com/watch?v=26mpUDOS_IE
87
Tensorflow/concours_drive_2/model.py
Normal file
@@ -0,0 +1,87 @@
|
||||
import tensorflow as tf
|
||||
from tensorflow.keras import layers, models
|
||||
|
||||
def LossDice(y_true, y_pred):
    """Soft Dice loss per sample: 1 - 2|A.B| / (|A| + |B|), eps 1e-4."""
    inter = tf.reduce_sum(y_true * y_pred, axis=(1, 2))
    total = tf.reduce_sum(y_true + y_pred, axis=(1, 2))
    return 1 - 2 * inter / (total + 1E-4)
|
||||
|
||||
def LossJaccard(y_true, y_pred):
    """Soft Jaccard (IoU) loss per sample, eps 1e-4 against an empty union."""
    inter = tf.reduce_sum(y_true * y_pred, axis=(1, 2))
    somme = tf.reduce_sum(y_true + y_pred, axis=(1, 2))
    return 1 - inter / (somme - inter + 1E-4)
|
||||
|
||||
def model(nbr):
    """Build a U-Net style segmentation network for 576x560 RGB images.

    Args:
        nbr: base number of convolution filters; deeper stages use
            multiples of it.

    Returns:
        An uncompiled tf.keras Model mapping (576, 560, 3) float32 inputs
        to a (576, 560, 1) sigmoid vessel-probability map.
    """
    def double_conv(x, f1, f2):
        # Two 3x3 conv+BN stages; returns the batch-normalised output.
        x = layers.Conv2D(f1, 3, activation='relu', padding='same')(x)
        x = layers.BatchNormalization()(x)
        x = layers.Conv2D(f2, 3, activation='relu', padding='same')(x)
        return layers.BatchNormalization()(x)

    entree = layers.Input(shape=(576, 560, 3), dtype='float32')

    # Encoder: keep each stage's output for the skip connections.
    skips = []
    x = entree
    for f in (nbr, 2 * nbr, 4 * nbr, 4 * nbr):
        s = double_conv(x, f, f)
        skips.append(s)
        x = layers.MaxPool2D()(s)

    # Bottleneck.
    x = double_conv(x, 8 * nbr, 4 * nbr)

    # Decoder: upsample, concatenate the matching skip, then convolve.
    for f1, f2, skip in ((8 * nbr, 4 * nbr, skips[3]),
                         (4 * nbr, 2 * nbr, skips[2]),
                         (2 * nbr, nbr, skips[1]),
                         (nbr, nbr, skips[0])):
        x = layers.UpSampling2D()(x)
        x = tf.concat([x, skip], axis=3)
        x = double_conv(x, f1, f2)

    sortie = layers.Conv2D(1, 1, activation='sigmoid', padding='same')(x)
    return models.Model(inputs=entree, outputs=sortie)
|
||||
98
Tensorflow/concours_drive_2/train.py
Normal file
@@ -0,0 +1,98 @@
|
||||
import tensorflow as tf
|
||||
from sklearn.model_selection import train_test_split
|
||||
from PIL import Image
|
||||
import os
|
||||
import numpy as np
|
||||
import random
|
||||
import cv2
|
||||
import model
|
||||
import traitement_images as ti
|
||||
|
||||
# Train the U-Net on DRIVE with the Dice loss, then write predicted
# vessel masks for the test images into ./predictions/.

dir_images = './training/images/'
dir_mask = './training/1st_manual/'

if not os.path.isdir(dir_images):
    quit("The directory {} doesn't exist !".format(dir_images))
if not os.path.isdir(dir_mask):
    quit("The directory {} doesn't exist !".format(dir_mask))

tab_images = []
tab_masks = []

# Fix: os.listdir() never returns None; an empty directory yields [].
list_file = os.listdir(dir_images)
if not list_file:
    quit("No file in {} !".format(dir_images))

for fichier in list_file:
    img_orig = cv2.imread(dir_images + fichier)
    tab_images.append(img_orig[:576, :560])
    num = fichier.split('_')[0]
    file_mask = dir_mask + num + '_manual1.gif'
    if not os.path.isfile(file_mask):
        quit("Mask of {} doesn't exist in {}".format(file_mask, dir_mask))
    img_mask_orig = np.array(Image.open(file_mask))
    tab_masks.append(img_mask_orig[:576, :560])

    # Augmentation: rotations every 30 degrees, plus the three flips of
    # each rotation, with random photometric changes on the images only.
    for angle in range(0, 360, 30):
        img_r = ti.rotateImage(img_orig, angle)
        img = ti.random_change(img_r.copy())
        tab_images.append(img[:576, :560])
        img_mask = ti.rotateImage(img_mask_orig, angle)
        tab_masks.append(img_mask[:576, :560])

        for flip in (0, 1, -1):
            img = ti.random_change(cv2.flip(img_r, flip))
            tab_images.append(img[:576, :560])
            tab_masks.append(cv2.flip(img_mask, flip)[:576, :560])

tab_images = np.array(tab_images, dtype=np.float32) / 255
# Fix: the GIF masks are single-channel 2-D arrays; the original `[:, :, :]`
# slice was a no-op.  Add the trailing channel axis so the label shape
# (N, 576, 560, 1) matches the model output.
tab_masks = np.array(tab_masks, dtype=np.float32)[..., np.newaxis] / 255

train_images, test_images, train_masks, test_masks = train_test_split(
    tab_images, tab_masks, test_size=0.05)

del tab_images
del tab_masks

my_model = model.model(64)

my_model.compile(optimizer='adam',
                 loss=model.LossDice,
                 metrics=['accuracy'])
my_model.fit(train_images,
             train_masks,
             epochs=20,
             batch_size=4,
             validation_data=(test_images, test_masks))

dir_test_images = './test/images/'

tab_test_images = []
tab_files = []
for fichier in os.listdir(dir_test_images):
    img = cv2.imread(dir_test_images + fichier)
    tab_test_images.append(img[:576, :560])
    tab_files.append(fichier.split('_')[0])

tab_test_images = np.array(tab_test_images, dtype=np.float32) / 255
tab_files = np.array(tab_files)

# Predictions are written back at the original DRIVE resolution (584x565),
# the cropped border staying black.
for idx in range(len(tab_test_images)):
    mask = np.zeros((584, 565, 1), dtype=np.float32)
    prediction = my_model.predict(np.array([tab_test_images[idx]]))
    mask[:576, :560] = prediction[0] * 255
    cv2.imwrite("./predictions/" + str(tab_files[idx]) + ".png", mask)
|
||||
|
||||
30
Tensorflow/concours_drive_2/traitement_images.py
Normal file
@@ -0,0 +1,30 @@
|
||||
import cv2
|
||||
import numpy as np
|
||||
import random
|
||||
|
||||
def rotateImage(image, angle):
    """Rotate *image* by *angle* degrees around its centre, same output size."""
    taille = image.shape[1::-1]                     # (width, height) for OpenCV
    centre = tuple(np.array(taille) / 2)
    matrice = cv2.getRotationMatrix2D(centre, angle, 1.0)
    return cv2.warpAffine(image, matrice, taille, flags=cv2.INTER_LINEAR)
|
||||
|
||||
def bruit(image):
    """Return a uint8 copy of *image* with additive Gaussian noise.

    The noise amplitude is drawn uniformly in [5, 30] on every call.
    """
    amplitude = random.randint(5, 30)
    gaussien = np.random.randn(*image.shape) * amplitude
    return np.clip(image + gaussien, 0, 255).astype(np.uint8)
|
||||
|
||||
def change_gamma(image, alpha=1.0, beta=0.0):
    """Return *image* scaled by *alpha*, shifted by *beta*, clipped to uint8."""
    ajuste = alpha * image + beta
    return np.clip(ajuste, 0, 255).astype(np.uint8)
|
||||
|
||||
def color(image, alpha=20):
    """Shift each colour channel by an independent random offset in [-alpha, alpha]."""
    decalage = [random.randint(-alpha, alpha) for _ in range(3)]
    return np.clip(image + decalage, 0, 255).astype(np.uint8)
|
||||
|
||||
def random_change(image):
    """Apply each photometric augmentation with probability 1/2 and return
    the transformed image.

    Fix: the original assigned every result to a local ``img`` and then
    returned the untouched ``image``, so no augmentation was ever applied.
    """
    if np.random.randint(2):
        image = change_gamma(image, random.uniform(0.8, 1.2),
                             np.random.randint(100) - 50)
    if np.random.randint(2):
        image = bruit(image)
    if np.random.randint(2):
        image = color(image)
    return image
|
||||
4
Tensorflow/concours_foetus/README.md
Normal file
@@ -0,0 +1,4 @@
# Concours HC18: Mesurer un foetus à partir d'image d'échographie

La vidéo du tutoriel se trouve à l'adresse suivante:

https://www.youtube.com/watch?v=BCOLI8CTF00
104
Tensorflow/concours_foetus/common.py
Normal file
@@ -0,0 +1,104 @@
|
||||
import tensorflow as tf
|
||||
from tensorflow.keras import layers, models
|
||||
import csv
|
||||
import random
|
||||
import cv2
|
||||
import numpy as np
|
||||
import math
|
||||
import csv
|
||||
import random
|
||||
import config
|
||||
|
||||
def rotateImage(image, angle):
    """Rotate *image* by *angle* degrees around its centre, same output size."""
    taille = image.shape[1::-1]                     # (width, height) for OpenCV
    centre = tuple(np.array(taille) / 2)
    matrice = cv2.getRotationMatrix2D(centre, angle, 1.0)
    return cv2.warpAffine(image, matrice, taille, flags=cv2.INTER_LINEAR)
|
||||
|
||||
def complete_dataset(image, image_ellipse, tab_images, tab_labels):
    """Fit an ellipse to an annotation image and append (image, label).

    The annotation is expected to contain exactly two contours; their
    fitted ellipse parameters are averaged.  Labels are normalised:
    x, y and both axes by config.norm, the angle by 180.

    Returns 0 on success, 1 when the sample is rejected (wrong contour
    count, or a contour too small to fit an ellipse).
    """
    contours, hierarchy = cv2.findContours(image_ellipse[:, :, 0],
                                           cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    # Fix: `len(contours) is not 2` was an identity test against an int
    # literal (implementation-dependent); compare by value instead.
    if len(contours) != 2:
        return 1
    # fitEllipse needs enough points; keep the original 6-point guard.
    if len(contours[0]) < 6 or len(contours[1]) < 6:
        return 1
    (x1, y1), (ma1, MA1), a1 = cv2.fitEllipse(contours[0])
    (x2, y2), (ma2, MA2), a2 = cv2.fitEllipse(contours[1])
    x = (x1 + x2) / 2
    y = (y1 + y2) / 2
    ma = (ma1 + ma2) / 2
    MA = (MA1 + MA2) / 2
    a = (a1 + a2) / 2
    tab_images.append(image[:, :, 0])
    tab_labels.append([x / config.norm, y / config.norm,
                       MA / config.norm, ma / config.norm, a / 180])
    return 0
|
||||
|
||||
def prepare_data(fichier):
    """Read the training CSV, load each image and its annotation, and
    build an augmented dataset (random shift/zoom-out, rotations every
    30 degrees, additive noise, and the three flips of each rotation).

    Returns (tab_images, tab_labels) as Python lists; the labels are the
    normalised ellipse parameters produced by complete_dataset().
    """
    with open(fichier, newline='') as csvfile:
        lignes = csv.reader(csvfile, delimiter=',')
        next(lignes)  # skip the header row
        tab_images = []
        tab_labels = []
        nbr = 0
        for ligne in lignes:
            image_orig = cv2.imread(config.dir_images + ligne[0])
            if image_orig is None:
                print("Fichier absent", config.dir_images + ligne[0])
                continue

            f_ellipse = ligne[0].split('.')[0] + "_Annotation.png"
            image_ellipse_orig = cv2.imread(config.dir_images + f_ellipse)
            if image_ellipse_orig is None:
                print("Fichier absent", config.dir_images + f_ellipse)
                continue

            for angle in range(0, 360, 30):
                # Half of the time, paste the pair into a 40% larger black
                # canvas at a random offset (random shift + zoom-out).
                if np.random.randint(2) == 0:
                    h, w, c = image_orig.shape
                    H = int(h * 1.4)
                    W = int(w * 1.4)
                    h_shift = np.random.randint(H - h)
                    w_shift = np.random.randint(W - w)

                    image_orig2 = np.zeros(shape=(H, W, c), dtype=np.uint8)
                    image_orig2[h_shift:h_shift + h, w_shift:w_shift + w, :] = image_orig

                    image_ellipse_orig2 = np.zeros(shape=(H, W, c), dtype=np.uint8)
                    image_ellipse_orig2[h_shift:h_shift + h, w_shift:w_shift + w, :] = image_ellipse_orig
                else:
                    image_orig2 = image_orig
                    image_ellipse_orig2 = image_ellipse_orig

                image = cv2.resize(image_orig2, (config.largeur, config.hauteur),
                                   interpolation=cv2.INTER_AREA)
                image_ellipse = cv2.resize(image_ellipse_orig2, (config.largeur, config.hauteur),
                                           interpolation=cv2.INTER_AREA)
                img_r = rotateImage(image, angle)

                # Additive Gaussian noise with a random amplitude (images only).
                bruit = np.random.randn(config.hauteur, config.largeur, 3) * random.randint(1, 50)
                img_r = np.clip(img_r + bruit, 0, 255).astype(np.uint8)

                img_ellipse = rotateImage(image_ellipse, angle)
                nbr += complete_dataset(img_r, img_ellipse, tab_images, tab_labels)

                # The three flips of the rotated pair.
                for flip in (0, 1, -1):
                    img_f = cv2.flip(img_r, flip)
                    img_ellipse_f = cv2.flip(img_ellipse, flip)
                    nbr += complete_dataset(img_f, img_ellipse_f, tab_images, tab_labels)

        print("Image(s) rejetée(s):", nbr)
        print("Nombre d'images:", len(tab_images))
        return tab_images, tab_labels
|
||||
|
||||
14
Tensorflow/concours_foetus/config.py
Normal file
@@ -0,0 +1,14 @@
|
||||
# Network input size (width x height) of the ultrasound images.
largeur = 200
hauteur = 135

# Alternative resolution kept for experiments:
# largeur = 220
# hauteur = 148

# Coordinates and axes are normalised by the largest input dimension.
norm = max(largeur, hauteur)

batch_size = 64
# Base number of filters handed to model.model().
input_model = 8

# Dataset locations.
dir_images = "training_set/"
dir_images_test = "test_set/"
|
||||
17
Tensorflow/concours_foetus/genere_csv.py
Normal file
@@ -0,0 +1,17 @@
|
||||
import numpy as np
|
||||
import os
|
||||
import cv2
|
||||
import glob
|
||||
import config
|
||||
|
||||
# For every training image, fit an ellipse to its annotation mask and print
# the normalised parameters as colon-separated values.
for image in glob.glob(config.dir_images + '*_HC.png'):
    # Matching annotation file: "xxx_HC.png" -> "xxx_HC_Annotation.png".
    image_ellipse = image.split('.')[0] + "_Annotation.png"
    masque = cv2.resize(cv2.imread(image_ellipse), (config.largeur, config.hauteur))
    print(masque.shape)
    h, w, c = masque.shape
    plan = masque[:, :, 0]
    contours, hierarchy = cv2.findContours(plan, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    for cont in contours:
        (x, y), (ma, MA), angle = cv2.fitEllipse(cont)
        print("{}:{:f}:{:f}:{:f}:{:f}:{:f}".format(image, x / w, y / h, ma / w, MA / h, angle / 180))
|
||||
29
Tensorflow/concours_foetus/images.py
Normal file
@@ -0,0 +1,29 @@
|
||||
import tensorflow as tf
|
||||
import sys
|
||||
import time
|
||||
import cv2
|
||||
import numpy as np
|
||||
import common
|
||||
import config
|
||||
import model
|
||||
|
||||
# Visual sanity check: draw each training label back onto its image.
images, labels = common.prepare_data('training_set.csv')
images = np.array(images, dtype=np.float32) / 255
labels = np.array(labels, dtype=np.float32)

# Shuffle images and labels with the same permutation.
melange = np.random.permutation(len(images))
images = images[melange].reshape(-1, config.hauteur, config.largeur, 1)
labels = labels[melange]

print("Nombre d'image:", len(images))

for i in range(len(images)):
    x, y, grand_axe, petit_axe, angle = labels[i]
    print("Label:", labels[i], angle * 180)
    # Grey -> 3-channel so the ellipse can be drawn in colour.
    img_couleur = np.tile(images[i], (1, 1, 3))
    cv2.ellipse(img_couleur,
                (int(x * config.norm), int(y * config.norm)),
                (int(petit_axe * config.norm / 2), int(grand_axe * config.norm / 2)),
                angle * 180, 0., 360., (0, 0, 255), 2)
    cv2.imshow("Image", img_couleur)

    key = cv2.waitKey() & 0xFF
    if key == ord('q'):
        quit()
|
||||
|
||||
87
Tensorflow/concours_foetus/inference.py
Normal file
@@ -0,0 +1,87 @@
|
||||
import tensorflow as tf
|
||||
import sys
|
||||
import time
|
||||
import cv2
|
||||
import numpy as np
|
||||
import math
|
||||
import common
|
||||
import config
|
||||
import model
|
||||
import csv
|
||||
|
||||
# Run the trained model on a CSV of images and display the predicted head
# ellipse next to the ground truth (training set) or alone (test set).
model = model.model(config.input_model)

rouge = (0, 0, 255)
vert = (0, 255, 0)

# Flip this branch to run on the test set instead of the training set.
# (Renamed `dir` -> `dossier`: `dir` shadows the builtin.)
if True:
    dossier = config.dir_images
    fichier = "training_set.csv"
    test = False
else:
    dossier = config.dir_images_test
    fichier = "test_set.csv"
    test = True

checkpoint = tf.train.Checkpoint(model=model)
checkpoint.restore(tf.train.latest_checkpoint("./training/"))

with open(fichier, newline='') as csvfile:
    lignes = csv.reader(csvfile, delimiter=',')
    for ligne in lignes:
        print("LIGNE:", ligne)
        print("XXX", dossier + ligne[0])
        img_originale = cv2.imread(dossier + ligne[0])
        if img_originale is None:
            continue
        print("WWW", ligne[0], dossier + ligne[0], img_originale.shape)
        H, W, C = img_originale.shape
        mm_pixel = float(ligne[1])
        img = cv2.resize(img_originale, (config.largeur, config.hauteur))
        img2 = img.copy()
        img = np.array(img, dtype=np.float32) / 255
        img = np.expand_dims(img[:, :, 0], axis=-1)
        predictions = model(np.array([img]))
        x, y, grand_axe, petit_axe, angle = [float(v) for v in predictions[0]]
        # Fix: cv2.ellipse requires integer centre/axes; the raw tensor
        # values made the call fail on recent OpenCV versions (the sibling
        # images.py already casts to int).
        cv2.ellipse(img2,
                    (int(x * config.norm), int(y * config.norm)),
                    (int(petit_axe * config.norm / 2), int(grand_axe * config.norm / 2)),
                    angle * 180, 0., 360., rouge, 2)
        print("Prediction", np.array(predictions[0]))

        if test is False:
            # Overlay the ground-truth ellipse fitted from the annotation.
            f_ellipse = ligne[0].split('.')[0] + "_Annotation.png"
            image_ellipse = cv2.imread(dossier + f_ellipse)
            if image_ellipse is None:
                print("Fichier absent", dossier + f_ellipse)
                continue
            img_ellipse_f_ = image_ellipse[:, :, 0]
            contours, hierarchy = cv2.findContours(img_ellipse_f_, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
            (x_, y_), (ma_, MA_), a_ = cv2.fitEllipse(contours[0])
            cv2.ellipse(img_originale, (int(x_), int(y_)), (int(ma_ / 2), int(MA_ / 2)), a_, 0., 360., vert, 3)

        cv2.ellipse(img_originale,
                    (int(x * W), int(y * W)),
                    (int(petit_axe * W / 2), int(grand_axe * W / 2)),
                    angle * 180, 0., 360., rouge, 2)

        x = float(x * W * mm_pixel)
        y = float(y * W * mm_pixel)
        axis_x = float(grand_axe * W * mm_pixel / 2)
        axis_y = float(petit_axe * W * mm_pixel / 2)

        # Undo the angle normalisation (training stored angle/180),
        # recentred by a half turn.
        r = 180.
        r_2 = r / 2
        if angle >= 0.5:
            angle = angle * r - r_2
        else:
            angle = angle * r + r_2

        # Approximate ellipse perimeter -> head circumference in mm.
        HC = np.pi * np.sqrt(2 * (axis_x ** 2 + axis_y ** 2))

        cv2.putText(img_originale, "HC: {:5.2f}mm".format(HC), (20, 30), cv2.FONT_HERSHEY_DUPLEX, 1, (0, 0, 255), 2)
        print("{},{:f},{:f},{:f},{:f},{:f} HC: {:f}mm".format(ligne[0], x, y, axis_x, axis_y, angle, HC))
        if len(ligne) == 3:
            cv2.putText(img_originale, "HC: {:5.2f}mm".format(float(ligne[2])), (20, 60), cv2.FONT_HERSHEY_DUPLEX, 1, (0, 255, 0), 2)
            print("HC: {}mm prediction: {:f}mm".format(ligne[2], HC))

        cv2.imshow("Image originale", img_originale)
        cv2.imshow("Inference", img2)

        key = cv2.waitKey() & 0xFF
        if key == ord('q'):
            quit()
|
||||
92
Tensorflow/concours_foetus/model.py
Normal file
@@ -0,0 +1,92 @@
|
||||
import tensorflow as tf
|
||||
from tensorflow.keras import layers, models
|
||||
import config
|
||||
|
||||
def block_resnet(input, filters, kernel_size, reduce, dropout=0.):
    """Residual block: two convolutions plus a (possibly projected) shortcut.

    Args:
        input: input tensor.
        filters: number of output channels.
        kernel_size: convolution kernel size.
        reduce: when True the second convolution (and the shortcut) use
            stride 2, halving the spatial resolution.
        dropout: dropout rate applied after each stage (0. disables it).
    """
    # Fix: the original used `dropout is not 0.` and `reduce is True` -
    # identity tests against literals whose result is implementation
    # dependent; compare by value instead.
    stride = 2 if reduce else 1

    result = layers.Conv2D(filters, kernel_size, strides=1, padding='SAME', activation='relu')(input)
    if dropout != 0.:
        result = layers.Dropout(dropout)(result)
    result = layers.Conv2D(filters, kernel_size, strides=stride, padding='SAME')(result)

    # Identity shortcut only when the shape is fully preserved; otherwise
    # a 1x1 projection (with the same stride) matches channels/resolution.
    if input.shape[-1] == filters and not reduce:
        shortcut = input
    else:
        shortcut = layers.Conv2D(filters, 1, strides=stride, padding='SAME')(input)

    result = layers.add([result, shortcut])
    if dropout != 0.:
        result = layers.Dropout(dropout)(result)
    result = layers.Activation('relu')(result)
    result = layers.BatchNormalization()(result)
    return result
|
||||
|
||||
def model(nbr):
    """ResNet-style regressor: grey image -> 5 normalised ellipse parameters.

    Args:
        nbr: base filter count; stages use 2x, 4x, 8x and 16x multiples.
    """
    entree = layers.Input(shape=(config.largeur, config.hauteur, 1), dtype='float32')

    result = entree
    # Three downsampling stages: (filter multiplier, number of plain
    # blocks, dropout).  The last block of each stage uses reduce=True.
    for mult, plain, drop in ((2, 3, 0.3), (4, 6, 0.4), (8, 12, 0.4)):
        for _ in range(plain):
            result = block_resnet(result, mult * nbr, 3, False, drop)
        result = block_resnet(result, mult * nbr, 3, True, drop)

    # Final stage: 24 blocks at 16*nbr filters, no further downsampling.
    for _ in range(24):
        result = block_resnet(result, 16 * nbr, 3, False, 0.5)

    result = layers.AveragePooling2D()(result)
    result = layers.Flatten()(result)
    sortie = layers.Dense(5, activation='sigmoid')(result)

    return models.Model(inputs=entree, outputs=sortie)
|
||||
|
||||
45
Tensorflow/concours_foetus/result.py
Normal file
@@ -0,0 +1,45 @@
|
||||
# Inference script: restores the latest checkpoint and prints, as CSV on
# stdout, one predicted ellipse (centre, semi-axes, angle) per test image.
import tensorflow as tf
import sys
import time
import cv2
import numpy as np
import math
import common
import config
import model
import csv
# NOTE(review): sys, time, math and common appear unused in this script.

# Build the network (architecture defined in model.py, width from config).
model=model.model(config.input_model)

# Restore the most recent training checkpoint into the model.
checkpoint=tf.train.Checkpoint(model=model)
checkpoint.restore(tf.train.latest_checkpoint("./training/"))

# CSV header of the submission format.
print("filename,center_x_mm,center_y_mm,semi_axes_a_mm,semi_axes_b_mm,angle_rad")
with open("test_set.csv", newline='') as csvfile:
    lignes=csv.reader(csvfile, delimiter=',')
    for ligne in lignes:
        # ligne[0]: image filename, ligne[1]: mm-per-pixel scale factor.
        img=cv2.imread(config.dir_images_test+ligne[0])
        if img is None:
            # Skip CSV rows whose image file is missing/unreadable.
            continue
        mm_pixel=float(ligne[1])
        H, W, C=img.shape
        img=cv2.resize(img, (config.largeur, config.hauteur))
        img=np.array(img, dtype=np.float32)/255
        # Keep a single channel (channel 0) with shape (hauteur, largeur, 1).
        img=np.expand_dims(img[:, :, 0], axis=-1)
        predictions=model(np.array([img]))
        x, y, grand_axe, petit_axe, angle=predictions[0]

        # Convert normalised network outputs back to millimetres.
        # NOTE(review): y is scaled by the image *width* W, not the height H —
        # presumably the labels were normalised by width; confirm against the
        # training-data preparation in common.py.
        x=float(x*W*mm_pixel)
        y=float(y*W*mm_pixel)
        axis_x=float(grand_axe*W*mm_pixel/2)
        axis_y=float(petit_axe*W*mm_pixel/2)

        # Map the network's angle output from [0, 1] back to radians,
        # inverting the encoding used at training time.
        r=np.pi
        r_2=r/2
        if angle>=0.5:
            angle=angle*r-r_2
        else:
            angle=angle*r+r_2

        print("{},{:f},{:f},{:f},{:f},{:f}".format(ligne[0], x, y, axis_x, axis_y, angle))
|
||||
|
||||
75
Tensorflow/concours_foetus/train.py
Normal file
@@ -0,0 +1,75 @@
|
||||
import tensorflow as tf
|
||||
import sys
|
||||
import time
|
||||
import cv2
|
||||
import numpy as np
|
||||
import common
|
||||
import config
|
||||
import model
|
||||
|
||||
# Load the training set via the project helper (implementation in common.py,
# not visible here — presumably returns images and ellipse-parameter labels).
images, labels=common.prepare_data('training_set.csv')
images=np.array(images, dtype=np.float32)/255  # normalise pixels to [0, 1]
labels=np.array(labels, dtype=np.float32)
# Shuffle images and labels with the same random permutation.
index=np.random.permutation(len(images))
images=images[index].reshape(-1, config.hauteur, config.largeur, 1)
labels=labels[index]

print("Nbr images:", len(images))

train_ds=tf.data.Dataset.from_tensor_slices((images, labels)).batch(config.batch_size)

# Free the large NumPy arrays; the dataset holds its own copy of the data.
del images
del labels
|
||||
|
||||
def my_loss(labels, preds):
    """Weighted squared-error loss for ellipse regression.

    Columns of `preds`/`labels`: 0-1 centre (x, y), 2-3 semi-axes, 4 angle.
    Returns one loss value per batch element.
    """
    weight_position=5
    weight_axes=5
    weight_angle=1

    # Element-wise squared error once, then slice per parameter group.
    sq_err=tf.math.square(preds-labels)

    position_term=tf.reduce_sum(sq_err[:, 0:2], axis=-1)
    axes_term=tf.reduce_sum(sq_err[:, 2:4], axis=-1)
    angle_term=sq_err[:, 4]

    return (weight_position*position_term
            +weight_axes*axes_term
            +weight_angle*angle_term)
|
||||
|
||||
model=model.model(config.input_model)
|
||||
|
||||
@tf.function
def train_step(images, labels):
    # One optimisation step, compiled as a TensorFlow graph.
    # Relies on module-level globals: model, my_loss, optimizer, train_loss.
    with tf.GradientTape() as tape:
        predictions=model(images)
        loss=my_loss(labels, predictions)
    gradients=tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))
    # Accumulate the batch loss into the running-mean metric.
    train_loss(loss)
|
||||
|
||||
def train(train_ds, nbr_entrainement):
    """Run `nbr_entrainement` epochs over `train_ds`, printing the per-epoch
    mean loss and wall-clock time, and checkpointing every 10 epochs.

    Relies on module-level globals: train_step, train_loss, checkpoint.
    """
    for entrainement in range(nbr_entrainement):
        start=time.time()
        # Fix: reset the running-mean metric at the start of each epoch.
        # Without this, tf.keras.metrics.Mean keeps accumulating, so the
        # printed loss was a mean over ALL epochs so far, not this epoch.
        train_loss.reset_states()
        for images, labels in train_ds:
            train_step(images, labels)
        message='Entrainement {:04d}: loss: {:6.4f}, temps: {:7.4f}'
        print(message.format(entrainement+1,
                             train_loss.result(),
                             time.time()-start))
        # Save a checkpoint every 10 epochs (including epoch 0).
        if not entrainement%10:
            checkpoint.save(file_prefix="./training/")
|
||||
|
||||
# Training setup: Adam optimizer, running-mean loss metric, and a single
# checkpoint object tracking the model.
optimizer=tf.keras.optimizers.Adam(learning_rate=1E-4)
train_loss=tf.keras.metrics.Mean()

# Fix: the checkpoint was constructed twice in a row; one object is enough
# to both restore the latest state (if any) and save later.
checkpoint=tf.train.Checkpoint(model=model)
checkpoint.restore(tf.train.latest_checkpoint("./training/"))

# 60 epochs, then a final checkpoint.
train(train_ds, 60)
checkpoint.save(file_prefix="./training/")
|
||||
84
Tensorflow/tutoriel1/MNIST.py
Normal file
@@ -0,0 +1,84 @@
|
||||
import tensorflow as tf
|
||||
import numpy as np
|
||||
import matplotlib.pyplot as plot
|
||||
import cv2
|
||||
|
||||
# Two-layer perceptron (784 -> nbr_ni sigmoid -> 10 softmax) on MNIST,
# written against the TensorFlow 1.x graph/session API.
nbr_ni=100             # number of hidden neurons
learning_rate=0.0001
taille_batch=100
nbr_entrainement=200   # number of epochs

# Raw IDX files: 16-byte header for images, 8-byte header for labels.
# Pixels are rescaled to [0, 1]; labels become one-hot rows of np.eye(10).
mnist_train_images=np.fromfile("mnist/train-images-idx3-ubyte", dtype=np.uint8)[16:].reshape(-1, 784)/255
mnist_train_labels=np.eye(10)[np.fromfile("mnist/train-labels-idx1-ubyte", dtype=np.uint8)[8:]]
mnist_test_images=np.fromfile("mnist/t10k-images-idx3-ubyte", dtype=np.uint8)[16:].reshape(-1, 784)/255
mnist_test_labels=np.eye(10)[np.fromfile("mnist/t10k-labels-idx1-ubyte", dtype=np.uint8)[8:]]

ph_images=tf.placeholder(shape=(None, 784), dtype=tf.float32)
ph_labels=tf.placeholder(shape=(None, 10), dtype=tf.float32)

# Hidden layer: 784 -> nbr_ni, sigmoid activation.
wci=tf.Variable(tf.truncated_normal(shape=(784, nbr_ni)), dtype=tf.float32)
bci=tf.Variable(np.zeros(shape=(nbr_ni)), dtype=tf.float32)
sci=tf.matmul(ph_images, wci)+bci
sci=tf.nn.sigmoid(sci)

# Output layer: nbr_ni -> 10; scs are the logits, scso the probabilities.
wcs=tf.Variable(tf.truncated_normal(shape=(nbr_ni, 10)), dtype=tf.float32)
bcs=tf.Variable(np.zeros(shape=(10)), dtype=tf.float32)
scs=tf.matmul(sci, wcs)+bcs
scso=tf.nn.softmax(scs)

# Cross-entropy on the raw logits (the op applies softmax internally).
loss=tf.nn.softmax_cross_entropy_with_logits_v2(labels=ph_labels, logits=scs)
train=tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)
accuracy=tf.reduce_mean(tf.cast(tf.equal(tf.argmax(scso, 1), tf.argmax(ph_labels, 1)), dtype=tf.float32))

with tf.Session() as s:
    s.run(tf.global_variables_initializer())

    tab_acc_train=[]
    tab_acc_test=[]
    for id_entrainement in range(nbr_entrainement):
        print("ID entrainement", id_entrainement)
        # One epoch of mini-batch gradient descent.
        for batch in range(0, len(mnist_train_images), taille_batch):
            s.run(train, feed_dict={
                ph_images: mnist_train_images[batch:batch+taille_batch],
                ph_labels: mnist_train_labels[batch:batch+taille_batch]
            })

        # Batched accuracy over the full training set.
        tab_acc=[]
        for batch in range(0, len(mnist_train_images), taille_batch):
            acc=s.run(accuracy, feed_dict={
                ph_images: mnist_train_images[batch:batch+taille_batch],
                ph_labels: mnist_train_labels[batch:batch+taille_batch]
            })
            tab_acc.append(acc)
        print("accuracy train:", np.mean(tab_acc))
        tab_acc_train.append(1-np.mean(tab_acc))

        # Batched accuracy over the test set.
        tab_acc=[]
        for batch in range(0, len(mnist_test_images), taille_batch):
            acc=s.run(accuracy, feed_dict={
                ph_images: mnist_test_images[batch:batch+taille_batch],
                ph_labels: mnist_test_labels[batch:batch+taille_batch]
            })
            tab_acc.append(acc)
        print("accuracy test :", np.mean(tab_acc))
        tab_acc_test.append(1-np.mean(tab_acc))

    # Plot the error (1 - accuracy) curves for both sets.
    plot.ylim(0, 1)
    plot.grid()
    plot.plot(tab_acc_train, label="Train error")
    plot.plot(tab_acc_test, label="Test error")
    plot.legend(loc="upper right")
    plot.show()

    # Interactive inspection of the first batch of test predictions;
    # press 'q' in the OpenCV window to stop early.
    resulat=s.run(scso, feed_dict={ph_images: mnist_test_images[0:taille_batch]})
    np.set_printoptions(formatter={'float': '{:0.3f}'.format})
    for image in range(taille_batch):
        print("image", image)
        print("sortie du réseau:", resulat[image], np.argmax(resulat[image]))
        print("sortie attendue :", mnist_test_labels[image], np.argmax(mnist_test_labels[image]))
        cv2.imshow('image', mnist_test_images[image].reshape(28, 28))
        if cv2.waitKey()&0xFF==ord('q'):
            break
|
||||
|
||||
|
||||
|
||||
26
Tensorflow/tutoriel1/README.md
Normal file
@@ -0,0 +1,26 @@
|
||||
# Tutoriel tensorflow
|
||||
## Réalisation d'un perceptron multicouche et utilisation sur la base MNIST
|
||||
|
||||
La vidéo du tutoriel se trouve à l'adresse suivante:
|
||||
https://www.youtube.com/watch?v=WeotsGN_138
|
||||
|
||||
Si vous souhaitez me soutenir: <https://fr.tipeee.com/l42-project>
|
||||
|
||||
Le code de cette vidéo est écrit pour la version 1.X de tensorflow (je recommande la version 1.13.1), pour l'installer, il suffit de taper la commande suivante :
|
||||
|
||||
`# pip install tensorflow==1.13.1`
|
||||
|
||||
ou la version GPU:
|
||||
|
||||
`# pip install tensorflow-gpu==1.13.1`
|
||||
|
||||
Pour utiliser ce programme, vous devez récupérer les fichiers MNIST sur le site suivant:
|
||||
http://yann.lecun.com/exdb/mnist/
|
||||
et les placer dans le répertoire ./mnist
|
||||
|
||||
La courbe d'erreur après 200 cycles d'apprentissage est la suivante :
|
||||
|
||||

|
||||
|
||||
Cet apprentissage prend environ 6 minutes sur une GeForce 1080
|
||||
|
||||
BIN
Tensorflow/tutoriel1/graph_error.png
Normal file
|
After Width: | Height: | Size: 23 KiB |
200
Tensorflow/tutoriel1/log_error
Normal file
@@ -0,0 +1,200 @@
|
||||
0:0.313583:0.327500
|
||||
1:0.480800:0.493700
|
||||
2:0.572250:0.584200
|
||||
3:0.630350:0.638400
|
||||
4:0.668667:0.674600
|
||||
5:0.695733:0.702900
|
||||
6:0.716550:0.722100
|
||||
7:0.733633:0.738100
|
||||
8:0.748117:0.752700
|
||||
9:0.759833:0.763700
|
||||
10:0.770050:0.772800
|
||||
11:0.778600:0.780700
|
||||
12:0.786300:0.788300
|
||||
13:0.792750:0.793800
|
||||
14:0.798500:0.799500
|
||||
15:0.804867:0.804600
|
||||
16:0.809367:0.809100
|
||||
17:0.813883:0.813600
|
||||
18:0.817800:0.818300
|
||||
19:0.822300:0.822200
|
||||
20:0.825900:0.825000
|
||||
21:0.829000:0.828400
|
||||
22:0.831950:0.831300
|
||||
23:0.834917:0.833700
|
||||
24:0.837567:0.836100
|
||||
25:0.840133:0.838400
|
||||
26:0.842733:0.841300
|
||||
27:0.845067:0.843300
|
||||
28:0.847617:0.845100
|
||||
29:0.849667:0.846300
|
||||
30:0.851567:0.847900
|
||||
31:0.853717:0.849800
|
||||
32:0.855650:0.851900
|
||||
33:0.857483:0.853200
|
||||
34:0.859050:0.854900
|
||||
35:0.860417:0.857600
|
||||
36:0.861533:0.859400
|
||||
37:0.863217:0.860500
|
||||
38:0.864533:0.861600
|
||||
39:0.865900:0.862700
|
||||
40:0.866917:0.863900
|
||||
41:0.868117:0.865300
|
||||
42:0.869217:0.866400
|
||||
43:0.870283:0.867400
|
||||
44:0.871050:0.869200
|
||||
45:0.871983:0.870500
|
||||
46:0.873233:0.871300
|
||||
47:0.874017:0.872400
|
||||
48:0.875033:0.873900
|
||||
49:0.875883:0.875300
|
||||
50:0.876883:0.876500
|
||||
51:0.877767:0.877300
|
||||
52:0.878433:0.878200
|
||||
53:0.879583:0.878700
|
||||
54:0.880483:0.879500
|
||||
55:0.881300:0.880700
|
||||
56:0.881933:0.881500
|
||||
57:0.882600:0.882200
|
||||
58:0.883183:0.882500
|
||||
59:0.883850:0.883100
|
||||
60:0.884350:0.883800
|
||||
61:0.885217:0.884500
|
||||
62:0.886083:0.885600
|
||||
63:0.886817:0.885900
|
||||
64:0.887500:0.886200
|
||||
65:0.888150:0.886800
|
||||
66:0.888867:0.887900
|
||||
67:0.889467:0.888400
|
||||
68:0.890200:0.888600
|
||||
69:0.890583:0.889000
|
||||
70:0.891083:0.889500
|
||||
71:0.891717:0.890000
|
||||
72:0.892183:0.890400
|
||||
73:0.892650:0.890700
|
||||
74:0.893250:0.891200
|
||||
75:0.893700:0.891600
|
||||
76:0.894500:0.892000
|
||||
77:0.894967:0.892800
|
||||
78:0.895450:0.893000
|
||||
79:0.895933:0.893300
|
||||
80:0.896500:0.893300
|
||||
81:0.897150:0.893900
|
||||
82:0.897550:0.894000
|
||||
83:0.898050:0.894900
|
||||
84:0.898467:0.895700
|
||||
85:0.899050:0.896100
|
||||
86:0.899533:0.896400
|
||||
87:0.900033:0.896800
|
||||
88:0.900483:0.897100
|
||||
89:0.900850:0.897500
|
||||
90:0.901283:0.897700
|
||||
91:0.901600:0.898000
|
||||
92:0.901967:0.898400
|
||||
93:0.902383:0.898600
|
||||
94:0.902583:0.899000
|
||||
95:0.903083:0.899600
|
||||
96:0.903400:0.900000
|
||||
97:0.903717:0.900300
|
||||
98:0.904017:0.900700
|
||||
99:0.904483:0.901100
|
||||
100:0.904783:0.901300
|
||||
101:0.905067:0.901500
|
||||
102:0.905400:0.901500
|
||||
103:0.905733:0.902000
|
||||
104:0.906133:0.902400
|
||||
105:0.906433:0.903100
|
||||
106:0.906817:0.903600
|
||||
107:0.906900:0.903700
|
||||
108:0.907250:0.904000
|
||||
109:0.907533:0.904300
|
||||
110:0.907817:0.904100
|
||||
111:0.907967:0.904100
|
||||
112:0.908217:0.904600
|
||||
113:0.908500:0.904700
|
||||
114:0.908800:0.904700
|
||||
115:0.909117:0.904900
|
||||
116:0.909250:0.905100
|
||||
117:0.909683:0.905400
|
||||
118:0.909850:0.905600
|
||||
119:0.910167:0.905700
|
||||
120:0.910417:0.906000
|
||||
121:0.910683:0.906100
|
||||
122:0.911000:0.906600
|
||||
123:0.911333:0.907000
|
||||
124:0.911717:0.907100
|
||||
125:0.912067:0.907500
|
||||
126:0.912250:0.907700
|
||||
127:0.912467:0.907600
|
||||
128:0.912700:0.907700
|
||||
129:0.912933:0.907900
|
||||
130:0.913133:0.908200
|
||||
131:0.913450:0.908300
|
||||
132:0.913783:0.908500
|
||||
133:0.914067:0.908500
|
||||
134:0.914300:0.908700
|
||||
135:0.914517:0.908700
|
||||
136:0.914717:0.909100
|
||||
137:0.914900:0.909500
|
||||
138:0.915067:0.909800
|
||||
139:0.915167:0.910200
|
||||
140:0.915333:0.910400
|
||||
141:0.915533:0.910500
|
||||
142:0.915800:0.910500
|
||||
143:0.916017:0.910400
|
||||
144:0.916217:0.910600
|
||||
145:0.916517:0.910900
|
||||
146:0.916700:0.911100
|
||||
147:0.916783:0.911300
|
||||
148:0.917050:0.911500
|
||||
149:0.917350:0.911500
|
||||
150:0.917583:0.911600
|
||||
151:0.917917:0.911600
|
||||
152:0.918100:0.911700
|
||||
153:0.918333:0.911800
|
||||
154:0.918567:0.911800
|
||||
155:0.918733:0.911700
|
||||
156:0.918867:0.911700
|
||||
157:0.919100:0.911800
|
||||
158:0.919233:0.912200
|
||||
159:0.919567:0.912300
|
||||
160:0.919783:0.912700
|
||||
161:0.920100:0.912900
|
||||
162:0.920350:0.913100
|
||||
163:0.920517:0.912900
|
||||
164:0.920750:0.913000
|
||||
165:0.920983:0.913000
|
||||
166:0.921250:0.913100
|
||||
167:0.921300:0.913200
|
||||
168:0.921400:0.913100
|
||||
169:0.921567:0.913100
|
||||
170:0.921700:0.913400
|
||||
171:0.921883:0.913500
|
||||
172:0.922067:0.913800
|
||||
173:0.922167:0.913900
|
||||
174:0.922400:0.914200
|
||||
175:0.922650:0.914500
|
||||
176:0.922817:0.914600
|
||||
177:0.922933:0.914900
|
||||
178:0.923067:0.914900
|
||||
179:0.923317:0.915200
|
||||
180:0.923550:0.915300
|
||||
181:0.923733:0.915500
|
||||
182:0.923933:0.915600
|
||||
183:0.924133:0.915800
|
||||
184:0.924367:0.915900
|
||||
185:0.924617:0.916100
|
||||
186:0.924900:0.916100
|
||||
187:0.925067:0.916000
|
||||
188:0.925367:0.916000
|
||||
189:0.925550:0.916400
|
||||
190:0.925683:0.916600
|
||||
191:0.925750:0.916900
|
||||
192:0.925917:0.917200
|
||||
193:0.926133:0.917500
|
||||
194:0.926233:0.917300
|
||||
195:0.926583:0.917400
|
||||
196:0.926850:0.917400
|
||||
197:0.926967:0.917700
|
||||
198:0.927167:0.917900
|
||||
199:0.927317:0.918000
|
||||
98
Tensorflow/tutoriel10/CIFAR_10_vgg.py
Normal file
@@ -0,0 +1,98 @@
|
||||
import tensorflow as tf
|
||||
import numpy as np
|
||||
import matplotlib.pyplot as plot
|
||||
import cv2
|
||||
import vgg
|
||||
from sklearn.utils import shuffle
|
||||
|
||||
def read_cifar_file(file, images, labels):
    """Parse one CIFAR-10 binary batch file and append its records in place.

    Each 3073-byte record is 1 label byte followed by a 3x32x32 image
    (channel-major).  Appends an HWC float image in [0, 1] to `images`
    and a one-hot length-10 vector to `labels`.
    """
    record_size=1+3*32*32
    raw=np.fromfile(file, dtype=np.uint8)
    for offset in range(0, raw.shape[0], record_size):
        # First byte of the record is the class index.
        labels.append(np.eye(10)[raw[offset]])
        # Remaining 3072 bytes: CHW pixels -> HWC, rescaled to [0, 1].
        pixels=raw[offset+1:offset+record_size]
        images.append(pixels.reshape(3, 32, 32).transpose(1, 2, 0)/255)
|
||||
|
||||
taille_batch=100
nbr_entrainement=50
# Human-readable CIFAR-10 class names, in label order.
# Fix: renamed from `labels` — that name was immediately shadowed by the
# `labels` placeholder returned from vgg.vggnet() below, which made the
# class-name list unreachable.
noms_classes=['avion', 'automobile', 'oiseau', 'chat', 'cerf', 'chien', 'grenouille', 'cheval', 'bateau', 'camion']

# Load the five CIFAR-10 training batches (50 000 images)...
train_images=[]
train_labels=[]
read_cifar_file("cifar-10-batches-bin/data_batch_1.bin", train_images, train_labels)
read_cifar_file("cifar-10-batches-bin/data_batch_2.bin", train_images, train_labels)
read_cifar_file("cifar-10-batches-bin/data_batch_3.bin", train_images, train_labels)
read_cifar_file("cifar-10-batches-bin/data_batch_4.bin", train_images, train_labels)
read_cifar_file("cifar-10-batches-bin/data_batch_5.bin", train_images, train_labels)

# ...and the 10 000-image test batch.
test_images=[]
test_labels=[]
read_cifar_file("cifar-10-batches-bin/test_batch.bin", test_images, test_labels)

# Build the VGG graph; `images`/`labels`/`is_training` are placeholders,
# `sortie` the softmax output, `train`/`accuracy` the ops, `save` a Saver.
images, labels, is_training, sortie, train, accuracy, save=vgg.vggnet(nbr_classes=10, learning_rate=0.01)
|
||||
|
||||
def transform_img(img):
    """Random photometric + geometric augmentation of one image (TF1 ops).

    Applies random flip/hue/saturation/brightness/contrast, then a random
    90% crop resized back to the original spatial size.
    """
    augmented=tf.image.random_flip_left_right(img)
    augmented=tf.image.random_hue(augmented, 0.08)
    augmented=tf.image.random_saturation(augmented, 0.6, 1.6)
    augmented=tf.image.random_brightness(augmented, 0.05)
    augmented=tf.image.random_contrast(augmented, 0.7, 1.3)
    hauteur=int(img.shape[0])
    largeur=int(img.shape[1])
    canaux=int(img.shape[2])
    # Crop a random 90% window, then resize back to the original size.
    augmented=tf.image.random_crop(augmented, [int(hauteur*0.90), int(largeur*0.90), canaux])
    augmented=tf.image.resize_images(augmented, (hauteur, largeur))
    return(augmented)
|
||||
|
||||
# Append per-epoch accuracies to a persistent log file.
fichier=open("log", "a")
with tf.Session() as s:
    s.run(tf.global_variables_initializer())
    tab_train=[]
    tab_test=[]

    # Dataset augmentation: three randomly transformed copies of the
    # training set are concatenated with the original (4x the data),
    # materialised once up front with s.run(), then shuffled.
    train_images=np.array(train_images, dtype=np.float32)
    train_images2=tf.map_fn(transform_img, train_images)
    train_images3=tf.map_fn(transform_img, train_images)
    train_images4=tf.map_fn(transform_img, train_images)
    train_images=tf.concat([train_images, train_images2, train_images3, train_images4], axis=0)
    train_labels=np.array(train_labels)
    train_labels=tf.concat([train_labels, train_labels, train_labels, train_labels], axis=0)

    train_images=s.run(train_images)
    train_labels=s.run(train_labels)
    train_images, train_labels=shuffle(train_images, train_labels)

    for id_entrainement in np.arange(nbr_entrainement):
        print("> Entrainement", id_entrainement)
        # One epoch of mini-batch training (is_training=True enables
        # batch-norm updates and dropout in the graph).
        for batch in np.arange(0, len(train_images), taille_batch):
            s.run(train, feed_dict={
                images: train_images[batch:batch+taille_batch],
                labels: train_labels[batch:batch+taille_batch],
                is_training: True
            })
        print("  entrainement OK")
        # Batched train accuracy (measured on the augmented training set).
        tab_accuracy_train=[]
        for batch in np.arange(0, len(train_images), taille_batch):
            p=s.run(accuracy, feed_dict={
                images: train_images[batch:batch+taille_batch],
                labels: train_labels[batch:batch+taille_batch],
                is_training: False
            })
            tab_accuracy_train.append(p)
        print("  train:", np.mean(tab_accuracy_train))
        # Batched test accuracy.
        tab_accuracy_test=[]
        for batch in np.arange(0, len(test_images), taille_batch):
            p=s.run(accuracy, feed_dict={
                images: test_images[batch:batch+taille_batch],
                labels: test_labels[batch:batch+taille_batch],
                is_training: False
            })
            tab_accuracy_test.append(p)
        print("  test :", np.mean(tab_accuracy_test))
        # Error curves (1 - accuracy) and log line: "epoch:train:test".
        tab_train.append(1-np.mean(tab_accuracy_train))
        tab_test.append(1-np.mean(tab_accuracy_test))
        fichier.write("{:d}:{:f}:{:f}\n".format(id_entrainement, np.mean(tab_accuracy_train), np.mean(tab_accuracy_test)))
fichier.close()
|
||||
|
||||
|
||||
BIN
Tensorflow/tutoriel10/Figure_1.png
Normal file
|
After Width: | Height: | Size: 84 KiB |
15
Tensorflow/tutoriel10/README.md
Normal file
@@ -0,0 +1,15 @@
|
||||
# Tutoriel tensorflow
|
||||
## Surapprentissage: complétion du dataset
|
||||
|
||||
La vidéo de ce tutoriel est disponible à l'adresse suivante: https://www.youtube.com/watch?v=rLSnx0LiObo
|
||||
|
||||
## CIFAR10
|
||||
|
||||
N'oubliez pas de récupérer la base cifar10 (binary version) à l'adresse suivante:
|
||||
https://www.cs.toronto.edu/~kriz/cifar.html
|
||||
|
||||
### Courbes d'erreur sur la base de validation:
|
||||

|
||||
|
||||
L'apprentissage prend 2h50 sur une GeForce 1080
|
||||
|
||||
102
Tensorflow/tutoriel10/vgg.py
Normal file
@@ -0,0 +1,102 @@
|
||||
import tensorflow as tf
|
||||
import numpy as np
|
||||
|
||||
def convolution(couche_prec, taille_noyau, nbr_noyau):
    """Same-padded stride-1 2-D convolution layer (TF1, no activation).

    couche_prec: input tensor (NHWC); taille_noyau: kernel side length;
    nbr_noyau: number of output channels.
    """
    w=tf.Variable(tf.random.truncated_normal(shape=(taille_noyau, taille_noyau, int(couche_prec.get_shape()[-1]), nbr_noyau)))
    # Fix: the bias was a plain np.zeros array — a frozen constant that the
    # optimizer never updates.  Make it a tf.Variable, consistent with fc().
    # (Initial values are identical, so the forward pass is unchanged at
    # initialisation; the bias simply becomes trainable.)
    b=tf.Variable(np.zeros(nbr_noyau), dtype=tf.float32)
    result=tf.nn.conv2d(couche_prec, w, strides=[1, 1, 1, 1], padding='SAME')+b
    return result
|
||||
|
||||
def fc(couche_prec, nbr_neurone):
    """Fully-connected layer (TF1): couche_prec @ W + b, no activation."""
    nbr_entrees=int(couche_prec.get_shape()[-1])
    poids=tf.Variable(tf.random.truncated_normal(shape=(nbr_entrees, nbr_neurone), dtype=tf.float32))
    biais=tf.Variable(np.zeros(shape=(nbr_neurone)), dtype=tf.float32)
    return tf.matmul(couche_prec, poids)+biais
|
||||
|
||||
def vggnet(nbr_classes, learning_rate=1E-3, momentum=0.99):
    """Build a VGG-style classification graph for 32x32x3 inputs (TF1).

    Returns (images placeholder, labels placeholder, is_training placeholder,
    softmax output, train op, accuracy op, Saver).  `momentum` is the
    batch-norm moving-average momentum.
    """
    ph_images=tf.placeholder(shape=(None, 32, 32, 3), dtype=tf.float32, name='images')
    ph_labels=tf.placeholder(shape=(None, nbr_classes), dtype=tf.float32)
    # Defaults to False so inference feeds need not mention it.
    ph_is_training=tf.placeholder_with_default(False, (), name='is_training')

    # Stage 1: two 3x3x64 conv layers (conv -> BN -> dropout/relu), 2x2 pool.
    # NOTE(review): the dropout/relu order differs between the two layers
    # here (dropout-then-relu vs relu-then-dropout); kept as written.
    result=convolution(ph_images, 3, 64)
    result=tf.layers.batch_normalization(result, training=ph_is_training, momentum=momentum)
    result=tf.layers.dropout(result, 0.2, training=ph_is_training)
    result=tf.nn.relu(result)
    result=convolution(result, 3, 64)
    result=tf.layers.batch_normalization(result, training=ph_is_training, momentum=momentum)
    result=tf.nn.relu(result)
    result=tf.layers.dropout(result, 0.2, training=ph_is_training)
    result=tf.nn.max_pool(result, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

    # Stage 2: two 3x3x128 conv layers, 2x2 pool.
    result=convolution(result, 3, 128)
    result=tf.layers.batch_normalization(result, training=ph_is_training, momentum=momentum)
    result=tf.nn.relu(result)
    result=tf.layers.dropout(result, 0.2, training=ph_is_training)
    result=convolution(result, 3, 128)
    result=tf.layers.batch_normalization(result, training=ph_is_training, momentum=momentum)
    result=tf.nn.relu(result)
    result=tf.layers.dropout(result, 0.3, training=ph_is_training)
    result=tf.nn.max_pool(result, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

    # Stage 3: three 3x3x256 conv layers, 2x2 pool.
    result=convolution(result, 3, 256)
    result=tf.layers.batch_normalization(result, training=ph_is_training, momentum=momentum)
    result=tf.nn.relu(result)
    result=tf.layers.dropout(result, 0.3, training=ph_is_training)
    result=convolution(result, 3, 256)
    result=tf.layers.batch_normalization(result, training=ph_is_training, momentum=momentum)
    result=tf.nn.relu(result)
    result=tf.layers.dropout(result, 0.3, training=ph_is_training)
    result=convolution(result, 3, 256)
    result=tf.layers.batch_normalization(result, training=ph_is_training, momentum=momentum)
    result=tf.nn.relu(result)
    result=tf.layers.dropout(result, 0.3, training=ph_is_training)
    result=tf.nn.max_pool(result, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

    # Stage 4: three 3x3x512 conv layers, 2x2 pool (dropout rises to 0.4).
    result=convolution(result, 3, 512)
    result=tf.layers.batch_normalization(result, training=ph_is_training, momentum=momentum)
    result=tf.nn.relu(result)
    result=tf.layers.dropout(result, 0.3, training=ph_is_training)
    result=convolution(result, 3, 512)
    result=tf.layers.batch_normalization(result, training=ph_is_training, momentum=momentum)
    result=tf.nn.relu(result)
    result=tf.layers.dropout(result, 0.4, training=ph_is_training)
    result=convolution(result, 3, 512)
    result=tf.layers.batch_normalization(result, training=ph_is_training, momentum=momentum)
    result=tf.nn.relu(result)
    result=tf.layers.dropout(result, 0.4, training=ph_is_training)
    result=tf.nn.max_pool(result, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

    # Stage 5: three more 3x3x512 conv layers, pool, then flatten.
    result=convolution(result, 3, 512)
    result=tf.layers.batch_normalization(result, training=ph_is_training, momentum=momentum)
    result=tf.nn.relu(result)
    result=tf.layers.dropout(result, 0.4, training=ph_is_training)
    result=convolution(result, 3, 512)
    result=tf.layers.batch_normalization(result, training=ph_is_training, momentum=momentum)
    result=tf.nn.relu(result)
    result=tf.layers.dropout(result, 0.4, training=ph_is_training)
    result=convolution(result, 3, 512)
    result=tf.layers.batch_normalization(result, training=ph_is_training, momentum=momentum)
    result=tf.nn.relu(result)
    result=tf.layers.dropout(result, 0.5, training=ph_is_training)
    result=tf.nn.max_pool(result, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
    result=tf.contrib.layers.flatten(result)

    # Classifier head: two 1024-unit FC layers, then nbr_classes logits.
    result=fc(result, 1024)
    result=tf.layers.batch_normalization(result, training=ph_is_training, momentum=momentum)
    result=tf.layers.dropout(result, 0.5, training=ph_is_training)
    result=tf.nn.relu(result)
    result=fc(result, 1024)
    result=tf.layers.batch_normalization(result, training=ph_is_training, momentum=momentum)
    result=tf.layers.dropout(result, 0.5, training=ph_is_training)
    result=tf.nn.relu(result)
    result=fc(result, nbr_classes)
    socs=tf.nn.softmax(result, name="sortie")

    # Cross-entropy on the raw logits; the UPDATE_OPS dependency ensures
    # batch-norm moving statistics are updated on each training step.
    loss=tf.nn.softmax_cross_entropy_with_logits_v2(labels=ph_labels, logits=result)
    extra_update_ops=tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(extra_update_ops):
        train=tf.train.AdamOptimizer(learning_rate).minimize(loss)
    accuracy=tf.reduce_mean(tf.cast(tf.equal(tf.argmax(socs, 1), tf.argmax(ph_labels, 1)), tf.float32))

    return ph_images, ph_labels, ph_is_training, socs, train, accuracy, tf.train.Saver()
|
||||
|
||||
11
Tensorflow/tutoriel15/README.md
Normal file
@@ -0,0 +1,11 @@
|
||||
# Tutoriel tensorflow
|
||||
## Réseau GoogleNet (inception v1)
|
||||
|
||||
La vidéo de ce tutoriel est disponible à l'adresse suivante: https://www.youtube.com/watch?v=b4vv_vLVyho
|
||||
|
||||
## STL10
|
||||
|
||||
N'oubliez pas de récupérer la base stl10 (binary version) à l'adresse suivante:
|
||||
https://cs.stanford.edu/~acoates/stl10/
|
||||
|
||||
|
||||
55
Tensorflow/tutoriel15/STL_10_inception.py
Normal file
@@ -0,0 +1,55 @@
|
||||
import tensorflow as tf
|
||||
import numpy as np
|
||||
import random
|
||||
from sklearn.utils import shuffle
|
||||
import common
|
||||
|
||||
# Hyper-parameters.
taille_batch=100
nbr_entrainement=400
learning_rate=1E-3

# Load STL-10 and rescale pixels to [0, 1].
labels, train_images, train_labels, test_images, test_labels=common.stl10("stl10_binary")
train_images=train_images/255
test_images=test_images/255

# Build the Inception-v1 graph (see common.inception_v1).
ph_images, ph_labels, ph_is_training, ph_learning_rate, socs, train, accuracy, saver=common.inception_v1(10)

# Append per-epoch accuracies to a persistent log file.
fichier=open("log", "a")
with tf.Session() as s:
    s.run(tf.global_variables_initializer())
    tab_train=[]
    tab_test=[]
    for id_entrainement in np.arange(nbr_entrainement):
        print("> Entrainement", id_entrainement)
        # Decay the learning rate by 1% every 10 epochs.
        # NOTE(review): `not 0%10` is True, so the first decay fires at epoch 0.
        if not id_entrainement%10:
            learning_rate*=0.99
            print("lr:", learning_rate)
        train_images, train_labels=shuffle(train_images, train_labels)
        # One epoch of mini-batch training.
        for batch in np.arange(0, len(train_images), taille_batch):
            s.run(train, feed_dict={
                ph_images: train_images[batch:batch+taille_batch],
                ph_labels: train_labels[batch:batch+taille_batch],
                ph_learning_rate: learning_rate,
                ph_is_training: True
            })
        print("  entrainement OK")
        # Accuracy passes omit ph_is_training; the placeholder_with_default
        # in common.inception_v1 then falls back to False (inference mode).
        tab_accuracy_train=[]
        for batch in np.arange(0, len(train_images), taille_batch):
            p=s.run(accuracy, feed_dict={
                ph_images: train_images[batch:batch+taille_batch],
                ph_labels: train_labels[batch:batch+taille_batch]
            })
            tab_accuracy_train.append(p)
        print("  train:", np.mean(tab_accuracy_train))
        tab_accuracy_test=[]
        for batch in np.arange(0, len(test_images), taille_batch):
            p=s.run(accuracy, feed_dict={
                ph_images: test_images[batch:batch+taille_batch],
                ph_labels: test_labels[batch:batch+taille_batch]
            })
            tab_accuracy_test.append(p)
        print("  test :", np.mean(tab_accuracy_test))
        # Error curves (1 - accuracy) and log line: "epoch:train:test".
        tab_train.append(1-np.mean(tab_accuracy_train))
        tab_test.append(1-np.mean(tab_accuracy_test))
        fichier.write("{:d}:{:f}:{:f}\n".format(id_entrainement, np.mean(tab_accuracy_train), np.mean(tab_accuracy_test)))
fichier.close()
|
||||
105
Tensorflow/tutoriel15/common.py
Normal file
@@ -0,0 +1,105 @@
|
||||
import numpy as np
|
||||
import tensorflow as tf
|
||||
from sklearn.utils import shuffle
|
||||
|
||||
def stl10(path):
    """Load the STL-10 binary dataset from directory `path`.

    Returns (class names, shuffled train images, train one-hot labels,
    test images, test one-hot labels).  Images are uint8 NHWC, 96x96x3;
    label files store classes 1..10, converted to one-hot rows.
    """
    noms_classes=['avion', 'oiseau', 'voiture', 'chat', 'cerf', 'chien', 'cheval', 'singe', 'bateau', 'camion']
    one_hot=np.eye(10)
    # Binary layout is column-major per image: (3, 96, 96) -> HWC.
    train_images=np.fromfile(path+"/train_X.bin", dtype=np.uint8).reshape(-1, 3, 96, 96).transpose(0, 2, 3, 1)
    train_labels=one_hot[np.fromfile(path+"/train_y.bin", dtype=np.uint8)-1]
    train_images, train_labels=shuffle(train_images, train_labels)
    test_images=np.fromfile(path+"/test_X.bin", dtype=np.uint8).reshape(-1, 3, 96, 96).transpose(0, 2, 3, 1)
    test_labels=one_hot[np.fromfile(path+"/test_y.bin", dtype=np.uint8)-1]
    return noms_classes, train_images, train_labels, test_images, test_labels
|
||||
|
||||
def couche_convolution(couche_prec, taille_noyau, nbr_noyau, stride, b_norm, f_activation, training):
    """Same-padded 2-D convolution with optional batch-norm and activation.

    couche_prec: NHWC input; taille_noyau: kernel side; nbr_noyau: output
    channels; stride: spatial stride; b_norm: apply batch normalization;
    f_activation: activation callable (e.g. tf.nn.relu) or None;
    training: bool tensor controlling batch-norm mode.
    """
    w_filtre=tf.Variable(tf.random.truncated_normal(shape=(taille_noyau, taille_noyau, int(couche_prec.get_shape()[-1]), nbr_noyau)))
    # Fix: the bias was a plain np.zeros array — a frozen constant never
    # updated by the optimizer.  Make it a tf.Variable, consistent with
    # couche_fc().  Initial values are unchanged (zeros).
    b_filtre=tf.Variable(np.zeros(nbr_noyau), dtype=tf.float32)
    result=tf.nn.conv2d(couche_prec, w_filtre, strides=[1, stride, stride, 1], padding='SAME')+b_filtre
    if b_norm is True:
        result=tf.layers.batch_normalization(result, training=training)
    if f_activation is not None:
        result=f_activation(result)
    return result
|
||||
|
||||
def couche_fc(couche_prec, nbr_neurone, b_norm, f_activation, training):
    """Fully-connected layer with optional batch-norm and activation.

    b_norm: apply batch normalization; f_activation: activation callable
    or None; training: bool tensor controlling batch-norm mode.
    """
    nbr_entrees=int(couche_prec.get_shape()[-1])
    poids=tf.Variable(tf.random.truncated_normal(shape=(nbr_entrees, nbr_neurone), dtype=tf.float32))
    biais=tf.Variable(np.zeros(shape=(nbr_neurone)), dtype=tf.float32)
    sortie=tf.matmul(couche_prec, poids)+biais
    if b_norm is True:
        sortie=tf.layers.batch_normalization(sortie, training=training)
    if f_activation is not None:
        sortie=f_activation(sortie)
    return sortie
|
||||
|
||||
def b_inception_v1(input, nbr_1, nbr_3r, nbr_3, nbr_5r, nbr_5, nbr_pool, training):
    # One Inception-v1 module: four parallel branches concatenated on the
    # channel axis.  The nbr_* arguments are the per-branch channel counts.
    # Branch 1: 1x1 convolution.
    result1=couche_convolution(input, 1, nbr_1, 1, True, tf.nn.relu, training)

    # Branch 2: 1x1 reduction (nbr_3r) followed by a 3x3 convolution (nbr_3).
    result2=couche_convolution(input, 1, nbr_3r, 1, True, tf.nn.relu, training)
    result2=couche_convolution(result2, 3, nbr_3, 1, True, tf.nn.relu, training)

    # Branch 3: 1x1 reduction (nbr_5r) followed by a 5x5 convolution (nbr_5).
    result3=couche_convolution(input, 1, nbr_5r, 1, True, tf.nn.relu, training)
    result3=couche_convolution(result3, 5, nbr_5, 1, True, tf.nn.relu, training)

    # Branch 4: 3x3 max-pool (stride 1) followed by a 1x1 projection.
    result4=tf.nn.max_pool(input, ksize=[1, 3, 3, 1], strides=[1, 1, 1, 1], padding='SAME')
    result4=couche_convolution(result4, 1, nbr_pool, 1, True, tf.nn.relu, training)

    # Concatenate the branches along the channel dimension (axis 3).
    result=tf.concat([result1, result2, result3, result4], 3)
    return result
|
||||
|
||||
def aux(input, training, nbr_classes):
    # Auxiliary classifier head attached to an intermediate feature map;
    # its logits feed an extra weighted loss term in inception_v1 below.
    result=tf.nn.avg_pool(input, ksize=[1, 5, 5, 1], strides=[1, 3, 3, 1], padding='VALID')
    result=couche_convolution(result, 1, 128, 1, True, tf.nn.relu, training)
    result=tf.contrib.layers.flatten(result)
    result=couche_fc(result, 1000, True, tf.nn.relu, training)
    # Heavy dropout (rate 0.7) on the auxiliary head only.
    result=tf.layers.dropout(result, 0.7, training=training)
    # Raw logits (no activation); softmax is applied by the loss op.
    result=couche_fc(result, nbr_classes, False, None, training)
    return result
|
||||
|
||||
def inception_v1(nbr_classes):
    """Build the full Inception v1 (GoogLeNet) graph for 96x96x3 inputs.

    Returns (ph_images, ph_labels, ph_is_training, ph_learning_rate,
    softmax output, train op, accuracy op, Saver). Trained with RMSProp on
    the main cross-entropy plus 0.3x each auxiliary head's loss.
    """
    # Placeholders: images (NHWC), one-hot labels, BN/dropout mode flag
    # (defaults to inference), and the learning rate.
    ph_images=tf.placeholder(shape=(None, 96, 96, 3), dtype=tf.float32)
    ph_labels=tf.placeholder(shape=(None, nbr_classes), dtype=tf.float32)
    ph_is_training=tf.placeholder_with_default(False, (), name='is_training')
    ph_learning_rate=tf.placeholder(dtype=tf.float32)

    # Stem: strided 5x5 conv + max-pool, then 3x3 conv + max-pool.
    result=couche_convolution(ph_images, 5, 64, 2, True, tf.nn.relu, ph_is_training)
    result=tf.nn.max_pool(result, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME')
    #result=couche_convolution(result, 3, 64, 2, True, tf.nn.relu, ph_is_training)
    result=couche_convolution(result, 3, 192, 1, True, tf.nn.relu, ph_is_training)
    result=tf.nn.max_pool(result, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME')
    # Inception stages 3a/3b, pool, then 4a.
    result=b_inception_v1(result, 64, 96, 128, 16, 32, 32, ph_is_training)
    result=b_inception_v1(result, 128, 128, 192, 32, 96, 64, ph_is_training)
    result=tf.nn.max_pool(result, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME')
    result=b_inception_v1(result, 192, 96, 208, 16, 48, 64, ph_is_training)

    # First auxiliary classifier head (used only in the loss).
    aux1=aux(result, ph_is_training, nbr_classes)

    # Inception stages 4b-4d.
    result=b_inception_v1(result, 160, 112, 224, 24, 64, 64, ph_is_training)
    result=b_inception_v1(result, 128, 128, 256, 24, 64, 64, ph_is_training)
    result=b_inception_v1(result, 112, 144, 288, 32, 64, 64, ph_is_training)

    # Second auxiliary classifier head.
    aux2=aux(result, ph_is_training, nbr_classes)

    # Inception stages 4e, pool, 5a/5b.
    result=b_inception_v1(result, 256, 160, 320, 32, 128, 128, ph_is_training)
    result=tf.nn.max_pool(result, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME')
    result=b_inception_v1(result, 256, 160, 320, 32, 128, 128, ph_is_training)
    result=b_inception_v1(result, 384, 192, 384, 48, 128, 128, ph_is_training)
    # Global average pooling over the remaining spatial extent.
    taille=result.get_shape()[1]
    result=tf.nn.avg_pool(result, ksize=[1, taille, taille, 1], strides=[1, 1, 1, 1], padding='SAME')

    result=tf.contrib.layers.flatten(result)

    # Classifier: hidden FC + dropout (rate 0.4), then logits and softmax.
    result=couche_fc(result, 1000, True, tf.nn.relu, ph_is_training)
    result=tf.layers.dropout(result, 0.4, training=ph_is_training)
    result=couche_fc(result, nbr_classes, False, None, ph_is_training)
    socs=tf.nn.softmax(result)

    # Total loss: main head + 0.3 * each auxiliary head (per the paper).
    loss=tf.nn.softmax_cross_entropy_with_logits_v2(labels=ph_labels, logits=result)+\
    0.3*tf.nn.softmax_cross_entropy_with_logits_v2(labels=ph_labels, logits=aux1)+\
    0.3*tf.nn.softmax_cross_entropy_with_logits_v2(labels=ph_labels, logits=aux2)

    # Run BN moving-average updates before each optimizer step.
    extra_update_ops=tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(extra_update_ops):
        train=tf.train.RMSPropOptimizer(ph_learning_rate).minimize(loss)
    accuracy=tf.reduce_mean(tf.cast(tf.equal(tf.argmax(socs, 1), tf.argmax(ph_labels, 1)), tf.float32))

    return ph_images, ph_labels, ph_is_training, ph_learning_rate, socs, train, accuracy, tf.train.Saver()
|
||||
BIN
Tensorflow/tutoriel16/Figure_1.png
Normal file
|
After Width: | Height: | Size: 44 KiB |
BIN
Tensorflow/tutoriel16/Figure_1M.png
Normal file
|
After Width: | Height: | Size: 44 KiB |
BIN
Tensorflow/tutoriel16/Figure_2.png
Normal file
|
After Width: | Height: | Size: 45 KiB |
BIN
Tensorflow/tutoriel16/Figure_2M.png
Normal file
|
After Width: | Height: | Size: 44 KiB |
BIN
Tensorflow/tutoriel16/Figure_3.png
Normal file
|
After Width: | Height: | Size: 36 KiB |
BIN
Tensorflow/tutoriel16/Figure_3M.png
Normal file
|
After Width: | Height: | Size: 43 KiB |
30
Tensorflow/tutoriel16/README.md
Normal file
@@ -0,0 +1,30 @@
|
||||
# Tutoriel tensorflow
|
||||
## Réseau ResNet
|
||||
|
||||
La vidéo de ce tutoriel est disponible à l'adresse suivante: https://www.youtube.com/watch?v=yvzY1JP0OFY
|
||||
|
||||
## STL10
|
||||
|
||||
N'oubliez pas de récupérer la base stl10 (binary version) à l'adresse suivante:
|
||||
https://cs.stanford.edu/~acoates/stl10/
|
||||
|
||||
### Courbes d'erreur avec le block "1"
|
||||

|
||||
|
||||
### Courbes d'erreur avec le block "2"
|
||||

|
||||
|
||||
### Courbes d'erreur avec le block "3"
|
||||

|
||||
|
||||
### Courbes d'erreur avec le block "1M"
|
||||

|
||||
|
||||
### Courbes d'erreur avec le block "2M"
|
||||

|
||||
|
||||
### Courbes d'erreur avec le block "3M"
|
||||

|
||||
|
||||
L'apprentissage prend plus de 6h sur une GeForce 1080
|
||||
|
||||
53
Tensorflow/tutoriel16/STL10_resnet.py
Normal file
@@ -0,0 +1,53 @@
|
||||
import tensorflow as tf
|
||||
import numpy as np
|
||||
import random
|
||||
from sklearn.utils import shuffle
|
||||
import common
|
||||
|
||||
# Hyper-parameters.
taille_batch=55
nbr_entrainement=400
learning_rate=1E-3

# Load STL-10 (binary layout) and scale pixels to [0, 1].
labels, train_images, train_labels, test_images, test_labels=common.stl10("stl10_binary")
train_images=train_images/255
test_images=test_images/255

# Build the ResNet graph using block variant "3M".
ph_images, ph_labels, ph_is_training, socs, train, accuracy, saver=common.resnet(10, common.b_resnet_3M, learning_rate)

# Context managers guarantee the log file and session are closed even on error.
with open("log", "a") as fichier, tf.Session() as s:
    s.run(tf.global_variables_initializer())
    tab_train=[]
    tab_test=[]
    for id_entrainement in np.arange(nbr_entrainement):
        print("> Entrainement", id_entrainement)
        # One epoch of optimisation over shuffled mini-batches.
        train_images, train_labels=shuffle(train_images, train_labels)
        for batch in np.arange(0, len(train_images), taille_batch):
            s.run(train, feed_dict={
                ph_images: train_images[batch:batch+taille_batch],
                ph_labels: train_labels[batch:batch+taille_batch],
                ph_is_training: True
            })
        print(" entrainement OK")
        # BUGFIX: accuracy was measured with ph_is_training=True, i.e. with
        # batch-norm in training mode. Evaluate in inference mode instead
        # (ph_is_training defaults to False) so BN uses its moving statistics.
        tab_accuracy_train=[]
        for batch in np.arange(0, len(train_images), taille_batch):
            p=s.run(accuracy, feed_dict={
                ph_images: train_images[batch:batch+taille_batch],
                ph_labels: train_labels[batch:batch+taille_batch]
            })
            tab_accuracy_train.append(p)
        print(" train:", np.mean(tab_accuracy_train))
        # Held-out test accuracy, also in inference mode.
        tab_accuracy_test=[]
        for batch in np.arange(0, len(test_images), taille_batch):
            p=s.run(accuracy, feed_dict={
                ph_images: test_images[batch:batch+taille_batch],
                ph_labels: test_labels[batch:batch+taille_batch]
            })
            tab_accuracy_test.append(p)
        print(" test :", np.mean(tab_accuracy_test))
        # Track error rates and append "epoch:train_acc:test_acc" to the log.
        tab_train.append(1-np.mean(tab_accuracy_train))
        tab_test.append(1-np.mean(tab_accuracy_test))
        fichier.write("{:d}:{:f}:{:f}\n".format(id_entrainement, np.mean(tab_accuracy_train), np.mean(tab_accuracy_test)))
|
||||
201
Tensorflow/tutoriel16/common.py
Normal file
@@ -0,0 +1,201 @@
|
||||
import tensorflow as tf
|
||||
import numpy as np
|
||||
from sklearn.utils import shuffle
|
||||
|
||||
def stl10(path):
    """Load the STL-10 binary dataset from *path*.

    Returns (class_names, train_images, train_labels, test_images,
    test_labels). Images are uint8 NHWC arrays of shape (N, 96, 96, 3);
    labels are one-hot float arrays. The training pairs are shuffled once.
    """
    def lire_images(fichier):
        # On disk each image is stored CHW; transpose to HWC for TensorFlow.
        return np.fromfile(fichier, dtype=np.uint8).reshape(-1, 3, 96, 96).transpose(0, 2, 3, 1)

    def lire_labels(fichier):
        # Labels on disk are 1..10; shift to 0..9 then one-hot encode.
        return np.eye(10)[np.fromfile(fichier, dtype=np.uint8)-1]

    labels=['avion', 'oiseau', 'voiture', 'chat', 'cerf', 'chien', 'cheval', 'singe', 'bateau', 'camion']
    train_images=lire_images(path+"/train_X.bin")
    train_labels=lire_labels(path+"/train_y.bin")
    train_images, train_labels=shuffle(train_images, train_labels)
    test_images=lire_images(path+"/test_X.bin")
    test_labels=lire_labels(path+"/test_y.bin")
    return labels, train_images, train_labels, test_images, test_labels
|
||||
|
||||
def convolution(input, taille_noyau, nbr_noyau, stride, b_norm, f_activation, training):
    """2-D convolution (SAME padding) with optional batch norm and activation.

    input        : NHWC float tensor.
    taille_noyau : kernel height/width.
    nbr_noyau    : number of output channels.
    stride       : spatial stride.
    b_norm       : apply tf.layers.batch_normalization when True.
    f_activation : activation function (e.g. tf.nn.relu) or None.
    training     : batch-norm training flag (bool or tensor).
    """
    w_filtre=tf.Variable(tf.random.truncated_normal(shape=(taille_noyau, taille_noyau, int(input.get_shape()[-1]), nbr_noyau)))
    # BUGFIX: the bias was a plain numpy array, frozen into the graph as a
    # constant zero that could never be trained. Make it a float32
    # tf.Variable, consistent with fc() below.
    b_filtre=tf.Variable(np.zeros(nbr_noyau), dtype=tf.float32)
    result=tf.nn.conv2d(input, w_filtre, strides=[1, stride, stride, 1], padding='SAME')+b_filtre
    if b_norm is True:
        result=tf.layers.batch_normalization(result, training=training)
    if f_activation is not None:
        result=f_activation(result)
    return result
|
||||
|
||||
def fc(input, nbr_neurone, b_norm, f_activation, training):
    """Fully-connected layer with optional batch normalization and activation."""
    nbr_entrees = int(input.get_shape()[-1])
    # Trainable weights (truncated-normal init) and zero-initialised bias.
    poids = tf.Variable(tf.random.truncated_normal(shape=(nbr_entrees, nbr_neurone), dtype=tf.float32))
    biais = tf.Variable(np.zeros(shape=(nbr_neurone)), dtype=tf.float32)
    sortie = tf.matmul(input, poids) + biais
    if b_norm is True:
        # Normalise pre-activations; `training` selects batch vs moving stats.
        sortie = tf.layers.batch_normalization(sortie, training=training)
    if f_activation is not None:
        sortie = f_activation(sortie)
    return sortie
|
||||
|
||||
def b_resnet_1(input, kernel, nbr_cc, reduce, training, dropout=None):
    """Bottleneck ResNet block, variant "1": BN on every conv, ReLU after the add.

    kernel / nbr_cc : parallel lists of kernel sizes and channel counts for
                      the main path; nbr_cc[-1] is the block's output width.
    reduce          : if True, halve the spatial size (stride 2 on the first
                      conv and on the shortcut projection).
    dropout         : optional dropout rate applied after the block.
    """
    if reduce is True:
        stride=2
        # Strided 1x1 projection so the shortcut matches the reduced shape.
        result2=convolution(input, 1, nbr_cc[-1], stride, True, tf.nn.relu, training)
    else:
        stride=1
        if nbr_cc[-1]!=int(input.get_shape()[-1]):
            # Channel counts differ: project the shortcut with a 1x1 conv.
            result2=convolution(input, 1, nbr_cc[-1], stride, True, tf.nn.relu, training)
        else:
            # Identity shortcut.
            result2=input
    result=input
    # Main path: every conv but the last uses ReLU; stride only on the first.
    for shift in range(len(kernel)-1):
        result=convolution(result, kernel[shift], nbr_cc[shift], stride, True, tf.nn.relu, training)
        stride=1
    # Last conv has no activation: ReLU is applied after the residual add.
    result=convolution(result, kernel[len(kernel)-1], nbr_cc[len(kernel)-1], stride, True, None, training)
    result=result+result2
    result=tf.nn.relu(result)
    if dropout is not None:
        result=tf.layers.dropout(result, dropout)
    return result
|
||||
|
||||
def b_resnet_1M(input, kernel, nbr_cc, reduce, training, dropout=None):
    """ResNet block variant "1M": like b_resnet_1, but downsampling uses a
    2x2 max-pool on both paths instead of a strided convolution.
    """
    if reduce is True:
        # Downsample main path and shortcut identically with max-pooling.
        result=tf.nn.max_pool(input, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
        result2=tf.nn.max_pool(input, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
        if nbr_cc[-1]!=int(input.get_shape()[-1]):
            # Project the pooled shortcut to the block's output channel count.
            result2=convolution(result2, 1, nbr_cc[-1], 1, True, tf.nn.relu, training)
    else:
        result=input
        if nbr_cc[-1]!=int(input.get_shape()[-1]):
            result2=convolution(input, 1, nbr_cc[-1], 1, True, tf.nn.relu, training)
        else:
            result2=input
    # Main path: all convs stride 1; the last has no activation so the ReLU
    # comes after the residual add.
    for shift in range(len(kernel)-1):
        result=convolution(result, kernel[shift], nbr_cc[shift], 1, True, tf.nn.relu, training)
    result=convolution(result, kernel[len(kernel)-1], nbr_cc[len(kernel)-1], 1, True, None, training)
    result=result+result2
    result=tf.nn.relu(result)
    if dropout is not None:
        result=tf.layers.dropout(result, dropout)
    return result
|
||||
|
||||
def b_resnet_2(input, kernel, nbr_cc, reduce, training, dropout=None):
    """ResNet block variant "2": like b_resnet_1 except the last conv has no
    batch norm -- BN is applied once, after the residual add, before ReLU.
    """
    if reduce is True:
        stride=2
        # Strided 1x1 shortcut projection when downsampling.
        result2=convolution(input, 1, nbr_cc[-1], stride, True, tf.nn.relu, training)
    else:
        stride=1
        if nbr_cc[-1]!=int(input.get_shape()[-1]):
            # 1x1 projection when the channel count changes.
            result2=convolution(input, 1, nbr_cc[-1], stride, True, tf.nn.relu, training)
        else:
            # Identity shortcut.
            result2=input
    result=input
    for shift in range(len(kernel)-1):
        result=convolution(result, kernel[shift], nbr_cc[shift], stride, True, tf.nn.relu, training)
        stride=1
    # Last conv: no BN and no activation here.
    result=convolution(result, kernel[len(kernel)-1], nbr_cc[len(kernel)-1], stride, False, None, training)
    result=result+result2
    # BN is applied to the summed branches, then ReLU.
    result=tf.layers.batch_normalization(result, training=training)
    result=tf.nn.relu(result)
    if dropout is not None:
        result=tf.layers.dropout(result, dropout)
    return result
|
||||
|
||||
def b_resnet_2M(input, kernel, nbr_cc, reduce, training, dropout=None):
    """ResNet block variant "2M": variant 2's post-add BN combined with
    variant M's max-pool downsampling on both paths.
    """
    if reduce is True:
        # Downsample main path and shortcut with 2x2 max-pooling.
        result=tf.nn.max_pool(input, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
        result2=tf.nn.max_pool(input, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
        if nbr_cc[-1]!=int(input.get_shape()[-1]):
            result2=convolution(result2, 1, nbr_cc[-1], 1, True, tf.nn.relu, training)
    else:
        result=input
        if nbr_cc[-1]!=int(input.get_shape()[-1]):
            result2=convolution(input, 1, nbr_cc[-1], 1, True, tf.nn.relu, training)
        else:
            result2=input
    for shift in range(len(kernel)-1):
        result=convolution(result, kernel[shift], nbr_cc[shift], 1, True, tf.nn.relu, training)
    # Last conv: no BN, no activation (BN follows the residual add).
    result=convolution(result, kernel[len(kernel)-1], nbr_cc[len(kernel)-1], 1, False, None, training)
    result=result+result2
    result=tf.layers.batch_normalization(result, training=training)
    result=tf.nn.relu(result)
    if dropout is not None:
        result=tf.layers.dropout(result, dropout)
    return result
|
||||
|
||||
def b_resnet_3(input, kernel, nbr_cc, reduce, training, dropout=None):
    """ResNet block variant "3": like b_resnet_1 but with NO ReLU after the
    residual add -- the block's output is the raw sum (plus optional dropout).
    """
    if reduce is True:
        stride=2
        # Strided 1x1 shortcut projection when downsampling.
        result2=convolution(input, 1, nbr_cc[-1], stride, True, tf.nn.relu, training)
    else:
        if nbr_cc[-1]!=int(input.get_shape()[-1]):
            result2=convolution(input, 1, nbr_cc[-1], 1, True, tf.nn.relu, training)
        else:
            # Identity shortcut.
            result2=input
        stride=1
    result=input
    for shift in range(len(kernel)-1):
        result=convolution(result, kernel[shift], nbr_cc[shift], stride, True, tf.nn.relu, training)
        stride=1
    # Last conv: BN but no activation.
    shift=len(kernel)-1
    result=convolution(result, kernel[shift], nbr_cc[shift], stride, True, None, training)
    result=result+result2
    if dropout is not None:
        result=tf.layers.dropout(result, dropout)
    return result
|
||||
|
||||
def b_resnet_3M(input, kernel, nbr_cc, reduce, training, dropout=None):
    """ResNet block variant "3M": variant 3 (no post-add ReLU) with max-pool
    downsampling.

    NOTE(review): in the reduce branch the main path starts from the pooled
    input and the loop runs from shift=1, so kernel[0]/nbr_cc[0] is applied
    ONLY in the non-reduce branch (the max-pool stands in for the first
    conv). Also, unlike the other variants, the last conv here keeps its
    ReLU. Both look intentional for the "3M" experiment but are worth
    confirming against the tutorial video.
    """
    if reduce is True:
        # Downsample both paths with 2x2 max-pooling.
        result =tf.nn.max_pool(input, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
        result2=tf.nn.max_pool(input, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
        if nbr_cc[-1]!=int(input.get_shape()[-1]):
            result2=convolution(result2, 1, nbr_cc[-1], 1, True, tf.nn.relu, training)
    else:
        # First conv of the main path (skipped in the reduce branch).
        result=convolution(input, kernel[0], nbr_cc[0], 1, True, tf.nn.relu, training)
        if nbr_cc[-1]!=int(input.get_shape()[-1]):
            result2=convolution(input, 1, nbr_cc[-1], 1, True, tf.nn.relu, training)
        else:
            result2=input
    # Remaining convs of the main path (all ReLU-activated).
    for shift in range(1, len(kernel)):
        result=convolution(result, kernel[shift], nbr_cc[shift], 1, True, tf.nn.relu, training)
    result=result+result2
    if dropout is not None:
        result=tf.layers.dropout(result, dropout)
    return result
|
||||
|
||||
def resnet(nbr_classes, b_resnet, learning_rate):
    """Build a ResNet-50-style graph for 96x96x3 inputs.

    b_resnet selects the residual block implementation (b_resnet_1 ... 3M).
    Returns (ph_images, ph_labels, ph_is_training, softmax output, train op,
    accuracy op, Saver).
    """
    # Placeholders: images (NHWC), one-hot labels, and the BN mode flag
    # (defaults to inference).
    ph_images=tf.placeholder(shape=(None, 96, 96, 3), dtype=tf.float32)
    ph_labels=tf.placeholder(shape=(None, nbr_classes), dtype=tf.float32)
    ph_is_training=tf.placeholder_with_default(False, (), name='is_training')

    # Stem: 5x5 conv (stride 1, instead of the paper's strided 7x7) + pool.
    #result=convolution(ph_images, 7, 64, 2, True, tf.nn.relu, ph_is_training)
    result=convolution(ph_images, 5, 64, 1, True, tf.nn.relu, ph_is_training)
    result=tf.nn.max_pool(result, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME')

    # Stage 1: 3 bottleneck blocks, 64-64-256 channels, no downsampling.
    result=b_resnet(result, [1, 3, 1], [64, 64, 256], False, ph_is_training, None)
    result=b_resnet(result, [1, 3, 1], [64, 64, 256], False, ph_is_training, None)
    result=b_resnet(result, [1, 3, 1], [64, 64, 256], False, ph_is_training, None)

    # Stage 2: 4 blocks, 128-128-512; the first one halves the resolution.
    result=b_resnet(result, [1, 3, 1], [128, 128, 512], True, ph_is_training, None)
    result=b_resnet(result, [1, 3, 1], [128, 128, 512], False, ph_is_training, None)
    result=b_resnet(result, [1, 3, 1], [128, 128, 512], False, ph_is_training, None)
    result=b_resnet(result, [1, 3, 1], [128, 128, 512], False, ph_is_training, None)

    # Stage 3: 6 blocks, 256-256-1024; first one downsamples.
    result=b_resnet(result, [1, 3, 1], [256, 256, 1024], True, ph_is_training, None)
    result=b_resnet(result, [1, 3, 1], [256, 256, 1024], False, ph_is_training, None)
    result=b_resnet(result, [1, 3, 1], [256, 256, 1024], False, ph_is_training, None)
    result=b_resnet(result, [1, 3, 1], [256, 256, 1024], False, ph_is_training, None)
    result=b_resnet(result, [1, 3, 1], [256, 256, 1024], False, ph_is_training, None)
    result=b_resnet(result, [1, 3, 1], [256, 256, 1024], False, ph_is_training, None)

    # Stage 4: 3 blocks, 512-512-2048; first one downsamples.
    result=b_resnet(result, [1, 3, 1], [512, 512, 2048], True, ph_is_training, None)
    result=b_resnet(result, [1, 3, 1], [512, 512, 2048], False, ph_is_training, None)
    result=b_resnet(result, [1, 3, 1], [512, 512, 2048], False, ph_is_training, None)
    # Global average pooling over the remaining spatial extent.
    taille=result.get_shape()[1]
    result=tf.nn.avg_pool(result, ksize=[1, taille, taille, 1], strides=[1, 1, 1, 1], padding='SAME')

    # Classifier: flatten then a single FC layer producing the logits.
    result=tf.contrib.layers.flatten(result)
    result=fc(result, nbr_classes, False, None, ph_is_training)
    socs=tf.nn.softmax(result)

    loss=tf.nn.softmax_cross_entropy_with_logits_v2(labels=ph_labels, logits=result)
    # Run BN moving-average updates before each optimizer step.
    extra_update_ops=tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(extra_update_ops):
        train=tf.train.AdamOptimizer(learning_rate).minimize(loss)
    accuracy=tf.reduce_mean(tf.cast(tf.equal(tf.argmax(socs, 1), tf.argmax(ph_labels, 1)), tf.float32))

    return ph_images, ph_labels, ph_is_training, socs, train, accuracy, tf.train.Saver()
|
||||
6
Tensorflow/tutoriel19-1/README.md
Normal file
@@ -0,0 +1,6 @@
|
||||
# Tutoriel tensorflow
|
||||
## Réseau Unet: segmentation d'image
|
||||
|
||||
La vidéo de ce tutoriel est disponible à l'adresse suivante: https://www.youtube.com/watch?v=PrZ3r33gewQ
|
||||
|
||||
|
||||
35
Tensorflow/tutoriel19-1/detection.py
Normal file
@@ -0,0 +1,35 @@
|
||||
import tensorflow as tf
|
||||
import os
|
||||
import numpy as np
|
||||
import cv2
|
||||
|
||||
# Network input resolution (must match the training script).
width=160
height=120

# Dataset folder to run detection on.
dir='dataE/'

with tf.Session() as s:
    # Restore the trained U-Net from its latest checkpoint and look up the
    # input/output tensors by the names given at graph-construction time.
    saver=tf.train.import_meta_graph('./mon_modele/modele.meta')
    saver.restore(s, tf.train.latest_checkpoint('./mon_modele/'))
    graph=tf.get_default_graph()
    images=graph.get_tensor_by_name("entree:0")
    sortie=graph.get_tensor_by_name("sortie:0")

    for file in os.listdir(dir+'CameraRGB/'):
        # Input frame, resized and scaled to [0, 1] like during training.
        img=cv2.resize(cv2.imread(dir+'CameraRGB/'+file), (width, height))/255
        cv2.imshow("image", img)
        # Ground-truth mask for class id 7 (binarised to 0/255 for display).
        m=cv2.resize(cv2.imread(dir+'CameraSeg/'+file)[:,:,2], (width, height))
        m[m==7]=255
        m[m!=255]=0
        cv2.imshow("mask 7", m)
        # Ground-truth mask for class id 9.
        m=cv2.resize(cv2.imread(dir+'CameraSeg/'+file)[:,:,2], (width, height))
        m[m==9]=255
        m[m!=255]=0
        cv2.imshow("mask 9", m)
        # Network prediction: channel 0 = class 7, channel 1 = class 9.
        prediction=s.run(sortie, feed_dict={images:[img]})
        cv2.imshow("mask prediction 7", prediction[0][:,:,0])
        cv2.imshow("mask prediction 9", prediction[0][:,:,1])
        # Any key advances to the next frame; 'q' quits.
        if cv2.waitKey()&0xFF==ord('q'):
            break
|
||||
|
||||
|
||||
13
Tensorflow/tutoriel19-1/read_image.py
Normal file
@@ -0,0 +1,13 @@
|
||||
import cv2
|
||||
import os
|
||||
|
||||
# Dataset folder to browse.
dir='dataA/'

# Show every RGB frame next to its segmentation mask; press 'q' to quit,
# any other key advances to the next image.
for file in os.listdir(dir+'CameraRGB/'):
    img=cv2.imread(dir+'CameraRGB/'+file)
    cv2.imshow("image", img)
    mask=cv2.imread(dir+'CameraSeg/'+file)
    # Class ids are small integers; scale by 25 so they become visible.
    cv2.imshow("mask", mask*25)
    if cv2.waitKey()&0xFF==ord('q'):
        quit()
|
||||
147
Tensorflow/tutoriel19-1/train.py
Normal file
@@ -0,0 +1,147 @@
|
||||
import cv2
|
||||
import os
|
||||
import tensorflow as tf
|
||||
import numpy as np
|
||||
from sklearn.model_selection import train_test_split
|
||||
|
||||
# Sub-directories holding the RGB frames and their segmentation masks.
dir_img="CameraRGB/"
dir_mask="CameraSeg/"

# Working resolution fed to the network.
width=160
height=120

# Mini-batch size and number of epochs.
taille_batch=50
nbr_entrainement=100
|
||||
|
||||
def convolution(input, taille_noyau, nbr_cc, stride, b_norm=False, f_activation=None, training=False):
    """2-D convolution (SAME padding) with optional batch norm and activation.

    input        : NHWC float tensor.
    taille_noyau : kernel height/width.
    nbr_cc       : number of output channels.
    stride       : spatial stride.
    b_norm       : apply tf.layers.batch_normalization when True.
    f_activation : activation function (e.g. tf.nn.relu) or None.
    training     : batch-norm training flag (bool or tensor).
    """
    w=tf.Variable(tf.random.truncated_normal(shape=(taille_noyau, taille_noyau, int(input.get_shape()[-1]), nbr_cc)))
    # BUGFIX: the bias was a plain numpy array, frozen into the graph as a
    # constant zero that could never be trained; make it a tf.Variable.
    b=tf.Variable(np.zeros(nbr_cc), dtype=tf.float32)
    result=tf.nn.conv2d(input, w, strides=[1, stride, stride, 1], padding='SAME')
    result=tf.nn.bias_add(result, b)
    if b_norm is True:
        result=tf.layers.batch_normalization(result, training=training)
    if f_activation is not None:
        result=f_activation(result)
    return result
|
||||
|
||||
def deconvolution(input, taille_noyau, nbr_cc, stride, b_norm=False, f_activation=None, training=False):
    """Transposed 2-D convolution (SAME padding) upsampling spatially by *stride*."""
    w=tf.Variable(tf.random.truncated_normal(shape=(taille_noyau, taille_noyau, nbr_cc, int(input.get_shape()[-1]))))
    # BUGFIX: trainable bias variable (was a frozen numpy constant zero).
    b=tf.Variable(np.zeros(nbr_cc), dtype=tf.float32)
    # With SAME padding the output spatial size is exactly input * stride.
    out_h=int(input.get_shape()[1])*stride
    out_w=int(input.get_shape()[2])*stride
    # Batch size is dynamic, so read it from the runtime shape.
    b_size=tf.shape(input)[0]
    result=tf.nn.conv2d_transpose(input, w, output_shape=[b_size, out_h, out_w, nbr_cc], strides=[1, stride, stride, 1], padding='SAME')
    result=tf.nn.bias_add(result, b)
    if b_norm is True:
        result=tf.layers.batch_normalization(result, training=training)
    if f_activation is not None:
        result=f_activation(result)
    return result
|
||||
|
||||
def unet(nbr_mask, size, learning_rate=1E-3):
    """Build a small U-Net for multi-mask binary segmentation.

    nbr_mask : number of output mask channels (independent sigmoids).
    size     : input shape (height, width, channels).
    Returns (ph_images, ph_masks, ph_is_training, mask output, train op,
    accuracy op, Saver). Input/output tensors are named 'entree'/'sortie'
    so the detection script can retrieve them from a restored graph.
    """
    ph_images=tf.placeholder(shape=(None, size[0], size[1], size[2]), dtype=tf.float32, name='entree')
    ph_masks=tf.placeholder(shape=(None, size[0], size[1], nbr_mask), dtype=tf.float32)
    ph_is_training=tf.placeholder_with_default(False, (), name='is_training')

    # --- Encoder: two convs per level, keeping c1/c2/c3 for skip links. ---
    result=convolution(ph_images, 3, 16, 1, True, tf.nn.relu, ph_is_training)
    c1=convolution(result, 3, 16, 1, True, tf.nn.relu, ph_is_training)
    result=tf.nn.max_pool(c1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

    result=convolution(result, 3, 32, 1, True, tf.nn.relu, ph_is_training)
    c2=convolution(result, 3, 32, 1, True, tf.nn.relu, ph_is_training)
    result=tf.nn.max_pool(c2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

    result=convolution(result, 3, 64, 1, True, tf.nn.relu, ph_is_training)
    c3=convolution(result, 3, 64, 1, True, tf.nn.relu, ph_is_training)
    result=tf.nn.max_pool(c3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

    # --- Bottleneck. ---
    result=convolution(result, 3, 128, 1, True, tf.nn.relu, ph_is_training)
    result=convolution(result, 3, 128, 1, True, tf.nn.relu, ph_is_training)

    # --- Decoder: upsample, concatenate the matching skip, refine. ---
    d3=deconvolution(result, 3, 128, 2, True, tf.nn.relu, ph_is_training)
    result=tf.concat((d3, c3), axis=3)

    result=convolution(result, 3, 64, 1, True, tf.nn.relu, ph_is_training)
    result=convolution(result, 3, 64, 1, True, tf.nn.relu, ph_is_training)

    d2=deconvolution(result, 3, 64, 2, True, tf.nn.relu, ph_is_training)
    result=tf.concat((d2, c2), axis=3)

    result=convolution(result, 3, 32, 1, True, tf.nn.relu, ph_is_training)
    result=convolution(result, 3, 32, 1, True, tf.nn.relu, ph_is_training)

    d1=deconvolution(result, 3, 32, 2, True, tf.nn.relu, ph_is_training)
    result=tf.concat((d1, c1), axis=3)

    result=convolution(result, 3, 16, 1, True, tf.nn.relu, ph_is_training)
    result=convolution(result, 3, 16, 1, True, tf.nn.relu, ph_is_training)

    # 1x1 conv to nbr_mask logit channels (no BN, no activation).
    result=convolution(result, 1, nbr_mask, 1, False, None, ph_is_training)

    # Per-pixel sigmoid output; loss works on the raw logits for stability.
    mask=tf.nn.sigmoid(result, name="sortie")
    loss=tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=ph_masks, logits=result))
    # Pixel accuracy: rounded sigmoid vs ground-truth 0/1 masks.
    accuracy=tf.reduce_mean(tf.cast(tf.equal(tf.round(mask), ph_masks), tf.float32))

    # Run BN moving-average updates before each optimizer step.
    extra_update_ops=tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(extra_update_ops):
        train=tf.train.AdamOptimizer(learning_rate).minimize(loss)

    return ph_images, ph_masks, ph_is_training, mask, train, accuracy, tf.train.Saver()
|
||||
|
||||
# Build the U-Net: 2 output masks (class ids 7 and 9) at height x width.
ph_images, ph_masks, ph_is_training, mask, train, accuracy, saver=unet(2, (height, width, 3))

# Load every frame and build its 2-channel ground-truth mask.
tab_img=[]
tab_mask=[]
#for dir in ['dataA/', 'dataB/', 'dataC/', 'dataD/', 'dataE/']:
for dir in ['dataA/', 'dataB/', 'dataC/', 'dataD/']:
    for file in os.listdir(dir+dir_img):
        # RGB frame scaled to [0, 1].
        tab_img.append(cv2.resize(cv2.imread(dir+dir_img+file), (width, height))/255)
        # Segmentation class ids are stored in the red channel (index 2).
        img_mask=cv2.resize(cv2.imread(dir+dir_mask+file), (width, height))[:,:,2]
        # Channel 0 marks class 7, channel 1 marks class 9.
        img_mask_result=np.zeros(shape=(height, width, 2), dtype=np.float32)
        img_mask_result[:,:,0][img_mask==7]=1.
        img_mask_result[:,:,1][img_mask==9]=1.
        tab_mask.append(img_mask_result)
        # Debug toggle: flip to True to inspect the generated masks.
        if False:
            cv2.imshow("mask 7", img_mask_result[:,:,0]*255)
            cv2.imshow("mask 9", img_mask_result[:,:,1]*255)
            key=cv2.waitKey()&0xFF
            if key==ord('q'):
                quit()

tab_img=np.array(tab_img)
tab_mask=np.array(tab_mask)

# Hold out 5% of the frames for testing.
train_images, test_images, train_labels, test_labels=train_test_split(tab_img, tab_mask, test_size=.05)

with tf.Session() as s:
    s.run(tf.global_variables_initializer())
    tab_train=[]
    tab_test=[]
    for id_entrainement in np.arange(nbr_entrainement):
        print("> Entrainement", id_entrainement)
        # One epoch of optimisation (BN in training mode).
        for batch in np.arange(0, len(train_images), taille_batch):
            s.run(train, feed_dict={
                ph_images: train_images[batch:batch+taille_batch],
                ph_masks: train_labels[batch:batch+taille_batch],
                ph_is_training: True
            })
        print(" entrainement OK")
        # Pixel accuracy on the training set (inference mode: ph_is_training
        # defaults to False when not fed).
        tab_accuracy_train=[]
        for batch in np.arange(0, len(train_images), taille_batch):
            p=s.run(accuracy, feed_dict={
                ph_images: train_images[batch:batch+taille_batch],
                ph_masks: train_labels[batch:batch+taille_batch]
            })
            tab_accuracy_train.append(p)
        print(" train:", np.mean(tab_accuracy_train))
        # Pixel accuracy on the held-out set.
        tab_accuracy_test=[]
        for batch in np.arange(0, len(test_images), taille_batch):
            p=s.run(accuracy, feed_dict={
                ph_images: test_images[batch:batch+taille_batch],
                ph_masks: test_labels[batch:batch+taille_batch]
            })
            tab_accuracy_test.append(p)
        print(" test :", np.mean(tab_accuracy_test))
        # Track error rates and checkpoint after every epoch.
        tab_train.append(1-np.mean(tab_accuracy_train))
        tab_test.append(1-np.mean(tab_accuracy_test))
        saver.save(s, './mon_modele/modele')
|
||||
7
Tensorflow/tutoriel19-2/README.md
Normal file
@@ -0,0 +1,7 @@
|
||||
# Tutoriel tensorflow
|
||||
## Réseau Unet: segmentation d'image partie 2
|
||||
|
||||
La vidéo de ce tutoriel est disponible à l'adresse suivante: https://www.youtube.com/watch?v=OiqiMn-s73U
|
||||
|
||||
|
||||
|
||||
40
Tensorflow/tutoriel19-2/detection.py
Normal file
@@ -0,0 +1,40 @@
|
||||
import tensorflow as tf
|
||||
import os
|
||||
import numpy as np
|
||||
import cv2
|
||||
|
||||
# Network input resolution (must match the training script).
width=200
height=125

# Dataset folder to run detection on.
dir='dataC/'

# Per-mask display settings: box color, label text, detection threshold on
# the sigmoid output, and minimum contour area (in pixels) to keep.
tab_color=[(255, 0, 0), (255, 0, 255)]
tab_label=['Voiture', 'Signalisation']
tab_value=[0.2, 0.2]
tab_surface=[500, 200]

with tf.Session() as s:
    # Restore the trained U-Net and fetch its named input/output tensors.
    saver=tf.train.import_meta_graph('./mon_modele/modele.meta')
    saver.restore(s, tf.train.latest_checkpoint('./mon_modele/'))
    graph=tf.get_default_graph()
    images=graph.get_tensor_by_name("entree:0")
    sortie=graph.get_tensor_by_name("sortie:0")
    # Process the frames in a deterministic (sorted) order.
    l=os.listdir(dir+"CameraRGB/")
    l=sorted(l)
    for file in l:
        # Crop off the bottom of the frame (same crop as in training).
        img=cv2.imread(dir+"CameraRGB/"+file)[0:500, :]
        prediction=s.run(sortie, feed_dict={images: [cv2.resize(img, (width, height))/255]})
        # One pass per predicted mask channel.
        for m in range(prediction[0].shape[-1]):
            # Threshold the sigmoid output into a binary mask, upscale it,
            # and extract the outer contours.
            mask=np.zeros(shape=(prediction[0].shape[0], prediction[0].shape[1]))
            mask[prediction[0][:, :, m]>tab_value[m]]=1.
            mask=cv2.resize(mask, (4*width, 4*height))
            elements=cv2.findContours(mask.astype(np.uint8), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
            for e in elements:
                # Keep only blobs larger than the per-class area threshold.
                if cv2.contourArea(e)>tab_surface[m]:
                    x,y,w,h = cv2.boundingRect(e)
                    cv2.putText(img, tab_label[m], (x, y-10), cv2.FONT_HERSHEY_DUPLEX, 0.7, tab_color[m], 2)
                    cv2.rectangle(img, (x, y), (x+w, y+h), tab_color[m], 2)
        cv2.imshow("Resultat", img)
        # Any key advances; 'q' quits.
        key=cv2.waitKey()&0xFF
        if key==ord('q'):
            break
|
||||
156
Tensorflow/tutoriel19-2/train.py
Normal file
@@ -0,0 +1,156 @@
|
||||
import cv2
|
||||
import os
|
||||
import tensorflow as tf
|
||||
import numpy as np
|
||||
from sklearn.model_selection import train_test_split
|
||||
|
||||
dir_img="CameraRGB/"
|
||||
dir_mask="CameraSeg/"
|
||||
|
||||
width=200
|
||||
height=125
|
||||
|
||||
taille_batch=100
|
||||
nbr_entrainement=100
|
||||
|
||||
def crop(tensor1, tensor2):
    """Center-crop *tensor1* (NHWC) to the spatial size of *tensor2*."""
    h1, w1 = int(tensor1.get_shape()[1]), int(tensor1.get_shape()[2])
    h2, w2 = int(tensor2.get_shape()[1]), int(tensor2.get_shape()[2])
    # Offsets center the crop; -1 keeps the full batch and channel extents.
    debut = (0, (h1 - h2) // 2, (w1 - w2) // 2, 0)
    taille = (-1, h2, w2, -1)
    return tf.slice(tensor1, debut, taille)
|
||||
|
||||
def convolution(input, taille_noyau, nbr_cc, stride, b_norm=False, f_activation=None, training=False, padding='SAME'):
    """2-D convolution with configurable padding, optional BN and activation.

    input        : NHWC float tensor.
    taille_noyau : kernel height/width.
    nbr_cc       : number of output channels.
    stride       : spatial stride.
    b_norm       : apply tf.layers.batch_normalization when True.
    f_activation : activation function (e.g. tf.nn.relu) or None.
    training     : batch-norm training flag (bool or tensor).
    padding      : 'SAME' or 'VALID' (passed straight to tf.nn.conv2d).
    """
    w=tf.Variable(tf.random.truncated_normal(shape=(taille_noyau, taille_noyau, int(input.get_shape()[-1]), nbr_cc)))
    # BUGFIX: the bias was a plain numpy array, frozen into the graph as a
    # constant zero that could never be trained; make it a tf.Variable.
    b=tf.Variable(np.zeros(nbr_cc), dtype=tf.float32)
    result=tf.nn.conv2d(input, w, strides=[1, stride, stride, 1], padding=padding)
    result=tf.nn.bias_add(result, b)
    if b_norm is True:
        result=tf.layers.batch_normalization(result, training=training)
    if f_activation is not None:
        result=f_activation(result)
    return result
|
||||
|
||||
def deconvolution(input, taille_noyau, nbr_cc, stride, b_norm=False, f_activation=None, training=False, padding='SAME'):
    """Transposed 2-D convolution that upsamples spatially by *stride*.

    Raises ValueError for an unsupported *padding* mode.
    """
    w=tf.Variable(tf.random.truncated_normal(shape=(taille_noyau, taille_noyau, nbr_cc, int(input.get_shape()[-1]))))
    # BUGFIX: trainable bias variable (was a frozen numpy constant zero).
    b=tf.Variable(np.zeros(nbr_cc), dtype=tf.float32)
    if padding == 'VALID':
        # VALID transposed conv grows the map by the kernel overhang.
        out_h=(int(input.get_shape()[1])-1)*stride+taille_noyau
        out_w=(int(input.get_shape()[2])-1)*stride+taille_noyau
    elif padding == 'SAME':
        # One of the output sizes conv2d_transpose accepts for SAME/stride.
        out_h=(int(input.get_shape()[1])-1)*stride+1
        out_w=(int(input.get_shape()[2])-1)*stride+1
    else:
        # BUGFIX: raise instead of quit() -- a graph-building helper must
        # not terminate the whole interpreter on a bad argument.
        raise ValueError("padding must be 'SAME' or 'VALID', got %r" % (padding,))
    # Batch size is dynamic, so read it from the runtime shape.
    b_size=tf.shape(input)[0]
    result=tf.nn.conv2d_transpose(input, w, output_shape=[b_size, out_h, out_w, nbr_cc], strides=[1, stride, stride, 1], padding=padding)
    result=tf.nn.bias_add(result, b)
    if b_norm is True:
        result=tf.layers.batch_normalization(result, training=training)
    if f_activation is not None:
        result=f_activation(result)
    return result
|
||||
|
||||
def unet(nbr_mask, size, padding='SAME', learning_rate=1E-3):
    """Build a U-Net supporting VALID-padding convolutions.

    With padding='VALID' the skip tensors shrink relative to the decoder
    tensors, so each skip is center-cropped (crop()) before concatenation
    and the final logits are resized back to the mask resolution.
    Returns (ph_images, ph_masks, ph_is_training, mask output, train op,
    accuracy op, Saver); input/output tensors are named 'entree'/'sortie'.
    """
    ph_images=tf.placeholder(shape=(None, size[0], size[1], size[2]), dtype=tf.float32, name='entree')
    ph_masks=tf.placeholder(shape=(None, size[0], size[1], nbr_mask), dtype=tf.float32)
    ph_is_training=tf.placeholder_with_default(False, (), name='is_training')

    # --- Encoder: two convs per level, keeping c1/c2/c3 for skip links. ---
    result=convolution(ph_images, 3, 32, 1, True, tf.nn.relu, ph_is_training, padding)
    c1=convolution(result, 3, 32, 1, True, tf.nn.relu, ph_is_training, padding)
    result=tf.nn.max_pool(c1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

    result=convolution(result, 3, 64, 1, True, tf.nn.relu, ph_is_training, padding)
    c2=convolution(result, 3, 64, 1, True, tf.nn.relu, ph_is_training, padding)
    result=tf.nn.max_pool(c2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

    result=convolution(result, 3, 128, 1, True, tf.nn.relu, ph_is_training, padding)
    c3=convolution(result, 3, 128, 1, True, tf.nn.relu, ph_is_training, padding)
    result=tf.nn.max_pool(c3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

    # --- Bottleneck. ---
    result=convolution(result, 3, 256, 1, True, tf.nn.relu, ph_is_training, padding)
    result=convolution(result, 3, 256, 1, True, tf.nn.relu, ph_is_training, padding)

    # --- Decoder: upsample, crop the skip to match, concatenate, refine. ---
    d3=deconvolution(result, 3, 256, 2, True, tf.nn.relu, ph_is_training, padding)
    c3=crop(c3, d3)
    result=tf.concat((d3, c3), axis=3)

    result=convolution(result, 3, 128, 1, True, tf.nn.relu, ph_is_training, padding)
    result=convolution(result, 3, 128, 1, True, tf.nn.relu, ph_is_training, padding)

    d2=deconvolution(result, 3, 128, 2, True, tf.nn.relu, ph_is_training, padding)
    c2=crop(c2, d2)
    result=tf.concat((d2, c2), axis=3)

    result=convolution(result, 3, 64, 1, True, tf.nn.relu, ph_is_training, padding)
    result=convolution(result, 3, 64, 1, True, tf.nn.relu, ph_is_training, padding)

    d1=deconvolution(result, 3, 64, 2, True, tf.nn.relu, ph_is_training, padding)
    c1=crop(c1, d1)
    result=tf.concat((d1, c1), axis=3)

    result=convolution(result, 3, 32, 1, True, tf.nn.relu, ph_is_training, padding)
    result=convolution(result, 3, 32, 1, True, tf.nn.relu, ph_is_training, padding)

    # 1x1 conv to nbr_mask logit channels, then resize back to the mask
    # resolution (needed because VALID padding shrinks the map).
    result=convolution(result, 1, nbr_mask, 1, False, None, ph_is_training, padding)
    result=tf.image.resize_images(result, (ph_masks.get_shape()[1], ph_masks.get_shape()[2]))
    mask=tf.nn.sigmoid(result, name="sortie")

    # Loss on raw logits; pixel accuracy on the rounded sigmoid output.
    loss=tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=ph_masks, logits=result))
    accuracy=tf.reduce_mean(tf.cast(tf.equal(tf.round(mask), ph_masks), tf.float32))

    # Run BN moving-average updates before each optimizer step.
    extra_update_ops=tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(extra_update_ops):
        train=tf.train.AdamOptimizer(learning_rate).minimize(loss)

    return ph_images, ph_masks, ph_is_training, mask, train, accuracy, tf.train.Saver()
|
||||
|
||||
# Build the VALID-padding U-Net: 2 masks (class ids 10 and 12).
ph_images, ph_masks, ph_is_training, mask, train, accuracy, saver=unet(2, (height, width, 3), 'VALID')

# Load every frame (reusing tutoriel19-1's data) and build its
# 2-channel ground-truth mask.
tab_img=[]
tab_mask=[]
#for dir in ['../tutoriel19-1/dataA/', '../tutoriel19-1/dataB/', '../tutoriel19-1/dataC/', '../tutoriel19-1/dataD/', '../tutoriel19-1/dataE/']:
for dir in ['../tutoriel19-1/dataA/', '../tutoriel19-1/dataB/', '../tutoriel19-1/dataC/', '../tutoriel19-1/dataD/']:
    for file in os.listdir(dir+dir_img):
        # Crop off the bottom of the frame, resize, scale to [0, 1].
        tab_img.append(cv2.resize(cv2.imread(dir+dir_img+file)[0:500, 0:800], (width, height))/255)
        # Segmentation class ids live in channel index 2.
        img_mask=cv2.resize(cv2.imread(dir+dir_mask+file)[0:500, 0:800], (width, height))[:,:,2]
        # Channel 0 marks class 10 (cars), channel 1 marks class 12 (signs).
        img_mask_result=np.zeros(shape=(height, width, 2), dtype=np.float32)
        img_mask_result[:,:,0][img_mask==10]=1.
        img_mask_result[:,:,1][img_mask==12]=1.
        tab_mask.append(img_mask_result)

tab_img=np.array(tab_img)
tab_mask=np.array(tab_mask)

# Hold out 5% of the frames for testing.
train_images, test_images, train_labels, test_labels=train_test_split(tab_img, tab_mask, test_size=.05)
|
||||
|
||||
with tf.Session() as s:
|
||||
s.run(tf.global_variables_initializer())
|
||||
tab_train=[]
|
||||
tab_test=[]
|
||||
for id_entrainement in np.arange(nbr_entrainement):
|
||||
print("> Entrainement", id_entrainement)
|
||||
for batch in np.arange(0, len(train_images), taille_batch):
|
||||
s.run(train, feed_dict={
|
||||
ph_images: train_images[batch:batch+taille_batch],
|
||||
ph_masks: train_labels[batch:batch+taille_batch],
|
||||
ph_is_training: True
|
||||
})
|
||||
print(" entrainement OK")
|
||||
tab_accuracy_train=[]
|
||||
for batch in np.arange(0, len(train_images), taille_batch):
|
||||
p=s.run(accuracy, feed_dict={
|
||||
ph_images: train_images[batch:batch+taille_batch],
|
||||
ph_masks: train_labels[batch:batch+taille_batch]
|
||||
})
|
||||
tab_accuracy_train.append(p)
|
||||
print(" train:", np.mean(tab_accuracy_train))
|
||||
tab_accuracy_test=[]
|
||||
for batch in np.arange(0, len(test_images), taille_batch):
|
||||
p=s.run(accuracy, feed_dict={
|
||||
ph_images: test_images[batch:batch+taille_batch],
|
||||
ph_masks: test_labels[batch:batch+taille_batch]
|
||||
})
|
||||
tab_accuracy_test.append(p)
|
||||
print(" test :", np.mean(tab_accuracy_test))
|
||||
tab_train.append(1-np.mean(tab_accuracy_train))
|
||||
tab_test.append(1-np.mean(tab_accuracy_test))
|
||||
saver.save(s, './mon_modele/modele')
|
||||
94
Tensorflow/tutoriel2/MNIST_convolution.py
Normal file
@@ -0,0 +1,94 @@
|
||||
import tensorflow as tf
|
||||
import numpy as np
|
||||
import matplotlib.pyplot as plot
|
||||
import cv2
|
||||
|
||||
def convolution(couche_prec, taille_noyau, nbr_noyau):
    """2-D convolution (stride 1, SAME padding) with a trainable bias.

    couche_prec: previous layer tensor, shape (batch, h, w, channels).
    taille_noyau: kernel height/width.
    nbr_noyau: number of output feature maps.
    Returns the non-activated convolution output tensor.
    """
    w=tf.Variable(tf.random.truncated_normal(shape=(taille_noyau, taille_noyau, int(couche_prec.get_shape()[-1]), nbr_noyau)))
    # Bug fix: the bias was a plain NumPy array (float64), so it was neither
    # trainable nor dtype-consistent with the float32 convolution output.
    # Make it a float32 tf.Variable, matching the fc() helper below.
    b=tf.Variable(np.zeros(shape=(nbr_noyau)), dtype=tf.float32)
    result=tf.nn.conv2d(couche_prec, w, strides=[1, 1, 1, 1], padding='SAME')+b
    return result
|
||||
|
||||
def fc(couche_prec, nbr_neurone):
    """Fully-connected layer: returns couche_prec @ W + b (no activation)."""
    nbr_entrees = int(couche_prec.get_shape()[-1])
    poids = tf.Variable(tf.random.truncated_normal(shape=(nbr_entrees, nbr_neurone), dtype=tf.float32))
    biais = tf.Variable(np.zeros(shape=(nbr_neurone)), dtype=tf.float32)
    return tf.matmul(couche_prec, poids) + biais
|
||||
|
||||
# Hyper-parameters.
taille_batch=100
nbr_entrainement=3
learning_rate=0.001

# Load the raw MNIST IDX files: skip the 16-byte image / 8-byte label
# headers, reshape images to (N, 28, 28, 1), scale pixels to [0, 1] and
# one-hot encode the labels via an identity-matrix lookup.
mnist_train_images=np.fromfile("mnist/train-images-idx3-ubyte", dtype=np.uint8)[16:].reshape(-1, 28, 28, 1)/255
mnist_train_labels=np.eye(10)[np.fromfile("mnist/train-labels-idx1-ubyte", dtype=np.uint8)[8:]]
mnist_test_images=np.fromfile("mnist/t10k-images-idx3-ubyte", dtype=np.uint8)[16:].reshape(-1, 28, 28, 1)/255
mnist_test_labels=np.eye(10)[np.fromfile("mnist/t10k-labels-idx1-ubyte", dtype=np.uint8)[8:]]

# Graph inputs.
ph_images=tf.placeholder(shape=(None, 28, 28, 1), dtype=tf.float32)
ph_labels=tf.placeholder(shape=(None, 10), dtype=tf.float32)

# Feature extractor: two blocks of (conv, conv, 2x2 max-pool).
result=convolution(ph_images, 5, 32)
result=convolution(result, 5, 32)
result=tf.nn.max_pool(result, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

result=convolution(result, 5, 128)
result=convolution(result, 5, 128)
result=tf.nn.max_pool(result, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

result=tf.contrib.layers.flatten(result)

# Classifier head: fc(512)+sigmoid, then fc(10) logits with softmax output.
result=fc(result, 512)
result=tf.nn.sigmoid(result)
result=fc(result, 10)
scso=tf.nn.softmax(result)

# NOTE(review): this loss is a per-example vector; minimize() implicitly sums
# it, so the effective learning rate scales with the batch size. Wrapping it
# in tf.reduce_mean would be the conventional choice — confirm intent.
loss=tf.nn.softmax_cross_entropy_with_logits_v2(labels=ph_labels, logits=result)
train=tf.train.AdamOptimizer(learning_rate).minimize(loss)
accuracy=tf.reduce_mean(tf.cast(tf.equal(tf.argmax(scso, 1), tf.argmax(ph_labels, 1)), tf.float32))

with tf.Session() as s:
    s.run(tf.global_variables_initializer())
    tab_train=[]
    tab_test=[]
    for id_entrainement in np.arange(nbr_entrainement):
        tab_accuracy_train=[]
        tab_accuracy_test=[]
        # One optimisation pass over the training set, batch by batch.
        for batch in np.arange(0, len(mnist_train_images), taille_batch):
            s.run(train, feed_dict={
                ph_images: mnist_train_images[batch:batch+taille_batch],
                ph_labels: mnist_train_labels[batch:batch+taille_batch]
            })
        # Accuracy on the training set.
        for batch in np.arange(0, len(mnist_train_images), taille_batch):
            precision=s.run(accuracy, feed_dict={
                ph_images: mnist_train_images[batch:batch+taille_batch],
                ph_labels: mnist_train_labels[batch:batch+taille_batch]
            })
            tab_accuracy_train.append(precision)
        # Accuracy on the test set.
        for batch in np.arange(0, len(mnist_test_images), taille_batch):
            precision=s.run(accuracy, feed_dict={
                ph_images: mnist_test_images[batch:batch+taille_batch],
                ph_labels: mnist_test_labels[batch:batch+taille_batch]
            })
            tab_accuracy_test.append(precision)
        print("> Entrainement", id_entrainement)
        print(" train:", np.mean(tab_accuracy_train))
        tab_train.append(1-np.mean(tab_accuracy_train))
        print(" test :", np.mean(tab_accuracy_test))
        tab_test.append(1-np.mean(tab_accuracy_test))

    # Error curves (1 - accuracy) per epoch.
    plot.ylim(0, 1)
    plot.grid()
    plot.plot(tab_train, label="Train error")
    plot.plot(tab_test, label="Test error")
    plot.legend(loc="upper right")
    plot.show()

    # Visual check: network output for the first test batch, one image at a
    # time; press 'q' to stop early.
    # NOTE(review): `resulat` is a typo for `resultat` — kept (doc-only edit).
    resulat=s.run(scso, feed_dict={ph_images: mnist_test_images[0:taille_batch]})
    np.set_printoptions(formatter={'float': '{:0.3f}'.format})
    for image in range(taille_batch):
        print("image", image)
        print("sortie du réseau:", resulat[image], np.argmax(resulat[image]))
        print("sortie attendue :", mnist_test_labels[image], np.argmax(mnist_test_labels[image]))
        cv2.imshow('image', mnist_test_images[image])
        if cv2.waitKey()&0xFF==ord('q'):
            break
|
||||
25
Tensorflow/tutoriel2/README.md
Normal file
@@ -0,0 +1,25 @@
|
||||
# Tutoriel tensorflow
|
||||
## Réalisation d'un réseau convolutif et utilisation sur la base MNIST
|
||||
|
||||
La vidéo du tutoriel se trouve à l'adresse suivante:
|
||||
https://www.youtube.com/watch?v=mUyRdiQRJBI
|
||||
|
||||
Si vous souhaitez me soutenir: <https://fr.tipeee.com/l42-project>
|
||||
|
||||
Le code de cette vidéo est écrit pour la version 1.X de tensorflow (je recommande la version 1.13.1), pour l'installer, il suffit de taper la commande suivante :
|
||||
|
||||
`# pip install tensorflow==1.13.1`
|
||||
|
||||
ou la version GPU:
|
||||
|
||||
`# pip install tensorflow-gpu==1.13.1`
|
||||
|
||||
Pour utiliser ce programme, vous devez récupérer les fichiers MNIST sur le site suivant:
|
||||
http://yann.lecun.com/exdb/mnist/
|
||||
et les placer dans le répertoire ./mnist
|
||||
|
||||
La courbe d'erreur après 200 cycles d'apprentissage est la suivante :
|
||||
|
||||

|
||||
|
||||
L'apprentissage prend environ 35 minutes sur une GeForce 1080
|
||||
BIN
Tensorflow/tutoriel2/graph_error.png
Normal file
|
After Width: | Height: | Size: 27 KiB |
200
Tensorflow/tutoriel2/log_error
Normal file
@@ -0,0 +1,200 @@
|
||||
0:0.394567:0.402900
|
||||
1:0.487783:0.498400
|
||||
2:0.556483:0.562900
|
||||
3:0.666317:0.667800
|
||||
4:0.719817:0.723000
|
||||
5:0.665250:0.665400
|
||||
6:0.632883:0.637300
|
||||
7:0.645533:0.653300
|
||||
8:0.582667:0.595900
|
||||
9:0.682800:0.680900
|
||||
10:0.679067:0.680400
|
||||
11:0.689933:0.694900
|
||||
12:0.608850:0.616400
|
||||
13:0.646267:0.648700
|
||||
14:0.700233:0.705700
|
||||
15:0.604517:0.604800
|
||||
16:0.703417:0.706600
|
||||
17:0.744233:0.751700
|
||||
18:0.767233:0.770000
|
||||
19:0.664367:0.665900
|
||||
20:0.703983:0.703200
|
||||
21:0.711650:0.715200
|
||||
22:0.681983:0.687100
|
||||
23:0.729483:0.737800
|
||||
24:0.694517:0.694500
|
||||
25:0.729733:0.735300
|
||||
26:0.727667:0.736100
|
||||
27:0.727283:0.726800
|
||||
28:0.728900:0.734200
|
||||
29:0.673317:0.678600
|
||||
30:0.631750:0.637900
|
||||
31:0.695350:0.702800
|
||||
32:0.697967:0.706800
|
||||
33:0.688217:0.698100
|
||||
34:0.668683:0.669900
|
||||
35:0.712167:0.717200
|
||||
36:0.633217:0.635100
|
||||
37:0.729250:0.731600
|
||||
38:0.769667:0.775600
|
||||
39:0.787717:0.795900
|
||||
40:0.710100:0.717700
|
||||
41:0.752083:0.759900
|
||||
42:0.747650:0.753200
|
||||
43:0.770250:0.775600
|
||||
44:0.776183:0.783900
|
||||
45:0.776567:0.783000
|
||||
46:0.779767:0.786700
|
||||
47:0.777167:0.782000
|
||||
48:0.730217:0.735000
|
||||
49:0.757633:0.759300
|
||||
50:0.732167:0.741700
|
||||
51:0.744833:0.751500
|
||||
52:0.764850:0.766600
|
||||
53:0.767033:0.773100
|
||||
54:0.776567:0.783200
|
||||
55:0.788350:0.794400
|
||||
56:0.736117:0.738100
|
||||
57:0.767867:0.771600
|
||||
58:0.751817:0.759400
|
||||
59:0.750967:0.761000
|
||||
60:0.727200:0.727100
|
||||
61:0.746750:0.743000
|
||||
62:0.760017:0.756500
|
||||
63:0.768400:0.764600
|
||||
64:0.774700:0.773100
|
||||
65:0.779467:0.775300
|
||||
66:0.782733:0.779900
|
||||
67:0.785433:0.782000
|
||||
68:0.787467:0.784300
|
||||
69:0.789333:0.786200
|
||||
70:0.790650:0.787000
|
||||
71:0.791417:0.788500
|
||||
72:0.792683:0.789700
|
||||
73:0.793350:0.789700
|
||||
74:0.794217:0.790800
|
||||
75:0.794433:0.791200
|
||||
76:0.795067:0.791600
|
||||
77:0.795550:0.792600
|
||||
78:0.795933:0.792400
|
||||
79:0.796700:0.792300
|
||||
80:0.797150:0.792600
|
||||
81:0.797333:0.792700
|
||||
82:0.797817:0.793100
|
||||
83:0.798417:0.793000
|
||||
84:0.798617:0.793400
|
||||
85:0.798833:0.793800
|
||||
86:0.799083:0.794000
|
||||
87:0.799467:0.794100
|
||||
88:0.799567:0.794500
|
||||
89:0.799817:0.795200
|
||||
90:0.799850:0.795400
|
||||
91:0.800100:0.795300
|
||||
92:0.800283:0.795200
|
||||
93:0.800467:0.794800
|
||||
94:0.800600:0.795000
|
||||
95:0.800750:0.795300
|
||||
96:0.800850:0.795400
|
||||
97:0.801067:0.795900
|
||||
98:0.801067:0.795600
|
||||
99:0.801317:0.795500
|
||||
100:0.801300:0.795600
|
||||
101:0.801617:0.795700
|
||||
102:0.801717:0.796000
|
||||
103:0.801783:0.796200
|
||||
104:0.801867:0.796100
|
||||
105:0.802117:0.796300
|
||||
106:0.802133:0.796600
|
||||
107:0.802167:0.796500
|
||||
108:0.802300:0.796500
|
||||
109:0.802383:0.796300
|
||||
110:0.802400:0.796300
|
||||
111:0.802450:0.796200
|
||||
112:0.802517:0.796300
|
||||
113:0.802483:0.796300
|
||||
114:0.802533:0.796500
|
||||
115:0.802700:0.796400
|
||||
116:0.802583:0.796600
|
||||
117:0.802533:0.796500
|
||||
118:0.802633:0.796500
|
||||
119:0.802650:0.796600
|
||||
120:0.802483:0.796600
|
||||
121:0.802517:0.796600
|
||||
122:0.802567:0.796600
|
||||
123:0.802667:0.796600
|
||||
124:0.802700:0.796600
|
||||
125:0.802850:0.796800
|
||||
126:0.802767:0.796800
|
||||
127:0.802750:0.796700
|
||||
128:0.802833:0.796700
|
||||
129:0.802883:0.796600
|
||||
130:0.802917:0.796600
|
||||
131:0.802883:0.796700
|
||||
132:0.802967:0.797000
|
||||
133:0.802967:0.797100
|
||||
134:0.803083:0.797000
|
||||
135:0.803083:0.797000
|
||||
136:0.803050:0.797200
|
||||
137:0.803150:0.797300
|
||||
138:0.803200:0.797300
|
||||
139:0.803300:0.797400
|
||||
140:0.803283:0.797400
|
||||
141:0.803350:0.797300
|
||||
142:0.803400:0.797300
|
||||
143:0.803417:0.797100
|
||||
144:0.803417:0.797200
|
||||
145:0.803400:0.797200
|
||||
146:0.803433:0.797100
|
||||
147:0.803600:0.797100
|
||||
148:0.803650:0.797200
|
||||
149:0.803667:0.797200
|
||||
150:0.803717:0.797200
|
||||
151:0.803750:0.797200
|
||||
152:0.803750:0.797100
|
||||
153:0.803783:0.797100
|
||||
154:0.803717:0.797100
|
||||
155:0.803750:0.797000
|
||||
156:0.803733:0.797000
|
||||
157:0.803783:0.797000
|
||||
158:0.803817:0.797100
|
||||
159:0.803950:0.797200
|
||||
160:0.804083:0.797000
|
||||
161:0.804183:0.797000
|
||||
162:0.804183:0.797200
|
||||
163:0.804350:0.797300
|
||||
164:0.804400:0.797300
|
||||
165:0.804400:0.797200
|
||||
166:0.804400:0.797100
|
||||
167:0.804467:0.797100
|
||||
168:0.804417:0.797000
|
||||
169:0.804383:0.797000
|
||||
170:0.804400:0.797000
|
||||
171:0.804433:0.796900
|
||||
172:0.804433:0.796800
|
||||
173:0.804483:0.796800
|
||||
174:0.804467:0.796800
|
||||
175:0.804450:0.796900
|
||||
176:0.804483:0.796800
|
||||
177:0.804467:0.796700
|
||||
178:0.804517:0.796400
|
||||
179:0.804533:0.796500
|
||||
180:0.804600:0.796500
|
||||
181:0.804550:0.796400
|
||||
182:0.804517:0.796300
|
||||
183:0.804583:0.796300
|
||||
184:0.804617:0.796400
|
||||
185:0.804633:0.796400
|
||||
186:0.804633:0.796500
|
||||
187:0.804633:0.796600
|
||||
188:0.804717:0.796800
|
||||
189:0.804683:0.796900
|
||||
190:0.804783:0.796900
|
||||
191:0.804850:0.796900
|
||||
192:0.804950:0.796900
|
||||
193:0.804983:0.796800
|
||||
194:0.804967:0.796800
|
||||
195:0.804967:0.796900
|
||||
196:0.804967:0.797000
|
||||
197:0.804967:0.797000
|
||||
198:0.805000:0.797000
|
||||
199:0.805000:0.797200
|
||||
5
Tensorflow/tutoriel23/README.md
Normal file
@@ -0,0 +1,5 @@
|
||||
# Tutoriel tensorflow
|
||||
## Tensorflow 2.0
|
||||
|
||||
La vidéo de ce tutoriel est disponible à l'adresse suivante: https://www.youtube.com/watch?v=-lz3zHRTfow
|
||||
|
||||
74
Tensorflow/tutoriel23/mnist.py
Normal file
@@ -0,0 +1,74 @@
|
||||
import tensorflow as tf
from tensorflow.keras import layers, models
import numpy as np
import time

# Hyper-parameters.
batch_size=64
nbr_entrainement=5  # number of epochs

# Load MNIST, add a channel axis and scale pixels to [0, 1].
(x_train, y_train), (x_test, y_test)=tf.keras.datasets.mnist.load_data()
x_train=(x_train.reshape(-1, 28, 28, 1)/255).astype(np.float32)
x_test=(x_test.reshape(-1, 28, 28, 1)/255).astype(np.float32)

train_ds=tf.data.Dataset.from_tensor_slices((x_train, y_train)).batch(batch_size)
test_ds=tf.data.Dataset.from_tensor_slices((x_test, y_test)).batch(batch_size)

# Small CNN: two strided conv+BatchNorm blocks, then a dense softmax head.
model = models.Sequential([
    layers.Conv2D(64, 3, strides=2, activation='relu'),
    layers.BatchNormalization(),
    layers.Conv2D(128, 3, strides=2, activation='relu'),
    layers.BatchNormalization(),
    layers.Flatten(),
    layers.Dense(512, activation='relu'),
    layers.BatchNormalization(),
    layers.Dense(10, activation='softmax')
])

# Optimiser, loss and running metrics shared by train_step()/train()/test().
optimizer=tf.keras.optimizers.Adam()
loss_object=tf.keras.losses.SparseCategoricalCrossentropy()
train_loss=tf.keras.metrics.Mean()
train_accuracy=tf.keras.metrics.SparseCategoricalAccuracy()
test_loss=tf.keras.metrics.Mean()
test_accuracy=tf.keras.metrics.SparseCategoricalAccuracy()
|
||||
|
||||
@tf.function
def train_step(images, labels):
    """Run one optimisation step on a single batch.

    Updates the model weights in place and accumulates the module-level
    train_loss / train_accuracy metrics.
    """
    with tf.GradientTape() as tape:
        # Bug fix: pass training=True so layers that behave differently at
        # train time (here BatchNormalization) actually run in training mode
        # inside the tape; before, BN used inference statistics while training.
        predictions=model(images, training=True)
        loss=loss_object(labels, predictions)
    gradients=tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))
    train_loss(loss)
    train_accuracy(labels, predictions)
|
||||
|
||||
def train(train_ds, nbr_entrainement):
    """Train the module-level model for nbr_entrainement epochs over
    train_ds, printing loss/accuracy and elapsed time after each epoch."""
    message='Entrainement {:04d}, loss: {:6.4f}, accuracy: {:7.4f}%, temps: {:7.4f}'
    for entrainement in range(nbr_entrainement):
        debut=time.time()
        for images, labels in train_ds:
            train_step(images, labels)
        print(message.format(entrainement+1,
                             train_loss.result(),
                             train_accuracy.result()*100,
                             time.time()-debut))
        # Clear the running metrics so each epoch is reported independently.
        train_loss.reset_states()
        train_accuracy.reset_states()
|
||||
|
||||
def test(test_ds):
    """Evaluate the module-level model on test_ds and print loss/accuracy/time.

    Robustness fix: the test metrics are reset on entry so repeated calls
    do not accumulate results from previous evaluations.
    """
    test_loss.reset_states()
    test_accuracy.reset_states()
    start=time.time()
    for test_images, test_labels in test_ds:
        # training=False: run BatchNormalization with its moving statistics.
        predictions=model(test_images, training=False)
        t_loss=loss_object(test_labels, predictions)
        test_loss(t_loss)
        test_accuracy(test_labels, predictions)
    message='Loss: {:6.4f}, accuracy: {:7.4f}%, temps: {:7.4f}'
    print(message.format(test_loss.result(),
                         test_accuracy.result()*100,
                         time.time()-start))
|
||||
|
||||
# Train the model, then report metrics on the held-out test set.
print("Entrainement")
train(train_ds, nbr_entrainement)

print("Jeu de test")
test(test_ds)
|
||||
|
||||
34
Tensorflow/tutoriel23/test_vitesse_1.py
Normal file
@@ -0,0 +1,34 @@
|
||||
import tensorflow as tf
import numpy as np
from tensorflow.keras import layers

# Speed benchmark: train a small CNN on MNIST with the high-level
# model.fit() API (companion to test_vitesse_2.py).

batch_size=64
epochs=5

# Bug fix: mnist.load_data() was called twice, loading the dataset a second
# time and discarding the first result.
mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test)=mnist.load_data()
# Add a channel axis and scale pixels to [0, 1].
x_train=(x_train.reshape(-1, 28, 28, 1)/255).astype(np.float32)
x_test=(x_test.reshape(-1, 28, 28, 1)/255).astype(np.float32)

# NOTE(review): these datasets are built but never used — model.fit() below
# consumes the NumPy arrays directly. Kept for parity with the companion
# benchmark script.
train_ds=tf.data.Dataset.from_tensor_slices((x_train, y_train)).batch(batch_size)
test_ds=tf.data.Dataset.from_tensor_slices((x_test, y_test)).batch(batch_size)

# Small CNN: two strided conv+BatchNorm blocks, then a dense softmax head.
model = tf.keras.models.Sequential([
    layers.Conv2D(64, 3, strides=2, activation='relu'),
    layers.BatchNormalization(),
    layers.Conv2D(128, 3, strides=2, activation='relu'),
    layers.BatchNormalization(),
    layers.Flatten(),
    layers.Dense(512, activation='relu'),
    layers.BatchNormalization(),
    layers.Dense(10, activation='softmax')
])

model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])

model.fit(x_train, y_train, epochs=epochs)
#model.evaluate(x_test, y_test)
|
||||
58
Tensorflow/tutoriel23/test_vitesse_2.py
Normal file
@@ -0,0 +1,58 @@
|
||||
import tensorflow as tf
|
||||
from tensorflow.keras import layers, models
|
||||
import numpy as np
|
||||
from sklearn.utils import shuffle
|
||||
|
||||
batch_size=16
|
||||
epochs=5
|
||||
|
||||
def stl10(path):
    """Load the STL-10 binary dataset found under *path*.

    Returns (labels, train_images, train_labels, test_images, test_labels)
    with images as (N, 96, 96, 3) uint8 arrays and class ids in [0, 9].
    """
    labels=['avion', 'oiseau', 'voiture', 'chat', 'cerf', 'chien', 'cheval', 'singe', 'bateau', 'camion']

    def _images(fichier):
        # STL-10 stores images channel-first (3, 96, 96); convert to HWC.
        return np.fromfile(fichier, dtype=np.uint8).reshape(-1, 3, 96, 96).transpose(0, 2, 3, 1)

    def _classes(fichier):
        # Class ids are stored 1-based; shift to 0-based.
        return np.fromfile(fichier, dtype=np.uint8)-1

    train_images=_images(path+"/train_X.bin")
    train_labels=_classes(path+"/train_y.bin")
    train_images, train_labels=shuffle(train_images, train_labels)
    test_images=_images(path+"/test_X.bin")
    test_labels=_classes(path+"/test_y.bin")
    return labels, train_images, train_labels, test_images, test_labels
|
||||
|
||||
# Load STL-10 and scale pixels to [0, 1].
labels, x_train, y_train, x_test, y_test=stl10("stl10_binary")
x_train=(x_train/255).astype(np.float32)
x_test=(x_test/255).astype(np.float32)

# NOTE(review): these datasets are built but never used — model.fit() below
# consumes the NumPy arrays directly.
train_ds=tf.data.Dataset.from_tensor_slices((x_train, y_train)).batch(batch_size)
test_ds=tf.data.Dataset.from_tensor_slices((x_test, y_test)).batch(batch_size)

# Larger CNN: four (conv, BN, ReLU, 2x2 max-pool) blocks with growing filter
# counts, then a dense softmax head.
model=models.Sequential([
    layers.Conv2D(256, 5, strides=1),
    layers.BatchNormalization(),
    layers.Activation('relu'),
    layers.MaxPool2D(pool_size=2, strides=2),

    layers.Conv2D(512, 5, strides=1),
    layers.BatchNormalization(),
    layers.Activation('relu'),
    layers.MaxPool2D(pool_size=2, strides=2),

    layers.Conv2D(1024, 5, strides=1),
    layers.BatchNormalization(),
    layers.Activation('relu'),
    layers.MaxPool2D(pool_size=2, strides=2),

    layers.Conv2D(2048, 5, strides=1),
    layers.BatchNormalization(),
    layers.Activation('relu'),
    layers.MaxPool2D(pool_size=2, strides=2),

    layers.Flatten(),
    layers.Dense(1024, activation='relu'),
    layers.BatchNormalization(),
    layers.Dense(10, activation='softmax')
])

model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])

model.fit(x_train, y_train, epochs=epochs)
#model.evaluate(x_test, y_test)
#model.summary()
|
||||
139
Tensorflow/tutoriel24/GAN_entrainement.py
Normal file
@@ -0,0 +1,139 @@
|
||||
from __future__ import absolute_import, division, print_function, unicode_literals
|
||||
import tensorflow as tf
|
||||
from tensorflow.keras import layers
|
||||
import numpy as np
|
||||
import os
|
||||
import cv2
|
||||
import time
|
||||
import math
|
||||
import re
|
||||
|
||||
taille_batch=128
|
||||
nbr_entrainement=10000
|
||||
bruit_dim=100
|
||||
nbr_exemples=36
|
||||
dir_faces='faces/'
|
||||
|
||||
def generateur_model():
    """Build the DCGAN generator: bruit_dim-d noise -> 128x128x3 image in [0, 1].

    Dense 4*4*1024 -> reshape -> five stride-2 transposed convolutions
    (512, 256, 128, 64, 3 filters), doubling the spatial size each time:
    4 -> 8 -> 16 -> 32 -> 64 -> 128.
    """
    model=tf.keras.Sequential()

    model.add(layers.Dense(4*4*1024, use_bias=False, input_shape=(bruit_dim,)))
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())
    model.add(layers.Reshape((4, 4, 1024)))

    model.add(layers.Conv2DTranspose(512, (5, 5), strides=(2, 2), padding='same', use_bias=False))
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())

    model.add(layers.Conv2DTranspose(256, (5, 5), strides=(2, 2), padding='same', use_bias=False))
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())

    # NOTE(review): the next two blocks have no LeakyReLU after the
    # BatchNormalization — possibly an oversight, but GAN_gen.py builds the
    # identical architecture and saved checkpoints depend on it, so kept.
    model.add(layers.Conv2DTranspose(128, (5, 5), strides=(2, 2), padding='same', use_bias=False))
    model.add(layers.BatchNormalization())

    model.add(layers.Conv2DTranspose(64, (5, 5), strides=(2, 2), padding='same', use_bias=False))
    model.add(layers.BatchNormalization())

    # Sigmoid output keeps pixel values in [0, 1] (training images are /255).
    model.add(layers.Conv2DTranspose(3, (5, 5), strides=(2, 2), padding='same', use_bias=False, activation='sigmoid'))

    return model
|
||||
|
||||
def discriminateur_model():
    """Build the DCGAN discriminator: 128x128x3 image -> single real/fake logit."""
    model=tf.keras.Sequential()

    # One stride-2 conv block per entry: (filters, kernel, dropout rate);
    # a rate of None means no Dropout layer after that block.
    blocs=[
        (64, (5, 5), None),
        (128, (5, 5), None),
        (256, (3, 3), 0.2),
        (512, (3, 3), 0.3),
        (512, (3, 3), 0.3),
    ]
    premier=True
    for filtres, noyau, taux in blocs:
        if premier:
            model.add(layers.Conv2D(filtres, noyau, strides=(2, 2), padding='same', input_shape=[128, 128, 3]))
            premier=False
        else:
            model.add(layers.Conv2D(filtres, noyau, strides=(2, 2), padding='same'))
        model.add(layers.LeakyReLU())
        model.add(layers.BatchNormalization())
        if taux is not None:
            model.add(layers.Dropout(taux))

    model.add(layers.Flatten())
    model.add(layers.Dense(1024, activation=tf.nn.relu))
    # Raw logit: the loss uses BinaryCrossentropy(from_logits=True).
    model.add(layers.Dense(1))

    return model
|
||||
|
||||
@tf.function
def train_step(vrais_visages):
    # One adversarial step: sample noise, generate fakes, score real and fake
    # batches with the discriminator, then update both networks from their
    # respective losses (real label = 1, fake label = 0; losses operate on
    # raw logits via BinaryCrossentropy(from_logits=True)).
    bruit=tf.random.normal([taille_batch, bruit_dim])
    with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
        faux_visages=generateur(bruit, training=True)

        prediction_vrais_visages=discriminateur(vrais_visages, training=True)
        prediction_faux_visages =discriminateur(faux_visages , training=True)

        # Generator wants its fakes classified as real (ones).
        generateur_loss    =cross_entropy(tf.ones_like (prediction_faux_visages ), prediction_faux_visages)
        # Discriminator wants reals -> 1 and fakes -> 0.
        discriminateur_loss=cross_entropy(tf.ones_like (prediction_vrais_visages), prediction_vrais_visages)+\
                            cross_entropy(tf.zeros_like(prediction_faux_visages ), prediction_faux_visages )

    gradients_generateur    =gen_tape.gradient(generateur_loss    , generateur.trainable_variables)
    gradients_discriminateur=disc_tape.gradient(discriminateur_loss, discriminateur.trainable_variables)

    generateur_optimizer.apply_gradients    (zip(gradients_generateur    , generateur.trainable_variables))
    discriminateur_optimizer.apply_gradients(zip(gradients_discriminateur, discriminateur.trainable_variables))
|
||||
|
||||
def train(dataset, nbr_entrainement, bruit_pour_exemple=None):
    """Resume (when possible) and run the GAN training loop.

    The last completed epoch is recovered from the numbering of the
    img_XXXXX.png sample mosaics written by generatation_exemples(); the
    latest checkpoint (saved every 100 epochs) is restored before resuming.
    """
    derniere_epoque=0
    for fichier in sorted(os.listdir('.')):
        correspondance=re.search('img_(.+?).png', fichier)
        if correspondance:
            derniere_epoque=int(correspondance.group(1))
    checkpoint.restore(tf.train.latest_checkpoint("./training_checkpoints/"))
    for entrainement in range(derniere_epoque, nbr_entrainement):
        debut=time.time()
        for image_batch in dataset:
            train_step(image_batch)
        if bruit_pour_exemple is not None:
            # Save a sample mosaic from the fixed noise after every epoch.
            generatation_exemples(generateur, entrainement+1, bruit_pour_exemple)
        if (entrainement+1)%100==0:
            checkpoint.save(file_prefix="./training_checkpoints/ckpt")
        print ('Entrainement {}: temps {} secondes'.format(entrainement+1, time.time()-debut))
|
||||
|
||||
def generatation_exemples(model, entrainement, bruit_pour_exemple):
    """Generate sample faces and save them as one img_XXXXX.png mosaic.

    The len(bruit_pour_exemple) generated 128x128 tiles are laid out on an
    n x n grid, where n is the integer square root of the sample count.
    """
    images_test=model(bruit_pour_exemple, training=False)
    cote=int(math.sqrt(len(bruit_pour_exemple)))
    mosaique=np.zeros(shape=(128*cote, 128*cote, 3), dtype=np.float32)
    for ligne in range(cote):
        for colonne in range(cote):
            y0=ligne*128
            x0=colonne*128
            mosaique[y0:y0+128, x0:x0+128, :]=images_test[ligne*cote+colonne]
    # Generator outputs are in [0, 1]; rescale to 8-bit for cv2.imwrite.
    cv2.imwrite('img_{:05d}.png'.format(entrainement), mosaique*255)
|
||||
|
||||
# Load the face dataset: every .jpg under dir_faces, resized to 128x128 and
# scaled to [0, 1].
train_images=[]
for file in os.listdir(dir_faces):
    if file.endswith("jpg"):
        img=cv2.imread(dir_faces+file, cv2.IMREAD_COLOR)
        if img is not None:  # skip unreadable files
            train_images.append(cv2.resize(img, (128, 128)))
train_images=np.array(train_images, dtype=np.float32)/255

# Build both networks, the shared loss, the two optimisers, and a checkpoint
# capturing everything needed to resume training.
generateur=generateur_model()
discriminateur=discriminateur_model()
cross_entropy=tf.keras.losses.BinaryCrossentropy(from_logits=True)
generateur_optimizer=tf.keras.optimizers.Adam(1e-4)
discriminateur_optimizer=tf.keras.optimizers.Adam(1e-4)
checkpoint=tf.train.Checkpoint(generateur_optimizer=generateur_optimizer,
                               discriminateur_optimizer=discriminateur_optimizer,
                               generateur=generateur,
                               discriminateur=discriminateur)
# Fixed noise so the per-epoch sample mosaics are comparable over time.
bruit_pour_exemple=tf.random.normal([nbr_exemples, bruit_dim])
train_dataset=tf.data.Dataset.from_tensor_slices(train_images).batch(taille_batch)
train(train_dataset ,nbr_entrainement, bruit_pour_exemple)
|
||||
80
Tensorflow/tutoriel24/GAN_gen.py
Normal file
@@ -0,0 +1,80 @@
|
||||
from __future__ import absolute_import, division, print_function, unicode_literals
|
||||
import tensorflow as tf
|
||||
from tensorflow.keras import layers
|
||||
import numpy as np
|
||||
import os
|
||||
import cv2
|
||||
import time
|
||||
import math
|
||||
import re
|
||||
|
||||
batch_size=128
|
||||
epochs=10000
|
||||
noise_dim=100
|
||||
num_examples_to_generate=36
|
||||
|
||||
def make_generator_model():
    """Build the DCGAN generator: noise_dim-d noise -> 128x128x3 image in [0, 1].

    Must stay layer-for-layer identical to generateur_model() in
    GAN_entrainement.py, otherwise the training checkpoints cannot be
    restored into it.
    """
    model=tf.keras.Sequential()

    model.add(layers.Dense(4*4*1024, use_bias=False, input_shape=(noise_dim,)))
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())

    model.add(layers.Reshape((4, 4, 1024)))

    model.add(layers.Conv2DTranspose(512, (5, 5), strides=(2, 2), padding='same', use_bias=False))
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())

    model.add(layers.Conv2DTranspose(256, (5, 5), strides=(2, 2), padding='same', use_bias=False))
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())

    # NOTE(review): no LeakyReLU after the next two blocks — mirrors the
    # training-side architecture; kept for checkpoint compatibility.
    model.add(layers.Conv2DTranspose(128, (5, 5), strides=(2, 2), padding='same', use_bias=False))
    model.add(layers.BatchNormalization())

    model.add(layers.Conv2DTranspose(64, (5, 5), strides=(2, 2), padding='same', use_bias=False))
    model.add(layers.BatchNormalization())

    # Sigmoid output keeps pixel values in [0, 1].
    model.add(layers.Conv2DTranspose(3, (5, 5), strides=(2, 2), padding='same', use_bias=False, activation='sigmoid'))

    return model
|
||||
|
||||
# Rebuild the generator and restore its weights from a training checkpoint.
generator=make_generator_model()
checkpoint=tf.train.Checkpoint(generator=generator)

checkpoint.restore("./training_checkpoints/ckpt-90")

# Interactive latent-space explorer:
#   q     : quit
#   o / l : select the next / previous latent dimension d
#   p / m : add +0.5 / -0.5 to dimension d of the current noise vector
#   other : draw a fresh random noise vector
d=50     # currently selected latent dimension
flag=1   # 1 -> resample the noise vector on the next loop iteration
while True:
    if flag:
        bruit=tf.random.normal([1, noise_dim])
    test_images=generator(bruit, training=False)
    v=bruit[0][d]
    img=np.float32(test_images[0])
    print(">>> {}:{:6.8f}".format(d, v))
    cv2.imshow("image", img)
    flag=1
    key=cv2.waitKey()
    if key==ord('q'):
        quit()
    if key==ord('o'):
        # NOTE(review): bruit[0] has noise_dim (=100) entries, valid indices
        # 0..99, but min(100, d+1) allows d==100 -> out-of-range access at
        # bruit[0][d]. Likewise max(1, d-1) never reaches index 0. Confirm
        # the intended bounds.
        d=min(100, d+1)
        flag=0
    if key==ord('l'):
        d=max(1, d-1)
        flag=0
    if key==ord('p'):
        # Nudge dimension d upward; the image regenerates from the modified
        # noise on the next iteration (flag=0 prevents resampling).
        u=np.zeros([1, noise_dim], dtype=np.float32)
        u[0][d]=0.5
        tu=tf.convert_to_tensor(u, dtype=tf.float32)
        bruit=tf.add(bruit, tu)
        flag=0
    if key==ord('m'):
        u=np.zeros([1, noise_dim], dtype=np.float32)
        u[0][d]=-0.5
        tu=tf.convert_to_tensor(u, dtype=tf.float32)
        bruit=tf.add(bruit, tu)
        flag=0
||||
4
Tensorflow/tutoriel24/README.md
Normal file
@@ -0,0 +1,4 @@
|
||||
# Tutoriel tensorflow
|
||||
## Réseau GAN: générons des visages !
|
||||
|
||||
La vidéo de ce tutoriel est disponible à l'adresse suivante: https://www.youtube.com/watch?v=_5OCETzAVDs
|
||||
5
Tensorflow/tutoriel27-2/README.md
Normal file
@@ -0,0 +1,5 @@
|
||||
# Tutoriel tensorflow
|
||||
## Réseau Yolo: non maximum suppression
|
||||
|
||||
La vidéo de ce tutoriel est disponible à l'adresse suivante: https://www.youtube.com/watch?v=nu7stszOKJA
|
||||
|
||||
201
Tensorflow/tutoriel27-2/common.py
Normal file
@@ -0,0 +1,201 @@
|
||||
import tensorflow as tf
|
||||
from tensorflow.keras import layers, models
|
||||
import json
|
||||
import random
|
||||
import cv2
|
||||
import numpy as np
|
||||
import math
|
||||
import config
|
||||
|
||||
def sigmoid(x):
    """Numerically safe logistic function 1/(1+exp(-x)).

    Inputs are clamped to [-50, 50] first so np.exp never overflows;
    works on scalars and arrays alike.
    """
    borne=np.clip(x, -50, 50)
    return 1/(1+np.exp(-borne))
|
||||
|
||||
def softmax(x):
    """Softmax of *x*, normalised over all elements.

    Numerical-stability fix: the maximum is subtracted before
    exponentiation — mathematically a no-op for softmax, but it prevents
    np.exp from overflowing to inf/nan for large logits (the previous
    version returned nan in that case).
    """
    e=np.exp(x-np.max(x))
    e_sum=np.sum(e)
    return e/e_sum
|
||||
|
||||
def prepare_image(image, labels, grille=True):
    """Return a copy of *image* with the YOLO-style labels drawn on it.

    image: BGR image sized (config.hauteur, config.largeur) — TODO confirm.
    labels: array (cellule_y, cellule_x, nbr_boxes, 5+nbr_classes); the last
        axis is (x_center, y_center, w, h, objectness, one-hot classes),
        coordinates expressed in grid-cell units (r_x/r_y pixels per cell).
    grille: when True, also draw the detection grid.
    """
    img=image.copy()

    if grille is True:
        # Draw the cell grid line by line.
        for x in range(config.r_x, config.largeur+config.r_x, config.r_x):
            for y in range(config.r_y, config.hauteur+config.r_y, config.r_y):
                cv2.line(img, (0, y), (x, y), (0, 0, 0), 1)
                cv2.line(img, (x, 0), (x, y), (0, 0, 0), 1)

    for y in range(config.cellule_y):
        for x in range(config.cellule_x):
            for box in range(config.nbr_boxes):
                if labels[y, x, box, 4]:  # objectness flag: a box is present
                    # Class index from the one-hot tail of the label vector.
                    ids=np.argmax(labels[y, x, box, 5:])
                    # Convert cell units to pixels. NOTE(review): assumes the
                    # stored x/y already include the cell offset — confirm
                    # against the label-building code.
                    x_center=int(labels[y, x, box, 0]*config.r_x)
                    y_center=int(labels[y, x, box, 1]*config.r_y)
                    w_2=int(labels[y, x, box, 2]*config.r_x/2)
                    h_2=int(labels[y, x, box, 3]*config.r_y/2)
                    x_min=x_center-w_2
                    y_min=y_center-h_2
                    x_max=x_center+w_2
                    y_max=y_center+h_2
                    # Box colour comes from the per-class table config.dict.
                    cv2.rectangle(img, (x_min, y_min), (x_max, y_max), list(config.dict.values())[ids], 1)
                    cv2.circle(img, (x_center, y_center), 1, list(config.dict.values())[ids], 2)

    return img
|
||||
|
||||
def bruit(image):
    """Return a copy of *image* with additive Gaussian noise.

    The noise standard deviation is drawn uniformly from [5, 30]; the
    result is clipped back to the valid [0, 255] range and cast to uint8.
    """
    hauteur, largeur, canaux=image.shape
    gaussien=np.random.randn(hauteur, largeur, canaux)*random.randint(5, 30)
    return np.clip(image+gaussien, 0, 255).astype(np.uint8)
|
||||
|
||||
def gamma(image, alpha=1.0, beta=0.0):
    """Linear brightness/contrast adjustment: clip(alpha*image + beta, 0, 255).

    alpha scales contrast, beta shifts brightness; output is uint8.
    """
    ajuste=alpha*image+beta
    return np.clip(ajuste, 0, 255).astype(np.uint8)
|
||||
|
||||
def intersection_over_union(boxA, boxB):
    """IoU of two axis-aligned boxes given as (x_min, y_min, x_max, y_max).

    Robustness fix: returns 0.0 when the union area is zero (both boxes
    degenerate) instead of dividing by zero, which previously produced nan.
    """
    # Corners of the intersection rectangle.
    xA=np.maximum(boxA[0], boxB[0])
    yA=np.maximum(boxA[1], boxB[1])
    xB=np.minimum(boxA[2], boxB[2])
    yB=np.minimum(boxA[3], boxB[3])
    # Clamp at 0 so disjoint boxes give an empty intersection.
    interArea=np.maximum(0, xB-xA)*np.maximum(0, yB-yA)
    boxAArea=(boxA[2]-boxA[0])*(boxA[3]-boxA[1])
    boxBArea=(boxB[2]-boxB[0])*(boxB[3]-boxB[1])
    union=boxAArea+boxBArea-interArea
    if union<=0:
        # Both boxes are empty: define IoU as 0 rather than nan.
        return 0.0
    return interArea/union
|
||||
|
||||
def prepare_labels(fichier_image, objects, coeff=None):
    """Load an image, apply random augmentation (zoom, crop, gamma jitter,
    noise, flips) and build the YOLO training labels.

    Returns (image, label, label2), or (None, None, None) when the sample
    is rejected (no trophozoite before or after augmentation).
    label:  (cellule_y, cellule_x, nbr_boxes, 5+nbr_classes) grid tensor,
            coordinates in cell units.
    label2: (max_objet, 7) flat rows [x_min, y_min, x_max, y_max, area, 1, class].
    """
    image = cv2.imread(fichier_image)

    ######################
    # Keep only images containing at least one trophozoite (class id 4).
    trophozoite = 0
    for o in objects:
        if config.dict2.index(o['category']) == 4:
            trophozoite = 1
            break
    if trophozoite == 0:
        return None, None, None
    ######################

    # Random zoom plus photometric jitter and noise.
    if coeff is None:
        coeff = random.uniform(1.1, 2.5)
    image_r = cv2.resize(image, (int(coeff * config.largeur), int(coeff * config.hauteur)))
    image_r = gamma(image_r, random.uniform(0.7, 1.3), np.random.randint(60) - 30)
    image_r = bruit(image_r)

    # Random crop origin inside the zoomed image.
    if coeff == 1:
        shift_x = 0
        shift_y = 0
    else:
        shift_x = np.random.randint(image_r.shape[1] - config.largeur)
        shift_y = np.random.randint(image_r.shape[0] - config.hauteur)

    ratio_x = coeff * config.largeur / image.shape[1]
    ratio_y = coeff * config.hauteur / image.shape[0]

    # flip: 0 = both axes, 1 = vertical, 2 = horizontal, 3 = none
    # (cv2.flip takes -1 / 0 / 1 respectively).
    flip = np.random.randint(4)
    if flip != 3:
        image_r = cv2.flip(image_r, flip - 1)

    label = np.zeros((config.cellule_y, config.cellule_x, config.nbr_boxes, 5 + config.nbr_classes), dtype=np.float32)
    label2 = np.zeros((config.max_objet, 7), dtype=np.float32)

    nbr_objet = 0
    for o in objects:
        id_class = config.dict2.index(o['category'])
        box = o['bounding_box']

        # Box corners in zoomed-image pixels, mirrored according to flip.
        if flip == 3:
            x_min = int(box['minimum']['c'] * ratio_x)
            y_min = int(box['minimum']['r'] * ratio_y)
            x_max = int(box['maximum']['c'] * ratio_x)
            y_max = int(box['maximum']['r'] * ratio_y)
        if flip == 2:
            x_min = int((image.shape[1] - box['maximum']['c']) * ratio_x)
            y_min = int(box['minimum']['r'] * ratio_y)
            x_max = int((image.shape[1] - box['minimum']['c']) * ratio_x)
            y_max = int(box['maximum']['r'] * ratio_y)
        if flip == 1:
            x_min = int(box['minimum']['c'] * ratio_x)
            y_min = int((image.shape[0] - box['maximum']['r']) * ratio_y)
            x_max = int(box['maximum']['c'] * ratio_x)
            y_max = int((image.shape[0] - box['minimum']['r']) * ratio_y)
        if flip == 0:
            x_min = int((image.shape[1] - box['maximum']['c']) * ratio_x)
            y_min = int((image.shape[0] - box['maximum']['r']) * ratio_y)
            x_max = int((image.shape[1] - box['minimum']['c']) * ratio_x)
            y_max = int((image.shape[0] - box['minimum']['r']) * ratio_y)

        # Discard boxes that do not fall entirely inside the crop.
        if x_min < shift_x or y_min < shift_y or x_max > (shift_x + config.largeur) or y_max > (shift_y + config.hauteur):
            continue

        # Convert to cell units relative to the crop origin.
        x_min = (x_min - shift_x) / config.r_x
        y_min = (y_min - shift_y) / config.r_y
        x_max = (x_max - shift_x) / config.r_x
        y_max = (y_max - shift_y) / config.r_y

        area = (x_max - x_min) * (y_max - y_min)
        label2[nbr_objet] = [x_min, y_min, x_max, y_max, area, 1, id_class]

        x_centre = int(x_min + (x_max - x_min) / 2)
        y_centre = int(y_min + (y_max - y_min) / 2)
        x_cell = int(x_centre)
        y_cell = int(y_centre)

        # Candidate anchor boxes centred on the cell; keep the best IoU.
        a_x_min = x_centre - config.anchors[:, 0] / 2
        a_y_min = y_centre - config.anchors[:, 1] / 2
        a_x_max = x_centre + config.anchors[:, 0] / 2
        a_y_max = y_centre + config.anchors[:, 1] / 2

        id_a = 0
        best_iou = 0
        for i in range(len(config.anchors)):
            iou = intersection_over_union([x_min, y_min, x_max, y_max],
                                          [a_x_min[i], a_y_min[i], a_x_max[i], a_y_max[i]])
            if iou > best_iou:
                best_iou = iou
                id_a = i

        label[y_cell, x_cell, id_a, 0] = (x_max + x_min) / 2
        label[y_cell, x_cell, id_a, 1] = (y_max + y_min) / 2
        label[y_cell, x_cell, id_a, 2] = x_max - x_min
        label[y_cell, x_cell, id_a, 3] = y_max - y_min
        label[y_cell, x_cell, id_a, 4] = 1.
        label[y_cell, x_cell, id_a, 5 + id_class] = 1.

        nbr_objet = nbr_objet + 1
        if nbr_objet == config.max_objet:
            print("Nbr objet max atteind !!!!!")
            break

    ######################
    # Reject the crop when flip/crop removed every trophozoite.
    trophozoite = 0
    for y in range(config.cellule_y):
        for x in range(config.cellule_x):
            for b in range(config.nbr_boxes):
                if np.argmax(label[y, x, b, 5:]) == 4:
                    trophozoite = 1
    if not trophozoite:
        return None, None, None
    ######################

    return image_r[shift_y:shift_y + config.hauteur, shift_x:shift_x + config.largeur], label, label2
|
||||
|
||||
def read_json(file, nbr=1, nbr_fichier=None):
    """Build the dataset arrays from an annotation JSON file.

    nbr:         augmented samples generated per annotated image.
    nbr_fichier: optional cap on the number of images processed.
    Returns (images, labels, labels2) as numpy arrays; rejected samples
    (prepare_labels returning None) are simply skipped.
    """
    images = []
    labels = []
    labels2 = []
    with open(file) as json_file:
        data = json.load(json_file)
        compteur = 0
        for p in data:
            print(compteur, p['image']['pathname'])
            compteur += 1
            for _ in range(nbr):
                image, label, label2 = prepare_labels("./{}".format(p['image']['pathname']), p['objects'])
                if image is not None:
                    images.append(image)
                    labels.append(label)
                    labels2.append(label2)
            if nbr_fichier is not None and compteur == nbr_fichier:
                break
    return np.array(images), np.array(labels), np.array(labels2)
|
||||
33
Tensorflow/tutoriel27-2/config.py
Normal file
@@ -0,0 +1,33 @@
|
||||
import numpy as np

# Class name -> BGR drawing colour; the insertion order defines class ids.
dict = {'leukocyte':      (255, 255, 0),
        'red blood cell': (0, 0, 255),
        'ring':           (0, 255, 0),
        'schizont':       (255, 0, 255),
        'trophozoite':    (255, 0, 0),
        'difficult':      (0, 0, 0),
        'gametocyte':     (0, 255, 255)}
# Class names indexed by id (dicts preserve insertion order).
dict2 = list(dict)

# Input image size (pixels) and detection grid resolution.
largeur = 256
hauteur = 192
cellule_x = 16
cellule_y = 12
nbr_classes = len(dict)
r_x = largeur // cellule_x   # pixels per cell, horizontally
r_y = hauteur // cellule_y   # pixels per cell, vertically
max_objet = 60

# Anchor box sizes (width, height) in cell units.
anchors = np.array([[3.0, 1.5], [2.0, 2.0], [1.5, 3.0]])
nbr_boxes = len(anchors)

batch_size = 16

# YOLO loss weights.
lambda_coord = 5
lambda_noobj = 0.5
#lambda_coord=1
#lambda_noobj=1

# IoU threshold above which an unassigned box is not penalised as "no object".
seuil_iou_loss = 0.6
|
||||
16
Tensorflow/tutoriel27-2/images.py
Normal file
@@ -0,0 +1,16 @@
|
||||
import cv2
import numpy as np
import common
import config

# Load a few augmented samples and display them one by one ('q' quits).
images, labels, labels2 = common.read_json('training.json', 10, 10)
melange = np.random.permutation(len(images))
images = images[melange]
labels = labels[melange]

for i in range(len(images)):
    image = common.prepare_image(images[i], labels[i], False)
    cv2.imshow("image", cv2.resize(image, (2 * config.largeur, 2 * config.hauteur)))
    if cv2.waitKey() & 0xFF == ord('q'):
        break
|
||||
|
||||
79
Tensorflow/tutoriel27-2/inference.py
Normal file
@@ -0,0 +1,79 @@
|
||||
import tensorflow as tf
import sys
import time
import cv2
import numpy as np
import math
import common
import config
import model

# Test data, normalised to [0, 1] and shuffled.
images, labels, labels2 = common.read_json('test.json', 5, 30)
images = np.array(images, dtype=np.float32) / 255
labels = np.array(labels, dtype=np.float32)
melange = np.random.permutation(len(images))
images = images[melange]
labels = labels[melange]

model = model.model(config.nbr_classes, config.nbr_boxes, config.cellule_y, config.cellule_x)

# Restore the weights from the latest training checkpoint.
checkpoint = tf.train.Checkpoint(model=model)
checkpoint.restore(tf.train.latest_checkpoint("./training/"))

# Cell-offset grid, one (x, y) pair per anchor box of every cell.
grid = np.meshgrid(np.arange(config.cellule_x, dtype=np.float32), np.arange(config.cellule_y, dtype=np.float32))
grid = np.expand_dims(np.stack(grid, axis=-1), axis=2)
grid = np.tile(grid, (1, 1, config.nbr_boxes, 1))

for i in range(len(images)):
    img = common.prepare_image(images[i], labels[i], False)
    img2 = images[i].copy()
    predictions = model(np.array([images[i]]))
    pred_boxes = predictions[0, :, :, :, 0:4]
    pred_conf = common.sigmoid(predictions[0, :, :, :, 4])
    pred_classes = common.softmax(predictions[0, :, :, :, 5:])
    ids = np.argmax(pred_classes, axis=-1)

    # Decode the YOLO parametrisation back to pixel coordinates.
    x_center = ((grid[:, :, :, 0] + common.sigmoid(pred_boxes[:, :, :, 0])) * config.r_x)
    y_center = ((grid[:, :, :, 1] + common.sigmoid(pred_boxes[:, :, :, 1])) * config.r_y)
    w = (np.exp(pred_boxes[:, :, :, 2]) * config.anchors[:, 0] * config.r_x)
    h = (np.exp(pred_boxes[:, :, :, 3]) * config.anchors[:, 1] * config.r_y)

    x_min = (x_center - w / 2).astype(np.int32)
    y_min = (y_center - h / 2).astype(np.int32)
    x_max = (x_center + w / 2).astype(np.int32)
    y_max = (y_center + h / 2).astype(np.int32)

    # Draw every raw detection above the confidence threshold.
    for y in range(config.cellule_y):
        for x in range(config.cellule_x):
            for b in range(config.nbr_boxes):
                if pred_conf[y, x, b] > 0.10:
                    color = list(config.dict.values())[ids[y, x, b]]
                    cv2.circle(images[i], (x_center[y, x, b], y_center[y, x, b]), 1, color, 2)
                    cv2.rectangle(images[i], (x_min[y, x, b], y_min[y, x, b]), (x_max[y, x, b], y_max[y, x, b]), color, 1)
                    cv2.rectangle(images[i], (x_min[y, x, b], y_min[y, x, b]), (x_max[y, x, b], y_min[y, x, b] - 15), color, cv2.FILLED)
                    cv2.putText(images[i], "{:3.0%}".format(pred_conf[y, x, b]), (x_min[y, x, b], y_min[y, x, b] - 5), cv2.FONT_HERSHEY_COMPLEX_SMALL , 0.5, (255, 255, 255), 1)

    # Same detections after non-max suppression.
    tab_boxes = np.stack([y_min, x_min, y_max, x_max], axis=-1).reshape(-1, 4).astype(np.float32)
    pred_conf = pred_conf.reshape(-1)
    ids = ids.reshape(-1)
    tab_index = tf.image.non_max_suppression(tab_boxes, pred_conf, 42)

    for id in tab_index:
        if pred_conf[id] > 0.10:
            x_min = tab_boxes[id, 1]
            y_min = tab_boxes[id, 0]
            x_max = tab_boxes[id, 3]
            y_max = tab_boxes[id, 2]

            color = list(config.dict.values())[ids[id]]
            cv2.rectangle(img2, (x_min, y_min), (x_max, y_max), color, 1)
            cv2.rectangle(img2, (x_min, y_min), (x_max, int(y_min - 15)), color, cv2.FILLED)
            cv2.putText(img2, "{:3.0%}".format(pred_conf[id]), (x_min, int(y_min - 5)), cv2.FONT_HERSHEY_COMPLEX_SMALL , 0.5, (255, 255, 255), 1)  # {%} ???

    cv2.imshow("Inference", images[i])
    cv2.imshow("Bonne reponse", img)
    cv2.imshow("Non max suppression", img2)

    key = cv2.waitKey() & 0xFF
    if key == ord('q'):
        quit()
|
||||
54
Tensorflow/tutoriel27-2/model.py
Normal file
@@ -0,0 +1,54 @@
|
||||
import tensorflow as tf
|
||||
from tensorflow.keras import layers, models
|
||||
import config
|
||||
|
||||
def block_resnet(input, filters, kernel_size, reduce=False):
    """Two-convolution residual block.

    When reduce is True the second convolution (and the shortcut) use
    stride 2, halving the spatial resolution.  The shortcut is the
    identity only when the input already has `filters` channels and no
    reduction is requested; otherwise a 1x1 convolution adapts it.
    """
    stride2 = 2 if reduce is True else 1

    result = layers.Conv2D(filters, kernel_size, strides=1, padding='SAME')(input)
    result = layers.BatchNormalization()(result)
    result = layers.LeakyReLU(alpha=0.1)(result)
    result = layers.Conv2D(filters, kernel_size, strides=stride2, padding='SAME')(result)

    # The four original shortcut branches collapse to this one test:
    # identity iff channels match and there is no spatial reduction.
    if input.shape[-1] == filters and reduce is not True:
        shortcut = input
    else:
        shortcut = layers.Conv2D(filters, 1, strides=stride2, padding='SAME')(input)

    result = layers.add([result, shortcut])
    result = layers.LeakyReLU(alpha=0.1)(result)
    result = layers.BatchNormalization()(result)
    return result
|
||||
|
||||
def model(nbr_classes, nbr_boxes, cellule_y, cellule_x):
    """Build the YOLO detection network.

    BUG FIX: Keras channels-last inputs are (height, width, channels).
    The Input shape was (config.largeur, config.hauteur, 3) =
    (width, height, 3) = (256, 192, 3), which does not match the
    (hauteur, largeur, 3) = (192, 256, 3) images produced by common.py,
    and only a (192, 256) input downsamples to the (cellule_y, cellule_x)
    = (12, 16) grid that the final Reshape assumes.
    """
    entree = layers.Input(shape=(config.hauteur, config.largeur, 3), dtype='float32')

    # Four stride-2 stages: (192, 256) -> (12, 16) = (cellule_y, cellule_x).
    result = block_resnet(entree, 16, 3, False)
    result = block_resnet(result, 16, 3, True)

    result = block_resnet(result, 32, 3, False)
    result = block_resnet(result, 32, 3, True)

    result = block_resnet(result, 64, 3, False)
    result = block_resnet(result, 64, 3, False)
    result = block_resnet(result, 64, 3, True)

    result = block_resnet(result, 128, 3, False)
    result = block_resnet(result, 128, 3, False)
    result = block_resnet(result, 128, 3, True)

    # One (x, y, w, h, conf, classes...) vector per anchor box per cell.
    result = layers.Conv2D(config.nbr_boxes * (5 + config.nbr_classes), 1, padding='SAME')(result)
    sortie = layers.Reshape((config.cellule_y, config.cellule_x, config.nbr_boxes, 5 + config.nbr_classes))(result)

    model = models.Model(inputs=entree, outputs=sortie)

    return model
|
||||
|
||||
117
Tensorflow/tutoriel27-2/train.py
Normal file
@@ -0,0 +1,117 @@
|
||||
import tensorflow as tf
import sys
import time
import cv2
import numpy as np
import common
import config
import model

batch_size = 16

# Training data, normalised to [0, 1] and shuffled.
# NOTE(review): labels2 is not reshuffled with the same permutation even
# though my_loss reads it — confirm whether that is intentional.
images, labels, labels2 = common.read_json('training.json', 20)
images = np.array(images, dtype=np.float32) / 255
labels = np.array(labels, dtype=np.float32)
melange = np.random.permutation(len(images))
images = images[melange]
labels = labels[melange]

print("Nbr images:", len(images))

train_ds = tf.data.Dataset.from_tensor_slices((images, labels)).batch(batch_size)
|
||||
|
||||
def my_loss(labels, preds):
    """YOLO loss: weighted coordinate + confidence + class terms, per sample.

    labels: (batch, cellule_y, cellule_x, nbr_boxes, 5+nbr_classes) grid truth.
    preds:  raw network output of the same shape.
    Also reads the module-level `labels2` (flat ground-truth boxes) to
    build the no-object mask.
    """
    # (cellule_y, cellule_x, nbr_boxes, 2) grid of cell offsets.
    grid = tf.meshgrid(tf.range(config.cellule_x, dtype=tf.float32), tf.range(config.cellule_y, dtype=tf.float32))
    grid = tf.expand_dims(tf.stack(grid, axis=-1), axis=2)
    grid = tf.tile(grid, (1, 1, config.nbr_boxes, 1))

    # Decode predictions: cell-absolute xy, raw wh, sigmoid conf/classes.
    preds_xy = tf.math.sigmoid(preds[:, :, :, :, 0:2]) + grid
    preds_wh = preds[:, :, :, :, 2:4]
    preds_conf = tf.math.sigmoid(preds[:, :, :, :, 4])
    preds_classe = tf.math.sigmoid(preds[:, :, :, :, 5:])

    preds_wh_half = preds_wh / 2
    preds_xymin = preds_xy - preds_wh_half
    preds_xymax = preds_xy + preds_wh_half
    preds_areas = preds_wh[:, :, :, :, 0] * preds_wh[:, :, :, :, 1]

    l2_xy_min = labels2[:, :, 0:2]
    l2_xy_max = labels2[:, :, 2:4]
    l2_area = labels2[:, :, 4]

    # Extra axis so every predicted box broadcasts against every true box.
    preds_xymin = tf.expand_dims(preds_xymin, 4)
    preds_xymax = tf.expand_dims(preds_xymax, 4)
    preds_areas = tf.expand_dims(preds_areas, 4)

    labels_xy = labels[:, :, :, :, 0:2]
    # wh target is log(wh / anchor); -inf from empty cells is zeroed out.
    labels_wh = tf.math.log(labels[:, :, :, :, 2:4] / config.anchors)
    labels_wh = tf.where(tf.math.is_inf(labels_wh), tf.zeros_like(labels_wh), labels_wh)

    conf_mask_obj = labels[:, :, :, :, 4]
    labels_classe = labels[:, :, :, :, 5:]

    # An unassigned box is penalised as "no object" only when its best IoU
    # with every ground-truth box stays below the threshold.
    conf_mask_noobj = []
    for i in range(len(preds)):
        xy_min = tf.maximum(preds_xymin[i], l2_xy_min[i])
        xy_max = tf.minimum(preds_xymax[i], l2_xy_max[i])
        intersect_wh = tf.maximum(xy_max - xy_min, 0.)
        intersect_areas = intersect_wh[..., 0] * intersect_wh[..., 1]
        union_areas = preds_areas[i] + l2_area[i] - intersect_areas
        ious = tf.truediv(intersect_areas, union_areas)
        best_ious = tf.reduce_max(ious, axis=3)
        conf_mask_noobj.append(tf.cast(best_ious < config.seuil_iou_loss, tf.float32) * (1 - conf_mask_obj[i]))
    conf_mask_noobj = tf.stack(conf_mask_noobj)

    preds_x = preds_xy[..., 0]
    preds_y = preds_xy[..., 1]
    preds_w = preds_wh[..., 0]
    preds_h = preds_wh[..., 1]
    labels_x = labels_xy[..., 0]
    labels_y = labels_xy[..., 1]
    labels_w = labels_wh[..., 0]
    labels_h = labels_wh[..., 1]

    loss_xy = tf.reduce_sum(conf_mask_obj * (tf.math.square(preds_x - labels_x) + tf.math.square(preds_y - labels_y)), axis=(1, 2, 3))
    loss_wh = tf.reduce_sum(conf_mask_obj * (tf.math.square(preds_w - labels_w) + tf.math.square(preds_h - labels_h)), axis=(1, 2, 3))

    loss_conf_obj = tf.reduce_sum(conf_mask_obj * tf.math.square(preds_conf - conf_mask_obj), axis=(1, 2, 3))
    loss_conf_noobj = tf.reduce_sum(conf_mask_noobj * tf.math.square(preds_conf - conf_mask_obj), axis=(1, 2, 3))

    loss_classe = tf.reduce_sum(tf.math.square(preds_classe - labels_classe), axis=4)
    loss_classe = tf.reduce_sum(conf_mask_obj * loss_classe, axis=(1, 2, 3))

    return (config.lambda_coord * loss_xy
            + config.lambda_coord * loss_wh
            + loss_conf_obj
            + config.lambda_noobj * loss_conf_noobj
            + loss_classe)
|
||||
|
||||
model = model.model(config.nbr_classes, config.nbr_boxes, config.cellule_y, config.cellule_x)

@tf.function
def train_step(images, labels):
    """One optimisation step on a single batch; accumulates train_loss."""
    with tf.GradientTape() as tape:
        predictions = model(images)
        loss = my_loss(labels, predictions)
    gradients = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))
    train_loss(loss)
|
||||
|
||||
def train(train_ds, nbr_entrainement):
    """Run nbr_entrainement epochs over train_ds, printing the running mean
    loss and saving a checkpoint every 20 epochs (including epoch 0)."""
    for entrainement in range(nbr_entrainement):
        start = time.time()
        for images, labels in train_ds:
            train_step(images, labels)
        message = 'Entrainement {:04d}: loss: {:6.4f}, temps: {:7.4f}'
        print(message.format(entrainement + 1,
                             train_loss.result(),
                             time.time() - start))
        if not entrainement % 20:
            checkpoint.save(file_prefix="./training/")
|
||||
|
||||
optimizer = tf.keras.optimizers.Adam(learning_rate=1E-4)
train_loss = tf.keras.metrics.Mean()

# Fix: the checkpoint object was constructed twice; one instance is enough
# for both restoring and saving.
# NOTE(review): the optimizer state is not checkpointed, so a resumed run
# restarts Adam's moment estimates — confirm this is intentional.
checkpoint = tf.train.Checkpoint(model=model)
checkpoint.restore(tf.train.latest_checkpoint("./training/"))

train(train_ds, 400)
checkpoint.save(file_prefix="./training/")
|
||||
4
Tensorflow/tutoriel27-3/README.md
Normal file
@@ -0,0 +1,4 @@
|
||||
# Tutoriel tensorflow
|
||||
## Réseau Yolo: F-beta score
|
||||
|
||||
La vidéo de ce tutoriel est disponible à l'adresse suivante: https://www.youtube.com/watch?v=NkY3Kh-4xN4
|
||||
201
Tensorflow/tutoriel27-3/common.py
Normal file
@@ -0,0 +1,201 @@
|
||||
import tensorflow as tf
|
||||
from tensorflow.keras import layers, models
|
||||
import json
|
||||
import random
|
||||
import cv2
|
||||
import numpy as np
|
||||
import math
|
||||
import config
|
||||
|
||||
def sigmoid(x):
    """Numerically-safe logistic function 1 / (1 + exp(-x)).

    The argument is clamped to [-50, 50] so np.exp can never overflow;
    beyond that range the result is saturated at ~0 or ~1 anyway.
    """
    borne = np.clip(x, -50, 50)
    return 1 / (1 + np.exp(-borne))
|
||||
|
||||
def softmax(x):
    """Softmax along the last axis.

    Fixes two defects of the naive exp(x)/sum(exp(x)) version:
    - subtracting the per-row maximum before exponentiating prevents
      np.exp overflow (inf/NaN) on large logits;
    - normalising along the last axis (instead of over the whole array)
      makes each class vector a proper probability distribution when the
      input is batched, e.g. shape (y, x, boxes, classes).
    For 1-D input the result is identical to the naive formula, and argmax
    (the only use made by the callers in this repo) is unchanged.
    """
    x = np.asarray(x, dtype=np.float64)
    e = np.exp(x - np.max(x, axis=-1, keepdims=True))
    return e / np.sum(e, axis=-1, keepdims=True)
|
||||
|
||||
def prepare_image(image, labels, grille=True):
    """Return a copy of *image* with the labelled boxes drawn on it.

    labels is the (cellule_y, cellule_x, nbr_boxes, 5+nbr_classes) grid
    tensor produced by prepare_labels, with coordinates in cell units.
    When grille is True the detection grid is drawn as well.
    """
    img = image.copy()

    if grille is True:
        # Draw the cell grid.
        for x in range(config.r_x, config.largeur + config.r_x, config.r_x):
            for y in range(config.r_y, config.hauteur + config.r_y, config.r_y):
                cv2.line(img, (0, y), (x, y), (0, 0, 0), 1)
                cv2.line(img, (x, 0), (x, y), (0, 0, 0), 1)

    couleurs = list(config.dict.values())
    for y in range(config.cellule_y):
        for x in range(config.cellule_x):
            for box in range(config.nbr_boxes):
                if not labels[y, x, box, 4]:
                    continue  # no object assigned to this anchor box
                ids = np.argmax(labels[y, x, box, 5:])
                # Convert the cell-unit box back to pixel coordinates.
                x_center = int(labels[y, x, box, 0] * config.r_x)
                y_center = int(labels[y, x, box, 1] * config.r_y)
                w_2 = int(labels[y, x, box, 2] * config.r_x / 2)
                h_2 = int(labels[y, x, box, 3] * config.r_y / 2)
                cv2.rectangle(img,
                              (x_center - w_2, y_center - h_2),
                              (x_center + w_2, y_center + h_2),
                              couleurs[ids], 1)
                cv2.circle(img, (x_center, y_center), 1, couleurs[ids], 2)

    return img
|
||||
|
||||
def bruit(image):
    """Add Gaussian noise of random amplitude (5..30) to an image and
    clip the result back into the valid uint8 range."""
    hauteur, largeur, canaux = image.shape
    noise = np.random.randn(hauteur, largeur, canaux) * random.randint(5, 30)
    return np.clip(image + noise, 0, 255).astype(np.uint8)
|
||||
|
||||
def gamma(image, alpha=1.0, beta=0.0):
    """Linear brightness/contrast adjustment: alpha*image + beta,
    clipped to the uint8 range [0, 255]."""
    ajuste = alpha * image + beta
    return np.clip(ajuste, 0, 255).astype(np.uint8)
|
||||
|
||||
def intersection_over_union(boxA, boxB):
    """IoU of two boxes given as [x_min, y_min, x_max, y_max].

    Fix: the naive formula divides by the union area, which is zero when
    both boxes are degenerate, yielding a ZeroDivisionError/NaN.  The
    union is floored at a tiny epsilon so degenerate input returns 0.0;
    for any non-degenerate boxes the result is unchanged.
    """
    xA = np.maximum(boxA[0], boxB[0])
    yA = np.maximum(boxA[1], boxB[1])
    xB = np.minimum(boxA[2], boxB[2])
    yB = np.minimum(boxA[3], boxB[3])
    # Clamp at 0 so disjoint boxes contribute no intersection area.
    interArea = np.maximum(0, xB - xA) * np.maximum(0, yB - yA)
    boxAArea = (boxA[2] - boxA[0]) * (boxA[3] - boxA[1])
    boxBArea = (boxB[2] - boxB[0]) * (boxB[3] - boxB[1])
    union = boxAArea + boxBArea - interArea
    return interArea / np.maximum(union, 1e-12)
|
||||
|
||||
def prepare_labels(fichier_image, objects, coeff=None):
    """Load an image, apply random augmentation (zoom, crop, gamma jitter,
    noise, flips) and build the YOLO training labels.

    Returns (image, label, label2), or (None, None, None) when the sample
    is rejected (no trophozoite before or after augmentation).
    label:  (cellule_y, cellule_x, nbr_boxes, 5+nbr_classes) grid tensor,
            coordinates in cell units.
    label2: (max_objet, 7) flat rows [x_min, y_min, x_max, y_max, area, 1, class].
    """
    image = cv2.imread(fichier_image)

    ######################
    # Keep only images containing at least one trophozoite (class id 4).
    trophozoite = 0
    for o in objects:
        if config.dict2.index(o['category']) == 4:
            trophozoite = 1
            break
    if trophozoite == 0:
        return None, None, None
    ######################

    # Random zoom plus photometric jitter and noise.
    if coeff is None:
        coeff = random.uniform(1.1, 2.5)
    image_r = cv2.resize(image, (int(coeff * config.largeur), int(coeff * config.hauteur)))
    image_r = gamma(image_r, random.uniform(0.7, 1.3), np.random.randint(60) - 30)
    image_r = bruit(image_r)

    # Random crop origin inside the zoomed image.
    if coeff == 1:
        shift_x = 0
        shift_y = 0
    else:
        shift_x = np.random.randint(image_r.shape[1] - config.largeur)
        shift_y = np.random.randint(image_r.shape[0] - config.hauteur)

    ratio_x = coeff * config.largeur / image.shape[1]
    ratio_y = coeff * config.hauteur / image.shape[0]

    # flip: 0 = both axes, 1 = vertical, 2 = horizontal, 3 = none
    # (cv2.flip takes -1 / 0 / 1 respectively).
    flip = np.random.randint(4)
    if flip != 3:
        image_r = cv2.flip(image_r, flip - 1)

    label = np.zeros((config.cellule_y, config.cellule_x, config.nbr_boxes, 5 + config.nbr_classes), dtype=np.float32)
    label2 = np.zeros((config.max_objet, 7), dtype=np.float32)

    nbr_objet = 0
    for o in objects:
        id_class = config.dict2.index(o['category'])
        box = o['bounding_box']

        # Box corners in zoomed-image pixels, mirrored according to flip.
        if flip == 3:
            x_min = int(box['minimum']['c'] * ratio_x)
            y_min = int(box['minimum']['r'] * ratio_y)
            x_max = int(box['maximum']['c'] * ratio_x)
            y_max = int(box['maximum']['r'] * ratio_y)
        if flip == 2:
            x_min = int((image.shape[1] - box['maximum']['c']) * ratio_x)
            y_min = int(box['minimum']['r'] * ratio_y)
            x_max = int((image.shape[1] - box['minimum']['c']) * ratio_x)
            y_max = int(box['maximum']['r'] * ratio_y)
        if flip == 1:
            x_min = int(box['minimum']['c'] * ratio_x)
            y_min = int((image.shape[0] - box['maximum']['r']) * ratio_y)
            x_max = int(box['maximum']['c'] * ratio_x)
            y_max = int((image.shape[0] - box['minimum']['r']) * ratio_y)
        if flip == 0:
            x_min = int((image.shape[1] - box['maximum']['c']) * ratio_x)
            y_min = int((image.shape[0] - box['maximum']['r']) * ratio_y)
            x_max = int((image.shape[1] - box['minimum']['c']) * ratio_x)
            y_max = int((image.shape[0] - box['minimum']['r']) * ratio_y)

        # Discard boxes that do not fall entirely inside the crop.
        if x_min < shift_x or y_min < shift_y or x_max > (shift_x + config.largeur) or y_max > (shift_y + config.hauteur):
            continue

        # Convert to cell units relative to the crop origin.
        x_min = (x_min - shift_x) / config.r_x
        y_min = (y_min - shift_y) / config.r_y
        x_max = (x_max - shift_x) / config.r_x
        y_max = (y_max - shift_y) / config.r_y

        area = (x_max - x_min) * (y_max - y_min)
        label2[nbr_objet] = [x_min, y_min, x_max, y_max, area, 1, id_class]

        x_centre = int(x_min + (x_max - x_min) / 2)
        y_centre = int(y_min + (y_max - y_min) / 2)
        x_cell = int(x_centre)
        y_cell = int(y_centre)

        # Candidate anchor boxes centred on the cell; keep the best IoU.
        a_x_min = x_centre - config.anchors[:, 0] / 2
        a_y_min = y_centre - config.anchors[:, 1] / 2
        a_x_max = x_centre + config.anchors[:, 0] / 2
        a_y_max = y_centre + config.anchors[:, 1] / 2

        id_a = 0
        best_iou = 0
        for i in range(len(config.anchors)):
            iou = intersection_over_union([x_min, y_min, x_max, y_max],
                                          [a_x_min[i], a_y_min[i], a_x_max[i], a_y_max[i]])
            if iou > best_iou:
                best_iou = iou
                id_a = i

        label[y_cell, x_cell, id_a, 0] = (x_max + x_min) / 2
        label[y_cell, x_cell, id_a, 1] = (y_max + y_min) / 2
        label[y_cell, x_cell, id_a, 2] = x_max - x_min
        label[y_cell, x_cell, id_a, 3] = y_max - y_min
        label[y_cell, x_cell, id_a, 4] = 1.
        label[y_cell, x_cell, id_a, 5 + id_class] = 1.

        nbr_objet = nbr_objet + 1
        if nbr_objet == config.max_objet:
            print("Nbr objet max atteind !!!!!")
            break

    ######################
    # Reject the crop when flip/crop removed every trophozoite.
    trophozoite = 0
    for y in range(config.cellule_y):
        for x in range(config.cellule_x):
            for b in range(config.nbr_boxes):
                if np.argmax(label[y, x, b, 5:]) == 4:
                    trophozoite = 1
    if not trophozoite:
        return None, None, None
    ######################

    return image_r[shift_y:shift_y + config.hauteur, shift_x:shift_x + config.largeur], label, label2
|
||||
|
||||
def read_json(file, nbr=1, nbr_fichier=None):
    """Build the dataset arrays from an annotation JSON file.

    nbr:         augmented samples generated per annotated image.
    nbr_fichier: optional cap on the number of images processed.
    Returns (images, labels, labels2) as numpy arrays; rejected samples
    (prepare_labels returning None) are simply skipped.
    """
    images = []
    labels = []
    labels2 = []
    with open(file) as json_file:
        data = json.load(json_file)
        compteur = 0
        for p in data:
            print(compteur, p['image']['pathname'])
            compteur += 1
            for _ in range(nbr):
                image, label, label2 = prepare_labels("./{}".format(p['image']['pathname']), p['objects'])
                if image is not None:
                    images.append(image)
                    labels.append(label)
                    labels2.append(label2)
            if nbr_fichier is not None and compteur == nbr_fichier:
                break
    return np.array(images), np.array(labels), np.array(labels2)
|
||||
33
Tensorflow/tutoriel27-3/config.py
Normal file
@@ -0,0 +1,33 @@
|
||||
import numpy as np

# Class name -> BGR drawing colour; the insertion order defines class ids.
dict = {'leukocyte':      (255, 255, 0),
        'red blood cell': (0, 0, 255),
        'ring':           (0, 255, 0),
        'schizont':       (255, 0, 255),
        'trophozoite':    (255, 0, 0),
        'difficult':      (0, 0, 0),
        'gametocyte':     (0, 255, 255)}
# Class names indexed by id (dicts preserve insertion order).
dict2 = list(dict)

# Input image size (pixels) and detection grid resolution.
largeur = 256
hauteur = 192
cellule_x = 16
cellule_y = 12
nbr_classes = len(dict)
r_x = largeur // cellule_x   # pixels per cell, horizontally
r_y = hauteur // cellule_y   # pixels per cell, vertically
max_objet = 60

# Anchor box sizes (width, height) in cell units.
anchors = np.array([[3.0, 1.5], [2.0, 2.0], [1.5, 3.0]])
nbr_boxes = len(anchors)

batch_size = 16

# YOLO loss weights.
lambda_coord = 5
lambda_noobj = 0.5
#lambda_coord=1
#lambda_noobj=1

# IoU threshold above which an unassigned box is not penalised as "no object".
seuil_iou_loss = 0.6
|
||||
16
Tensorflow/tutoriel27-3/images.py
Normal file
@@ -0,0 +1,16 @@
|
||||
import cv2
import numpy as np
import common
import config

# Load a few augmented samples and display them one by one ('q' quits).
images, labels, labels2 = common.read_json('training.json', 10, 10)
melange = np.random.permutation(len(images))
images = images[melange]
labels = labels[melange]

for i in range(len(images)):
    image = common.prepare_image(images[i], labels[i], False)
    cv2.imshow("image", cv2.resize(image, (2 * config.largeur, 2 * config.hauteur)))
    if cv2.waitKey() & 0xFF == ord('q'):
        break
|
||||
|
||||
79
Tensorflow/tutoriel27-3/inference.py
Normal file
@@ -0,0 +1,79 @@
|
||||
import tensorflow as tf
import sys
import time
import cv2
import numpy as np
import math
import common
import config
import model

# Test data, normalised to [0, 1] and shuffled.
images, labels, labels2 = common.read_json('test.json', 5, 30)
images = np.array(images, dtype=np.float32) / 255
labels = np.array(labels, dtype=np.float32)
melange = np.random.permutation(len(images))
images = images[melange]
labels = labels[melange]

model = model.model(config.nbr_classes, config.nbr_boxes, config.cellule_y, config.cellule_x)

# Restore the weights from the latest training checkpoint.
checkpoint = tf.train.Checkpoint(model=model)
checkpoint.restore(tf.train.latest_checkpoint("./training/"))

# Cell-offset grid, one (x, y) pair per anchor box of every cell.
grid = np.meshgrid(np.arange(config.cellule_x, dtype=np.float32), np.arange(config.cellule_y, dtype=np.float32))
grid = np.expand_dims(np.stack(grid, axis=-1), axis=2)
grid = np.tile(grid, (1, 1, config.nbr_boxes, 1))

for i in range(len(images)):
    img = common.prepare_image(images[i], labels[i], False)
    img2 = images[i].copy()
    predictions = model(np.array([images[i]]))
    pred_boxes = predictions[0, :, :, :, 0:4]
    pred_conf = common.sigmoid(predictions[0, :, :, :, 4])
    pred_classes = common.softmax(predictions[0, :, :, :, 5:])
    ids = np.argmax(pred_classes, axis=-1)

    # Decode the YOLO parametrisation back to pixel coordinates.
    x_center = ((grid[:, :, :, 0] + common.sigmoid(pred_boxes[:, :, :, 0])) * config.r_x)
    y_center = ((grid[:, :, :, 1] + common.sigmoid(pred_boxes[:, :, :, 1])) * config.r_y)
    w = (np.exp(pred_boxes[:, :, :, 2]) * config.anchors[:, 0] * config.r_x)
    h = (np.exp(pred_boxes[:, :, :, 3]) * config.anchors[:, 1] * config.r_y)

    x_min = (x_center - w / 2).astype(np.int32)
    y_min = (y_center - h / 2).astype(np.int32)
    x_max = (x_center + w / 2).astype(np.int32)
    y_max = (y_center + h / 2).astype(np.int32)

    # Draw every raw detection above the confidence threshold.
    for y in range(config.cellule_y):
        for x in range(config.cellule_x):
            for b in range(config.nbr_boxes):
                if pred_conf[y, x, b] > 0.10:
                    color = list(config.dict.values())[ids[y, x, b]]
                    cv2.circle(images[i], (x_center[y, x, b], y_center[y, x, b]), 1, color, 2)
                    cv2.rectangle(images[i], (x_min[y, x, b], y_min[y, x, b]), (x_max[y, x, b], y_max[y, x, b]), color, 1)
                    cv2.rectangle(images[i], (x_min[y, x, b], y_min[y, x, b]), (x_max[y, x, b], y_min[y, x, b] - 15), color, cv2.FILLED)
                    cv2.putText(images[i], "{:3.0%}".format(pred_conf[y, x, b]), (x_min[y, x, b], y_min[y, x, b] - 5), cv2.FONT_HERSHEY_COMPLEX_SMALL , 0.5, (255, 255, 255), 1)

    # Same detections after non-max suppression.
    tab_boxes = np.stack([y_min, x_min, y_max, x_max], axis=-1).reshape(-1, 4).astype(np.float32)
    pred_conf = pred_conf.reshape(-1)
    ids = ids.reshape(-1)
    tab_index = tf.image.non_max_suppression(tab_boxes, pred_conf, 42)

    for id in tab_index:
        if pred_conf[id] > 0.10:
            x_min = tab_boxes[id, 1]
            y_min = tab_boxes[id, 0]
            x_max = tab_boxes[id, 3]
            y_max = tab_boxes[id, 2]

            color = list(config.dict.values())[ids[id]]
            cv2.rectangle(img2, (x_min, y_min), (x_max, y_max), color, 1)
            cv2.rectangle(img2, (x_min, y_min), (x_max, int(y_min - 15)), color, cv2.FILLED)
            cv2.putText(img2, "{:3.0%}".format(pred_conf[id]), (x_min, int(y_min - 5)), cv2.FONT_HERSHEY_COMPLEX_SMALL , 0.5, (255, 255, 255), 1)  # {%} ???

    cv2.imshow("Inference", images[i])
    cv2.imshow("Bonne reponse", img)
    cv2.imshow("Non max suppression", img2)

    key = cv2.waitKey() & 0xFF
    if key == ord('q'):
        quit()
|
||||
111
Tensorflow/tutoriel27-3/map.py
Normal file
@@ -0,0 +1,111 @@
|
||||
import tensorflow as tf
import sys
import time
import cv2
import numpy as np
import math
import common
import config
import model

# Evaluation data: 10 augmented samples per annotated test image,
# normalised to [0, 1].
images, labels, labels2 = common.read_json('test.json', 10)
images = np.array(images, dtype=np.float32) / 255
labels = np.array(labels, dtype=np.float32)

model = model.model(config.nbr_classes, config.nbr_boxes, config.cellule_y, config.cellule_x)

# Restore the weights from the latest training checkpoint.
checkpoint = tf.train.Checkpoint(model=model)
checkpoint.restore(tf.train.latest_checkpoint("./training/"))

dataset = tf.data.Dataset.from_tensor_slices((images, labels)).batch(config.batch_size)
|
||||
|
||||
def calcul_map(model, dataset, beta=1., seuil=0.5):
    """Evaluate the detector over `dataset` and return an F-beta style score.

    seuil: IoU threshold above which a detection counts as a true positive.
    Reads the module-level `labels2` sequentially, so `dataset` must iterate
    the images in the same order they were loaded (it is not shuffled here).
    """
    # Cell-coordinate grid tiled per anchor; broadcasts against the batch dim.
    grid=np.meshgrid(np.arange(config.cellule_x, dtype=np.float32), np.arange(config.cellule_y, dtype=np.float32))
    grid=np.expand_dims(np.stack(grid, axis=-1), axis=2)
    grid=np.tile(grid, (1, 1, 1, config.nbr_boxes, 1))

    index_labels2=0
    # Ground-truth boxes scaled from grid-cell units to pixels.
    labels2_=labels2*[config.r_x, config.r_y, config.r_x, config.r_y, 1, 1, 1]
    score=[]
    tab_nbr_reponse=[]   # per image: number of detections kept, per class
    tab_tp=[]            # per image: true positives, per class
    tab_true_boxes=[]    # per image: ground-truth box count, per class

    for images, labels in dataset:
        predictions=np.array(model(images))

        pred_conf=common.sigmoid(predictions[:, :, :, :, 4])
        pred_classes=common.softmax(predictions[:, :, :, :, 5:])
        pred_ids=np.argmax(pred_classes, axis=-1)

        # Decode the YOLO parameterisation into pixel-space boxes.
        x_center=((grid[:, :, :, :, 0]+common.sigmoid(predictions[:, :, :, :, 0]))*config.r_x)
        y_center=((grid[:, :, :, :, 1]+common.sigmoid(predictions[:, :, :, :, 1]))*config.r_y)
        w=(np.exp(predictions[:, :, :, :, 2])*config.anchors[:, 0]*config.r_x)
        h=(np.exp(predictions[:, :, :, :, 3])*config.anchors[:, 1]*config.r_y)

        x_min=x_center-w/2
        y_min=y_center-h/2
        x_max=x_center+w/2
        y_max=y_center+h/2

        # Flatten the grid so NMS sees one plain box list per image.
        tab_boxes=np.stack([y_min, x_min, y_max, x_max], axis=-1).astype(np.float32)
        tab_boxes=tab_boxes.reshape(-1, config.cellule_y*config.cellule_x*config.nbr_boxes, 4)
        pred_conf=pred_conf.reshape(-1, config.cellule_y*config.cellule_x*config.nbr_boxes)
        pred_ids=pred_ids.reshape(-1, config.cellule_y*config.cellule_x*config.nbr_boxes)

        for p in range(len(predictions)):
            nbr_reponse=np.zeros(config.nbr_classes)
            tp=np.zeros(config.nbr_classes)
            nbr_true_boxes=np.zeros(config.nbr_classes)
            # Keep at most 100 boxes after non-max suppression.
            tab_index=tf.image.non_max_suppression(tab_boxes[p], pred_conf[p], 100)
            for id in tab_index:
                if pred_conf[p, id]>0.10:
                    nbr_reponse[pred_ids[p, id]]+=1
                    for box in labels2_[index_labels2]:
                        if not box[5]:
                            break   # box[5]==0 marks padding rows; stop scanning
                        # tab_boxes is (y_min, x_min, y_max, x_max); reorder for IoU.
                        b1=[tab_boxes[p, id, 1], tab_boxes[p, id, 0], tab_boxes[p, id, 3], tab_boxes[p, id, 2]]
                        iou=common.intersection_over_union(b1, box)
                        if iou>seuil and box[6]==pred_ids[p, id]:
                            tp[pred_ids[p, id]]+=1

            # Count the ground-truth boxes per class for the recall denominator.
            for box in labels2[index_labels2]:
                if not box[5]:
                    break
                nbr_true_boxes[int(box[6])]+=1

            tab_nbr_reponse.append(nbr_reponse)
            tab_tp.append(tp)
            tab_true_boxes.append(nbr_true_boxes)

            index_labels2=index_labels2+1

    tab_nbr_reponse=np.array(tab_nbr_reponse)
    tab_tp=np.array(tab_tp)
    tab_true_boxes=np.array(tab_true_boxes)

    ########################
    # Diagnostic scores for the two classes of interest
    # (index 1 = red blood cell, index 4 = trophozoite); printed only.
    precision_globule_rouge=tab_tp[:, 1]/(tab_nbr_reponse[:, 1]+1E-7)
    precision_trophozoite=tab_tp[:, 4]/(tab_nbr_reponse[:, 4]+1E-7)

    rappel_globule_rouge=tab_tp[:, 1]/(tab_true_boxes[:, 1]+1E-7)
    rappel_trophozoite=tab_tp[:, 4]/(tab_true_boxes[:, 4]+1E-7)

    print("F1 score globule rouge", np.mean(2*precision_globule_rouge*rappel_globule_rouge/(precision_globule_rouge+rappel_globule_rouge+1E-7)))
    print("F1 score trophozoite", np.mean(2*precision_trophozoite*rappel_trophozoite/(precision_trophozoite+rappel_trophozoite+1E-7)))

    precision=(precision_globule_rouge+precision_trophozoite)/2
    rappel=(rappel_globule_rouge+rappel_trophozoite)/2

    score=np.mean((1+beta*beta)*precision*rappel/(beta*beta*precision+rappel+1E-7))
    print("SCORE (globule rouge/trophozoite)", score)
    ########################

    # Returned value: F-beta averaged over every class and every image.
    precision=tab_tp/(tab_nbr_reponse+1E-7)
    rappel=tab_tp/(tab_true_boxes+1E-7)
    score=np.mean((1+beta*beta)*precision*rappel/(beta*beta*precision+rappel+1E-7))

    return score
|
||||
|
||||
# Run the evaluation and report the aggregate score.
score=calcul_map(model, dataset)
print("Resultat", score)
|
||||
54
Tensorflow/tutoriel27-3/model.py
Normal file
@@ -0,0 +1,54 @@
|
||||
import tensorflow as tf
|
||||
from tensorflow.keras import layers, models
|
||||
import config
|
||||
|
||||
def block_resnet(input, filters, kernel_size, reduce=False):
    """Residual block: two convolutions plus a shortcut connection.

    input: feature tensor (the parameter name shadows the builtin; kept
    for interface compatibility).
    reduce: when True the second convolution and the shortcut use stride 2,
    halving the spatial resolution.
    The shortcut is the identity only when neither the channel count nor
    the resolution changes; otherwise a 1x1 convolution projects the input.
    """
    result=layers.Conv2D(filters, kernel_size, strides=1, padding='SAME')(input)
    result=layers.BatchNormalization()(result)
    result=layers.LeakyReLU(alpha=0.1)(result)

    result=layers.Conv2D(filters, kernel_size, strides=2 if reduce else 1, padding='SAME')(result)

    # The original duplicated the stride-2 projection in both branches of the
    # channel-count test; collapsed here into one three-way choice with
    # identical behavior.
    if reduce:
        shortcut=layers.Conv2D(filters, 1, strides=2, padding='SAME')(input)
    elif input.shape[-1]!=filters:
        shortcut=layers.Conv2D(filters, 1, strides=1, padding='SAME')(input)
    else:
        shortcut=input

    result=layers.add([result, shortcut])
    result=layers.LeakyReLU(alpha=0.1)(result)
    result=layers.BatchNormalization()(result)
    return result
|
||||
|
||||
def model(nbr_classes, nbr_boxes, cellule_y, cellule_x):
    """Build the YOLO-style detector.

    Returns a Keras model mapping an image of shape (hauteur, largeur, 3)
    to a (cellule_y, cellule_x, nbr_boxes, 5+nbr_classes) prediction grid.
    """
    # BUG FIX: the input was declared (largeur, hauteur, 3), but OpenCV images
    # are (rows=hauteur, cols=largeur, 3). Assuming the same config as the
    # sibling tutorial (largeur=256, hauteur=192, grid 12x16), four stride-2
    # stages on the original order yield a (16, 12) map that the
    # (cellule_y, cellule_x) reshape would silently scramble.
    entree=layers.Input(shape=(config.hauteur, config.largeur, 3), dtype='float32')

    result=block_resnet(entree, 16, 3, False)
    result=block_resnet(result, 16, 3, True)

    result=block_resnet(result, 32, 3, False)
    result=block_resnet(result, 32, 3, True)

    result=block_resnet(result, 64, 3, False)
    result=block_resnet(result, 64, 3, False)
    result=block_resnet(result, 64, 3, True)

    result=block_resnet(result, 128, 3, False)
    result=block_resnet(result, 128, 3, False)
    result=block_resnet(result, 128, 3, True)

    # Detection head: a 1x1 convolution then a reshape to the YOLO grid.
    result=layers.Conv2D(nbr_boxes*(5+nbr_classes), 1, padding='SAME')(result)
    sortie=layers.Reshape((cellule_y, cellule_x, nbr_boxes, 5+nbr_classes))(result)

    model=models.Model(inputs=entree, outputs=sortie)

    return model
|
||||
|
||||
117
Tensorflow/tutoriel27-3/train.py
Normal file
@@ -0,0 +1,117 @@
|
||||
import tensorflow as tf
|
||||
import sys
|
||||
import time
|
||||
import cv2
|
||||
import numpy as np
|
||||
import common
|
||||
import config
|
||||
import model
|
||||
|
||||
# Mini-batch size used for this training run.
batch_size=16

# Load the training set (20 random augmentations per source image),
# normalise pixels to [0, 1] and shuffle images/labels with one permutation.
# NOTE(review): labels2 is NOT permuted with the same index, yet my_loss
# below reads the module-level labels2 — the no-object mask may be matched
# against the wrong images; confirm.
images, labels, labels2=common.read_json('training.json', 20)
images=np.array(images, dtype=np.float32)/255
labels=np.array(labels, dtype=np.float32)
index=np.random.permutation(len(images))
images=images[index]
labels=labels[index]

print("Nbr images:", len(images))

train_ds=tf.data.Dataset.from_tensor_slices((images, labels)).batch(batch_size)
|
||||
|
||||
def my_loss(labels, preds):
    """YOLO-style composite loss for one batch.

    labels: dense target grid (batch, cellule_y, cellule_x, nbr_boxes, 5+classes).
    preds:  raw network output of the same shape.
    NOTE(review): the no-object mask indexes the module-level `labels2` with
    the in-batch index `i`, so every batch is compared against the first
    len(preds) entries of labels2 — confirm this is intended.
    """
    # Cell-offset grid: turns the sigmoid x/y offsets into absolute grid coords.
    grid=tf.meshgrid(tf.range(config.cellule_x, dtype=tf.float32), tf.range(config.cellule_y, dtype=tf.float32))
    grid=tf.expand_dims(tf.stack(grid, axis=-1), axis=2)
    grid=tf.tile(grid, (1, 1, config.nbr_boxes, 1))

    # Decode the prediction tensor.
    preds_xy    =tf.math.sigmoid(preds[:, :, :, :, 0:2])+grid
    preds_wh    =preds[:, :, :, :, 2:4]
    preds_conf  =tf.math.sigmoid(preds[:, :, :, :, 4])
    preds_classe=tf.math.sigmoid(preds[:, :, :, :, 5:])

    # Corner form and area of the predicted boxes (grid units).
    preds_wh_half=preds_wh/2
    preds_xymin=preds_xy-preds_wh_half
    preds_xymax=preds_xy+preds_wh_half
    preds_areas=preds_wh[:, :, :, :, 0]*preds_wh[:, :, :, :, 1]

    # Ground-truth box list: corners and precomputed area.
    l2_xy_min=labels2[:, :, 0:2]
    l2_xy_max=labels2[:, :, 2:4]
    l2_area  =labels2[:, :, 4]

    # Extra axis so each prediction broadcasts against every true box.
    preds_xymin=tf.expand_dims(preds_xymin, 4)
    preds_xymax=tf.expand_dims(preds_xymax, 4)
    preds_areas=tf.expand_dims(preds_areas, 4)

    labels_xy=labels[:, :, :, :, 0:2]
    # YOLO w/h target: log of the size relative to the anchor; -inf (empty
    # cells divide 0 by the anchor) is replaced by 0.
    labels_wh=tf.math.log(labels[:, :, :, :, 2:4]/config.anchors)
    labels_wh=tf.where(tf.math.is_inf(labels_wh), tf.zeros_like(labels_wh), labels_wh)

    conf_mask_obj=labels[:, :, :, :, 4]     # 1 where a target box lives
    labels_classe=labels[:, :, :, :, 5:]

    # No-object mask: cells whose best IoU with any true box stays below the
    # threshold and which hold no target themselves.
    conf_mask_noobj=[]
    for i in range(len(preds)):
        xy_min=tf.maximum(preds_xymin[i], l2_xy_min[i])
        xy_max=tf.minimum(preds_xymax[i], l2_xy_max[i])
        intersect_wh=tf.maximum(xy_max-xy_min, 0.)
        intersect_areas=intersect_wh[..., 0]*intersect_wh[..., 1]
        union_areas=preds_areas[i]+l2_area[i]-intersect_areas
        ious=tf.truediv(intersect_areas, union_areas)
        best_ious=tf.reduce_max(ious, axis=3)
        conf_mask_noobj.append(tf.cast(best_ious<config.seuil_iou_loss, tf.float32)*(1-conf_mask_obj[i]))
    conf_mask_noobj=tf.stack(conf_mask_noobj)

    preds_x=preds_xy[..., 0]
    preds_y=preds_xy[..., 1]
    preds_w=preds_wh[..., 0]
    preds_h=preds_wh[..., 1]
    labels_x=labels_xy[..., 0]
    labels_y=labels_xy[..., 1]
    labels_w=labels_wh[..., 0]
    labels_h=labels_wh[..., 1]

    # Localisation terms, restricted to cells that own an object.
    loss_xy=tf.reduce_sum(conf_mask_obj*(tf.math.square(preds_x-labels_x)+tf.math.square(preds_y-labels_y)), axis=(1, 2, 3))
    loss_wh=tf.reduce_sum(conf_mask_obj*(tf.math.square(preds_w-labels_w)+tf.math.square(preds_h-labels_h)), axis=(1, 2, 3))

    # Confidence terms: push conf to 1 on objects, to 0 on confident misses.
    loss_conf_obj=tf.reduce_sum(conf_mask_obj*tf.math.square(preds_conf-conf_mask_obj), axis=(1, 2, 3))
    loss_conf_noobj=tf.reduce_sum(conf_mask_noobj*tf.math.square(preds_conf-conf_mask_obj), axis=(1, 2, 3))

    # Classification term (squared error against the one-hot target).
    loss_classe=tf.reduce_sum(tf.math.square(preds_classe-labels_classe), axis=4)
    loss_classe=tf.reduce_sum(conf_mask_obj*loss_classe, axis=(1, 2, 3))

    # Weighted sum per image (YOLO lambda weights from config).
    loss=config.lambda_coord*loss_xy+config.lambda_coord*loss_wh+loss_conf_obj+config.lambda_noobj*loss_conf_noobj+loss_classe
    return loss
|
||||
|
||||
model=model.model(config.nbr_classes, config.nbr_boxes, config.cellule_y, config.cellule_x)
|
||||
|
||||
@tf.function
def train_step(images, labels):
    """One optimisation step: forward, loss, backprop; updates train_loss."""
    with tf.GradientTape() as tape:
        predictions=model(images)
        loss=my_loss(labels, predictions)
    gradients=tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))
    # Accumulate the batch loss into the epoch-mean metric.
    train_loss(loss)
|
||||
|
||||
def train(train_ds, nbr_entrainement):
    """Run `nbr_entrainement` epochs over `train_ds`, checkpointing every 20."""
    message='Entrainement {:04d}: loss: {:6.4f}, temps: {:7.4f}'
    for num_epoque in range(nbr_entrainement):
        debut=time.time()
        for lot_images, lot_labels in train_ds:
            train_step(lot_images, lot_labels)
        print(message.format(num_epoque+1, train_loss.result(), time.time()-debut))
        # Periodic save (includes epoch 0).
        if num_epoque%20==0:
            checkpoint.save(file_prefix="./training/")
|
||||
|
||||
# Optimiser, epoch-mean loss metric and a single checkpoint object.
# (The original constructed tf.train.Checkpoint twice; the first instance
# was never used and has been removed.)
optimizer=tf.keras.optimizers.Adam(learning_rate=1E-4)
train_loss=tf.keras.metrics.Mean()

checkpoint=tf.train.Checkpoint(model=model)
checkpoint.restore(tf.train.latest_checkpoint("./training/"))

train(train_ds, 400)
checkpoint.save(file_prefix="./training/")
|
||||
4
Tensorflow/tutoriel27/README.md
Normal file
@@ -0,0 +1,4 @@
|
||||
# Tutoriel tensorflow
|
||||
## Réseau Yolo
|
||||
|
||||
La vidéo de ce tutoriel est disponible à l'adresse suivante: https://www.youtube.com/watch?v=oQ0436IJUWc
|
||||
201
Tensorflow/tutoriel27/common.py
Normal file
@@ -0,0 +1,201 @@
|
||||
import tensorflow as tf
|
||||
from tensorflow.keras import layers, models
|
||||
import json
|
||||
import random
|
||||
import cv2
|
||||
import numpy as np
|
||||
import math
|
||||
import config
|
||||
|
||||
def sigmoid(x):
    """Logistic function; the input is clamped to [-50, 50] to avoid overflow."""
    return 1.0/(1.0+np.exp(-np.clip(x, -50, 50)))
|
||||
|
||||
def softmax(x):
    """Softmax over the LAST axis of `x`.

    Fixes two defects of the original: the normalising sum ran over the whole
    array instead of each class vector (wrong probabilities for batched input;
    the argmax-only callers in this repo were unaffected), and exp() could
    overflow for large logits — the per-vector max is subtracted first.
    """
    e=np.exp(x-np.max(x, axis=-1, keepdims=True))
    return e/np.sum(e, axis=-1, keepdims=True)
|
||||
|
||||
def prepare_image(image, labels, grille=True):
    """Return a copy of `image` with the ground-truth boxes drawn on it.

    labels: dense target grid (cellule_y, cellule_x, nbr_boxes, 5+classes)
    with coordinates in grid-cell units.
    grille: when True, also draw the YOLO cell grid.
    """
    img=image.copy()

    if grille is True:
        # Draw the cell grid lines.
        for x in range(config.r_x, config.largeur+config.r_x, config.r_x):
            for y in range(config.r_y, config.hauteur+config.r_y, config.r_y):
                cv2.line(img, (0, y), (x, y), (0, 0, 0), 1)
                cv2.line(img, (x, 0), (x, y), (0, 0, 0), 1)

    for y in range(config.cellule_y):
        for x in range(config.cellule_x):
            for box in range(config.nbr_boxes):
                if labels[y, x, box, 4]:   # objectness flag: a target lives here
                    ids=np.argmax(labels[y, x, box, 5:])
                    # Convert centre/size from cell units back to pixels.
                    x_center=int(labels[y, x, box, 0]*config.r_x)
                    y_center=int(labels[y, x, box, 1]*config.r_y)
                    w_2=int(labels[y, x, box, 2]*config.r_x/2)
                    h_2=int(labels[y, x, box, 3]*config.r_y/2)
                    x_min=x_center-w_2
                    y_min=y_center-h_2
                    x_max=x_center+w_2
                    y_max=y_center+h_2
                    # Colour looked up by class index in config.dict.
                    cv2.rectangle(img, (x_min, y_min), (x_max, y_max), list(config.dict.values())[ids], 1)
                    cv2.circle(img, (x_center, y_center), 1, list(config.dict.values())[ids], 2)

    return img
|
||||
|
||||
def bruit(image):
    """Return `image` plus Gaussian noise of random amplitude, clipped to uint8."""
    # Same random-call order as before: gaussian field first, then amplitude.
    noise=np.random.randn(*image.shape)*random.randint(5, 30)
    return np.clip(image+noise, 0, 255).astype(np.uint8)
|
||||
|
||||
def gamma(image, alpha=1.0, beta=0.0):
    """Linear brightness/contrast adjustment: clip(alpha*image + beta) as uint8."""
    adjusted=np.clip(alpha*image+beta, 0, 255)
    return adjusted.astype(np.uint8)
|
||||
|
||||
def intersection_over_union(boxA, boxB):
    """IoU of two boxes given as [x_min, y_min, x_max, y_max]."""
    inter_w=np.maximum(0, np.minimum(boxA[2], boxB[2])-np.maximum(boxA[0], boxB[0]))
    inter_h=np.maximum(0, np.minimum(boxA[3], boxB[3])-np.maximum(boxA[1], boxB[1]))
    inter=inter_w*inter_h
    area_a=(boxA[2]-boxA[0])*(boxA[3]-boxA[1])
    area_b=(boxB[2]-boxB[0])*(boxB[3]-boxB[1])
    return inter/(area_a+area_b-inter)
|
||||
|
||||
def prepare_labels(fichier_image, objects, coeff=None):
    """Load one annotated image and build a randomly-augmented training sample.

    fichier_image: path of the image to read.
    objects: annotation list ({'category', 'bounding_box'} entries).
    coeff: optional zoom factor; drawn uniformly in [1.1, 2.5] when None.
    Returns (cropped_image, label, label2), or (None, None, None) when the
    sample is rejected (no trophozoite before or after augmentation).
    label:  dense YOLO grid (cellule_y, cellule_x, nbr_boxes, 5+classes).
    label2: flat (max_objet, 7) list [x_min, y_min, x_max, y_max, area, valid, class].
    """
    image=cv2.imread(fichier_image)

    ######################
    # Reject images containing no trophozoite at all (class index 4).
    trophozoite=0
    for o in objects:
        if config.dict2.index(o['category'])==4:
            trophozoite=1
            break
    if trophozoite==0:
        return None, None, None
    ######################

    # Random zoom, brightness/contrast jitter, then additive noise.
    if coeff is None:
        coeff=random.uniform(1.1, 2.5)
    image_r=cv2.resize(image, (int(coeff*config.largeur), int(coeff*config.hauteur)))
    image_r=gamma(image_r, random.uniform(0.7, 1.3), np.random.randint(60)-30)
    image_r=bruit(image_r)

    # Random crop position inside the zoomed image.
    if coeff==1:
        shift_x=0
        shift_y=0
    else:
        shift_x=np.random.randint(image_r.shape[1]-config.largeur)
        shift_y=np.random.randint(image_r.shape[0]-config.hauteur)

    # Scale factors from original-image pixels to zoomed-image pixels.
    ratio_x=coeff*config.largeur/image.shape[1]
    ratio_y=coeff*config.hauteur/image.shape[0]

    # Random flip: cv2.flip code is flip-1 (0->both axes, 1->vertical,
    # 2->horizontal); flip==3 means no flip.
    flip=np.random.randint(4)
    if flip!=3:
        image_r=cv2.flip(image_r, flip-1)

    label =np.zeros((config.cellule_y, config.cellule_x, config.nbr_boxes, 5+config.nbr_classes), dtype=np.float32)
    label2=np.zeros((config.max_objet, 7), dtype=np.float32)

    nbr_objet=0
    for o in objects:
        id_class=config.dict2.index(o['category'])
        box=o['bounding_box']

        # Map the annotated corners into zoomed-image pixels, mirroring the
        # coordinates to match the flip applied to the image above.
        if flip==3:
            x_min=int(box['minimum']['c']*ratio_x)
            y_min=int(box['minimum']['r']*ratio_y)
            x_max=int(box['maximum']['c']*ratio_x)
            y_max=int(box['maximum']['r']*ratio_y)
        if flip==2:
            x_min=int((image.shape[1]-box['maximum']['c'])*ratio_x)
            y_min=int(box['minimum']['r']*ratio_y)
            x_max=int((image.shape[1]-box['minimum']['c'])*ratio_x)
            y_max=int(box['maximum']['r']*ratio_y)
        if flip==1:
            x_min=int(box['minimum']['c']*ratio_x)
            y_min=int((image.shape[0]-box['maximum']['r'])*ratio_y)
            x_max=int(box['maximum']['c']*ratio_x)
            y_max=int((image.shape[0]-box['minimum']['r'])*ratio_y)
        if flip==0:
            x_min=int((image.shape[1]-box['maximum']['c'])*ratio_x)
            y_min=int((image.shape[0]-box['maximum']['r'])*ratio_y)
            x_max=int((image.shape[1]-box['minimum']['c'])*ratio_x)
            y_max=int((image.shape[0]-box['minimum']['r'])*ratio_y)

        # Drop boxes that are not fully inside the crop window.
        if x_min<shift_x or y_min<shift_y or x_max>(shift_x+config.largeur) or y_max>(shift_y+config.hauteur):
            continue
        # Convert to grid-cell units relative to the crop origin.
        x_min=(x_min-shift_x)/config.r_x
        y_min=(y_min-shift_y)/config.r_y
        x_max=(x_max-shift_x)/config.r_x
        y_max=(y_max-shift_y)/config.r_y

        area=(x_max-x_min)*(y_max-y_min)
        label2[nbr_objet]=[x_min, y_min, x_max, y_max, area, 1, id_class]

        # Grid cell owning the box centre.
        x_centre=int(x_min+(x_max-x_min)/2)
        y_centre=int(y_min+(y_max-y_min)/2)
        x_cell=int(x_centre)
        y_cell=int(y_centre)

        # Anchors centred on the box centre, for the best-anchor search below.
        a_x_min=x_centre-config.anchors[:, 0]/2
        a_y_min=y_centre-config.anchors[:, 1]/2
        a_x_max=x_centre+config.anchors[:, 0]/2
        a_y_max=y_centre+config.anchors[:, 1]/2

        # Assign the anchor with the highest IoU against the box.
        id_a=0
        best_iou=0
        for i in range(len(config.anchors)):
            iou=intersection_over_union([x_min, y_min, x_max, y_max], [a_x_min[i], a_y_min[i], a_x_max[i], a_y_max[i]])
            if iou>best_iou:
                best_iou=iou
                id_a=i

        # Write the target: centre, size (cell units), objectness, one-hot class.
        label[y_cell, x_cell, id_a, 0]=(x_max+x_min)/2
        label[y_cell, x_cell, id_a, 1]=(y_max+y_min)/2
        label[y_cell, x_cell, id_a, 2]=x_max-x_min
        label[y_cell, x_cell, id_a, 3]=y_max-y_min
        label[y_cell, x_cell, id_a, 4]=1.
        label[y_cell, x_cell, id_a, 5+id_class]=1.

        nbr_objet=nbr_objet+1
        if nbr_objet==config.max_objet:
            print("Nbr objet max atteind !!!!!")
            break

    ######################
    # Reject the crop if no trophozoite survived the crop/flip.
    # NOTE(review): np.argmax of an all-zero class vector is 0, so empty
    # cells cannot report class 4 — the test relies on that; confirm.
    trophozoite=0
    for y in range(config.cellule_y):
        for x in range(config.cellule_x):
            for b in range(config.nbr_boxes):
                if np.argmax(label[y, x, b, 5:])==4:
                    trophozoite=1
    if not trophozoite:
        return None, None, None
    ######################

    return image_r[shift_y:shift_y+config.hauteur, shift_x:shift_x+config.largeur], label, label2
|
||||
|
||||
def read_json(file, nbr=1, nbr_fichier=None):
    """Load a dataset description and build `nbr` augmented samples per image.

    file: JSON file listing images and their annotated objects.
    nbr: augmentation attempts per source image.
    nbr_fichier: optional cap on the number of source images processed.
    Returns (images, labels, labels2) as numpy arrays; attempts for which
    prepare_labels rejects the crop are simply skipped.
    """
    images=[]
    labels=[]
    labels2=[]
    with open(file) as json_file:
        data=json.load(json_file)
        # enumerate replaces the original hand-rolled counter, which
        # shadowed the builtin `id`.
        for num, p in enumerate(data):
            print(num, p['image']['pathname'])
            for i in range(nbr):
                image, label, label2=prepare_labels("./{}".format(p['image']['pathname']), p['objects'])
                if image is not None:
                    images.append(image)
                    labels.append(label)
                    labels2.append(label2)
            if nbr_fichier is not None and num+1==nbr_fichier:
                break
    images=np.array(images)
    labels=np.array(labels)
    labels2=np.array(labels2)
    return images, labels, labels2
|
||||
33
Tensorflow/tutoriel27/config.py
Normal file
@@ -0,0 +1,33 @@
|
||||
import numpy as np
|
||||
|
||||
# Class name -> BGR display colour; the insertion order defines class indices.
# NOTE: the name shadows the builtin `dict`, but other modules reference
# config.dict, so it is kept.
dict={'leukocyte':      (255, 255, 0),
      'red blood cell': (0, 0, 255),
      'ring':           (0, 255, 0),
      'schizont':       (255, 0, 255),
      'trophozoite':    (255, 0, 0),
      'difficult':      (0, 0, 0),
      'gametocyte':     (0, 255, 255)}
# Class names indexed by class id.
dict2=list(dict)

# Network input size (pixels) and YOLO grid resolution.
largeur=256
hauteur=192
cellule_x=16
cellule_y=12
nbr_classes=len(dict)
# Pixel size of one grid cell.
r_x=largeur//cellule_x
r_y=hauteur//cellule_y
max_objet=60

# Anchor boxes (width, height) in grid-cell units.
anchors=np.array([[3.0, 1.5], [2.0, 2.0], [1.5, 3.0]])
nbr_boxes=len(anchors)

batch_size=16

# Loss weights (YOLO paper values); alternative neutral weights kept below.
lambda_coord=5
lambda_noobj=0.5
#lambda_coord=1
#lambda_noobj=1

seuil_iou_loss=0.6
|
||||
16
Tensorflow/tutoriel27/images.py
Normal file
@@ -0,0 +1,16 @@
|
||||
import cv2
|
||||
import numpy as np
|
||||
import common
|
||||
import config
|
||||
|
||||
# Visual sanity check: load 10 augmentations of each of the first 10 training
# images, shuffle, and display each with its ground-truth boxes drawn.
images, labels, labels2=common.read_json('training.json', 10, 10)
index=np.random.permutation(len(images))
images=images[index]
labels=labels[index]

for i in range(len(images)):
    # grille=False: draw the boxes only, not the cell grid.
    image=common.prepare_image(images[i], labels[i], False)
    cv2.imshow("image", cv2.resize(image, (2*config.largeur, 2*config.hauteur)))
    # Any key advances to the next image; 'q' stops the loop.
    if cv2.waitKey()&0xFF==ord('q'):
        break
|
||||
|
||||
63
Tensorflow/tutoriel27/inference.py
Normal file
@@ -0,0 +1,63 @@
|
||||
import tensorflow as tf
|
||||
import sys
|
||||
import time
|
||||
import cv2
|
||||
import numpy as np
|
||||
import math
|
||||
import common
|
||||
import config
|
||||
import model
|
||||
|
||||
# Load a small evaluation batch (5 augmentations per image) and shuffle it.
images, labels, labels2=common.read_json('test.json', 5)
images=np.array(images, dtype=np.float32)/255
labels=np.array(labels, dtype=np.float32)
index=np.random.permutation(len(images))
images=images[index]
labels=labels[index]

# Rebuild the detector and restore the latest checkpoint.
model=model.model(config.nbr_classes, config.nbr_boxes, config.cellule_y, config.cellule_x)

checkpoint=tf.train.Checkpoint(model=model)
checkpoint.restore(tf.train.latest_checkpoint("./training/"))

# Cell-coordinate grid used to decode the x/y offsets (single image, no batch dim).
grid=np.meshgrid(np.arange(config.cellule_x, dtype=np.float32), np.arange(config.cellule_y, dtype=np.float32))
grid=np.expand_dims(np.stack(grid, axis=-1), axis=2)
grid=np.tile(grid, (1, 1, config.nbr_boxes, 1))

for i in range(len(images)):
    # Reference image with the ground-truth boxes drawn.
    img=common.prepare_image(images[i], labels[i], False)
    predictions=model(np.array([images[i]]))

    pred_boxes=predictions[0, :, :, :, 0:4]
    pred_conf=common.sigmoid(predictions[0, :, :, :, 4])
    pred_classes=common.softmax(predictions[0, :, :, :, 5:])
    ids=np.argmax(pred_classes, axis=-1)

    # Decode to pixel-space centres and sizes.
    x_center=((grid[:, :, :, 0]+common.sigmoid(pred_boxes[:, :, :, 0]))*config.r_x)
    y_center=((grid[:, :, :, 1]+common.sigmoid(pred_boxes[:, :, :, 1]))*config.r_y)
    w=(np.exp(pred_boxes[:, :, :, 2])*config.anchors[:, 0]*config.r_x)
    h=(np.exp(pred_boxes[:, :, :, 3])*config.anchors[:, 1]*config.r_y)

    x_min=(x_center-w/2).astype(np.int32)
    y_min=(y_center-h/2).astype(np.int32)
    x_max=(x_center+w/2).astype(np.int32)
    y_max=(y_center+h/2).astype(np.int32)

    # NOTE(review): tab_boxes and conf are never used in this script
    # (the NMS variant lives in the tutoriel27-2 version).
    tab_boxes=[]
    conf=[]
    # Draw every raw detection above the 10% confidence threshold.
    for y in range(config.cellule_y):
        for x in range(config.cellule_x):
            for b in range(config.nbr_boxes):
                if pred_conf[y, x, b]>0.10:
                    color=list(config.dict.values())[ids[y, x, b]]
                    cv2.circle(images[i], (x_center[y, x, b], y_center[y, x, b]), 1, color, 2)
                    cv2.rectangle(images[i], (x_min[y, x, b], y_min[y, x, b]), (x_max[y, x, b], y_max[y, x, b]), color, 1)
                    # Filled banner above the box, then the confidence text.
                    cv2.rectangle(images[i], (x_min[y, x, b], y_min[y, x, b]), (x_max[y, x, b], y_min[y, x, b]-15), color, cv2.FILLED)
                    cv2.putText(images[i], "{:3.0%}".format(pred_conf[y, x, b]), (x_min[y, x, b], y_min[y, x, b]-5), cv2.FONT_HERSHEY_COMPLEX_SMALL , 0.5, (255, 255, 255), 1)

    cv2.imshow("Inference", images[i])
    cv2.imshow("Bonne reponse", img)

    # Any key advances; 'q' exits the program.
    key=cv2.waitKey()&0xFF
    if key==ord('q'):
        quit()
|
||||
54
Tensorflow/tutoriel27/model.py
Normal file
@@ -0,0 +1,54 @@
|
||||
import tensorflow as tf
|
||||
from tensorflow.keras import layers, models
|
||||
import config
|
||||
|
||||
def block_resnet(input, filters, kernel_size, reduce=False):
    """Residual block: two convolutions plus a shortcut connection.

    input: feature tensor (the parameter name shadows the builtin; kept
    for interface compatibility).
    reduce: when True the second convolution and the shortcut use stride 2,
    halving the spatial resolution.
    The shortcut is the identity only when neither the channel count nor
    the resolution changes; otherwise a 1x1 convolution projects the input.
    """
    result=layers.Conv2D(filters, kernel_size, strides=1, padding='SAME')(input)
    result=layers.BatchNormalization()(result)
    result=layers.LeakyReLU(alpha=0.1)(result)

    result=layers.Conv2D(filters, kernel_size, strides=2 if reduce else 1, padding='SAME')(result)

    # The original duplicated the stride-2 projection in both branches of the
    # channel-count test; collapsed here into one three-way choice with
    # identical behavior.
    if reduce:
        shortcut=layers.Conv2D(filters, 1, strides=2, padding='SAME')(input)
    elif input.shape[-1]!=filters:
        shortcut=layers.Conv2D(filters, 1, strides=1, padding='SAME')(input)
    else:
        shortcut=input

    result=layers.add([result, shortcut])
    result=layers.LeakyReLU(alpha=0.1)(result)
    result=layers.BatchNormalization()(result)
    return result
|
||||
|
||||
def model(nbr_classes, nbr_boxes, cellule_y, cellule_x):
    """Build the YOLO-style detector.

    Returns a Keras model mapping an image of shape (hauteur, largeur, 3)
    to a (cellule_y, cellule_x, nbr_boxes, 5+nbr_classes) prediction grid.
    """
    # BUG FIX: the input was declared (largeur, hauteur, 3) = (256, 192, 3),
    # but the images produced by common.prepare_labels are OpenCV arrays of
    # shape (hauteur, largeur, 3) = (192, 256, 3). With the original order
    # the four stride-2 stages yield a (16, 12) feature map that the
    # (cellule_y=12, cellule_x=16) reshape would silently scramble.
    entree=layers.Input(shape=(config.hauteur, config.largeur, 3), dtype='float32')

    result=block_resnet(entree, 16, 3, False)
    result=block_resnet(result, 16, 3, True)

    result=block_resnet(result, 32, 3, False)
    result=block_resnet(result, 32, 3, True)

    result=block_resnet(result, 64, 3, False)
    result=block_resnet(result, 64, 3, False)
    result=block_resnet(result, 64, 3, True)

    result=block_resnet(result, 128, 3, False)
    result=block_resnet(result, 128, 3, False)
    result=block_resnet(result, 128, 3, True)

    # Detection head. Uses the function's parameters instead of re-reading
    # config (callers pass the config values, so behavior is unchanged, and
    # this matches the signature and the tutoriel27-3 version).
    result=layers.Conv2D(nbr_boxes*(5+nbr_classes), 1, padding='SAME')(result)
    sortie=layers.Reshape((cellule_y, cellule_x, nbr_boxes, 5+nbr_classes))(result)

    model=models.Model(inputs=entree, outputs=sortie)

    return model
|
||||
|
||||
117
Tensorflow/tutoriel27/train.py
Normal file
@@ -0,0 +1,117 @@
|
||||
import tensorflow as tf
|
||||
import sys
|
||||
import time
|
||||
import cv2
|
||||
import numpy as np
|
||||
import common
|
||||
import config
|
||||
import model
|
||||
|
||||
batch_size=16
|
||||
|
||||
images, labels, labels2=common.read_json('training.json', 20)
|
||||
images=np.array(images, dtype=np.float32)/255
|
||||
labels=np.array(labels, dtype=np.float32)
|
||||
index=np.random.permutation(len(images))
|
||||
images=images[index]
|
||||
labels=labels[index]
|
||||
|
||||
print("Nbr images:", len(images))
|
||||
|
||||
train_ds=tf.data.Dataset.from_tensor_slices((images, labels)).batch(batch_size)
|
||||
|
||||
def my_loss(labels, preds):
    """YOLO-style composite loss for one batch.

    labels: dense target grid (batch, cellule_y, cellule_x, nbr_boxes, 5+classes).
    preds:  raw network output of the same shape.
    NOTE(review): the no-object mask indexes the module-level `labels2` with
    the in-batch index `i`, so every batch is compared against the first
    len(preds) entries of labels2 — confirm this is intended.
    """
    # Cell-offset grid: turns the sigmoid x/y offsets into absolute grid coords.
    grid=tf.meshgrid(tf.range(config.cellule_x, dtype=tf.float32), tf.range(config.cellule_y, dtype=tf.float32))
    grid=tf.expand_dims(tf.stack(grid, axis=-1), axis=2)
    grid=tf.tile(grid, (1, 1, config.nbr_boxes, 1))

    # Decode the prediction tensor.
    preds_xy    =tf.math.sigmoid(preds[:, :, :, :, 0:2])+grid
    preds_wh    =preds[:, :, :, :, 2:4]
    preds_conf  =tf.math.sigmoid(preds[:, :, :, :, 4])
    preds_classe=tf.math.sigmoid(preds[:, :, :, :, 5:])

    # Corner form and area of the predicted boxes (grid units).
    preds_wh_half=preds_wh/2
    preds_xymin=preds_xy-preds_wh_half
    preds_xymax=preds_xy+preds_wh_half
    preds_areas=preds_wh[:, :, :, :, 0]*preds_wh[:, :, :, :, 1]

    # Ground-truth box list: corners and precomputed area.
    l2_xy_min=labels2[:, :, 0:2]
    l2_xy_max=labels2[:, :, 2:4]
    l2_area  =labels2[:, :, 4]

    # Extra axis so each prediction broadcasts against every true box.
    preds_xymin=tf.expand_dims(preds_xymin, 4)
    preds_xymax=tf.expand_dims(preds_xymax, 4)
    preds_areas=tf.expand_dims(preds_areas, 4)

    labels_xy=labels[:, :, :, :, 0:2]
    # YOLO w/h target: log of the size relative to the anchor; -inf (empty
    # cells divide 0 by the anchor) is replaced by 0.
    labels_wh=tf.math.log(labels[:, :, :, :, 2:4]/config.anchors)
    labels_wh=tf.where(tf.math.is_inf(labels_wh), tf.zeros_like(labels_wh), labels_wh)

    conf_mask_obj=labels[:, :, :, :, 4]     # 1 where a target box lives
    labels_classe=labels[:, :, :, :, 5:]

    # No-object mask: cells whose best IoU with any true box stays below the
    # threshold and which hold no target themselves.
    conf_mask_noobj=[]
    for i in range(len(preds)):
        xy_min=tf.maximum(preds_xymin[i], l2_xy_min[i])
        xy_max=tf.minimum(preds_xymax[i], l2_xy_max[i])
        intersect_wh=tf.maximum(xy_max-xy_min, 0.)
        intersect_areas=intersect_wh[..., 0]*intersect_wh[..., 1]
        union_areas=preds_areas[i]+l2_area[i]-intersect_areas
        ious=tf.truediv(intersect_areas, union_areas)
        best_ious=tf.reduce_max(ious, axis=3)
        conf_mask_noobj.append(tf.cast(best_ious<config.seuil_iou_loss, tf.float32)*(1-conf_mask_obj[i]))
    conf_mask_noobj=tf.stack(conf_mask_noobj)

    preds_x=preds_xy[..., 0]
    preds_y=preds_xy[..., 1]
    preds_w=preds_wh[..., 0]
    preds_h=preds_wh[..., 1]
    labels_x=labels_xy[..., 0]
    labels_y=labels_xy[..., 1]
    labels_w=labels_wh[..., 0]
    labels_h=labels_wh[..., 1]

    # Localisation terms, restricted to cells that own an object.
    loss_xy=tf.reduce_sum(conf_mask_obj*(tf.math.square(preds_x-labels_x)+tf.math.square(preds_y-labels_y)), axis=(1, 2, 3))
    loss_wh=tf.reduce_sum(conf_mask_obj*(tf.math.square(preds_w-labels_w)+tf.math.square(preds_h-labels_h)), axis=(1, 2, 3))

    # Confidence terms: push conf to 1 on objects, to 0 on confident misses.
    loss_conf_obj=tf.reduce_sum(conf_mask_obj*tf.math.square(preds_conf-conf_mask_obj), axis=(1, 2, 3))
    loss_conf_noobj=tf.reduce_sum(conf_mask_noobj*tf.math.square(preds_conf-conf_mask_obj), axis=(1, 2, 3))

    # Classification term (squared error against the one-hot target).
    loss_classe=tf.reduce_sum(tf.math.square(preds_classe-labels_classe), axis=4)
    loss_classe=tf.reduce_sum(conf_mask_obj*loss_classe, axis=(1, 2, 3))

    # Weighted sum per image (YOLO lambda weights from config).
    loss=config.lambda_coord*loss_xy+config.lambda_coord*loss_wh+loss_conf_obj+config.lambda_noobj*loss_conf_noobj+loss_classe
    return loss
|
||||
|
||||
model=model.model(config.nbr_classes, config.nbr_boxes, config.cellule_y, config.cellule_x)
|
||||
|
||||
@tf.function
def train_step(images, labels):
    """One optimisation step: forward, loss, backprop; updates train_loss."""
    with tf.GradientTape() as tape:
        predictions=model(images)
        loss=my_loss(labels, predictions)
    gradients=tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))
    # Accumulate the batch loss into the epoch-mean metric.
    train_loss(loss)
|
||||
|
||||
def train(train_ds, nbr_entrainement):
    """Run `nbr_entrainement` epochs over `train_ds`, checkpointing every 20."""
    message='Entrainement {:04d}: loss: {:6.4f}, temps: {:7.4f}'
    for num_epoque in range(nbr_entrainement):
        debut=time.time()
        for lot_images, lot_labels in train_ds:
            train_step(lot_images, lot_labels)
        print(message.format(num_epoque+1, train_loss.result(), time.time()-debut))
        # Periodic save (includes epoch 0).
        if num_epoque%20==0:
            checkpoint.save(file_prefix="./training/")
|
||||
|
||||
# Optimiser, epoch-mean loss metric and a single checkpoint object.
# (The original constructed tf.train.Checkpoint twice; the first instance
# was never used and has been removed.)
optimizer=tf.keras.optimizers.Adam(learning_rate=1E-4)
train_loss=tf.keras.metrics.Mean()

checkpoint=tf.train.Checkpoint(model=model)
checkpoint.restore(tf.train.latest_checkpoint("./training/"))

train(train_ds, 400)
checkpoint.save(file_prefix="./training/")
|
||||
7
Tensorflow/tutoriel30/README.md
Normal file
@@ -0,0 +1,7 @@
|
||||
# Tutoriel Keras
|
||||
## Réseau binaire avec Keras
|
||||
|
||||
La vidéo de ce tutoriel est disponible à l'adresse suivante: https://www.youtube.com/watch?v=YLhDlHhthIM
|
||||
|
||||
|
||||
|
||||
5
Tensorflow/tutoriel30/config.py
Normal file
@@ -0,0 +1,5 @@
|
||||
|
||||
# Edge size (pixels) of the square image fed to the classifier.
size=100

# Folders holding the captured training frames (Windows-style paths).
dir_neg=".\\images_negatives\\"
dir_pos=".\\images_positives\\"
|
||||
41
Tensorflow/tutoriel30/enregistrement.py
Normal file
@@ -0,0 +1,41 @@
|
||||
import cv2
|
||||
import numpy as np
|
||||
import os
|
||||
import config
|
||||
|
||||
# Keep one frame out of every `saut` while recording.
saut=10

# Destination class folder (negative by default; switch the comment to
# record positives). `dir` shadows the builtin of the same name.
dir=config.dir_neg
#dir=config.dir_pos
os.makedirs(dir, exist_ok=True)

# Resume numbering after the last image file already on disk.
id=0
while os.path.isfile(dir+"image-{:d}.png".format(id)):
    id+=1
id*=saut

cap=cv2.VideoCapture(0)
width=int(cap.get(3))   # property 3 = CAP_PROP_FRAME_WIDTH
enregistre=0            # recording toggle, flipped with the 'e' key
while True:
    ret, frame=cap.read()

    if enregistre:
        # Save every `saut`-th frame under a sequential name.
        if not id%saut:
            id_=int(id/saut)
            fichier=dir+"image-{:d}.png".format(id_)
            print("Création du fichier", fichier)
            cv2.imwrite(fichier, frame)
        id+=1

    # Status bar on top of the preview; red dot while recording.
    cv2.rectangle(frame, (0, 0), (width, 30), (100, 100, 100), cv2.FILLED)
    cv2.putText(frame, "[e] enregistrement repertoire: {} [q] quitter".format(dir), (10, 20), cv2.FONT_HERSHEY_PLAIN, 1, (255, 255, 255), 0)
    if enregistre:
        cv2.circle(frame, (width-20, 15), 5, (0, 0, 255), 8)

    cv2.imshow('Camera', frame)
    key=cv2.waitKey(1)&0xFF
    if key==ord('e'):
        enregistre=not enregistre
    if key==ord('q'):
        quit()
|
||||
24
Tensorflow/tutoriel30/inference.py
Normal file
@@ -0,0 +1,24 @@
|
||||
import tensorflow as tf
|
||||
import cv2
|
||||
import numpy as np
|
||||
import config
|
||||
|
||||
# Load the trained binary classifier from its SavedModel directory.
my_model=tf.keras.models.load_model('saved_model\\my_model')

cap=cv2.VideoCapture(0)
width=cap.get(3)    # property 3 = CAP_PROP_FRAME_WIDTH
height=cap.get(4)   # property 4 = CAP_PROP_FRAME_HEIGHT

while True:
    ret, frame=cap.read()
    # Resize to the network input size and normalise pixels to [0, 1].
    img=cv2.resize(frame, (config.size, config.size))/255
    img=np.array([img], dtype=np.float32)
    prediction=my_model.predict(img)
    # Confidence bar at the bottom: green above 0.3, red otherwise;
    # its length is proportional to the predicted score.
    if prediction[0][0]>0.3:
        color=(0, 255, 0)
    else:
        color=(0, 0, 255)
    cv2.rectangle(frame, (0, int(height)-30), (int(width*prediction[0][0]), int(height)), color, cv2.FILLED)
    cv2.imshow('Camera', frame)
    # 'q' exits the program.
    if cv2.waitKey(1)&0xFF==ord('q'):
        quit()
|
||||
40
Tensorflow/tutoriel30/model.py
Normal file
@@ -0,0 +1,40 @@
|
||||
from tensorflow.keras import layers, models
|
||||
|
||||
# Fonction d'activation à tester: sigmoid, tanh, relu,
|
||||
|
||||
def model(size, nbr_cc):
    """Build the binary-classification CNN.

    Three convolutional stages — each stage made of pairs of 3x3 'same'
    convolutions followed by batch normalisation, closed by a 2x2
    max-pooling — then a dense head with a single sigmoid output.

    size: input height/width in pixels (square RGB input).
    nbr_cc: base number of convolution channels; later stages use 2x/4x.
    """
    def conv_pair(x, filters):
        # Two 3x3 ReLU convolutions followed by batch normalisation.
        x = layers.Conv2D(filters, 3, activation='relu', padding='same')(x)
        x = layers.Conv2D(filters, 3, activation='relu', padding='same')(x)
        return layers.BatchNormalization()(x)

    entree = layers.Input(shape=(size, size, 3), dtype='float32')

    x = conv_pair(entree, nbr_cc)
    x = layers.MaxPool2D()(x)

    for _ in range(2):
        x = conv_pair(x, 2 * nbr_cc)
    x = layers.MaxPool2D()(x)

    for _ in range(4):
        x = conv_pair(x, 4 * nbr_cc)
    x = layers.MaxPool2D()(x)

    x = layers.Flatten()(x)
    x = layers.Dense(1024, activation='relu')(x)
    sortie = layers.Dense(1, activation='sigmoid')(x)

    return models.Model(inputs=entree, outputs=sortie)
|
||||
38
Tensorflow/tutoriel30/photo.py
Normal file
@@ -0,0 +1,38 @@
|
||||
import cv2
import numpy as np
import os
import config

# Interactive capture tool: press [p]/[n] to save the current webcam
# frame as a positive/negative example, [q] to quit.  File numbering
# resumes after any images already present in each directory.


def _save(directory, index, image):
    # Write one snapshot and announce it on stdout.
    fichier = directory + "image-{:d}.png".format(index)
    print("Création du fichier", fichier)
    cv2.imwrite(fichier, image)


os.makedirs(config.dir_pos, exist_ok=True)
id_pos = 0
while os.path.isfile(config.dir_pos + "image-{:d}.png".format(id_pos)):
    id_pos += 1

os.makedirs(config.dir_neg, exist_ok=True)
id_neg = 0
while os.path.isfile(config.dir_neg + "image-{:d}.png".format(id_neg)):
    id_neg += 1

cap = cv2.VideoCapture(0)
width = int(cap.get(3))

while True:
    ret, frame = cap.read()

    # On-screen help banner.
    cv2.rectangle(frame, (0, 0), (width, 30), (100, 100, 100), cv2.FILLED)
    cv2.putText(frame, "[p] photo positive [n] photo negative [q] quitter",
                (10, 20), cv2.FONT_HERSHEY_PLAIN, 1, (255, 255, 255), 1)

    cv2.imshow('Camera', frame)
    key = cv2.waitKey(1) & 0xFF
    if key == ord('p'):
        _save(config.dir_pos, id_pos, frame)
        id_pos += 1
    if key == ord('n'):
        _save(config.dir_neg, id_neg, frame)
        id_neg += 1
    if key == ord('q'):
        quit()
|
||||
50
Tensorflow/tutoriel30/train.py
Normal file
@@ -0,0 +1,50 @@
|
||||
import tensorflow as tf
import numpy as np
import glob
import cv2
import model
import config

# Training script: load the captured positive/negative snapshots,
# triple the dataset with horizontal then vertical flips, shuffle,
# and fit the CNN defined in model.py.

tab_images = []
tab_labels = []


def complete_dataset(files, value):
    # Append every image matching the glob pattern `files` — plus a
    # horizontally-flipped and a then-vertically-flipped variant —
    # to the module-level dataset with label `value`.
    for image in glob.glob(files):
        img = cv2.imread(image)
        img = cv2.resize(img, (config.size, config.size))
        for flip_code in (None, 1, 0):
            if flip_code is not None:
                img = cv2.flip(img, flip_code)  # cumulative: 2nd flip applies to the 1st
            tab_images.append(img)
            tab_labels.append([value])


complete_dataset(config.dir_pos + '\\*.png', 1.)
complete_dataset(config.dir_neg + '\\*.png', 0.)

tab_images = np.array(tab_images, dtype=np.float32) / 255
tab_labels = np.array(tab_labels, dtype=np.float32)

# Shuffle images and labels with one shared permutation.
index = np.random.permutation(len(tab_images))
tab_images = tab_images[index]
tab_labels = tab_labels[index]

model = model.model(config.size, 8)

model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=['accuracy'])
model.fit(tab_images,
          tab_labels,
          validation_split=0.05,
          batch_size=64,
          epochs=30)
model.save('saved_model\\my_model')
|
||||
5
Tensorflow/tutoriel32/README.md
Normal file
@@ -0,0 +1,5 @@
|
||||
# Tutoriel Keras
|
||||
## Classer de l'information avec keras (exemple avec le cancer de la peau)
|
||||
|
||||
La vidéo de ce tutoriel est disponible à l'adresse suivante: https://www.youtube.com/watch?v=_X_yOC7zh4c
|
||||
|
||||
43
Tensorflow/tutoriel32/model.py
Normal file
@@ -0,0 +1,43 @@
|
||||
from tensorflow.keras import layers, models
|
||||
|
||||
# Fonction d'activation à tester: sigmoid, tanh, relu,
|
||||
|
||||
def model(nbr_sortie, nbr_cc):
    """CNN classifier for 75x100 RGB images with a softmax head.

    nbr_sortie: number of output classes.
    nbr_cc: base number of convolution channels; later stages use 2x/4x.
    """
    def bloc(x, filters, kernel):
        # Two ReLU convolutions followed by batch normalisation.
        x = layers.Conv2D(filters, kernel, activation='relu', padding='same')(x)
        x = layers.Conv2D(filters, kernel, activation='relu', padding='same')(x)
        return layers.BatchNormalization()(x)

    entree = layers.Input(shape=(75, 100, 3), dtype='float32')

    # Stage 1: wider 5x5 kernels on the raw input.
    x = bloc(entree, nbr_cc, 5)
    x = layers.MaxPool2D()(x)

    # Stage 2: two conv blocks at 2x channels.
    x = bloc(x, 2 * nbr_cc, 3)
    x = bloc(x, 2 * nbr_cc, 3)
    x = layers.MaxPool2D()(x)

    # Stage 3: four conv blocks at 4x channels.
    for _ in range(4):
        x = bloc(x, 4 * nbr_cc, 3)
    x = layers.MaxPool2D()(x)

    x = layers.Flatten()(x)
    x = layers.Dense(1024, activation='relu')(x)
    x = layers.Dense(1024, activation='relu')(x)
    x = layers.BatchNormalization()(x)
    sortie = layers.Dense(nbr_sortie, activation='softmax')(x)

    return models.Model(inputs=entree, outputs=sortie)
|
||||
|
||||
95
Tensorflow/tutoriel32/predict.py
Normal file
@@ -0,0 +1,95 @@
|
||||
import random
|
||||
import tensorflow as tf
|
||||
import csv
|
||||
import numpy as np
|
||||
import cv2
|
||||
import model
|
||||
import os
|
||||
os.environ['CUDA_VISIBLE_DEVICES']='-1'
|
||||
|
||||
fichier='ISIC2018_Task3_Training_GroundTruth/ISIC2018_Task3_Training_GroundTruth.csv'
|
||||
dir_images='ISIC2018_Task3_Training_Input/'
|
||||
|
||||
labels=['Melanoma',
|
||||
'Melanocytic nevus',
|
||||
'Basal cell carcinoma',
|
||||
'Actinic keratosis',
|
||||
'Benign keratosis',
|
||||
'Dermatofibroma',
|
||||
'Vascular lesion']
|
||||
|
||||
tab_images=[]
|
||||
tab_labels=[]
|
||||
|
||||
def rotateImage(image, angle):
    """Rotate `image` by `angle` degrees about its centre, same output size."""
    taille = image.shape[1::-1]            # (width, height) as OpenCV expects
    centre = tuple(np.array(taille) / 2)
    matrice = cv2.getRotationMatrix2D(centre, angle, 1.0)
    return cv2.warpAffine(image, matrice, taille, flags=cv2.INTER_LINEAR)
|
||||
|
||||
def bruit(image):
    """Return a copy of `image` with additive Gaussian noise.

    The noise standard deviation is drawn uniformly from [5, 30]; the
    result is clipped back to the valid uint8 range.
    """
    hauteur, largeur, canaux = image.shape
    amplitude = random.randint(5, 30)
    noise = np.random.randn(hauteur, largeur, canaux) * amplitude
    return np.clip(image + noise, 0, 255).astype(np.uint8)
|
||||
|
||||
# Rebuild the augmented dataset the same way train.py does, then replay
# the trained network's predictions image by image for visual inspection.
with open(fichier, newline='') as csvfile:
    lignes = csv.reader(csvfile, delimiter=',')
    next(lignes, None)  # skip the CSV header row
    for ligne in lignes:
        # Columns: image id, then 7 one-hot class indicators.
        label = np.array(ligne[1:], dtype=np.float32)
        img = cv2.imread(dir_images + ligne[0] + '.jpg')
        # Fix: cv2.imread returns None on a missing/unreadable file, so this
        # check must run BEFORE cv2.resize (which would raise on None and
        # made the original check unreachable).  Message aligned with train.py.
        if img is None:
            print("Image absente", dir_images + ligne[0] + '.jpg')
            quit()
        img = cv2.resize(img, (100, 75))
        tab_labels.append(label)
        tab_images.append(img)

        # Class 1 gets no augmentation — presumably because it is the
        # abundant class; mirrors train.py.  TODO confirm with dataset stats.
        if label[1]:
            continue

        # Augment the remaining classes with rotations and flips; `flag`
        # throttles how many extras classes 0 and 4 receive.
        flag = 0
        for angle in range(0, 360, 30):
            img_r = rotateImage(img, angle)

            if label[2] or label[3] or label[5] or label[6]:
                tab_labels.append(label)
                tab_images.append(cv2.flip(img_r, 0))

            if not flag % 3 and (label[0] or label[4]):
                tab_labels.append(label)
                tab_images.append(cv2.flip(img_r, 0))
            flag += 1

            if label[2] or label[3] or label[5] or label[6]:
                tab_labels.append(label)
                tab_images.append(cv2.flip(img_r, 1))

            if label[5] or label[6]:
                tab_labels.append(label)
                tab_images.append(cv2.flip(img_r, -1))

tab_labels = np.array(tab_labels, dtype=np.float32)
tab_images = np.array(tab_images, dtype=np.float32) / 255

# Shuffle images and labels with one shared permutation.
indices = np.random.permutation(len(tab_labels))
tab_labels = tab_labels[indices]
tab_images = tab_images[indices]

# Per-class sample counts after augmentation.
print("SOMME", np.sum(tab_labels, axis=0))

model = tf.keras.models.load_model('my_model/')

# Show each image with ground truth vs. prediction; 'q' stops the review.
for i in range(len(tab_images)):
    cv2.imshow("image", tab_images[i])
    prediction = model.predict(np.array([tab_images[i]], dtype=np.float32))
    print("Bonne reponse:{}, Reponse du réseau:{}".format(labels[np.argmax(tab_labels[i])], labels[np.argmax(prediction[0])]))
    key = cv2.waitKey() & 0xFF
    if key == ord('q'):
        break

cv2.destroyAllWindows()
|
||||
82
Tensorflow/tutoriel32/train.py
Normal file
@@ -0,0 +1,82 @@
|
||||
import random
|
||||
import tensorflow as tf
|
||||
import csv
|
||||
import numpy as np
|
||||
import cv2
|
||||
import model
|
||||
|
||||
fichier='ISIC2018_Task3_Training_GroundTruth/ISIC2018_Task3_Training_GroundTruth.csv'
|
||||
dir_images='ISIC2018_Task3_Training_Input/'
|
||||
|
||||
tab_images=[]
|
||||
tab_labels=[]
|
||||
|
||||
def rotateImage(image, angle):
    """Rotate `image` by `angle` degrees about its centre, same output size."""
    taille = image.shape[1::-1]            # (width, height) as OpenCV expects
    centre = tuple(np.array(taille) / 2)
    matrice = cv2.getRotationMatrix2D(centre, angle, 1.0)
    return cv2.warpAffine(image, matrice, taille, flags=cv2.INTER_LINEAR)
|
||||
|
||||
# Load the ISIC ground-truth CSV, augment the rarer classes with
# rotations and flips, shuffle, then train the CNN from model.py.
with open(fichier, newline='') as csvfile:
    lignes = csv.reader(csvfile, delimiter=',')
    next(lignes, None)  # skip the CSV header row
    for ligne in lignes:
        # Columns: image id, then 7 one-hot class indicators.
        label = np.array(ligne[1:], dtype=np.float32)
        img = cv2.imread(dir_images + ligne[0] + '.jpg')
        if img is None:
            print("Image absente", dir_images + ligne[0] + '.jpg')
            quit()
        img = cv2.resize(img, (100, 75))
        tab_labels.append(label)
        tab_images.append(img)

        # Class 1 gets no augmentation — presumably the abundant class;
        # TODO confirm with dataset statistics.
        if label[1]:
            continue

        # `flag` throttles how many extra samples classes 0 and 4 receive.
        flag = 0
        for angle in range(0, 360, 30):
            img_r = rotateImage(img, angle)

            if label[2] or label[3] or label[5] or label[6]:
                tab_labels.append(label)
                tab_images.append(cv2.flip(img_r, 0))

            if not flag % 3 and (label[0] or label[4]):
                tab_labels.append(label)
                tab_images.append(cv2.flip(img_r, 0))
            flag += 1

            if label[2] or label[3] or label[5] or label[6]:
                tab_labels.append(label)
                tab_images.append(cv2.flip(img_r, 1))

            if label[5] or label[6]:
                tab_labels.append(label)
                tab_images.append(cv2.flip(img_r, -1))

tab_labels = np.array(tab_labels, dtype=np.float32)
tab_images = np.array(tab_images, dtype=np.float32) / 255

# Shuffle images and labels with one shared permutation.
indices = np.random.permutation(len(tab_labels))
tab_labels = tab_labels[indices]
tab_images = tab_images[indices]

# Per-class sample counts after augmentation.
print("SOMME", np.sum(tab_labels, axis=0))

model = model.model(7, 8)

optimizer = tf.keras.optimizers.Adam(learning_rate=1E-4)

model.compile(optimizer=optimizer,
              loss='categorical_crossentropy',
              metrics=['accuracy'])
model.fit(tab_images,
          tab_labels,
          validation_split=0.05,
          batch_size=16,
          epochs=30)
model.save('my_model/')
|
||||
5
Tensorflow/tutoriel33/README.md
Normal file
@@ -0,0 +1,5 @@
|
||||
# Tutoriel Keras
|
||||
## Utilisation des Callbacks
|
||||
|
||||
La vidéo de ce tutoriel est disponible à l'adresse suivante: https://www.youtube.com/watch?v=_cBmcp5zi2w
|
||||
|
||||
68
Tensorflow/tutoriel33/graph.py
Normal file
@@ -0,0 +1,68 @@
|
||||
import matplotlib.pyplot as plt
|
||||
import matplotlib.ticker as mtick
|
||||
import csv
|
||||
import sys
|
||||
import numpy as np
|
||||
|
||||
if len(sys.argv)!=2:
|
||||
print("Usage:", sys.argv[0], "<fichier csv>")
|
||||
quit()
|
||||
fichier=sys.argv[1]
|
||||
|
||||
def calc(tab_data, fenetre):
    """Sliding-window means of `tab_data` with window length `fenetre`.

    Returns len(tab_data) - fenetre values; the window starting at the
    final valid position is not produced (kept identical to the original
    behaviour).
    """
    return [np.mean(tab_data[debut:debut + fenetre])
            for debut in range(len(tab_data) - fenetre)]
|
||||
|
||||
# Parse a Keras CSVLogger file and plot accuracy/loss (top) and their
# validation counterparts (bottom), each with a twin loss axis.
x = []
accuracy = []
loss = []
val_accuracy = []
val_loss = []
fenetre = 50

# `val` flags whether the log contained validation columns.
# NOTE(review): it is set but never read below — kept for compatibility.
val = 0
with open(fichier, 'r') as csvfile:
    plots = csv.reader(csvfile, delimiter=',')
    next(plots)  # skip header
    for row in plots:
        x.append(float(row[0]))
        accuracy.append(float(row[1]))
        loss.append(float(row[2]))
        if len(row) == 5:
            val_accuracy.append(float(row[3]))
            val_loss.append(float(row[4]))
            val = 1


fig, (ax1, ax2) = plt.subplots(2)
fig.set_size_inches(9, 7, forward=True)

# Top plot: training accuracy (left axis, percent) and loss (right axis).
ax1.set_ylim([0, 1.0])
ax1.grid(which='both')
ax1.yaxis.set_major_formatter(mtick.PercentFormatter(1.0))
ln = ax1.plot(x, accuracy, label='Accuracy')

ax1_ = ax1.twinx()
ax1_.set_ylim([0.0, 2.0])
ln_ = ax1_.plot(x, loss, label='Loss', color='red')

lns = ln + ln_
labs = [courbe.get_label() for courbe in lns]

# Bottom plot: validation metrics with the same axis layout.
ax2.set_ylim([0, 1.0])
ax2.grid(which='both')
ax2.yaxis.set_major_formatter(mtick.PercentFormatter(1.0))
ln = ax2.plot(x, val_accuracy, label='Val accuracy')

ax2_ = ax2.twinx()
ax2_.set_ylim([0.0, 2.0])
ln_ = ax2_.plot(x, val_loss, label='Val loss', color='red')

lns = ln + ln_
labs = [courbe.get_label() for courbe in lns]
ax2.legend(lns, labs, loc='upper center', bbox_to_anchor=(0.5, -0.1), fancybox=True, shadow=True, ncol=5)

plt.show()
|
||||
78
Tensorflow/tutoriel33/model.py
Normal file
@@ -0,0 +1,78 @@
|
||||
from tensorflow.keras import layers, models
|
||||
|
||||
# Fonction d'activation à tester: sigmoid, tanh, relu,
|
||||
|
||||
def model(nbr_sortie, nbr_cc):
    """CNN classifier for 75x100 RGB images with a softmax head.

    nbr_sortie: number of output classes.
    nbr_cc: base number of convolution channels; later stages use 2x/4x.
    """
    def bloc(x, filters, kernel):
        # Two ReLU convolutions followed by batch normalisation.
        x = layers.Conv2D(filters, kernel, activation='relu', padding='same')(x)
        x = layers.Conv2D(filters, kernel, activation='relu', padding='same')(x)
        return layers.BatchNormalization()(x)

    entree = layers.Input(shape=(75, 100, 3), dtype='float32')

    # Stage 1: wider 5x5 kernels on the raw input.
    x = bloc(entree, nbr_cc, 5)
    x = layers.MaxPool2D()(x)

    # Stage 2: two conv blocks at 2x channels.
    x = bloc(x, 2 * nbr_cc, 3)
    x = bloc(x, 2 * nbr_cc, 3)
    x = layers.MaxPool2D()(x)

    # Stage 3: four conv blocks at 4x channels.
    for _ in range(4):
        x = bloc(x, 4 * nbr_cc, 3)
    x = layers.MaxPool2D()(x)

    x = layers.Flatten()(x)
    x = layers.Dense(1024, activation='relu')(x)
    x = layers.Dense(1024, activation='relu')(x)
    x = layers.BatchNormalization()(x)
    sortie = layers.Dense(nbr_sortie, activation='softmax')(x)

    return models.Model(inputs=entree, outputs=sortie)
|
||||
|
||||
def model2(nbr_sortie, nbr_cc):
    """Variant of model() that downsamples with strided convolutions
    instead of max-pooling layers.

    nbr_sortie: number of output classes.
    nbr_cc: base number of convolution channels; later stages use 2x/4x.
    """
    def bloc(x, filters, kernel, strides=1):
        # Conv pair + batch-norm; the second convolution may downsample.
        x = layers.Conv2D(filters, kernel, activation='relu', padding='same')(x)
        x = layers.Conv2D(filters, kernel, activation='relu', padding='same',
                          strides=strides)(x)
        return layers.BatchNormalization()(x)

    entree = layers.Input(shape=(75, 100, 3), dtype='float32')

    x = bloc(entree, nbr_cc, 5, strides=2)

    x = bloc(x, 2 * nbr_cc, 3)
    x = bloc(x, 2 * nbr_cc, 3, strides=2)

    x = bloc(x, 4 * nbr_cc, 3)
    x = bloc(x, 4 * nbr_cc, 3)
    x = bloc(x, 4 * nbr_cc, 3)
    x = bloc(x, 4 * nbr_cc, 3, strides=2)

    x = layers.Flatten()(x)
    x = layers.Dense(1024, activation='relu')(x)
    x = layers.Dense(1024, activation='relu')(x)
    x = layers.BatchNormalization()(x)
    sortie = layers.Dense(nbr_sortie, activation='softmax')(x)

    return models.Model(inputs=entree, outputs=sortie)
|
||||
93
Tensorflow/tutoriel33/predict.py
Normal file
@@ -0,0 +1,93 @@
|
||||
import random
|
||||
import tensorflow as tf
|
||||
import csv
|
||||
import numpy as np
|
||||
import cv2
|
||||
import model
|
||||
|
||||
fichier='ISIC2018_Task3_Training_GroundTruth/ISIC2018_Task3_Training_GroundTruth.csv'
|
||||
dir_images='ISIC2018_Task3_Training_Input/'
|
||||
|
||||
labels=['Melanoma',
|
||||
'Melanocytic nevus',
|
||||
'Basal cell carcinoma',
|
||||
'Actinic keratosis',
|
||||
'Benign keratosis',
|
||||
'Dermatofibroma',
|
||||
'Vascular lesion']
|
||||
|
||||
tab_images=[]
|
||||
tab_labels=[]
|
||||
|
||||
def rotateImage(image, angle):
    """Rotate `image` by `angle` degrees about its centre, same output size."""
    taille = image.shape[1::-1]            # (width, height) as OpenCV expects
    centre = tuple(np.array(taille) / 2)
    matrice = cv2.getRotationMatrix2D(centre, angle, 1.0)
    return cv2.warpAffine(image, matrice, taille, flags=cv2.INTER_LINEAR)
|
||||
|
||||
def bruit(image):
    """Return a copy of `image` with additive Gaussian noise.

    The noise standard deviation is drawn uniformly from [5, 30]; the
    result is clipped back to the valid uint8 range.
    """
    hauteur, largeur, canaux = image.shape
    amplitude = random.randint(5, 30)
    noise = np.random.randn(hauteur, largeur, canaux) * amplitude
    return np.clip(image + noise, 0, 255).astype(np.uint8)
|
||||
|
||||
# Rebuild the augmented dataset the same way train.py does, then replay
# the trained network's predictions image by image for visual inspection.
with open(fichier, newline='') as csvfile:
    lignes = csv.reader(csvfile, delimiter=',')
    next(lignes, None)  # skip the CSV header row
    for ligne in lignes:
        # Columns: image id, then 7 one-hot class indicators.
        label = np.array(ligne[1:], dtype=np.float32)
        img = cv2.imread(dir_images + ligne[0] + '.jpg')
        # Fix: cv2.imread returns None on a missing/unreadable file, so this
        # check must run BEFORE cv2.resize (which would raise on None and
        # made the original check unreachable).  Message aligned with train.py.
        if img is None:
            print("Image absente", dir_images + ligne[0] + '.jpg')
            quit()
        img = cv2.resize(img, (100, 75))
        tab_labels.append(label)
        tab_images.append(img)

        # Class 1 gets no augmentation — presumably because it is the
        # abundant class; mirrors train.py.  TODO confirm with dataset stats.
        if label[1]:
            continue

        # Augment the remaining classes with rotations and flips; `flag`
        # throttles how many extras classes 0 and 4 receive.
        flag = 0
        for angle in range(0, 360, 30):
            img_r = rotateImage(img, angle)

            if label[2] or label[3] or label[5] or label[6]:
                tab_labels.append(label)
                tab_images.append(cv2.flip(img_r, 0))

            if not flag % 3 and (label[0] or label[4]):
                tab_labels.append(label)
                tab_images.append(cv2.flip(img_r, 0))
            flag += 1

            if label[2] or label[3] or label[5] or label[6]:
                tab_labels.append(label)
                tab_images.append(cv2.flip(img_r, 1))

            if label[5] or label[6]:
                tab_labels.append(label)
                tab_images.append(cv2.flip(img_r, -1))

tab_labels = np.array(tab_labels, dtype=np.float32)
tab_images = np.array(tab_images, dtype=np.float32) / 255

# Shuffle images and labels with one shared permutation.
indices = np.random.permutation(len(tab_labels))
tab_labels = tab_labels[indices]
tab_images = tab_images[indices]

# Per-class sample counts after augmentation.
print("SOMME", np.sum(tab_labels, axis=0))

model = tf.keras.models.load_model('my_model/')

# Show each image with ground truth vs. prediction; 'q' stops the review.
for i in range(len(tab_images)):
    cv2.imshow("image", tab_images[i])
    prediction = model.predict(np.array([tab_images[i]], dtype=np.float32))
    print("Bonne reponse:{}, Reponse du réseau:{}".format(labels[np.argmax(tab_labels[i])], labels[np.argmax(prediction[0])]))
    key = cv2.waitKey() & 0xFF
    if key == ord('q'):
        break

cv2.destroyAllWindows()
|
||||
87
Tensorflow/tutoriel33/train.py
Normal file
@@ -0,0 +1,87 @@
|
||||
import random
|
||||
import tensorflow as tf
|
||||
import csv
|
||||
import numpy as np
|
||||
import cv2
|
||||
import model
|
||||
|
||||
fichier='ISIC2018_Task3_Training_GroundTruth/ISIC2018_Task3_Training_GroundTruth.csv'
|
||||
dir_images='ISIC2018_Task3_Training_Input/'
|
||||
|
||||
tab_images=[]
|
||||
tab_labels=[]
|
||||
|
||||
def rotateImage(image, angle):
    """Rotate `image` by `angle` degrees about its centre, same output size."""
    taille = image.shape[1::-1]            # (width, height) as OpenCV expects
    centre = tuple(np.array(taille) / 2)
    matrice = cv2.getRotationMatrix2D(centre, angle, 1.0)
    return cv2.warpAffine(image, matrice, taille, flags=cv2.INTER_LINEAR)
|
||||
|
||||
# Load the ISIC ground-truth CSV, augment the rarer classes with
# rotations and flips, shuffle, then train with a CSVLogger callback.
with open(fichier, newline='') as csvfile:
    lignes = csv.reader(csvfile, delimiter=',')
    next(lignes, None)  # skip the CSV header row
    for ligne in lignes:
        # Columns: image id, then 7 one-hot class indicators.
        label = np.array(ligne[1:], dtype=np.float32)
        img = cv2.imread(dir_images + ligne[0] + '.jpg')
        if img is None:
            print("Image absente", dir_images + ligne[0] + '.jpg')
            quit()
        img = cv2.resize(img, (100, 75))
        tab_labels.append(label)
        tab_images.append(img)

        # Class 1 gets no augmentation — presumably the abundant class;
        # TODO confirm with dataset statistics.
        if label[1]:
            continue

        # `flag` throttles how many extra samples classes 0 and 4 receive.
        flag = 0
        for angle in range(0, 360, 30):
            img_r = rotateImage(img, angle)

            if label[2] or label[3] or label[5] or label[6]:
                tab_labels.append(label)
                tab_images.append(cv2.flip(img_r, 0))

            if not flag % 3 and (label[0] or label[4]):
                tab_labels.append(label)
                tab_images.append(cv2.flip(img_r, 0))
            flag += 1

            if label[2] or label[3] or label[5] or label[6]:
                tab_labels.append(label)
                tab_images.append(cv2.flip(img_r, 1))

            if label[5] or label[6]:
                tab_labels.append(label)
                tab_images.append(cv2.flip(img_r, -1))

tab_labels = np.array(tab_labels, dtype=np.float32)
tab_images = np.array(tab_images, dtype=np.float32) / 255

# Shuffle images and labels with one shared permutation.
indices = np.random.permutation(len(tab_labels))
tab_labels = tab_labels[indices]
tab_images = tab_images[indices]

# Per-class sample counts after augmentation.
print("SOMME", np.sum(tab_labels, axis=0))

model = model.model(7, 8)
# Other optimizers tried during the tutorial: RMSprop, SGD (both at 1E-4).
optimizer = tf.keras.optimizers.Adam(learning_rate=1E-4)
csv_logger = tf.keras.callbacks.CSVLogger('training.log')

model.compile(optimizer=optimizer,
              loss='categorical_crossentropy',
              metrics=['accuracy'])

model.fit(tab_images,
          tab_labels,
          validation_split=0.05,
          batch_size=64,
          epochs=300,
          callbacks=[csv_logger])
|
||||
|
||||
|
||||
90
Tensorflow/tutoriel33/train2.py
Normal file
@@ -0,0 +1,90 @@
|
||||
import random
|
||||
import tensorflow as tf
|
||||
import csv
|
||||
import numpy as np
|
||||
import cv2
|
||||
import model
|
||||
|
||||
fichier='ISIC2018_Task3_Training_GroundTruth/ISIC2018_Task3_Training_GroundTruth.csv'
|
||||
dir_images='ISIC2018_Task3_Training_Input/'
|
||||
|
||||
tab_images=[]
|
||||
tab_labels=[]
|
||||
|
||||
def rotateImage(image, angle):
    """Rotate `image` by `angle` degrees about its centre, same output size."""
    taille = image.shape[1::-1]            # (width, height) as OpenCV expects
    centre = tuple(np.array(taille) / 2)
    matrice = cv2.getRotationMatrix2D(centre, angle, 1.0)
    return cv2.warpAffine(image, matrice, taille, flags=cv2.INTER_LINEAR)
|
||||
|
||||
# Load the ISIC ground-truth CSV, augment the rarer classes with
# rotations and flips, shuffle, and prepare model/optimizer/logger.
with open(fichier, newline='') as csvfile:
    lignes = csv.reader(csvfile, delimiter=',')
    next(lignes, None)  # skip the CSV header row
    for ligne in lignes:
        # Columns: image id, then 7 one-hot class indicators.
        label = np.array(ligne[1:], dtype=np.float32)
        img = cv2.imread(dir_images + ligne[0] + '.jpg')
        if img is None:
            print("Image absente", dir_images + ligne[0] + '.jpg')
            quit()
        img = cv2.resize(img, (100, 75))
        tab_labels.append(label)
        tab_images.append(img)

        # Class 1 gets no augmentation — presumably the abundant class;
        # TODO confirm with dataset statistics.
        if label[1]:
            continue

        # `flag` throttles how many extra samples classes 0 and 4 receive.
        flag = 0
        for angle in range(0, 360, 30):
            img_r = rotateImage(img, angle)

            if label[2] or label[3] or label[5] or label[6]:
                tab_labels.append(label)
                tab_images.append(cv2.flip(img_r, 0))

            if not flag % 3 and (label[0] or label[4]):
                tab_labels.append(label)
                tab_images.append(cv2.flip(img_r, 0))
            flag += 1

            if label[2] or label[3] or label[5] or label[6]:
                tab_labels.append(label)
                tab_images.append(cv2.flip(img_r, 1))

            if label[5] or label[6]:
                tab_labels.append(label)
                tab_images.append(cv2.flip(img_r, -1))

tab_labels = np.array(tab_labels, dtype=np.float32)
tab_images = np.array(tab_images, dtype=np.float32) / 255

# Shuffle images and labels with one shared permutation.
indices = np.random.permutation(len(tab_labels))
tab_labels = tab_labels[indices]
tab_images = tab_images[indices]

# Per-class sample counts after augmentation.
print("SOMME", np.sum(tab_labels, axis=0))

model = model.model(7, 8)
optimizer = tf.keras.optimizers.RMSprop(learning_rate=1E-4)
csv_logger = tf.keras.callbacks.CSVLogger('training.log')
|
||||
|
||||
class my_callback(tf.keras.callbacks.Callback):
    """Periodic checkpointing: save the model every 10th epoch once
    training has passed epoch 30, each snapshot in its own directory."""

    def on_epoch_end(self, epoch, logs=None):
        # NOTE(review): saves the module-level `model`, not self.model —
        # works here only because the callback is attached to that model.
        if epoch>=30 and not epoch%10:
            model.save('my_model/{:d}'.format(epoch))
|
||||
|
||||
# Compile and train; logs per-epoch metrics to training.log (CSVLogger)
# and checkpoints periodically via my_callback.
model.compile(optimizer=optimizer,
              loss='categorical_crossentropy',
              metrics=['accuracy'])

model.fit(tab_images,
          tab_labels,
          validation_split=0.05,
          batch_size=64,
          epochs=300,
          callbacks=[csv_logger, my_callback()])
|
||||
|
||||
|
||||
5
Tensorflow/tutoriel35/README.md
Normal file
@@ -0,0 +1,5 @@
|
||||
# Tutoriel Tensorflow
|
||||
## GAN conditionnel
|
||||
|
||||
La vidéo de ce tutoriel est disponible à l'adresse suivante: https://www.youtube.com/watch?v=9Je5f8UwQ98
|
||||
|
||||
102
Tensorflow/tutoriel35/gan.py
Normal file
@@ -0,0 +1,102 @@
|
||||
import tensorflow as tf
|
||||
import glob
|
||||
import numpy as np
|
||||
import os
|
||||
from tensorflow.keras import layers, models
|
||||
import time
|
||||
import cv2
|
||||
import model
|
||||
|
||||
# Hyper-parameters and output locations.
batch_size = 256
epochs = 500
noise_dim = 100                    # latent vector length fed to the generator
tab_size = 5                       # sample grid is tab_size x tab_size images
num_examples_to_generate = tab_size * tab_size
dir_images = 'images_gan'
checkpoint_dir = './training_checkpoints_gan'
checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt")

if not os.path.isdir(dir_images):
    os.mkdir(dir_images)

(train_images, train_labels), (test_images, test_labels) = tf.keras.datasets.mnist.load_data()

# Normalise pixels from [0, 255] to [-1, 1].
train_images = train_images.reshape(-1, 28, 28, 1).astype('float32')
train_images = (train_images - 127.5) / 127.5

train_dataset = tf.data.Dataset.from_tensor_slices(train_images).shuffle(len(train_images)).batch(batch_size)
|
||||
|
||||
def discriminator_loss(real_output, fake_output):
    """Discriminator objective: push real samples toward 1, fakes toward 0."""
    perte_reel = cross_entropy(tf.ones_like(real_output), real_output)
    perte_faux = cross_entropy(tf.zeros_like(fake_output), fake_output)
    return perte_reel + perte_faux
|
||||
|
||||
def generator_loss(fake_output):
    """Generator objective: make the discriminator score fakes as real (1)."""
    return cross_entropy(tf.ones_like(fake_output), fake_output)
|
||||
|
||||
generator = model.generator_model()
discriminator = model.discriminator_model()

# from_logits=True: the discriminator's final layer is taken as raw scores.
cross_entropy = tf.keras.losses.BinaryCrossentropy(from_logits=True)

# Epoch-level running averages of the two losses.
train_generator_loss = tf.keras.metrics.Mean()
train_discriminator_loss = tf.keras.metrics.Mean()

generator_optimizer = tf.keras.optimizers.Adam(1e-4)
discriminator_optimizer = tf.keras.optimizers.Adam(1e-4)

checkpoint = tf.train.Checkpoint(generator_optimizer=generator_optimizer,
                                 discriminator_optimizer=discriminator_optimizer,
                                 generator=generator,
                                 discriminator=discriminator)

# Fixed noise so successive sample grids show the same latent points evolving.
seed = tf.random.normal([num_examples_to_generate, noise_dim])
|
||||
|
||||
@tf.function
def train_step(images):
    """One adversarial update: generator and discriminator in lockstep."""
    noise = tf.random.normal([batch_size, noise_dim])

    with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
        fakes = generator(noise, training=True)

        real_output = discriminator(images, training=True)
        fake_output = discriminator(fakes, training=True)

        gen_loss = generator_loss(fake_output)
        disc_loss = discriminator_loss(real_output, fake_output)

        # Feed the running means reported once per epoch by train().
        train_generator_loss(gen_loss)
        train_discriminator_loss(disc_loss)

    grads_gen = gen_tape.gradient(gen_loss, generator.trainable_variables)
    grads_disc = disc_tape.gradient(disc_loss, discriminator.trainable_variables)

    generator_optimizer.apply_gradients(zip(grads_gen, generator.trainable_variables))
    discriminator_optimizer.apply_gradients(zip(grads_disc, discriminator.trainable_variables))
|
||||
|
||||
def train(dataset, epochs):
    """Full training loop: one preview image per epoch, checkpoint every 15."""
    for epoch in range(epochs):
        start = time.time()
        for image_batch in dataset:
            train_step(image_batch)
        generate_and_save_images(generator, epoch + 1, seed)
        if (epoch + 1) % 15 == 0:
            checkpoint.save(file_prefix=checkpoint_prefix)
        print('Epoch {}: loss generator: {:.4f} loss discriminator: {:.4f} {:.4f} sec'.format(
            epoch + 1,
            train_generator_loss.result(),
            train_discriminator_loss.result(),
            time.time() - start))
        # Restart the running means for the next epoch.
        train_generator_loss.reset_states()
        train_discriminator_loss.reset_states()
|
||||
|
||||
def generate_and_save_images(model, epoch, test_input):
    """Render a tab_size x tab_size grid of generated digits and save it as a PNG.

    Args:
        model: the generator network (takes a single batch of latent vectors).
        epoch: 1-based epoch number, used in the output filename.
        test_input: fixed latent batch of shape (num_examples_to_generate, noise_dim).
    """
    # BUG FIX: this generator takes a single noise tensor (train_step calls
    # generator(noise, ...)); the conditional [noise, labels] input form
    # belongs to the gan_cond variant and would crash here.
    predictions = model(test_input, training=False)
    img = np.empty(shape=(tab_size * 28, tab_size * 28), dtype=np.float32)
    for i in range(tab_size):
        for j in range(tab_size):
            # Un-normalize from the generator's [-1, 1] range back to [0, 255].
            img[j * 28:(j + 1) * 28, i * 28:(i + 1) * 28] = \
                predictions[j * tab_size + i, :, :, 0] * 127.5 + 127.5
    cv2.imwrite('{}/image_{:04d}.png'.format(dir_images, epoch), img)
|
||||
|
||||
# Launch training for the configured number of epochs.
train(train_dataset, epochs)
|
||||
95
Tensorflow/tutoriel35/gan_cond.py
Normal file
@@ -0,0 +1,95 @@
|
||||
import tensorflow as tf
|
||||
import numpy as np
|
||||
import os
|
||||
from tensorflow.keras import layers, models
|
||||
import time
|
||||
import cv2
|
||||
import model_cond
|
||||
|
||||
# Hyper-parameters and output locations.
batch_size = 256
epochs = 500
noise_dim = 100
tab_size = 6                                   # preview grid is tab_size x tab_size
num_examples_to_generate = tab_size * tab_size
dir_images = 'images_gan_cond'
checkpoint_dir = './training_checkpoints_gan_cond'
checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt")

if not os.path.isdir(dir_images):
    os.mkdir(dir_images)

# MNIST digits; only the training split feeds the GAN.
(train_images, train_labels), (test_images, test_labels) = tf.keras.datasets.mnist.load_data()

# One-hot labels for the conditional generator/discriminator.
train_labels = tf.one_hot(train_labels, 10)

# Scale pixels to [-1, 1] to match the generator's tanh output range.
train_images = train_images.reshape(-1, 28, 28, 1).astype('float32')
train_images = (train_images - 127.5) / 127.5

train_dataset = tf.data.Dataset.from_tensor_slices(
    (train_images, train_labels)).shuffle(len(train_images)).batch(batch_size)
|
||||
|
||||
def discriminator_loss(real_output, fake_output):
    """Discriminator loss: real samples should score 1, generated ones 0."""
    loss_on_real = cross_entropy(tf.ones_like(real_output), real_output)
    loss_on_fake = cross_entropy(tf.zeros_like(fake_output), fake_output)
    return loss_on_real + loss_on_fake
|
||||
|
||||
def generator_loss(fake_output):
    """Loss pushing the generator to make the discriminator output 'real' (1)."""
    target = tf.ones_like(fake_output)
    return cross_entropy(target, fake_output)
|
||||
|
||||
# Conditional networks under training.
generator = model_cond.generator_model()
discriminator = model_cond.discriminator_model()

# Logit-based binary cross-entropy shared by both loss functions.
cross_entropy = tf.keras.losses.BinaryCrossentropy(from_logits=True)

generator_optimizer = tf.keras.optimizers.Adam(1e-4)
discriminator_optimizer = tf.keras.optimizers.Adam(1e-4)

# Bundle everything needed to resume training from disk.
checkpoint = tf.train.Checkpoint(
    generator_optimizer=generator_optimizer,
    discriminator_optimizer=discriminator_optimizer,
    generator=generator,
    discriminator=discriminator,
)

# Fixed latent batch so the preview grid shows the same samples every epoch.
seed = tf.random.normal([num_examples_to_generate, noise_dim])
|
||||
|
||||
@tf.function
def train_step(images, labels):
    """Run one optimization step of the conditional GAN on a labelled batch."""
    n = len(labels)
    noise = tf.random.normal([n, noise_dim])
    # Random class targets for the fake samples, one-hot encoded.
    generated_labels = tf.random.uniform(shape=[n], minval=0, maxval=10,
                                         dtype=tf.dtypes.int32)
    generated_labels = tf.one_hot(generated_labels, 10)

    with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
        generated_images = generator([noise, generated_labels], training=True)

        real_output = discriminator([images, labels], training=True)
        fake_output = discriminator([generated_images, generated_labels], training=True)

        gen_loss = generator_loss(fake_output)
        disc_loss = discriminator_loss(real_output, fake_output)

    gen_grads = gen_tape.gradient(gen_loss, generator.trainable_variables)
    disc_grads = disc_tape.gradient(disc_loss, discriminator.trainable_variables)

    generator_optimizer.apply_gradients(zip(gen_grads, generator.trainable_variables))
    discriminator_optimizer.apply_gradients(zip(disc_grads, discriminator.trainable_variables))
|
||||
|
||||
def train(dataset, epochs):
    """Full training loop: one preview image per epoch, checkpoint every 15."""
    for epoch in range(epochs):
        start = time.time()
        for image_batch, label_batch in dataset:
            train_step(image_batch, label_batch)
        generate_and_save_images(generator, epoch + 1, seed)
        if (epoch + 1) % 15 == 0:
            checkpoint.save(file_prefix=checkpoint_prefix)
        print('Time for epoch {} is {} sec'.format(epoch + 1, time.time() - start))
|
||||
|
||||
def generate_and_save_images(model, epoch, test_input):
    """Save a tab_size x tab_size grid of conditionally generated digits as PNG."""
    # One class per grid cell, cycling 0..9 over the whole grid.
    labels = tf.one_hot(tf.range(0, num_examples_to_generate, 1) % 10, 10)
    predictions = model([test_input, labels], training=False)
    img = np.empty(shape=(tab_size * 28, tab_size * 28), dtype=np.float32)
    for col in range(tab_size):
        for row in range(tab_size):
            # Map tanh output in [-1, 1] back to [0, 255] grey levels.
            img[row * 28:(row + 1) * 28, col * 28:(col + 1) * 28] = \
                predictions[row * tab_size + col, :, :, 0] * 127.5 + 127.5
    cv2.imwrite('{}/image_{:04d}.png'.format(dir_images, epoch), img)
|
||||
|
||||
# Launch training for the configured number of epochs.
train(train_dataset, epochs)
|
||||
34
Tensorflow/tutoriel35/genere.py
Normal file
@@ -0,0 +1,34 @@
|
||||
import tensorflow as tf
|
||||
from tensorflow.keras import layers, models
|
||||
import time, threading
|
||||
import numpy as np
|
||||
import cv2
|
||||
import model_cond
|
||||
|
||||
noise_dim = 100

# Rebuild the conditional generator and load the latest trained weights.
generator = model_cond.generator_model()
checkpoint = tf.train.Checkpoint(generator=generator)
checkpoint.restore(tf.train.latest_checkpoint(checkpoint_dir='./training_checkpoints_gan_cond/'))

# Border (in pixels) around the rendered digit strip.
marge = 20
|
||||
|
||||
# Interactive loop: read an integer, generate each of its digits with the
# conditional GAN and display them side by side.
while True:
    chiffres = input("Entrez une serie de chiffre:")
    try:
        chiffres_int = int(chiffres)
    except ValueError:
        # BUG FIX: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit and made Ctrl-C unable to exit here.
        continue

    # Extract decimal digits, least significant first. int(x / 10) truncates
    # toward zero, so the loop also terminates for negative input.
    liste_chiffres = []
    while chiffres_int:
        liste_chiffres.append(chiffres_int % 10)
        chiffres_int = int(chiffres_int / 10)
    seed = tf.random.normal([len(liste_chiffres), noise_dim])
    labels = tf.one_hot(liste_chiffres, 10)
    image = np.zeros(shape=(28 + 2 * marge, len(liste_chiffres) * 28 + 2 * marge),
                     dtype=np.float32)
    prediction = generator([seed, labels], training=False)
    for i in range(len(prediction)):
        # Digits were collected least-significant-first; draw them reversed so
        # the number reads left to right.
        image[marge:marge + 28, marge + i * 28:marge + (i + 1) * 28] = \
            prediction[len(liste_chiffres) - i - 1, :, :, 0] * 127.5 + 127.5
    cv2.imshow("Image", image.astype(np.uint8))
    key = cv2.waitKey(10)
|
||||
68
Tensorflow/tutoriel35/horloge.py
Normal file
@@ -0,0 +1,68 @@
|
||||
import tensorflow as tf
|
||||
import time, threading
|
||||
import numpy as np
|
||||
import cv2
|
||||
import model_cond
|
||||
|
||||
noise_dim = 100

# Rebuild the conditional generator and load the latest trained weights.
generator = model_cond.generator_model()
checkpoint = tf.train.Checkpoint(generator=generator)
checkpoint.restore(tf.train.latest_checkpoint(checkpoint_dir='./training_checkpoints_gan_cond/'))

# Layout: outer border, gap around the HH:MM:SS colon separators, and the
# canvas holding six 28x28 digits (HHMMSS) plus those margins.
marge = 20
marge2 = 5
image = np.zeros(shape=(28 + 2 * marge, 6 * 28 + 2 * marge + 4 * marge2), dtype=np.float32)
# Last digits drawn; -1 forces every position to render on the first tick.
old_h1 = old_h2 = old_m1 = old_m2 = old_s1 = old_s2 = -1
# Flag read by the timer callback; cleared by the main loop on quit.
cont = 1
|
||||
|
||||
def foo():
    """Redraw the clock once per second, regenerating only the digits that changed."""
    global old_h1, old_h2, old_m1, old_m2, old_s1, old_s2

    if cont:
        # Re-arm the one-second timer while the main loop wants us running.
        threading.Timer(1, foo).start()
    seed = tf.random.normal([6, noise_dim])
    heure = time.strftime('%H:%M:%S')
    print(heure)
    hh, mm, ss = heure.split(':')
    h1, h2 = divmod(int(hh), 10)
    m1, m2 = divmod(int(mm), 10)
    s1, s2 = divmod(int(ss), 10)
    labels = tf.one_hot([h1, h2, m1, m2, s1, s2], 10)

    prediction = generator([seed, labels], training=False)
    digits = (h1, h2, m1, m2, s1, s2)
    previous = (old_h1, old_h2, old_m1, old_m2, old_s1, old_s2)
    for idx, (new, old) in enumerate(zip(digits, previous)):
        if new != old:
            # Columns 2-3 and 4-5 shift right to leave room for the colons.
            x0 = idx * 28 + marge + 2 * marge2 * (idx // 2)
            image[marge:28 + marge, x0:x0 + 28] = prediction[idx, :, :, 0] * 127.5 + 127.5

    # The two colon separators, drawn as pairs of dots.
    cv2.circle(image, (marge + 2 * 28 + marge2, marge + 8), 1, (255, 255, 255), 2)
    cv2.circle(image, (marge + 2 * 28 + marge2, marge + 20), 1, (255, 255, 255), 2)
    cv2.circle(image, (marge + 4 * 28 + 3 * marge2, marge + 8), 1, (255, 255, 255), 2)
    cv2.circle(image, (marge + 4 * 28 + 3 * marge2, marge + 20), 1, (255, 255, 255), 2)

    old_h1, old_h2, old_m1, old_m2, old_s1, old_s2 = digits
|
||||
|
||||
foo()
# Display loop: show the current clock image until 'q' is pressed.
while True:
    cv2.imshow("Horloge", image.astype(np.uint8))
    key = cv2.waitKey(10)
    # BUG FIX: the original tested `key == ord('q') & 0xFF`; `&` binds tighter
    # than `==`, so the mask applied to the constant (a no-op) instead of to
    # the key code, which can carry extra high bits on some platforms.
    if key & 0xFF == ord('q'):
        cv2.destroyAllWindows()
        cont = 0  # stop foo() from re-arming its timer
        quit()
|
||||
40
Tensorflow/tutoriel35/model.py
Normal file
@@ -0,0 +1,40 @@
|
||||
from tensorflow.keras import layers, models
|
||||
|
||||
def generator_model():
    """Build the DCGAN generator: 100-dim noise -> 28x28x1 image in [-1, 1]."""
    noise = layers.Input(shape=(100), dtype='float32')

    # Project and reshape the latent vector into a 7x7x256 feature map.
    x = layers.Dense(7 * 7 * 256, use_bias=False)(noise)
    x = layers.BatchNormalization()(x)
    x = layers.LeakyReLU()(x)
    x = layers.Reshape((7, 7, 256))(x)

    # Two transposed-conv stages: 7x7 (stride 1) then upsample to 14x14.
    for filters, strides in ((128, (1, 1)), (64, (2, 2))):
        x = layers.Conv2DTranspose(filters, (5, 5), strides=strides,
                                   padding='same', use_bias=False)(x)
        x = layers.BatchNormalization()(x)
        x = layers.LeakyReLU()(x)

    # Final upsampling to 28x28; tanh keeps pixel values in [-1, 1].
    out = layers.Conv2DTranspose(1, (5, 5), strides=(2, 2), padding='same',
                                 use_bias=False, activation='tanh')(x)

    return models.Model(inputs=noise, outputs=out)
|
||||
|
||||
def discriminator_model():
    """Build the DCGAN discriminator: 28x28x1 image -> single real/fake score."""
    img = layers.Input(shape=(28, 28, 1), dtype='float32')

    x = img
    # Two strided conv stages: 28x28 -> 14x14 -> 7x7.
    for filters in (64, 128):
        x = layers.Conv2D(filters, (5, 5), strides=(2, 2), padding='same')(x)
        x = layers.LeakyReLU()(x)
        x = layers.Dropout(0.3)(x)

    x = layers.Flatten()(x)
    score = layers.Dense(1)(x)  # raw score, no sigmoid applied here

    return models.Model(inputs=img, outputs=score)
|
||||