Initial commit
This commit is contained in:
5
Tensorflow/concours_drive_2/README.md
Normal file
5
Tensorflow/concours_drive_2/README.md
Normal file
@@ -0,0 +1,5 @@
|
||||
# Concours DRIVE: Digital Retinal Images for Vessel Extraction
|
||||
Mesure de similarité: Dice et Jaccard à la rescousse !
|
||||
|
||||
La vidéo du tutoriel se trouve à l'adresse suivante:
|
||||
https://www.youtube.com/watch?v=26mpUDOS_IE
|
||||
87
Tensorflow/concours_drive_2/model.py
Normal file
87
Tensorflow/concours_drive_2/model.py
Normal file
@@ -0,0 +1,87 @@
|
||||
import tensorflow as tf
|
||||
from tensorflow.keras import layers, models
|
||||
|
||||
def LossDice(y_true, y_pred):
    """Soft Dice loss per sample: 1 - Dice(y_true, y_pred).

    Args:
        y_true: ground-truth masks in [0, 1], spatial dims on axes 1-2.
        y_pred: predicted masks in [0, 1], same layout as y_true.

    Returns:
        Tensor of per-sample losses (1 - Dice coefficient).

    The smoothing term (1e-4) is added to BOTH numerator and denominator,
    the canonical soft-Dice form: when ground truth and prediction are both
    empty, dice = 1 and the loss is 0 instead of wrongly being 1.
    """
    numerateur = tf.reduce_sum(y_true * y_pred, axis=(1, 2))
    denominateur = tf.reduce_sum(y_true + y_pred, axis=(1, 2))
    dice = (2 * numerateur + 1E-4) / (denominateur + 1E-4)
    return 1 - dice
|
||||
|
||||
def LossJaccard(y_true, y_pred):
    """Soft Jaccard (IoU) loss per sample: 1 - IoU(y_true, y_pred).

    Args:
        y_true: ground-truth masks in [0, 1], spatial dims on axes 1-2.
        y_pred: predicted masks in [0, 1], same layout as y_true.

    Returns:
        Tensor of per-sample losses (1 - Jaccard index).

    The smoothing term (1e-4) is added to BOTH numerator and denominator so
    that an all-empty ground-truth/prediction pair gives IoU = 1 and loss 0,
    instead of penalizing a correct empty prediction with loss 1.
    """
    intersection = tf.reduce_sum(y_true * y_pred, axis=(1, 2))
    union = tf.reduce_sum(y_true + y_pred, axis=(1, 2))
    jaccard = (intersection + 1E-4) / (union - intersection + 1E-4)
    return 1 - jaccard
|
||||
|
||||
def model(nbr):
    """Build a U-Net-style segmentation network for 576x560 RGB inputs.

    Args:
        nbr: base number of convolution filters; deeper levels use
            multiples of it (2x, 4x, 8x).

    Returns:
        An uncompiled tf.keras Model mapping (576, 560, 3) float32 images
        to a (576, 560, 1) sigmoid vessel-probability map.

    Note: 576 and 560 are multiples of 16, so the four 2x poolings and
    four 2x upsamplings restore the exact input resolution.
    """

    def double_conv(x, filters1, filters2):
        # Two 3x3 ReLU convolutions, each followed by batch normalization —
        # the repeated unit of both the encoder and the decoder.
        x = layers.Conv2D(filters1, 3, activation='relu', padding='same')(x)
        x = layers.BatchNormalization()(x)
        x = layers.Conv2D(filters2, 3, activation='relu', padding='same')(x)
        return layers.BatchNormalization()(x)

    entree = layers.Input(shape=(576, 560, 3), dtype='float32')

    # Encoder: keep the pre-pooling activations as skip connections.
    skip1 = double_conv(entree, nbr, nbr)
    x = layers.MaxPool2D()(skip1)
    skip2 = double_conv(x, 2 * nbr, 2 * nbr)
    x = layers.MaxPool2D()(skip2)
    skip3 = double_conv(x, 4 * nbr, 4 * nbr)
    x = layers.MaxPool2D()(skip3)
    skip4 = double_conv(x, 4 * nbr, 4 * nbr)
    x = layers.MaxPool2D()(skip4)

    # Bottleneck.
    x = double_conv(x, 8 * nbr, 4 * nbr)

    # Decoder: upsample, concatenate the matching skip, then double conv.
    x = layers.UpSampling2D()(x)
    x = tf.concat([x, skip4], axis=3)
    x = double_conv(x, 8 * nbr, 4 * nbr)

    x = layers.UpSampling2D()(x)
    x = tf.concat([x, skip3], axis=3)
    x = double_conv(x, 4 * nbr, 2 * nbr)

    x = layers.UpSampling2D()(x)
    x = tf.concat([x, skip2], axis=3)
    x = double_conv(x, 2 * nbr, nbr)

    x = layers.UpSampling2D()(x)
    x = tf.concat([x, skip1], axis=3)
    x = double_conv(x, nbr, nbr)

    # 1x1 sigmoid head: per-pixel vessel probability.
    sortie = layers.Conv2D(1, 1, activation='sigmoid', padding='same')(x)

    return models.Model(inputs=entree, outputs=sortie)
|
||||
98
Tensorflow/concours_drive_2/train.py
Normal file
98
Tensorflow/concours_drive_2/train.py
Normal file
@@ -0,0 +1,98 @@
|
||||
import tensorflow as tf
from sklearn.model_selection import train_test_split
from PIL import Image
import os
import numpy as np
import random
import cv2
import model
import traitement_images as ti

# DRIVE images are 584x565; everything is cropped to 576x560, the largest
# size divisible by 16 (four pooling levels in the model).
dir_images = './training/images/'
dir_mask = './training/1st_manual/'

if not os.path.isdir(dir_images):
    raise SystemExit("The directory {} doesn't exist !".format(dir_images))
if not os.path.isdir(dir_mask):
    raise SystemExit("The directory {} doesn't exist !".format(dir_mask))

tab_images = []
tab_masks = []

# os.listdir never returns None; an empty directory returns [], so test
# for emptiness rather than "is None".
list_file = os.listdir(dir_images)
if not list_file:
    raise SystemExit("No file in {} !".format(dir_images))

for fichier in list_file:
    img_orig = cv2.imread(dir_images + fichier)
    tab_images.append(img_orig[:576, :560])
    # Training files are named "<num>_..."; masks are "<num>_manual1.gif".
    num = fichier.split('_')[0]
    file_mask = dir_mask + num + '_manual1.gif'
    if not os.path.isfile(file_mask):
        # Report the source image name, not the missing mask path itself.
        raise SystemExit("Mask of {} doesn't exist in {}".format(fichier, dir_mask))
    img_mask_orig = np.array(Image.open(file_mask))
    tab_masks.append(img_mask_orig[:576, :560])

    # Augmentation: 12 rotations; for each, one photometrically-perturbed
    # copy plus vertical (0), horizontal (1) and both-axis (-1) flips.
    for angle in range(0, 360, 30):
        img_r = ti.rotateImage(img_orig, angle)
        img_mask = ti.rotateImage(img_mask_orig, angle)
        tab_images.append(ti.random_change(img_r.copy())[:576, :560])
        tab_masks.append(img_mask[:576, :560])
        for flip_code in (0, 1, -1):
            img = ti.random_change(cv2.flip(img_r, flip_code))
            tab_images.append(img[:576, :560])
            tab_masks.append(cv2.flip(img_mask, flip_code)[:576, :560])

tab_images = np.array(tab_images, dtype=np.float32) / 255
# Explicit channel axis: masks become (N, 576, 560, 1), matching the model
# output and the axis=(1, 2) reductions in the Dice/Jaccard losses.
tab_masks = np.array(tab_masks, dtype=np.float32)[:, :, :, np.newaxis] / 255

train_images, test_images, train_masks, test_masks = train_test_split(
    tab_images, tab_masks, test_size=0.05)

# Release the full arrays before training to reduce peak memory.
del tab_images
del tab_masks

my_model = model.model(64)

my_model.compile(optimizer='adam',
                 loss=model.LossDice,
                 metrics=['accuracy'])
my_model.fit(train_images,
             train_masks,
             epochs=20,
             batch_size=4,
             validation_data=(test_images, test_masks))

# Inference on the competition test set; save full-size 584x565 masks.
dir_test_images = './test/images/'

tab_test_images = []
tab_files = []
for fichier in os.listdir(dir_test_images):
    img = cv2.imread(dir_test_images + fichier)
    tab_test_images.append(img[:576, :560])
    tab_files.append(fichier.split('_')[0])

tab_test_images = np.array(tab_test_images, dtype=np.float32) / 255
tab_files = np.array(tab_files)

# cv2.imwrite fails silently when the target directory does not exist.
os.makedirs("./predictions", exist_ok=True)

for num, image in zip(tab_files, tab_test_images):
    # Embed the 576x560 prediction into a zero-padded full-size canvas.
    mask = np.zeros((584, 565, 1), dtype=np.float32)
    prediction = my_model.predict(np.array([image]))
    mask[:576, :560] = prediction[0] * 255
    cv2.imwrite("./predictions/" + str(num) + ".png", mask)
|
||||
|
||||
30
Tensorflow/concours_drive_2/traitement_images.py
Normal file
30
Tensorflow/concours_drive_2/traitement_images.py
Normal file
@@ -0,0 +1,30 @@
|
||||
import cv2
|
||||
import numpy as np
|
||||
import random
|
||||
|
||||
def rotateImage(image, angle):
    """Rotate *image* by *angle* degrees around its centre.

    Output keeps the input's width/height; resampling is bilinear, and
    corners rotated out of frame are lost (filled with the border default).
    """
    # OpenCV wants (width, height); shape[1::-1] reverses (rows, cols).
    size_wh = image.shape[1::-1]
    centre = tuple(np.array(size_wh) / 2)
    transform = cv2.getRotationMatrix2D(centre, angle, 1.0)
    return cv2.warpAffine(image, transform, size_wh, flags=cv2.INTER_LINEAR)
|
||||
|
||||
def bruit(image):
    """Add Gaussian noise of random strength (5 to 30) to an HxWxC uint8 image."""
    height, width, channels = image.shape
    noise = np.random.randn(height, width, channels) * random.randint(5, 30)
    noisy = image + noise
    # Clamp back into the valid 8-bit range before converting.
    return np.clip(noisy, 0, 255).astype(np.uint8)
|
||||
|
||||
def change_gamma(image, alpha=1.0, beta=0.0):
    """Linear brightness/contrast change: alpha * image + beta, clipped to uint8.

    Defaults (alpha=1, beta=0) return the image unchanged.
    """
    adjusted = image * alpha + beta
    return np.clip(adjusted, 0, 255).astype(np.uint8)
|
||||
|
||||
def color(image, alpha=20):
    """Shift each of the 3 colour channels by an independent random offset.

    Offsets are drawn uniformly from [-alpha, alpha]; the result is clipped
    to the valid uint8 range. Expects the channel axis last with size 3.
    """
    offsets = [random.randint(-alpha, alpha) for _ in range(3)]
    shifted = image + offsets
    return np.clip(shifted, 0, 255).astype(np.uint8)
|
||||
|
||||
def random_change(image):
    """Apply a random subset of photometric augmentations to *image*.

    Each transform (gamma/brightness, Gaussian noise, colour shift) is
    applied independently with probability 1/2, chained on the running
    result.

    Bug fix: the original stored each transformed result in a local `img`
    but returned the untouched `image`, so every augmentation was silently
    discarded. The transforms are now chained on `image` itself.
    """
    if np.random.randint(2):
        image = change_gamma(image, random.uniform(0.8, 1.2), np.random.randint(100) - 50)
    if np.random.randint(2):
        image = bruit(image)
    if np.random.randint(2):
        image = color(image)
    return image
|
||||
Reference in New Issue
Block a user