Initial commit
This commit is contained in:
4
Tensorflow/tutoriel27/README.md
Normal file
4
Tensorflow/tutoriel27/README.md
Normal file
@@ -0,0 +1,4 @@
|
||||
# Tutoriel tensorflow
|
||||
## Réseau Yolo
|
||||
|
||||
La vidéo de ce tutoriel est disponible à l'adresse suivante: https://www.youtube.com/watch?v=oQ0436IJUWc
|
||||
201
Tensorflow/tutoriel27/common.py
Normal file
201
Tensorflow/tutoriel27/common.py
Normal file
@@ -0,0 +1,201 @@
|
||||
import tensorflow as tf
|
||||
from tensorflow.keras import layers, models
|
||||
import json
|
||||
import random
|
||||
import cv2
|
||||
import numpy as np
|
||||
import math
|
||||
import config
|
||||
|
||||
def sigmoid(x):
    """Numerically-stable logistic function, 1 / (1 + e^-x).

    Works on scalars and numpy arrays.  Inputs are clamped to [-50, 50]
    before exponentiation so np.exp never overflows.
    """
    clipped = np.clip(x, -50, 50)
    return 1.0 / (1.0 + np.exp(-clipped))
|
||||
|
||||
def softmax(x):
    """Softmax normalised over ALL elements of `x` (global, not per-axis),
    matching the original behaviour.

    Fix: the maximum is subtracted before exponentiation.  This is
    mathematically a no-op (the factor cancels in the ratio) but prevents
    np.exp from overflowing to inf/NaN on large logits — this function is
    fed raw network outputs at inference time.
    """
    e = np.exp(x - np.max(x))
    return e / np.sum(e)
|
||||
|
||||
def prepare_image(image, labels, grille=True):
    """Return a copy of `image` with the ground-truth boxes drawn on it.

    image:  BGR image of size (config.hauteur, config.largeur) — assumed;
            confirm against prepare_labels' output crop.
    labels: (cellule_y, cellule_x, nbr_boxes, 5+nbr_classes) target tensor
            as built by prepare_labels; coordinates are in grid-cell units.
    grille: when True, also overlay the detection-grid lines.
    """
    img=image.copy()

    if grille is True:
        # Draw the cell grid in black, 1 px wide.
        for x in range(config.r_x, config.largeur+config.r_x, config.r_x):
            for y in range(config.r_y, config.hauteur+config.r_y, config.r_y):
                cv2.line(img, (0, y), (x, y), (0, 0, 0), 1)
                cv2.line(img, (x, 0), (x, y), (0, 0, 0), 1)

    for y in range(config.cellule_y):
        for x in range(config.cellule_x):
            for box in range(config.nbr_boxes):
                # Channel 4 is the objectness flag: draw only real boxes.
                if labels[y, x, box, 4]:
                    # One-hot class scores start at channel 5.
                    ids=np.argmax(labels[y, x, box, 5:])
                    # Convert centre and half-size from grid-cell units
                    # to pixels (one cell is r_x by r_y pixels).
                    x_center=int(labels[y, x, box, 0]*config.r_x)
                    y_center=int(labels[y, x, box, 1]*config.r_y)
                    w_2=int(labels[y, x, box, 2]*config.r_x/2)
                    h_2=int(labels[y, x, box, 3]*config.r_y/2)
                    x_min=x_center-w_2
                    y_min=y_center-h_2
                    x_max=x_center+w_2
                    y_max=y_center+h_2
                    # Colour comes from the per-category palette in config.dict.
                    cv2.rectangle(img, (x_min, y_min), (x_max, y_max), list(config.dict.values())[ids], 1)
                    cv2.circle(img, (x_center, y_center), 1, list(config.dict.values())[ids], 2)

    return img
|
||||
|
||||
def bruit(image):
    """Return a noisy uint8 copy of `image`.

    Adds zero-mean Gaussian noise whose amplitude is drawn uniformly in
    [5, 30], then clips back into the valid [0, 255] byte range.
    """
    noise = np.random.randn(*image.shape)
    noise *= random.randint(5, 30)
    noisy = image + noise
    return np.clip(noisy, 0, 255).astype(np.uint8)
|
||||
|
||||
def gamma(image, alpha=1.0, beta=0.0):
    """Linear brightness/contrast adjustment: alpha*image + beta.

    The result is clipped into [0, 255] and returned as uint8, so it can
    be chained with other OpenCV-style image ops.
    """
    adjusted = alpha * image + beta
    clipped = np.clip(adjusted, 0, 255)
    return clipped.astype(np.uint8)
|
||||
|
||||
def intersection_over_union(boxA, boxB):
    """Intersection-over-union of two boxes given as [x_min, y_min, x_max, y_max].

    Fix: returns 0.0 when the union area is zero (two degenerate,
    zero-area boxes) instead of dividing by zero.  The guard assumes
    scalar coordinates, which is how this file calls it (prepare_labels
    passes plain Python lists of scalars).
    """
    xA=np.maximum(boxA[0], boxB[0])
    yA=np.maximum(boxA[1], boxB[1])
    xB=np.minimum(boxA[2], boxB[2])
    yB=np.minimum(boxA[3], boxB[3])
    # Clamp to 0 so disjoint boxes yield zero intersection, not negative.
    interArea=np.maximum(0, xB-xA)*np.maximum(0, yB-yA)
    boxAArea=(boxA[2]-boxA[0])*(boxA[3]-boxA[1])
    boxBArea=(boxB[2]-boxB[0])*(boxB[3]-boxB[1])
    union=boxAArea+boxBArea-interArea
    if union == 0:
        return 0.0
    return interArea/union
|
||||
|
||||
def prepare_labels(fichier_image, objects, coeff=None):
    """Load one annotated image, apply random augmentation, and build YOLO targets.

    fichier_image: path of the image to read.
    objects: list of annotation dicts with 'category' and 'bounding_box'
             ('minimum'/'maximum' corners, 'r' = row, 'c' = column).
    coeff: optional zoom factor; drawn uniformly in [1.1, 2.5] when None.

    Returns (crop, label, label2), or (None, None, None) when the image
    contains no 'trophozoite' (class index 4 in config.dict2) either
    before or after augmentation.
      crop:   (config.hauteur, config.largeur, 3) augmented image window.
      label:  (cellule_y, cellule_x, nbr_boxes, 5+nbr_classes) grid target —
              per assigned box: centre x, y, width, height (all in
              grid-cell units), objectness=1, one-hot class.
      label2: (max_objet, 7) flat box list: x_min, y_min, x_max, y_max,
              area, 1, class id (grid-cell units).
    """
    image=cv2.imread(fichier_image)

    ######################
    # Reject images with no trophozoite annotation (class id 4).
    trophozoite=0
    for o in objects:
        if config.dict2.index(o['category'])==4:
            trophozoite=1
            break
    if trophozoite==0:
        return None, None, None
    ######################

    # Augmentation: random zoom, brightness/contrast jitter, Gaussian noise.
    if coeff is None:
        coeff=random.uniform(1.1, 2.5)
    image_r=cv2.resize(image, (int(coeff*config.largeur), int(coeff*config.hauteur)))
    image_r=gamma(image_r, random.uniform(0.7, 1.3), np.random.randint(60)-30)
    image_r=bruit(image_r)

    # Random crop offset inside the zoomed image (no shift when coeff==1).
    if coeff==1:
        shift_x=0
        shift_y=0
    else:
        shift_x=np.random.randint(image_r.shape[1]-config.largeur)
        shift_y=np.random.randint(image_r.shape[0]-config.hauteur)

    # Scale from original-image pixels to zoomed-image pixels.
    ratio_x=coeff*config.largeur/image.shape[1]
    ratio_y=coeff*config.hauteur/image.shape[0]

    # Random flip: 0 = both axes, 1 = vertical, 2 = horizontal, 3 = none
    # (cv2.flip is called with flip-1, i.e. -1/0/1).
    flip=np.random.randint(4)
    if flip!=3:
        image_r=cv2.flip(image_r, flip-1)

    label =np.zeros((config.cellule_y, config.cellule_x, config.nbr_boxes, 5+config.nbr_classes), dtype=np.float32)
    label2=np.zeros((config.max_objet, 7), dtype=np.float32)

    nbr_objet=0
    for o in objects:
        id_class=config.dict2.index(o['category'])
        box=o['bounding_box']

        # Map the annotated corners into zoomed-image pixels, mirroring
        # the coordinates to match the flip applied to the image above.
        if flip==3:
            x_min=int(box['minimum']['c']*ratio_x)
            y_min=int(box['minimum']['r']*ratio_y)
            x_max=int(box['maximum']['c']*ratio_x)
            y_max=int(box['maximum']['r']*ratio_y)
        if flip==2:
            x_min=int((image.shape[1]-box['maximum']['c'])*ratio_x)
            y_min=int(box['minimum']['r']*ratio_y)
            x_max=int((image.shape[1]-box['minimum']['c'])*ratio_x)
            y_max=int(box['maximum']['r']*ratio_y)
        if flip==1:
            x_min=int(box['minimum']['c']*ratio_x)
            y_min=int((image.shape[0]-box['maximum']['r'])*ratio_y)
            x_max=int(box['maximum']['c']*ratio_x)
            y_max=int((image.shape[0]-box['minimum']['r'])*ratio_y)
        if flip==0:
            x_min=int((image.shape[1]-box['maximum']['c'])*ratio_x)
            y_min=int((image.shape[0]-box['maximum']['r'])*ratio_y)
            x_max=int((image.shape[1]-box['minimum']['c'])*ratio_x)
            y_max=int((image.shape[0]-box['minimum']['r'])*ratio_y)

        # Drop boxes not fully inside the crop window.
        if x_min<shift_x or y_min<shift_y or x_max>(shift_x+config.largeur) or y_max>(shift_y+config.hauteur):
            continue
        # Convert to crop-relative grid-cell units.
        x_min=(x_min-shift_x)/config.r_x
        y_min=(y_min-shift_y)/config.r_y
        x_max=(x_max-shift_x)/config.r_x
        y_max=(y_max-shift_y)/config.r_y

        area=(x_max-x_min)*(y_max-y_min)
        label2[nbr_objet]=[x_min, y_min, x_max, y_max, area, 1, id_class]

        # Cell that owns the box = integer part of the box centre.
        # (x_cell/y_cell are redundant re-casts: x_centre is already int.)
        x_centre=int(x_min+(x_max-x_min)/2)
        y_centre=int(y_min+(y_max-y_min)/2)
        x_cell=int(x_centre)
        y_cell=int(y_centre)

        # Anchor boxes centred on the owning cell, in grid-cell units.
        a_x_min=x_centre-config.anchors[:, 0]/2
        a_y_min=y_centre-config.anchors[:, 1]/2
        a_x_max=x_centre+config.anchors[:, 0]/2
        a_y_max=y_centre+config.anchors[:, 1]/2

        # Pick the anchor with the best IoU against the ground-truth box
        # (defaults to anchor 0 if every IoU is 0).
        id_a=0
        best_iou=0
        for i in range(len(config.anchors)):
            iou=intersection_over_union([x_min, y_min, x_max, y_max], [a_x_min[i], a_y_min[i], a_x_max[i], a_y_max[i]])
            if iou>best_iou:
                best_iou=iou
                id_a=i

        # Write the target: float centre/size, objectness, one-hot class.
        label[y_cell, x_cell, id_a, 0]=(x_max+x_min)/2
        label[y_cell, x_cell, id_a, 1]=(y_max+y_min)/2
        label[y_cell, x_cell, id_a, 2]=x_max-x_min
        label[y_cell, x_cell, id_a, 3]=y_max-y_min
        label[y_cell, x_cell, id_a, 4]=1.
        label[y_cell, x_cell, id_a, 5+id_class]=1.

        nbr_objet=nbr_objet+1
        if nbr_objet==config.max_objet:
            print("Nbr objet max atteind !!!!!")
            break

    ######################
    # Reject the sample if no trophozoite survived the crop.
    # NOTE(review): np.argmax on an all-zero class vector returns 0, so an
    # empty slot can never be mistaken for class 4 here.
    trophozoite=0
    for y in range(config.cellule_y):
        for x in range(config.cellule_x):
            for b in range(config.nbr_boxes):
                if np.argmax(label[y, x, b, 5:])==4:
                    trophozoite=1
    if not trophozoite:
        return None, None, None
    ######################

    return image_r[shift_y:shift_y+config.hauteur, shift_x:shift_x+config.largeur], label, label2
|
||||
|
||||
def read_json(file, nbr=1, nbr_fichier=None):
    """Build augmented (images, labels, labels2) arrays from an annotation JSON.

    file: path of the JSON annotation file (a list of entries, each with an
          'image' pathname and its 'objects').
    nbr: number of augmented variants generated per source image.
    nbr_fichier: optional cap on the number of JSON entries processed.

    Entries for which prepare_labels returns None are skipped.  The three
    accumulated lists are returned as numpy arrays.
    """
    images=[]
    labels=[]
    labels2=[]
    with open(file) as json_file:
        data=json.load(json_file)
        for entry_id, p in enumerate(data):
            print(entry_id, p['image']['pathname'])
            for _ in range(nbr):
                image, label, label2=prepare_labels("./{}".format(p['image']['pathname']), p['objects'])
                if image is not None:
                    images.append(image)
                    labels.append(label)
                    labels2.append(label2)
            # Stop once nbr_fichier entries have been processed.
            if nbr_fichier is not None and entry_id+1==nbr_fichier:
                break
    return np.array(images), np.array(labels), np.array(labels2)
|
||||
33
Tensorflow/tutoriel27/config.py
Normal file
33
Tensorflow/tutoriel27/config.py
Normal file
@@ -0,0 +1,33 @@
|
||||
import numpy as np
|
||||
|
||||
# BGR colour used to draw each annotation category (OpenCV colour order).
# NOTE: 'dict' shadows the builtin, but other modules reference config.dict,
# so the name is kept for compatibility.
dict={'leukocyte': (255, 255, 0),
      'red blood cell': (0, 0, 255),
      'ring': (0, 255, 0),
      'schizont': (255, 0, 255),
      'trophozoite': (255, 0, 0),
      'difficult': (0, 0, 0),
      'gametocyte': (0, 255, 255)}

# Category names indexed by class id (insertion order of the dict above).
dict2=list(dict)

# Input image size in pixels and detection-grid resolution.
largeur=256
hauteur=192
cellule_x=16
cellule_y=12
nbr_classes=len(dict)
# Pixel size of one grid cell.
r_x=largeur//cellule_x
r_y=hauteur//cellule_y
# Maximum number of ground-truth objects kept per image.
max_objet=60

# Anchor (width, height) priors, expressed in grid-cell units.
anchors=np.array([[3.0, 1.5], [2.0, 2.0], [1.5, 3.0]])
nbr_boxes=len(anchors)

batch_size=16

# YOLO loss weights: coordinate terms vs. no-object confidence term.
lambda_coord=5
lambda_noobj=0.5

# IoU threshold above which a prediction is NOT penalised as "no object".
seuil_iou_loss=0.6
|
||||
16
Tensorflow/tutoriel27/images.py
Normal file
16
Tensorflow/tutoriel27/images.py
Normal file
@@ -0,0 +1,16 @@
|
||||
import cv2
|
||||
import numpy as np
|
||||
import common
|
||||
import config
|
||||
|
||||
# Visual sanity check of the data pipeline: build 10 augmented variants of
# each of the first 10 annotated images, shuffle them, and display each one
# with its ground-truth boxes drawn.
images, labels, labels2=common.read_json('training.json', 10, 10)
# Shuffle images and labels with the same permutation (labels2 is unused here).
index=np.random.permutation(len(images))
images=images[index]
labels=labels[index]

for i in range(len(images)):
    # grille=False: draw only the labelled boxes, no grid overlay.
    image=common.prepare_image(images[i], labels[i], False)
    # Show at 2x size for readability; press 'q' to stop early, any other
    # key to advance to the next image.
    cv2.imshow("image", cv2.resize(image, (2*config.largeur, 2*config.hauteur)))
    if cv2.waitKey()&0xFF==ord('q'):
        break
|
||||
|
||||
63
Tensorflow/tutoriel27/inference.py
Normal file
63
Tensorflow/tutoriel27/inference.py
Normal file
@@ -0,0 +1,63 @@
|
||||
import tensorflow as tf
|
||||
import sys
|
||||
import time
|
||||
import cv2
|
||||
import numpy as np
|
||||
import math
|
||||
import common
|
||||
import config
|
||||
import model
|
||||
|
||||
# Load 5 augmented variants per test image, normalise to [0, 1], shuffle.
images, labels, labels2=common.read_json('test.json', 5)
images=np.array(images, dtype=np.float32)/255
labels=np.array(labels, dtype=np.float32)
index=np.random.permutation(len(images))
images=images[index]
labels=labels[index]

# Rebuild the network (rebinds the name 'model' from module to instance)
# and restore the latest training checkpoint.
model=model.model(config.nbr_classes, config.nbr_boxes, config.cellule_y, config.cellule_x)

checkpoint=tf.train.Checkpoint(model=model)
checkpoint.restore(tf.train.latest_checkpoint("./training/"))

# grid[y, x, b] = (x, y) coordinates of the grid cell, used to convert the
# per-cell offsets predicted by the network into absolute positions.
grid=np.meshgrid(np.arange(config.cellule_x, dtype=np.float32), np.arange(config.cellule_y, dtype=np.float32))
grid=np.expand_dims(np.stack(grid, axis=-1), axis=2)
grid=np.tile(grid, (1, 1, config.nbr_boxes, 1))

for i in range(len(images)):
    # Reference view with the ground-truth boxes drawn.
    img=common.prepare_image(images[i], labels[i], False)
    predictions=model(np.array([images[i]]))

    # Decode the raw output: 0:4 box, 4 objectness logit, 5: class logits.
    pred_boxes=predictions[0, :, :, :, 0:4]
    pred_conf=common.sigmoid(predictions[0, :, :, :, 4])
    pred_classes=common.softmax(predictions[0, :, :, :, 5:])
    ids=np.argmax(pred_classes, axis=-1)

    # YOLO parameterisation: centre = (cell + sigmoid(offset)) * cell size,
    # size = exp(prediction) * anchor * cell size.
    x_center=((grid[:, :, :, 0]+common.sigmoid(pred_boxes[:, :, :, 0]))*config.r_x)
    y_center=((grid[:, :, :, 1]+common.sigmoid(pred_boxes[:, :, :, 1]))*config.r_y)
    w=(np.exp(pred_boxes[:, :, :, 2])*config.anchors[:, 0]*config.r_x)
    h=(np.exp(pred_boxes[:, :, :, 3])*config.anchors[:, 1]*config.r_y)

    x_min=(x_center-w/2).astype(np.int32)
    y_min=(y_center-h/2).astype(np.int32)
    x_max=(x_center+w/2).astype(np.int32)
    y_max=(y_center+h/2).astype(np.int32)

    # NOTE(review): tab_boxes/conf are never filled or read — presumably
    # leftovers for a non-max-suppression step that was not written.
    tab_boxes=[]
    conf=[]
    # Draw every box whose objectness exceeds a fixed 10% threshold.
    for y in range(config.cellule_y):
        for x in range(config.cellule_x):
            for b in range(config.nbr_boxes):
                if pred_conf[y, x, b]>0.10:
                    color=list(config.dict.values())[ids[y, x, b]]
                    # NOTE(review): x_center/y_center are floats here;
                    # recent OpenCV versions require integer point
                    # coordinates — confirm with the installed cv2.
                    cv2.circle(images[i], (x_center[y, x, b], y_center[y, x, b]), 1, color, 2)
                    cv2.rectangle(images[i], (x_min[y, x, b], y_min[y, x, b]), (x_max[y, x, b], y_max[y, x, b]), color, 1)
                    cv2.rectangle(images[i], (x_min[y, x, b], y_min[y, x, b]), (x_max[y, x, b], y_min[y, x, b]-15), color, cv2.FILLED)
                    cv2.putText(images[i], "{:3.0%}".format(pred_conf[y, x, b]), (x_min[y, x, b], y_min[y, x, b]-5), cv2.FONT_HERSHEY_COMPLEX_SMALL , 0.5, (255, 255, 255), 1)

    cv2.imshow("Inference", images[i])
    cv2.imshow("Bonne reponse", img)

    # 'q' quits; any other key moves to the next image.
    key=cv2.waitKey()&0xFF
    if key==ord('q'):
        quit()
|
||||
54
Tensorflow/tutoriel27/model.py
Normal file
54
Tensorflow/tutoriel27/model.py
Normal file
@@ -0,0 +1,54 @@
|
||||
import tensorflow as tf
|
||||
from tensorflow.keras import layers, models
|
||||
import config
|
||||
|
||||
def block_resnet(input, filters, kernel_size, reduce=False):
    """Residual block: Conv-BN-LeakyReLU, Conv, shortcut add, LeakyReLU-BN.

    input: incoming Keras tensor.
    filters, kernel_size: passed to the two main-path convolutions.
    reduce: when True, the second convolution and the shortcut both use
            stride 2, halving the spatial resolution.
    """
    stride = 2 if reduce is True else 1

    # Main path: two convolutions, the second carrying the stride.
    result = layers.Conv2D(filters, kernel_size, strides=1, padding='SAME')(input)
    result = layers.BatchNormalization()(result)
    result = layers.LeakyReLU(alpha=0.1)(result)
    result = layers.Conv2D(filters, kernel_size, strides=stride, padding='SAME')(result)

    # Shortcut: identity only when both the channel count and the spatial
    # size are unchanged; otherwise a 1x1 projection convolution.
    if input.shape[-1] == filters and reduce is not True:
        shortcut = input
    else:
        shortcut = layers.Conv2D(filters, 1, strides=stride, padding='SAME')(input)

    result = layers.add([result, shortcut])
    result = layers.LeakyReLU(alpha=0.1)(result)
    result = layers.BatchNormalization()(result)
    return result
|
||||
|
||||
def model(nbr_classes, nbr_boxes, cellule_y, cellule_x):
    """Build the YOLO-style detection network.

    Returns a Keras model mapping a batch of (hauteur, largeur, 3) images
    to predictions of shape (cellule_y, cellule_x, nbr_boxes,
    5+nbr_classes).  The parameters are kept for interface compatibility;
    like the original, the body reads the same values from `config`.

    BUG FIX: the input was declared shape=(config.largeur, config.hauteur, 3)
    = (256, 192, 3), but every caller (train.py, inference.py) feeds OpenCV
    images of shape (hauteur, largeur, 3) = (192, 256, 3) — rows first.
    With the corrected shape, the x16-downsampled feature map is
    (cellule_y, cellule_x, ...) = (12, 16, ...) and the final Reshape no
    longer silently scrambles the grid axes.
    """
    # OpenCV images are (rows=hauteur, cols=largeur, channels).
    entree=layers.Input(shape=(config.hauteur, config.largeur, 3), dtype='float32')

    # Backbone: residual blocks; each reduce=True block halves the spatial
    # resolution, for a total downsampling factor of 16.
    result=block_resnet(entree, 16, 3, False)
    result=block_resnet(result, 16, 3, True)

    result=block_resnet(result, 32, 3, False)
    result=block_resnet(result, 32, 3, True)

    result=block_resnet(result, 64, 3, False)
    result=block_resnet(result, 64, 3, False)
    result=block_resnet(result, 64, 3, True)

    result=block_resnet(result, 128, 3, False)
    result=block_resnet(result, 128, 3, False)
    result=block_resnet(result, 128, 3, True)

    # 1x1 detection head, then split channels into (box, 5 + classes).
    result=layers.Conv2D(config.nbr_boxes*(5+config.nbr_classes), 1, padding='SAME')(result)
    sortie=layers.Reshape((config.cellule_y, config.cellule_x, config.nbr_boxes, 5+config.nbr_classes))(result)

    model=models.Model(inputs=entree, outputs=sortie)

    return model
|
||||
|
||||
117
Tensorflow/tutoriel27/train.py
Normal file
117
Tensorflow/tutoriel27/train.py
Normal file
@@ -0,0 +1,117 @@
|
||||
import tensorflow as tf
|
||||
import sys
|
||||
import time
|
||||
import cv2
|
||||
import numpy as np
|
||||
import common
|
||||
import config
|
||||
import model
|
||||
|
||||
# NOTE(review): this local batch_size shadows config.batch_size (same value).
batch_size=16

# 20 random augmented variants per annotated training image.
images, labels, labels2=common.read_json('training.json', 20)
images=np.array(images, dtype=np.float32)/255
labels=np.array(labels, dtype=np.float32)
# Shuffle images and labels with the same permutation.
# NOTE(review): labels2 is NOT converted, shuffled, or batched here, yet
# my_loss reads the module-level labels2 and indexes it per batch element —
# so it is misaligned with the shuffled/batched images. Confirm and fix
# (e.g. carry labels2 through the tf.data pipeline).
index=np.random.permutation(len(images))
images=images[index]
labels=labels[index]

print("Nbr images:", len(images))

train_ds=tf.data.Dataset.from_tensor_slices((images, labels)).batch(batch_size)
|
||||
|
||||
def my_loss(labels, preds):
    """YOLO-style training loss.

    labels: (batch, cellule_y, cellule_x, nbr_boxes, 5+nbr_classes) targets
        (x, y, w, h in grid-cell units, objectness flag, one-hot class).
    preds:  raw network output with the same leading shape.

    NOTE(review): this function also reads the module-level `labels2`
    array (flat per-image ground-truth box list) and indexes it with the
    in-batch index i — but labels2 is neither shuffled nor batched with
    the dataset, so the IoU-based no-object mask is computed against the
    wrong images for every batch after the first. Confirm and fix upstream.
    """
    # grid[y, x, b] = (x, y) coordinates of the cell, added to the
    # sigmoid-ed xy offsets to get absolute centres in grid-cell units.
    grid=tf.meshgrid(tf.range(config.cellule_x, dtype=tf.float32), tf.range(config.cellule_y, dtype=tf.float32))
    grid=tf.expand_dims(tf.stack(grid, axis=-1), axis=2)
    grid=tf.tile(grid, (1, 1, config.nbr_boxes, 1))

    # Decode predictions: xy in absolute cell units, wh kept in log space.
    preds_xy =tf.math.sigmoid(preds[:, :, :, :, 0:2])+grid
    preds_wh =preds[:, :, :, :, 2:4]
    preds_conf =tf.math.sigmoid(preds[:, :, :, :, 4])
    preds_classe=tf.math.sigmoid(preds[:, :, :, :, 5:])

    # Corner form of the predicted boxes, for IoU against labels2 boxes.
    # NOTE(review): preds_wh is still log-space here (no exp/anchor
    # scaling), so these corners/areas are not in the same units as the
    # labels2 boxes — confirm this is intended.
    preds_wh_half=preds_wh/2
    preds_xymin=preds_xy-preds_wh_half
    preds_xymax=preds_xy+preds_wh_half
    preds_areas=preds_wh[:, :, :, :, 0]*preds_wh[:, :, :, :, 1]

    # Flat ground-truth boxes: corners and precomputed areas.
    l2_xy_min=labels2[:, :, 0:2]
    l2_xy_max=labels2[:, :, 2:4]
    l2_area =labels2[:, :, 4]

    # Insert a box-list axis so each prediction broadcasts against every
    # ground-truth box of the image.
    preds_xymin=tf.expand_dims(preds_xymin, 4)
    preds_xymax=tf.expand_dims(preds_xymax, 4)
    preds_areas=tf.expand_dims(preds_areas, 4)

    # Targets: xy as-is; wh converted to log space relative to the anchors.
    labels_xy =labels[:, :, :, :, 0:2]
    labels_wh =tf.math.log(labels[:, :, :, :, 2:4]/config.anchors)
    # log(0/anchor) = -inf for empty slots; zero those out.
    labels_wh=tf.where(tf.math.is_inf(labels_wh), tf.zeros_like(labels_wh), labels_wh)

    # Objectness target doubles as the "responsible cell" mask.
    conf_mask_obj=labels[:, :, :, :, 4]
    labels_classe=labels[:, :, :, :, 5:]

    # No-object mask: cells with no assigned box AND whose best IoU against
    # any ground-truth box stays below the threshold are penalised.
    conf_mask_noobj=[]
    for i in range(len(preds)):
        xy_min=tf.maximum(preds_xymin[i], l2_xy_min[i])
        xy_max=tf.minimum(preds_xymax[i], l2_xy_max[i])
        intersect_wh=tf.maximum(xy_max-xy_min, 0.)
        intersect_areas=intersect_wh[..., 0]*intersect_wh[..., 1]
        union_areas=preds_areas[i]+l2_area[i]-intersect_areas
        ious=tf.truediv(intersect_areas, union_areas)
        best_ious=tf.reduce_max(ious, axis=3)
        conf_mask_noobj.append(tf.cast(best_ious<config.seuil_iou_loss, tf.float32)*(1-conf_mask_obj[i]))
    conf_mask_noobj=tf.stack(conf_mask_noobj)

    preds_x=preds_xy[..., 0]
    preds_y=preds_xy[..., 1]
    preds_w=preds_wh[..., 0]
    preds_h=preds_wh[..., 1]
    labels_x=labels_xy[..., 0]
    labels_y=labels_xy[..., 1]
    labels_w=labels_wh[..., 0]
    labels_h=labels_wh[..., 1]

    # Coordinate losses, only on cells responsible for an object.
    loss_xy=tf.reduce_sum(conf_mask_obj*(tf.math.square(preds_x-labels_x)+tf.math.square(preds_y-labels_y)), axis=(1, 2, 3))
    loss_wh=tf.reduce_sum(conf_mask_obj*(tf.math.square(preds_w-labels_w)+tf.math.square(preds_h-labels_h)), axis=(1, 2, 3))

    # Confidence: pushed to 1 on object cells, to 0 on no-object cells.
    loss_conf_obj=tf.reduce_sum(conf_mask_obj*tf.math.square(preds_conf-conf_mask_obj), axis=(1, 2, 3))
    loss_conf_noobj=tf.reduce_sum(conf_mask_noobj*tf.math.square(preds_conf-conf_mask_obj), axis=(1, 2, 3))

    # Classification loss on object cells only.
    loss_classe=tf.reduce_sum(tf.math.square(preds_classe-labels_classe), axis=4)
    loss_classe=tf.reduce_sum(conf_mask_obj*loss_classe, axis=(1, 2, 3))

    # Weighted sum, one scalar per batch element.
    loss=config.lambda_coord*loss_xy+config.lambda_coord*loss_wh+loss_conf_obj+config.lambda_noobj*loss_conf_noobj+loss_classe
    return loss
|
||||
|
||||
model=model.model(config.nbr_classes, config.nbr_boxes, config.cellule_y, config.cellule_x)
|
||||
|
||||
@tf.function
def train_step(images, labels):
    """One graph-compiled optimisation step on a single batch.

    Uses the module-level `model`, `optimizer` and `train_loss` objects;
    they are defined later in the file but before the first call, which is
    when tf.function traces this body.
    """
    with tf.GradientTape() as tape:
        predictions=model(images)
        loss=my_loss(labels, predictions)
    gradients=tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))
    # Accumulate the batch loss into the running mean metric.
    train_loss(loss)
|
||||
|
||||
def train(train_ds, nbr_entrainement):
    """Run `nbr_entrainement` epochs over `train_ds`, logging loss and timing.

    NOTE(review): the module-level `train_loss` metric is never reset
    between epochs, so the printed value is a running mean over ALL epochs
    so far — confirm this is intended.
    """
    for entrainement in range(nbr_entrainement):
        start=time.time()
        for images, labels in train_ds:
            train_step(images, labels)
        message='Entrainement {:04d}: loss: {:6.4f}, temps: {:7.4f}'
        print(message.format(entrainement+1,
                             train_loss.result(),
                             time.time()-start))
        # Save a checkpoint every 20 epochs (including epoch 0).
        if not entrainement%20:
            checkpoint.save(file_prefix="./training/")
|
||||
|
||||
optimizer=tf.keras.optimizers.Adam(learning_rate=1E-4)
train_loss=tf.keras.metrics.Mean()

# Single checkpoint object tracking the model.  (The original created a
# second, identical tf.train.Checkpoint immediately after the first —
# redundant, removed.)
checkpoint=tf.train.Checkpoint(model=model)
# Resume from the latest checkpoint if one exists; a no-op on a fresh run
# (latest_checkpoint returns None and restore does nothing).
checkpoint.restore(tf.train.latest_checkpoint("./training/"))

train(train_ds, 400)
checkpoint.save(file_prefix="./training/")
|
||||
Reference in New Issue
Block a user