Initial commit

This commit is contained in:
2026-03-31 13:28:59 +02:00
commit 7ec43ca17d
314 changed files with 189852 additions and 0 deletions

Binary file not shown.

After

Width:  |  Height:  |  Size: 45 KiB

View File

@@ -0,0 +1,33 @@
# Restore a trained VGG-style MNIST classifier from a checkpoint and print
# predictions for a few test digits (TensorFlow 1.x graph/session API).
import cv2
import numpy as np
import tensorflow as tf
from L42Project import ia as LPia  # NOTE(review): imported but never used in this script
# MNIST idx files: 16-byte header before images, 8-byte header before labels.
# Images become (N, 28, 28, 1) floats in [0, 1]; labels become one-hot rows.
mnist_test_images=np.fromfile("mnist/t10k-images-idx3-ubyte", dtype=np.uint8)[16:].reshape(-1, 28, 28, 1)/255
mnist_test_labels=np.eye(10)[np.fromfile("mnist/t10k-labels-idx1-ubyte", dtype=np.uint8)[8:]]
tf.reset_default_graph()
np.set_printoptions(formatter={'float': '{:0.3f}'.format})
with tf.Session() as s:
    # Rebuild the graph from the .meta file, then restore the latest weights.
    saver=tf.train.import_meta_graph('./mon_vgg/modele.meta')
    saver.restore(s, tf.train.latest_checkpoint('./mon_vgg/'))
    graph=tf.get_default_graph()
    # Named tensors exported by vgg.vggnet(): input, softmax output, BN flag.
    images=graph.get_tensor_by_name("images:0")
    sortie=graph.get_tensor_by_name("sortie:0")
    is_training=graph.get_tensor_by_name("is_training:0")
    while True:
        # NOTE(review): this hand-drawn digit is loaded and preprocessed but
        # never fed to the network — the active loop below iterates MNIST test
        # images instead; the commented `for i in test_images:` is the author's
        # toggle to switch sources.
        image=cv2.imread("/home/laurent/chiffre.png", cv2.IMREAD_GRAYSCALE)
        image=cv2.resize(image, (28, 28))
        image=image.reshape(28, 28, 1)/255
        test_images=[]
        test_images.append(image)
        test_images=np.asarray(test_images)
        #cv2.imshow('image', test_images[0])
        # Predict the first 10 MNIST test digits in inference mode (BN frozen).
        for i in mnist_test_images[0:10]:
        #for i in test_images:
            prediction=s.run(sortie, feed_dict={images: [i], is_training: False})
            print(prediction, np.argmax(prediction))
        #if cv2.waitKey()==ord('q'):
        #    break
        break  # unconditional: the while loop runs exactly once as written
cv2.destroyAllWindows()

View File

@@ -0,0 +1,50 @@
# Train a VGG-style network on MNIST (TensorFlow 1.x) and save the model
# under ./mon_vgg/. Prints per-epoch train/test accuracy.
import tensorflow as tf
import numpy as np
from sklearn.utils import shuffle
import vgg

taille_batch=100
nbr_entrainement=200

# MNIST idx files: skip the 16-byte image / 8-byte label headers; scale
# pixels to [0, 1] and one-hot encode the labels.
train_images=np.fromfile("mnist/train-images-idx3-ubyte", dtype=np.uint8)[16:].reshape(-1, 28, 28, 1)/255
train_labels=np.eye(10)[np.fromfile("mnist/train-labels-idx1-ubyte", dtype=np.uint8)[8:]]
test_images=np.fromfile("mnist/t10k-images-idx3-ubyte", dtype=np.uint8)[16:].reshape(-1, 28, 28, 1)/255
test_labels=np.eye(10)[np.fromfile("mnist/t10k-labels-idx1-ubyte", dtype=np.uint8)[8:]]

images, labels, is_training, sortie, train, accuracy, saver=vgg.vggnet()
with tf.Session() as s:
    s.run(tf.global_variables_initializer())
    tab_train=[]
    tab_test=[]
    for id_entrainement in np.arange(nbr_entrainement):
        print("> Entrainement", id_entrainement)
        # Reshuffle every epoch (the original shuffled only once, so every
        # epoch saw identical batches in identical order).
        train_images, train_labels=shuffle(train_images, train_labels)
        for batch in np.arange(0, len(train_images), taille_batch):
            s.run(train, feed_dict={
                images: train_images[batch:batch+taille_batch],
                labels: train_labels[batch:batch+taille_batch],
                is_training: True
            })
        print("  entrainement OK")
        # BUG FIX: both evaluation passes fed is_training=True, so accuracy
        # was measured with batch norm in training mode (per-batch statistics)
        # instead of the learned moving averages. Evaluate with False.
        tab_accuracy_train=[]
        for batch in np.arange(0, len(train_images), taille_batch):
            p=s.run(accuracy, feed_dict={
                images: train_images[batch:batch+taille_batch],
                labels: train_labels[batch:batch+taille_batch],
                is_training: False
            })
            tab_accuracy_train.append(p)
        print("  train:", np.mean(tab_accuracy_train))
        tab_accuracy_test=[]
        for batch in np.arange(0, len(test_images), taille_batch):
            p=s.run(accuracy, feed_dict={
                images: test_images[batch:batch+taille_batch],
                labels: test_labels[batch:batch+taille_batch],
                is_training: False
            })
            tab_accuracy_test.append(p)
        print("  test :", np.mean(tab_accuracy_test))
        # Store error rates (1 - accuracy) for the learning curve.
        tab_train.append(1-np.mean(tab_accuracy_train))
        tab_test.append(1-np.mean(tab_accuracy_test))
    saver.save(s, './mon_vgg/modele')

View File

@@ -0,0 +1,10 @@
# Tutoriel tensorflow
## Sauvegarde d'un modèle type VGGnet et réutilisation
La vidéo de ce tutoriel est disponible à l'adresse suivante: https://www.youtube.com/watch?v=vIOdUchtbhQ
Courbe d'apprentissage sur 200 cycles:
![graph apprentissage](https://github.com/L42Project/Tutoriels/blob/master/Tensorflow/tutoriel6/Figure_1.png)
L'apprentissage prend environ 6h30 sur une GeForce 1080.

200
Tensorflow/tutoriel6/log Normal file
View File

@@ -0,0 +1,200 @@
0:0.985800:0.982300
1:0.985783:0.982600
2:0.986883:0.983600
3:0.988517:0.984300
4:0.989633:0.983300
5:0.992733:0.989600
6:0.994267:0.989700
7:0.995400:0.990500
8:0.996600:0.991800
9:0.996267:0.990800
10:0.995133:0.990400
11:0.998000:0.993700
12:0.996100:0.991100
13:0.995583:0.991500
14:0.996600:0.990400
15:0.994667:0.991700
16:0.997500:0.992600
17:0.996317:0.991500
18:0.997017:0.992300
19:0.996850:0.992300
20:0.997167:0.991100
21:0.998567:0.994600
22:0.998583:0.993600
23:0.997950:0.992500
24:0.997000:0.993300
25:0.998600:0.993100
26:0.997083:0.992000
27:0.999250:0.994600
28:0.998517:0.994000
29:0.998583:0.993000
30:0.999050:0.994200
31:0.999150:0.993900
32:0.997417:0.992800
33:0.998617:0.994500
34:0.998950:0.994400
35:0.998950:0.993700
36:0.998167:0.992200
37:0.999650:0.995400
38:0.996217:0.991300
39:0.999250:0.993900
40:0.999650:0.993700
41:0.998233:0.992400
42:0.999333:0.994300
43:0.999650:0.995200
44:0.999567:0.995100
45:0.999167:0.993400
46:0.999150:0.994300
47:0.999183:0.994300
48:0.997917:0.992100
49:0.999400:0.994400
50:0.999917:0.994700
51:0.999450:0.993600
52:0.998317:0.993400
53:0.999700:0.994600
54:0.999850:0.994500
55:0.996717:0.993200
56:0.998883:0.994400
57:0.999900:0.994300
58:0.998650:0.994400
59:0.999500:0.994700
60:0.999017:0.993100
61:0.999783:0.994200
62:0.998567:0.992000
63:0.999300:0.994700
64:0.999950:0.995500
65:0.998917:0.993700
66:0.998783:0.992900
67:0.999083:0.993300
68:1.000000:0.994600
69:1.000000:0.994900
70:1.000000:0.994800
71:1.000000:0.994800
72:1.000000:0.994800
73:1.000000:0.994700
74:1.000000:0.994700
75:1.000000:0.994700
76:1.000000:0.994600
77:1.000000:0.994600
78:1.000000:0.994500
79:1.000000:0.994600
80:1.000000:0.994500
81:1.000000:0.994600
82:1.000000:0.994500
83:1.000000:0.994600
84:1.000000:0.994500
85:1.000000:0.994700
86:1.000000:0.994600
87:1.000000:0.994800
88:1.000000:0.994700
89:1.000000:0.994700
90:1.000000:0.994700
91:1.000000:0.994900
92:1.000000:0.994900
93:1.000000:0.994900
94:1.000000:0.994900
95:1.000000:0.994900
96:1.000000:0.994900
97:1.000000:0.994900
98:1.000000:0.994900
99:1.000000:0.994900
100:1.000000:0.994900
101:1.000000:0.994800
102:1.000000:0.994800
103:1.000000:0.994700
104:1.000000:0.994800
105:1.000000:0.994800
106:1.000000:0.994500
107:1.000000:0.994700
108:1.000000:0.994700
109:1.000000:0.994700
110:1.000000:0.994700
111:1.000000:0.994800
112:1.000000:0.994800
113:1.000000:0.994800
114:1.000000:0.994900
115:1.000000:0.994800
116:1.000000:0.994900
117:1.000000:0.994900
118:1.000000:0.995000
119:1.000000:0.995000
120:1.000000:0.994900
121:1.000000:0.994900
122:1.000000:0.994900
123:0.973983:0.971500
124:0.999267:0.993200
125:0.999800:0.994200
126:0.999883:0.994800
127:0.999767:0.994600
128:0.998750:0.994000
129:0.999750:0.993500
130:0.999733:0.993600
131:0.997083:0.991600
132:0.999683:0.994000
133:0.999783:0.994100
134:0.999267:0.993000
135:0.999700:0.994600
136:0.999350:0.993300
137:0.999600:0.993700
138:0.998900:0.993000
139:0.999400:0.993500
140:0.999667:0.994400
141:0.999050:0.992900
142:0.999017:0.993300
143:0.999700:0.994500
144:0.999067:0.993200
145:0.999900:0.995000
146:0.999617:0.994300
147:0.999283:0.992900
148:0.998833:0.993500
149:0.999883:0.994400
150:0.999783:0.994300
151:0.999617:0.994300
152:0.999783:0.994200
153:0.999817:0.994000
154:0.999517:0.992100
155:0.999900:0.993400
156:1.000000:0.994400
157:1.000000:0.994700
158:1.000000:0.994700
159:1.000000:0.994600
160:1.000000:0.994800
161:1.000000:0.994800
162:1.000000:0.994900
163:1.000000:0.994900
164:1.000000:0.995000
165:1.000000:0.994800
166:1.000000:0.994800
167:1.000000:0.994700
168:1.000000:0.994700
169:1.000000:0.994700
170:1.000000:0.994500
171:1.000000:0.994500
172:1.000000:0.994500
173:1.000000:0.994500
174:1.000000:0.994500
175:1.000000:0.994500
176:1.000000:0.994400
177:1.000000:0.994500
178:1.000000:0.994500
179:1.000000:0.994400
180:1.000000:0.994400
181:1.000000:0.994400
182:1.000000:0.994400
183:1.000000:0.994400
184:1.000000:0.994400
185:1.000000:0.994300
186:1.000000:0.994300
187:1.000000:0.994200
188:1.000000:0.994200
189:1.000000:0.994300
190:1.000000:0.994400
191:1.000000:0.994400
192:1.000000:0.994400
193:1.000000:0.994600
194:1.000000:0.994600
195:1.000000:0.994700
196:1.000000:0.994800
197:1.000000:0.994600
198:1.000000:0.994800
199:1.000000:0.994700

View File

@@ -0,0 +1,84 @@
import tensorflow as tf
import numpy as np
def convolution(couche_prec, taille_noyau, nbr_noyau):
    """2-D SAME convolution with trainable weights and biases.

    couche_prec: previous layer, NHWC tensor.
    taille_noyau: spatial kernel size (kernel is taille_noyau x taille_noyau).
    nbr_noyau: number of output filters.
    Returns the biased convolution output (no activation).
    """
    w=tf.Variable(tf.random.truncated_normal(shape=(taille_noyau, taille_noyau, int(couche_prec.get_shape()[-1]), nbr_noyau)))
    # BUG FIX: the bias was a plain NumPy array, i.e. a frozen constant that
    # the optimizer could never update. Make it a trainable variable, matching
    # the fc() helper below.
    b=tf.Variable(np.zeros(nbr_noyau), dtype=tf.float32)
    result=tf.nn.conv2d(couche_prec, w, strides=[1, 1, 1, 1], padding='SAME')+b
    return result
def fc(couche_prec, nbr_neurone):
    """Fully-connected layer: couche_prec @ W + b (no activation)."""
    nbr_entrees=int(couche_prec.get_shape()[-1])
    poids=tf.Variable(tf.random.truncated_normal(shape=(nbr_entrees, nbr_neurone), dtype=tf.float32))
    biais=tf.Variable(np.zeros(shape=(nbr_neurone)), dtype=tf.float32)
    return tf.matmul(couche_prec, poids)+biais
def vggnet(learning_rate=0.01, momentum=0.99):
    """Build a VGG-like graph for 28x28x1 inputs and 10 classes.

    learning_rate: Adam learning rate.
    momentum: batch-normalization moving-average momentum.
    Returns (images_ph, labels_ph, is_training_ph, softmax_output, train_op,
    accuracy, saver) — the same tuple, in the same order, as before.
    """
    ph_images=tf.placeholder(shape=(None, 28, 28, 1), dtype=tf.float32, name='images')
    ph_labels=tf.placeholder(shape=(None, 10), dtype=tf.float32)
    ph_is_training=tf.placeholder_with_default(False, (), name='is_training')

    def _conv_bn_relu(entree, nbr_noyau):
        # One conv 3x3 -> batch norm -> ReLU unit, as inlined in the original.
        couche=convolution(entree, 3, nbr_noyau)
        couche=tf.layers.batch_normalization(couche, training=ph_is_training, momentum=momentum)
        return tf.nn.relu(couche)

    # Five VGG stages, each a run of conv/BN/ReLU units closed by a 2x2 pool:
    # 2x64, 2x128, 3x256, 3x512, 3x512 — identical op sequence to the
    # original hand-unrolled version.
    result=ph_images
    for nbr_couches, nbr_noyaux in ((2, 64), (2, 128), (3, 256), (3, 512), (3, 512)):
        for _ in range(nbr_couches):
            result=_conv_bn_relu(result, nbr_noyaux)
        result=tf.nn.max_pool(result, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

    # Classifier head: flatten -> fc 512 (BN + ReLU) -> fc 10 -> softmax.
    result=tf.contrib.layers.flatten(result)
    result=fc(result, 512)
    result=tf.layers.batch_normalization(result, training=ph_is_training, momentum=momentum)
    result=tf.nn.relu(result)
    result=fc(result, 10)
    socs=tf.nn.softmax(result, name="sortie")
    loss=tf.nn.softmax_cross_entropy_with_logits_v2(labels=ph_labels, logits=result)
    # Run the BN moving-average updates alongside each optimizer step.
    extra_update_ops=tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(extra_update_ops):
        train=tf.train.AdamOptimizer(learning_rate).minimize(loss)
    accuracy=tf.reduce_mean(tf.cast(tf.equal(tf.argmax(socs, 1), tf.argmax(ph_labels, 1)), tf.float32))
    return ph_images, ph_labels, ph_is_training, socs, train, accuracy, tf.train.Saver()

View File

@@ -0,0 +1,30 @@
# Live digit recognition from a webcam using the trained VGG MNIST model
# (TensorFlow 1.x graph/session API). Press 'q' to quit.
import cv2
import numpy as np
import tensorflow as tf

cap=cv2.VideoCapture(0)
np.set_printoptions(formatter={'float': '{:0.3f}'.format})
with tf.Session() as s:
    # Rebuild the graph from the .meta file and restore the latest weights.
    saver=tf.train.import_meta_graph('./mon_vgg/modele.meta')
    saver.restore(s, tf.train.latest_checkpoint('./mon_vgg/'))
    graph=tf.get_default_graph()
    images=graph.get_tensor_by_name("images:0")
    sortie=graph.get_tensor_by_name("sortie:0")
    is_training=graph.get_tensor_by_name("is_training:0")
    while True:
        ret, frame=cap.read()
        if not ret:
            # BUG FIX: the original ignored the grab flag and crashed in
            # cvtColor when the camera returned no frame.
            break
        test=cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        test=cv2.resize(test, (28, 28))
        # Vectorized binarization replacing the 28x28 Python pixel loop:
        # dark pixels (<110) become 1 ("ink", as in MNIST), light become 0.
        test=(test<110).astype(np.uint8)
        cv2.imshow('image', cv2.resize(test, (120, 120))*255)
        # Predict in inference mode (batch norm uses its moving statistics).
        prediction=s.run(sortie, feed_dict={images: [test.reshape(28, 28, 1)], is_training: False})
        print(prediction, np.argmax(prediction))
        if cv2.waitKey(20)&0xFF==ord('q'):
            break
cap.release()
cv2.destroyAllWindows()