Initial commit
This commit is contained in:
2
.gitattributes
vendored
Normal file
2
.gitattributes
vendored
Normal file
@@ -0,0 +1,2 @@
|
||||
OpenCV/tutoriel3/cars.mp4 filter=lfs diff=lfs merge=lfs -text
|
||||
OpenCV/tutoriel3/test.mp4 filter=lfs diff=lfs merge=lfs -text
|
||||
50
.gitignore
vendored
Normal file
50
.gitignore
vendored
Normal file
@@ -0,0 +1,50 @@
|
||||
# ── Python ──────────────────────────────────────────────────────────────────
|
||||
__pycache__/
|
||||
*.py[cod]
|
||||
*.pyo
|
||||
*.pyd
|
||||
*.pyc
|
||||
|
||||
# ── Environnements virtuels ──────────────────────────────────────────────────
|
||||
venv/
|
||||
.venv/
|
||||
env/
|
||||
ENV/
|
||||
Pipfile.lock
|
||||
|
||||
# ── Secrets ───────────────────────────────────────────────────────────────────
|
||||
.env
|
||||
.env.*
|
||||
secret.env
|
||||
secrets.py
|
||||
*.secret
|
||||
credentials.json
|
||||
token.json
|
||||
|
||||
# ── Logs ─────────────────────────────────────────────────────────────────────
|
||||
*.log
|
||||
logs/
|
||||
|
||||
# ── IDE ───────────────────────────────────────────────────────────────────────
|
||||
.vscode/
|
||||
.idea/
|
||||
*.swp
|
||||
.DS_Store
|
||||
Thumbs.db
|
||||
|
||||
# ── Tests / Coverage ─────────────────────────────────────────────────────────
|
||||
.coverage
|
||||
htmlcov/
|
||||
.pytest_cache/
|
||||
|
||||
# ── Build ─────────────────────────────────────────────────────────────────────
|
||||
dist/
|
||||
build/
|
||||
*.egg-info/
|
||||
|
||||
# ── Modèles IA (trop lourds pour git) ───────────────────────────────────────
|
||||
*.gguf
|
||||
*.bin
|
||||
*.safetensors
|
||||
models/
|
||||
weights/
|
||||
4
Divers/decoupe_video/README.md
Normal file
4
Divers/decoupe_video/README.md
Normal file
@@ -0,0 +1,4 @@
|
||||
# Construction d'un dataset opensource
|
||||
|
||||
Dataset actuel: https://drive.google.com/file/d/1PS40KnziMB7uVcmLHfOQZiD5uWNF-QiS/view?usp=sharing
|
||||
|
||||
26
Divers/decoupe_video/extract_image_from_video.py
Normal file
26
Divers/decoupe_video/extract_image_from_video.py
Normal file
@@ -0,0 +1,26 @@
|
||||
import cv2
|
||||
import os
|
||||
|
||||
film='Le_chemin_du_passe.mp4'
|
||||
|
||||
if not os.path.exists(film):
|
||||
quit("Le film n'existe pas")
|
||||
|
||||
nom_film=film.split('.')[0]
|
||||
|
||||
cap=cv2.VideoCapture(film)
|
||||
|
||||
if not os.path.isdir(nom_film):
|
||||
os.mkdir(nom_film)
|
||||
|
||||
id=0
|
||||
while True:
|
||||
print("#", end="", flush=True)
|
||||
for cpt in range(500):
|
||||
ret, frame=cap.read()
|
||||
if frame is None:
|
||||
print("")
|
||||
cap.release()
|
||||
quit()
|
||||
cv2.imwrite("{}/{}-{:d}.png".format(nom_film, nom_film, id), frame)
|
||||
id+=1
|
||||
6
Divers/descente_gradient/README.md
Normal file
6
Divers/descente_gradient/README.md
Normal file
@@ -0,0 +1,6 @@
|
||||
# Algorithme d'apprentissage
|
||||
## La descente de gradient
|
||||
|
||||
La vidéo de ce tutoriel est disponible à l'adresse suivante: https://www.youtube.com/watch?v=0MEyDJa2GTc
|
||||
|
||||
|
||||
52
Divers/descente_gradient/comparaison.py
Normal file
52
Divers/descente_gradient/comparaison.py
Normal file
@@ -0,0 +1,52 @@
|
||||
from mpl_toolkits.mplot3d import Axes3D
|
||||
from matplotlib import cm
|
||||
from matplotlib.colors import LogNorm
|
||||
import matplotlib.pyplot as plt
|
||||
import numpy as np
|
||||
import math
|
||||
|
||||
def fonction(X, Y):
|
||||
return X*np.exp(-X**2-Y**2)+(X**2+Y**2)/20
|
||||
|
||||
def gradient_fonction(X, Y):
|
||||
g_x=np.exp(-X**2-Y**2)+X*-2*X*np.exp(-X**2-Y**2)+X/10
|
||||
g_y=-2*Y*X*np.exp(-X**2-Y**2)+Y/10
|
||||
return g_x, g_y
|
||||
|
||||
fig=plt.figure()
|
||||
fig.set_size_inches(9, 7, forward=True)
|
||||
ax=Axes3D(fig, azim=-29, elev=49)
|
||||
X=np.arange(-3, 3, 0.2)
|
||||
Y=np.arange(-3, 3, 0.2)
|
||||
X, Y=np.meshgrid(X, Y)
|
||||
Z=fonction(X, Y)
|
||||
ax.plot_wireframe(X, Y, Z, rstride=1, cstride=1)
|
||||
plt.xlabel("Paramètre 1 (x)")
|
||||
plt.ylabel("Paramètre 2 (y)")
|
||||
|
||||
x1=x2=np.random.random_integers(-2, 2)+np.random.rand(1)[0]
|
||||
y1=y2=np.random.random_integers(-2, 2)+np.random.rand(1)[0]
|
||||
|
||||
lr=0.2
|
||||
lr2=0.9
|
||||
correction_x1=0
|
||||
correction_y1=0
|
||||
i=0
|
||||
while True:
|
||||
g_x1, g_y1=gradient_fonction(x1, y1)
|
||||
g_x2, g_y2=gradient_fonction(x2, y2)
|
||||
|
||||
correction_x1=lr2*correction_x1-lr*g_x1
|
||||
x1=x1+correction_x1
|
||||
correction_y1=lr2*correction_y1-lr*g_y1
|
||||
y1=y1+correction_y1
|
||||
|
||||
x2=x2-lr*g_x2
|
||||
y2=y2-lr*g_y2
|
||||
|
||||
ax.scatter(x1, y1, fonction(x1, y1), marker='o', s=10, color='#FF0000')
|
||||
ax.scatter(x2, y2, fonction(x2, y2), marker='o', s=10, color='#00FF00')
|
||||
plt.draw()
|
||||
print("iteration= {} x1={:+7.5f} y1={:+7.5f} x2={:+7.5f} y2={:+7.5f}".format(i, x1, y1, x2, y2))
|
||||
plt.pause(0.05)
|
||||
i+=1
|
||||
23
Divers/descente_gradient/exemple_2d_1.py
Normal file
23
Divers/descente_gradient/exemple_2d_1.py
Normal file
@@ -0,0 +1,23 @@
|
||||
import numpy as np
|
||||
import matplotlib.pyplot as plt
|
||||
|
||||
def fonction(x):
|
||||
return x**2+3*x-2
|
||||
|
||||
def gradient_fonction(x):
|
||||
return 2*x+3
|
||||
|
||||
xvals=np.arange(-5, 3, 0.1)
|
||||
yvals=fonction(xvals)
|
||||
plt.plot(xvals, yvals)
|
||||
|
||||
x=np.random.random_integers(-4, 3)+np.random.rand(1)[0]
|
||||
lr=0.2
|
||||
i=0
|
||||
while True:
|
||||
plt.scatter(x, fonction(x), color='#FF0000')
|
||||
plt.draw()
|
||||
plt.pause(0.5)
|
||||
x=x-lr*gradient_fonction(x)
|
||||
print("itération {:3d} -> x={:+7.5f}".format(i, x))
|
||||
i+=1
|
||||
24
Divers/descente_gradient/exemple_2d_2.py
Normal file
24
Divers/descente_gradient/exemple_2d_2.py
Normal file
@@ -0,0 +1,24 @@
|
||||
import numpy as np
|
||||
import matplotlib.pyplot as plt
|
||||
|
||||
def fonction(x):
|
||||
return 3*x**4-4*x**3-12*x**2-0*x-3
|
||||
|
||||
def gradient_fonction(x):
|
||||
return 12*x**3-12*x**2-24*x
|
||||
|
||||
xvals=np.arange(-3, 4, 0.1)
|
||||
yvals=fonction(xvals)
|
||||
plt.plot(xvals, yvals)
|
||||
|
||||
x=np.random.random_integers(-3, 3)+np.random.rand(1)[0]
|
||||
i=0
|
||||
print("itération: {} x={}".format(i, x))
|
||||
lr=0.015
|
||||
while True:
|
||||
plt.scatter(x, fonction(x), color='#FF0000')
|
||||
plt.draw()
|
||||
plt.pause(0.5)
|
||||
x=x-lr*gradient_fonction(x)
|
||||
i+=1
|
||||
print("itération {:3d} -> x={}".format(i, x))
|
||||
27
Divers/descente_gradient/exemple_2d_2_inertie.py
Normal file
27
Divers/descente_gradient/exemple_2d_2_inertie.py
Normal file
@@ -0,0 +1,27 @@
|
||||
import numpy as np
|
||||
import matplotlib.pyplot as plt
|
||||
|
||||
def fonction(x):
|
||||
return 3*x**4-4*x**3-12*x**2-0*x-3
|
||||
|
||||
def gradient_fonction(x):
|
||||
return 12*x**3-12*x**2-24*x
|
||||
|
||||
xvals=np.arange(-3, 4, 0.1)
|
||||
yvals=fonction(xvals)
|
||||
plt.plot(xvals, yvals)
|
||||
|
||||
x=np.random.random_integers(-3, 3)+np.random.rand(1)[0]
|
||||
i=0
|
||||
print("itération: {} x={}".format(i, x))
|
||||
lr=0.015
|
||||
lr2=0.3
|
||||
correction=0
|
||||
while True:
|
||||
plt.scatter(x, fonction(x), color='#FF0000')
|
||||
plt.draw()
|
||||
plt.pause(0.5)
|
||||
correction=lr2*correction-lr*gradient_fonction(x)
|
||||
x=x+correction
|
||||
i+=1
|
||||
print("itération {:3d} -> x={}".format(i, x))
|
||||
42
Divers/descente_gradient/exemple_3d.py
Normal file
42
Divers/descente_gradient/exemple_3d.py
Normal file
@@ -0,0 +1,42 @@
|
||||
from mpl_toolkits.mplot3d import Axes3D
|
||||
from matplotlib import cm
|
||||
from matplotlib.colors import LogNorm
|
||||
import matplotlib.pyplot as plt
|
||||
import numpy as np
|
||||
import math
|
||||
|
||||
def fonction(X, Y):
|
||||
return X*np.exp(-X**2-Y**2)+(X**2+Y**2)/20
|
||||
|
||||
def gradient_fonction(X, Y):
|
||||
g_x=np.exp(-X**2-Y**2)+X*-2*X*np.exp(-X**2-Y**2)+X/10
|
||||
g_y=-2*Y*X*np.exp(-X**2-Y**2)+Y/10
|
||||
return g_x, g_y
|
||||
|
||||
fig=plt.figure()
|
||||
fig.set_size_inches(9, 7, forward=True)
|
||||
ax=Axes3D(fig, azim=-29, elev=49)
|
||||
X=np.arange(-3, 3, 0.2)
|
||||
Y=np.arange(-3, 3, 0.2)
|
||||
X, Y=np.meshgrid(X, Y)
|
||||
Z=fonction(X, Y)
|
||||
ax.plot_wireframe(X, Y, Z, rstride=1, cstride=1)
|
||||
plt.xlabel("Paramètre 1 (x)")
|
||||
plt.ylabel("Paramètre 2 (y)")
|
||||
|
||||
x=np.random.random_integers(-2, 2)+np.random.rand(1)[0]
|
||||
y=np.random.random_integers(-2, 2)+np.random.rand(1)[0]
|
||||
|
||||
lr=0.2
|
||||
correction_x=0
|
||||
correction_y=0
|
||||
i=0
|
||||
while True:
|
||||
g_x, g_y=gradient_fonction(x, y)
|
||||
x=x-lr*g_x
|
||||
y=y-lr*g_y
|
||||
ax.scatter(x, y, fonction(x, y), marker='o', s=10, color='#00FF00')
|
||||
plt.draw()
|
||||
print("itération {:3d} -> x={:+7.5f} y={:+7.5f}".format(i, x, y))
|
||||
plt.pause(0.05)
|
||||
i+=1
|
||||
47
Divers/descente_gradient/exemple_3d_inertie.py
Normal file
47
Divers/descente_gradient/exemple_3d_inertie.py
Normal file
@@ -0,0 +1,47 @@
|
||||
from mpl_toolkits.mplot3d import Axes3D
|
||||
from matplotlib import cm
|
||||
from matplotlib.colors import LogNorm
|
||||
import matplotlib.pyplot as plt
|
||||
import numpy as np
|
||||
import math
|
||||
|
||||
def fonction(X, Y):
|
||||
return X*np.exp(-X**2-Y**2)+(X**2+Y**2)/20
|
||||
|
||||
def gradient_fonction(X, Y):
|
||||
g_x=np.exp(-X**2-Y**2)+X*-2*X*np.exp(-X**2-Y**2)+X/10
|
||||
g_y=-2*Y*X*np.exp(-X**2-Y**2)+Y/10
|
||||
return g_x, g_y
|
||||
|
||||
fig=plt.figure()
|
||||
fig.set_size_inches(9, 7, forward=True)
|
||||
ax=Axes3D(fig, azim=-29, elev=49)
|
||||
X=np.arange(-3, 3, 0.2)
|
||||
Y=np.arange(-3, 3, 0.2)
|
||||
X, Y=np.meshgrid(X, Y)
|
||||
Z=fonction(X, Y)
|
||||
ax.plot_wireframe(X, Y, Z, rstride=1, cstride=1)
|
||||
#ax.contour(X, Y, Z, 70, rstride=1, cstride=1, cmap='plasma')
|
||||
|
||||
plt.xlabel("Paramètre 1 (x)")
|
||||
plt.ylabel("Paramètre 2 (y)")
|
||||
|
||||
x=np.random.random_integers(-2, 2)+np.random.rand(1)[0]
|
||||
y=np.random.random_integers(-2, 2)+np.random.rand(1)[0]
|
||||
|
||||
lr=0.2
|
||||
lr2=0.9
|
||||
correction_x=0
|
||||
correction_y=0
|
||||
i=0
|
||||
while True:
|
||||
g_x, g_y=gradient_fonction(x, y)
|
||||
correction_x=lr2*correction_x-lr*g_x
|
||||
x=x+correction_x
|
||||
correction_y=lr2*correction_y-lr*g_y
|
||||
y=y+correction_y
|
||||
ax.scatter(x, y, fonction(x, y), marker='o', s=10, color='#FF0000')
|
||||
plt.draw()
|
||||
print("itération {:3d} -> x={:+7.5f} y={:+7.5f}".format(i, x, y))
|
||||
plt.pause(0.05)
|
||||
i+=1
|
||||
35
Divers/descente_gradient/gradient.py
Normal file
35
Divers/descente_gradient/gradient.py
Normal file
@@ -0,0 +1,35 @@
|
||||
from mpl_toolkits.mplot3d import Axes3D
|
||||
from matplotlib import cm
|
||||
from matplotlib.colors import LogNorm
|
||||
import matplotlib.pyplot as plt
|
||||
import numpy as np
|
||||
import math
|
||||
|
||||
def fonction(X, Y):
|
||||
return X*np.exp(-X**2-Y**2)+(X**2+Y**2)/20
|
||||
|
||||
def gradient_fonction(X, Y):
|
||||
g_x=np.exp(-X**2-Y**2)+X*-2*X*np.exp(-X**2-Y**2)+X/10
|
||||
g_y=-2*Y*X*np.exp(-X**2-Y**2)+Y/10
|
||||
return g_x, g_y
|
||||
|
||||
fig=plt.figure()
|
||||
fig.set_size_inches(9, 7, forward=True)
|
||||
ax=Axes3D(fig, azim=-29, elev=49)
|
||||
X=np.arange(-3, 3, 0.2)
|
||||
Y=np.arange(-3, 3, 0.2)
|
||||
X, Y=np.meshgrid(X, Y)
|
||||
Z=fonction(X, Y)
|
||||
ax.plot_surface(X, Y, Z, rstride=1, cstride=1)
|
||||
plt.xlabel("Paramètre 1 (x)")
|
||||
plt.ylabel("Paramètre 2 (y)")
|
||||
|
||||
x, y=np.meshgrid(np.arange(-3, 3, 0.2),
|
||||
np.arange(-3, 3, 0.2))
|
||||
z=-1
|
||||
|
||||
u, v=gradient_fonction(x, y)
|
||||
w=0
|
||||
ax.quiver(x, y, z, u, v, w, length=0.15, normalize=True, color='#333333')
|
||||
|
||||
plt.show()
|
||||
6
Divers/jetson/README.md
Normal file
6
Divers/jetson/README.md
Normal file
@@ -0,0 +1,6 @@
|
||||
# Tutoriel 40
|
||||
## Inference sur Jetson Nano
|
||||
|
||||
La vidéo de ce tutoriel est disponible à l'adresse suivante: https://www.youtube.com/watch?v=xU9qlCy7j1c
|
||||
|
||||
|
||||
54
Divers/jetson/inference.py
Normal file
54
Divers/jetson/inference.py
Normal file
@@ -0,0 +1,54 @@
|
||||
import pyrealsense2 as rs
|
||||
import cv2
|
||||
import numpy as np
|
||||
import jetson.inference
|
||||
import jetson.utils
|
||||
import time
|
||||
|
||||
net=jetson.inference.detectNet("SSD-Inception-v2", threshold=0.5)
|
||||
#net=jetson.inference.detectNet("SSD-MobileNet-v2", threshold=0.5)
|
||||
display=jetson.utils.videoOutput("display://0")
|
||||
|
||||
pipeline=rs.pipeline()
|
||||
config=rs.config()
|
||||
|
||||
config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 15)
|
||||
config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 15)
|
||||
|
||||
align_to = rs.stream.color
|
||||
align = rs.align(align_to)
|
||||
|
||||
pipeline.start(config)
|
||||
|
||||
while True:
|
||||
|
||||
frames=pipeline.wait_for_frames()
|
||||
|
||||
aligned_frames = align.process(frames)
|
||||
|
||||
depth_frame=aligned_frames.get_depth_frame()
|
||||
color_frame=aligned_frames.get_color_frame()
|
||||
|
||||
if not depth_frame or not color_frame:
|
||||
continue
|
||||
|
||||
depth_image=np.array(depth_frame.get_data())
|
||||
color_image=np.array(color_frame.get_data())
|
||||
|
||||
depth_colormap=cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_JET)
|
||||
|
||||
start=time.time()
|
||||
cuda_image=jetson.utils.cudaFromNumpy(color_image)
|
||||
detections=net.Detect(cuda_image, color_image.shape[1], color_image.shape[0])
|
||||
print("Temps", time.time()-start)
|
||||
|
||||
display.Render(cuda_image)
|
||||
cuda_image=jetson.utils.cudaToNumpy(cuda_image)
|
||||
|
||||
cv2.imshow('RealSense1', depth_colormap)
|
||||
#cv2.imshow('RealSense2', color_image)
|
||||
cv2.imshow('cuda_image', cuda_image)
|
||||
key=cv2.waitKey(1)&0xFF
|
||||
if key==ord('q'):
|
||||
pipeline.stop()
|
||||
quit()
|
||||
72
Divers/jetson/inference_avec_distance.py
Normal file
72
Divers/jetson/inference_avec_distance.py
Normal file
@@ -0,0 +1,72 @@
|
||||
import pyrealsense2 as rs
|
||||
import cv2
|
||||
import numpy as np
|
||||
import jetson.inference
|
||||
import jetson.utils
|
||||
import time
|
||||
|
||||
net=jetson.inference.detectNet("SSD-Inception-v2", threshold=0.5)
|
||||
#net=jetson.inference.detectNet("SSD-MobileNet-v2", threshold=0.5)
|
||||
display=jetson.utils.videoOutput("display://0")
|
||||
|
||||
pipeline=rs.pipeline()
|
||||
config=rs.config()
|
||||
|
||||
config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 15)
|
||||
config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 15)
|
||||
|
||||
align_to = rs.stream.color
|
||||
align = rs.align(align_to)
|
||||
|
||||
pipeline.start(config)
|
||||
|
||||
while True:
|
||||
|
||||
frames=pipeline.wait_for_frames()
|
||||
|
||||
aligned_frames = align.process(frames)
|
||||
|
||||
depth_frame=aligned_frames.get_depth_frame()
|
||||
color_frame=aligned_frames.get_color_frame()
|
||||
|
||||
if not depth_frame or not color_frame:
|
||||
continue
|
||||
|
||||
depth_image=np.array(depth_frame.get_data())
|
||||
color_image=np.array(color_frame.get_data())
|
||||
|
||||
depth_colormap=cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_JET)
|
||||
|
||||
start=time.time()
|
||||
cuda_image=jetson.utils.cudaFromNumpy(color_image)
|
||||
detections=net.Detect(cuda_image, color_image.shape[1], color_image.shape[0])
|
||||
|
||||
print("#######################################")
|
||||
print("Temps", time.time()-start)
|
||||
for detection in detections:
|
||||
print(detection)
|
||||
x, y=detection.Center
|
||||
x1=int(x-detection.Width/2)
|
||||
y1=int(y-detection.Height/2)
|
||||
x2=int(x+detection.Width/2)
|
||||
y2=int(y+detection.Height/2)
|
||||
if detection.ClassID==1:
|
||||
cv2.rectangle(color_image, (x1, y1), (x2, y2), (255, 0, 0), 2)
|
||||
dist=depth_frame.get_distance(int(x), int(y))
|
||||
if dist<1:
|
||||
msg="{:2.0f} cm".format(dist*100)
|
||||
else:
|
||||
msg="{:4.2f} m".format(dist)
|
||||
cv2.putText(color_image, msg, (int(x), int(y)), cv2.FONT_HERSHEY_DUPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
|
||||
|
||||
cv2.imshow('RealSense1', depth_colormap)
|
||||
cv2.imshow('RealSense2', color_image)
|
||||
|
||||
display.Render(cuda_image)
|
||||
cuda_image=jetson.utils.cudaToNumpy(cuda_image)
|
||||
#display.SetStatus("Object Detection | Network {:.0f} FPS".format(net.GetNetworkFPS()))
|
||||
cv2.imshow('cuda_image', cuda_image)
|
||||
key=cv2.waitKey(1)&0xFF
|
||||
if key==ord('q'):
|
||||
pipeline.stop()
|
||||
quit()
|
||||
5
Divers/odrive/README.md
Normal file
5
Divers/odrive/README.md
Normal file
@@ -0,0 +1,5 @@
|
||||
# Parlons actuator, odrive, moteur ...
|
||||
## Exemple de code avec la carte odrive robotics
|
||||
|
||||
La vidéo de ce tutoriel est disponible à l'adresse suivante: https://www.youtube.com/watch?v=Mg-KiG3Rq2Q
|
||||
|
||||
49
Divers/odrive/test0.py
Normal file
49
Divers/odrive/test0.py
Normal file
@@ -0,0 +1,49 @@
|
||||
import time
|
||||
import odrive
|
||||
from odrive.enums import *
|
||||
|
||||
accel=30.
|
||||
vel=4.
|
||||
calibration=False
|
||||
|
||||
odrv0=odrive.find_any(serial_number='205C3690424D')
|
||||
|
||||
if calibration:
|
||||
print("Calibration...", end='', flush=True)
|
||||
odrv0.axis0.requested_state=4
|
||||
odrv0.axis1.requested_state=4
|
||||
|
||||
while odrv0.axis0.current_state != AXIS_STATE_IDLE:
|
||||
time.sleep(0.1)
|
||||
while odrv0.axis1.current_state != AXIS_STATE_IDLE:
|
||||
time.sleep(0.1)
|
||||
print("OK")
|
||||
|
||||
odrv0.axis0.controller.config.input_mode=INPUT_MODE_TRAP_TRAJ
|
||||
odrv0.axis0.trap_traj.config.vel_limit=vel
|
||||
odrv0.axis0.trap_traj.config.accel_limit=accel
|
||||
odrv0.axis0.trap_traj.config.decel_limit=accel
|
||||
odrv0.axis0.controller.config.inertia=0
|
||||
|
||||
odrv0.axis1.controller.config.input_mode=INPUT_MODE_TRAP_TRAJ
|
||||
odrv0.axis1.trap_traj.config.vel_limit=vel
|
||||
odrv0.axis1.trap_traj.config.accel_limit=accel
|
||||
odrv0.axis1.trap_traj.config.decel_limit=accel
|
||||
odrv0.axis1.controller.config.inertia=0
|
||||
|
||||
odrv0.axis0.requested_state=AXIS_STATE_CLOSED_LOOP_CONTROL
|
||||
odrv0.axis1.requested_state=AXIS_STATE_CLOSED_LOOP_CONTROL
|
||||
|
||||
pos0=odrv0.axis0.encoder.pos_estimate
|
||||
pos1=odrv0.axis1.encoder.pos_estimate
|
||||
shift=1/5
|
||||
|
||||
while True:
|
||||
pos0_finale=pos0+shift
|
||||
pos1_finale=pos1+shift
|
||||
odrv0.axis0.controller.input_pos=pos0_finale
|
||||
odrv0.axis1.controller.input_pos=pos1_finale
|
||||
while abs(odrv0.axis0.encoder.pos_estimate-pos0_finale)>0.02 or \
|
||||
abs(odrv0.axis1.encoder.pos_estimate-pos1_finale)>0.02:
|
||||
time.sleep(0.1)
|
||||
shift=-shift
|
||||
55
Divers/odrive/test1.py
Normal file
55
Divers/odrive/test1.py
Normal file
@@ -0,0 +1,55 @@
|
||||
import time
|
||||
import odrive
|
||||
from odrive.enums import *
|
||||
|
||||
accel=10.
|
||||
vel=3.
|
||||
calibration=False
|
||||
|
||||
odrv0=odrive.find_any(serial_number='205C3690424D')
|
||||
|
||||
if calibration:
|
||||
print("Calibration...", end='', flush=True)
|
||||
odrv0.axis0.requested_state=4
|
||||
odrv0.axis1.requested_state=4
|
||||
|
||||
while odrv0.axis0.current_state != AXIS_STATE_IDLE:
|
||||
time.sleep(0.1)
|
||||
while odrv0.axis1.current_state != AXIS_STATE_IDLE:
|
||||
time.sleep(0.1)
|
||||
|
||||
print("OK")
|
||||
|
||||
odrv0.axis0.controller.config.control_mode = CONTROL_MODE_VELOCITY_CONTROL
|
||||
odrv0.axis0.controller.config.input_mode = INPUT_MODE_VEL_RAMP
|
||||
odrv0.axis1.controller.config.control_mode = CONTROL_MODE_VELOCITY_CONTROL
|
||||
odrv0.axis1.controller.config.input_mode = INPUT_MODE_VEL_RAMP
|
||||
|
||||
odrv0.axis0.controller.config.vel_ramp_rate=accel
|
||||
odrv0.axis1.controller.config.vel_ramp_rate=accel
|
||||
|
||||
odrv0.axis0.requested_state=AXIS_STATE_CLOSED_LOOP_CONTROL
|
||||
odrv0.axis1.requested_state=AXIS_STATE_CLOSED_LOOP_CONTROL
|
||||
|
||||
odrv0.axis0.controller.input_vel=vel
|
||||
odrv0.axis1.controller.input_vel=0
|
||||
|
||||
evenement=time.time()
|
||||
id=0
|
||||
while True:
|
||||
if id==0:
|
||||
torque=odrv0.axis0.motor.current_control.Iq_setpoint*odrv0.axis0.motor.config.torque_constant
|
||||
else:
|
||||
torque=odrv0.axis1.motor.current_control.Iq_setpoint*odrv0.axis1.motor.config.torque_constant
|
||||
if abs(torque)>0.2 and (time.time()-evenement)>1:
|
||||
if id==0:
|
||||
odrv0.axis0.controller.input_vel=0
|
||||
odrv0.axis1.controller.input_vel=vel
|
||||
id=1
|
||||
else:
|
||||
odrv0.axis1.controller.input_vel=0
|
||||
odrv0.axis0.controller.input_vel=vel
|
||||
id=0
|
||||
evenement0=time.time()
|
||||
|
||||
|
||||
40
Divers/odrive/test2.py
Normal file
40
Divers/odrive/test2.py
Normal file
@@ -0,0 +1,40 @@
|
||||
import time
|
||||
import odrive
|
||||
from odrive.enums import *
|
||||
|
||||
accel=20.
|
||||
vel=5.
|
||||
ratio=2
|
||||
calibration=False
|
||||
|
||||
odrv0=odrive.find_any(serial_number='205C3690424D')
|
||||
|
||||
if calibration:
|
||||
print("Calibration...", end='', flush=True)
|
||||
odrv0.axis0.requested_state=4
|
||||
odrv0.axis1.requested_state=4
|
||||
|
||||
while odrv0.axis0.current_state != AXIS_STATE_IDLE:
|
||||
time.sleep(0.1)
|
||||
while odrv0.axis1.current_state != AXIS_STATE_IDLE:
|
||||
time.sleep(0.1)
|
||||
|
||||
print("OK")
|
||||
|
||||
odrv0.axis0.requested_state=AXIS_STATE_CLOSED_LOOP_CONTROL
|
||||
odrv0.axis1.requested_state=AXIS_STATE_CLOSED_LOOP_CONTROL
|
||||
|
||||
odrv0.axis0.controller.config.input_mode = INPUT_MODE_TRAP_TRAJ
|
||||
odrv0.axis0.trap_traj.config.vel_limit=vel
|
||||
odrv0.axis0.trap_traj.config.accel_limit=accel
|
||||
odrv0.axis0.trap_traj.config.decel_limit=accel
|
||||
odrv0.axis0.controller.config.inertia=0
|
||||
|
||||
pos0=odrv0.axis0.encoder.pos_estimate
|
||||
pos1=odrv0.axis1.encoder.pos_estimate
|
||||
|
||||
odrv0.axis1.requested_state=AXIS_STATE_IDLE
|
||||
|
||||
while True:
|
||||
delta1=odrv0.axis1.encoder.pos_estimate-pos1
|
||||
odrv0.axis0.controller.input_pos=pos0+ratio*delta1
|
||||
5
Divers/renforcement1/README.md
Normal file
5
Divers/renforcement1/README.md
Normal file
@@ -0,0 +1,5 @@
|
||||
# Apprentissage par renforcement
|
||||
## Processus de décision markovien
|
||||
|
||||
La vidéo de ce tutoriel est disponible à l'adresse suivante:<br>
|
||||
https://www.youtube.com/watch?v=Rgxs8lfoG4I
|
||||
35
Divers/renforcement1/q_valeur.py
Normal file
35
Divers/renforcement1/q_valeur.py
Normal file
@@ -0,0 +1,35 @@
|
||||
import numpy as np
|
||||
import time
|
||||
|
||||
Q=[[0, 0], [0, 0], [0, 0]]
|
||||
|
||||
T=[[[0.50, 0.00, 0.50], [0.00, 0.00, 1.00]],
|
||||
[[0.70, 0.10, 0.20], [0.00, 0.95, 0.05]],
|
||||
[[0.40, 0.00, 0.60], [0.30, 0.30, 0.40]]]
|
||||
|
||||
R=[[[ 0.00, 0.00, 0.00], [ 0.00, 0.00, 0.00]],
|
||||
[[+5.00, 0.00, 0.00], [ 0.00, 0.00, 0.00]],
|
||||
[[ 0.00, 0.00, 0.00], [-1.00, 0.00, 0.00]]]
|
||||
|
||||
gamma=0.95
|
||||
|
||||
for i in range(200):
|
||||
time.sleep(0.05)
|
||||
tab_somme_action=[]
|
||||
for S in range(3):
|
||||
for A in range(2):
|
||||
somme=0
|
||||
for s in range(3):
|
||||
somme+=T[S][A][s]*(R[S][A][s]+gamma*np.max(Q[s]))
|
||||
Q[S][A]=somme
|
||||
|
||||
print("---------------------------------")
|
||||
print("Iteration:", i)
|
||||
for S in range(3):
|
||||
print()
|
||||
for A in range(2):
|
||||
text="Q[etat:{}, action:{}]={:+10.4f}".format(S, A, Q[S][A])
|
||||
if A==np.argmax(Q[S]):
|
||||
text=text+" <-"
|
||||
print(text)
|
||||
print("---------------------------------")
|
||||
12
Divers/renforcement2/CartPole_common.py
Normal file
12
Divers/renforcement2/CartPole_common.py
Normal file
@@ -0,0 +1,12 @@
|
||||
import numpy as np
|
||||
|
||||
# Valeurs hautes et basses des observations
|
||||
low_values=np.array([-5, -5, -0.45, -5])
|
||||
high_values=np.array([5, 5, 0.45, 5])
|
||||
|
||||
division=[42, 42, 42, 42]
|
||||
pas=(high_values-low_values)/division
|
||||
|
||||
def discretise(state):
|
||||
discrete_state=(state-low_values)/pas
|
||||
return tuple(discrete_state.astype(np.int))
|
||||
25
Divers/renforcement2/CartPole_predict.py
Normal file
25
Divers/renforcement2/CartPole_predict.py
Normal file
@@ -0,0 +1,25 @@
|
||||
import gym
|
||||
import numpy as np
|
||||
import CartPole_common
|
||||
|
||||
env=gym.make("CartPole-v0")
|
||||
env._max_episode_steps=5000
|
||||
|
||||
q_table=np.load("CartPole_qtable.npy")
|
||||
|
||||
for epoch in range(1000):
|
||||
state = env.reset()
|
||||
score = 0
|
||||
while True:
|
||||
env.render()
|
||||
discrete_state=CartPole_common.discretise(state)
|
||||
action=np.argmax(q_table[discrete_state])
|
||||
#if not np.random.randint(5):
|
||||
# action=np.random.randint(2)
|
||||
state, reward, done, info=env.step(action)
|
||||
score+=reward
|
||||
if done:
|
||||
print('Essai {:05d} Score: {:04d}'.format(epoch, int(score)))
|
||||
break
|
||||
|
||||
env.close()
|
||||
82
Divers/renforcement2/CartPole_train.py
Normal file
82
Divers/renforcement2/CartPole_train.py
Normal file
@@ -0,0 +1,82 @@
|
||||
import gym
|
||||
import numpy as np
|
||||
import cv2
|
||||
import CartPole_common
|
||||
|
||||
env=gym.make("CartPole-v0")
|
||||
env._max_episode_steps=500
|
||||
|
||||
alpha=0.05
|
||||
gamma=0.98
|
||||
|
||||
epoch=50000
|
||||
show_every=500
|
||||
|
||||
epsilon=1.
|
||||
epsilon_min=0.05
|
||||
start_epsilon=1
|
||||
end_epsilon=epoch//2
|
||||
epsilon_decay_value=epsilon/(end_epsilon-start_epsilon)
|
||||
|
||||
nbr_action=env.action_space.n
|
||||
q_table=np.random.uniform(low=-1, high=1, size=(CartPole_common.division+[nbr_action]))
|
||||
|
||||
result_done=0
|
||||
scores=[]
|
||||
best_score=0
|
||||
for episode in range(epoch):
|
||||
obs=env.reset()
|
||||
discrete_state=CartPole_common.discretise(obs)
|
||||
done=False
|
||||
|
||||
if episode%show_every == 0:
|
||||
render=True
|
||||
mean_score=np.mean(scores)
|
||||
print("Epoch {:06d}/{:06d} reussite:{:04d}/{:04d} epsilon={:06.4f} Mean score={:08.4f} alpha={:06.4f}".format(episode, epoch, result_done, show_every, epsilon, mean_score, alpha))
|
||||
scores=[]
|
||||
result_done=0
|
||||
if mean_score>best_score:
|
||||
print("Sauvegarde ...")
|
||||
np.save("CartPole_qtable", q_table)
|
||||
best_score=mean_score
|
||||
alpha=alpha*0.99
|
||||
|
||||
else:
|
||||
render=False
|
||||
|
||||
score=1
|
||||
while not done:
|
||||
|
||||
if np.random.random()>epsilon:
|
||||
action=np.argmax(q_table[discrete_state])
|
||||
else:
|
||||
action=np.random.randint(nbr_action)
|
||||
|
||||
new_state, reward, done, info=env.step(action)
|
||||
new_discrete_state=CartPole_common.discretise(new_state)
|
||||
|
||||
if episode%show_every == 0:
|
||||
env.render()
|
||||
|
||||
#reward=2-np.abs(new_state[0])
|
||||
if done:
|
||||
scores.append(score)
|
||||
if score==env._max_episode_steps:
|
||||
result_done+=1
|
||||
else:
|
||||
reward=-10
|
||||
|
||||
max_future_q=np.max(q_table[new_discrete_state])
|
||||
current_q=q_table[discrete_state][action]
|
||||
new_q=(1-alpha)*current_q+alpha*(reward+gamma*max_future_q)
|
||||
q_table[discrete_state][action]=new_q
|
||||
|
||||
score+=1
|
||||
discrete_state=new_discrete_state
|
||||
|
||||
if end_epsilon>=episode>=start_epsilon:
|
||||
epsilon-=epsilon_decay_value
|
||||
if epsilon<epsilon_min:
|
||||
epsilon=epsilon_min
|
||||
|
||||
env.close()
|
||||
12
Divers/renforcement2/MountainCar_common.py
Normal file
12
Divers/renforcement2/MountainCar_common.py
Normal file
@@ -0,0 +1,12 @@
|
||||
import numpy as np
|
||||
|
||||
# Valeurs hautes et basses des observations
|
||||
low_values =np.array([-1.2, -0.07])
|
||||
high_values=np.array([0.6, 0.07])
|
||||
|
||||
division=[42, 42]
|
||||
pas=(high_values-low_values)/division
|
||||
|
||||
def discretise(state):
|
||||
discrete_state=(state-low_values)/pas
|
||||
return tuple(discrete_state.astype(np.int))
|
||||
19
Divers/renforcement2/MountainCar_predict.py
Normal file
19
Divers/renforcement2/MountainCar_predict.py
Normal file
@@ -0,0 +1,19 @@
|
||||
import gym
|
||||
import numpy as np
|
||||
import MountainCar_common
|
||||
|
||||
env=gym.make("MountainCar-v0")
|
||||
|
||||
q_table=np.load("MountainCar_qtable.npy")
|
||||
|
||||
for epoch in range(1000):
|
||||
state = env.reset()
|
||||
while True:
|
||||
env.render()
|
||||
discrete_state=MountainCar_common.discretise(state)
|
||||
action=np.argmax(q_table[discrete_state])
|
||||
state, reward, done, info=env.step(action)
|
||||
if done:
|
||||
print("Essai {:05d}: {}".format(epoch, "OK" if state[0]>=env.goal_position else "raté ..."))
|
||||
break
|
||||
env.close()
|
||||
68
Divers/renforcement2/MountainCar_train.py
Normal file
68
Divers/renforcement2/MountainCar_train.py
Normal file
@@ -0,0 +1,68 @@
|
||||
import gym
|
||||
import numpy as np
|
||||
import MountainCar_common
|
||||
|
||||
env=gym.make("MountainCar-v0")
|
||||
|
||||
# Coefficient d'apprentissage
|
||||
alpha=0.1
|
||||
# Le "discount rate"
|
||||
gamma=0.98
|
||||
|
||||
epoch=25000
|
||||
show_every=500
|
||||
|
||||
# Politique exploration/exploitation
|
||||
epsilon=1.
|
||||
epsilon_min=0.1
|
||||
start_epsilon=1
|
||||
end_epsilon=epoch//2
|
||||
epsilon_decay_value=epsilon/(end_epsilon-start_epsilon)
|
||||
|
||||
nbr_action=env.action_space.n
|
||||
q_table=np.random.uniform(low=-1, high=1, size=(MountainCar_common.division+[nbr_action]))
|
||||
|
||||
OK=0
|
||||
for episode in range(epoch):
|
||||
obs=env.reset()
|
||||
discrete_state=MountainCar_common.discretise(obs)
|
||||
done=False
|
||||
|
||||
if episode%show_every == 0:
|
||||
render=True
|
||||
print("epoch {:06d}/{:06d} reussite:{:04d}/{:04d} epsilon={:08.6f}".format(episode, epoch, OK, show_every, epsilon))
|
||||
OK=0
|
||||
else:
|
||||
render=False
|
||||
|
||||
while not done:
|
||||
|
||||
if np.random.random()>epsilon:
|
||||
action=np.argmax(q_table[discrete_state])
|
||||
else:
|
||||
action=np.random.randint(nbr_action)
|
||||
|
||||
new_state, reward, done, info=env.step(action)
|
||||
new_discrete_state=MountainCar_common.discretise(new_state)
|
||||
if episode%show_every == 0:
|
||||
env.render()
|
||||
|
||||
if new_state[0]>=env.goal_position:
|
||||
reward=1
|
||||
OK+=1
|
||||
|
||||
# Mise à jour de Q(s, a) avec la formule de Bellman
|
||||
max_future_q=np.max(q_table[new_discrete_state])
|
||||
current_q=q_table[discrete_state][action]
|
||||
new_q=(1-alpha)*current_q+alpha*(reward+gamma*max_future_q)
|
||||
q_table[discrete_state][action]=new_q
|
||||
|
||||
discrete_state=new_discrete_state
|
||||
|
||||
if end_epsilon>=episode>=start_epsilon:
|
||||
epsilon-=epsilon_decay_value
|
||||
if epsilon<epsilon_min:
|
||||
epsilon=epsilon_min
|
||||
|
||||
np.save("MountainCar_qtable", q_table)
|
||||
env.close()
|
||||
6
Divers/renforcement2/README.md
Normal file
6
Divers/renforcement2/README.md
Normal file
@@ -0,0 +1,6 @@
|
||||
# Apprentissage par renforcement
|
||||
## Equation de Bellman
|
||||
|
||||
La vidéo de ce tutoriel est disponible à l'adresse suivante:<br>
|
||||
https://www.youtube.com/watch?v=4Ak6OyehqJc
|
||||
|
||||
7
Divers/renforcement3/README.md
Normal file
7
Divers/renforcement3/README.md
Normal file
@@ -0,0 +1,7 @@
|
||||
# Apprentissage par renforcement
|
||||
## Q learning "basique" avec un perceptron
|
||||
|
||||
La vidéo de ce tutoriel est disponible à l'adresse suivante:<br>
|
||||
https://www.youtube.com/watch?v=03U-3BOqfMs
|
||||
|
||||
|
||||
25
Divers/renforcement3/graph.py
Normal file
25
Divers/renforcement3/graph.py
Normal file
@@ -0,0 +1,25 @@
|
||||
import numpy as np
|
||||
import matplotlib.pyplot as plot
|
||||
import sys
|
||||
|
||||
fenetre=50
|
||||
max_score=500
|
||||
|
||||
if len(sys.argv)!=2:
|
||||
print("Usage:", sys.argv[0], "<fichier npy>")
|
||||
quit()
|
||||
|
||||
tab_s=np.load(sys.argv[1])
|
||||
|
||||
tab_m=[]
|
||||
for i in range(len(tab_s)-fenetre):
|
||||
m=np.mean(tab_s[i:i+fenetre])
|
||||
tab_m.append(m)
|
||||
|
||||
fig=plot.gcf()
|
||||
fig.set_size_inches(12, 6)
|
||||
plot.plot(tab_s)
|
||||
plot.grid()
|
||||
plot.ylim(0, max_score)
|
||||
plot.plot(np.arange(fenetre, len(tab_s)), tab_m, color='#FF0000')
|
||||
plot.show()
|
||||
21
Divers/renforcement3/predict.py
Normal file
21
Divers/renforcement3/predict.py
Normal file
@@ -0,0 +1,21 @@
|
||||
"""Run the trained CartPole Q-network greedily and render the episodes."""
import gym
import tensorflow as tf
import numpy as np

env=gym.make("CartPole-v0")
env._max_episode_steps=500

# Checkpoint written by the companion training script.
model=tf.keras.models.load_model("my_model")

# Play greedy episodes forever, rendering each frame.
while True:
    observations=env.reset()
    score=0
    while True:
        env.render()
        valeurs_q=model(np.expand_dims(observations, axis=0))
        # Greedy action: index of the highest predicted Q-value.
        action=int(tf.argmax(valeurs_q[0], axis=-1))
        observations, reward, done, info=env.step(action)
        if done:
            print("SCORE", score)
            break
        score+=1
|
||||
108
Divers/renforcement3/train.py
Normal file
108
Divers/renforcement3/train.py
Normal file
@@ -0,0 +1,108 @@
|
||||
"""Basic DQN on CartPole-v0: epsilon-greedy rollouts, one update per episode."""
import gym
import tensorflow as tf
from tensorflow.keras import models, layers
import numpy as np

env = gym.make("CartPole-v0")
env._max_episode_steps=500
nbr_action=2

# Discount factor for future rewards.
gamma=tf.constant(0.98)
epoch=20000
best_score=0

# Linear epsilon schedule: decays from 1.0 toward epsilon_min between
# episodes start_epsilon and end_epsilon.
epsilon=1.
epsilon_min=0.10
start_epsilon=1
end_epsilon=epoch//2
epsilon_decay_value=epsilon/(end_epsilon-start_epsilon)


def model():
    """Build the Q-network: 4 observation inputs, one Q-value per action."""
    entree=layers.Input(shape=(4), dtype='float32')
    result=layers.Dense(30, activation='relu')(entree)
    result=layers.Dense(30, activation='relu')(result)
    sortie=layers.Dense(nbr_action)(result)

    model=models.Model(inputs=entree, outputs=sortie)
    return model


def my_loss(target_q, predicted_q):
    """Mean squared error between target and predicted Q-values."""
    loss=tf.reduce_mean(tf.math.square(target_q-predicted_q))
    return loss


@tf.function
def train_step(reward, action, observation, next_observation, done):
    """One gradient step of Q-learning on a batch of transitions."""
    # Bootstrap targets: r + gamma * max_a' Q(s', a'), zeroed on terminal steps.
    next_Q_values=model(next_observation)
    best_next_actions=tf.math.argmax(next_Q_values, axis=1)
    next_mask=tf.one_hot(best_next_actions, nbr_action)
    next_best_Q_values=tf.reduce_sum(next_Q_values*next_mask, axis=1)
    target_Q_values=reward+(1-done)*gamma*next_best_Q_values
    target_Q_values=tf.reshape(target_Q_values, (-1, 1))
    # Only the Q-value of the action actually taken contributes to the loss.
    mask=tf.one_hot(action, nbr_action)
    with tf.GradientTape() as tape:
        all_Q_values=model(observation)
        Q_values=tf.reduce_sum(all_Q_values*mask, axis=1, keepdims=True)
        loss=my_loss(target_Q_values, Q_values)
    gradients=tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))
    train_loss(loss)


def train(debug=False):
    """Play epsilon-greedy episodes and train on each full episode."""
    global epsilon, best_score, tab_score
    for e in range(epoch):
        print("EPOCH:", e, "epsilon", epsilon)
        score=0
        tab_observations=[]
        tab_rewards=[]
        tab_actions=[]
        tab_next_observations=[]
        tab_done=[]

        observations=env.reset()
        while True:
            tab_observations.append(observations)
            if np.random.random()>epsilon:
                # Exploit: greedy action from the current network.
                valeurs_q=model(np.expand_dims(observations, axis=0))
                action=int(tf.argmax(valeurs_q[0], axis=-1))
            else:
                # Explore: uniformly random action.
                action=np.random.randint(0, nbr_action)
            observations, reward, done, info=env.step(action)
            tab_actions.append(action)
            tab_next_observations.append(observations)
            tab_done.append(done)
            if done:
                # Penalize the terminal transition.
                tab_rewards.append(-10.)
                print("FIN, score:", score)
                tab_score.append(score)
                score=0
                break
            score+=1
            tab_rewards.append(reward)

        # Convert the episode to batched arrays and do one training step.
        tab_rewards=np.array(tab_rewards, dtype=np.float32)
        tab_actions=np.array(tab_actions, dtype=np.int32)
        tab_observations=np.array(tab_observations, dtype=np.float32)
        tab_next_observations=np.array(tab_next_observations, dtype=np.float32)
        tab_done=np.array(tab_done, dtype=np.float32)
        train_step(tab_rewards, tab_actions, tab_observations, tab_next_observations, tab_done)
        train_loss.reset_states()

        epsilon-=epsilon_decay_value
        epsilon=max(epsilon, epsilon_min)
        if np.mean(tab_score[-20:])>best_score:
            print("Sauvegarde du modele")
            model.save("my_model")
            best_score=np.mean(tab_score[-20:])
            # NOTE(review): indentation was lost in this source; this early-exit
            # check is assumed to sit inside the "new best" branch — confirm.
            if best_score==env._max_episode_steps-1:
                return


model=model()
optimizer=tf.keras.optimizers.Adam(learning_rate=1E-4)
train_loss=tf.keras.metrics.Mean()
tab_s=[]

tab_score=[]
train()

np.save("tab_score", tab_score)
|
||||
|
||||
108
Divers/renforcement3/train_better.py
Normal file
108
Divers/renforcement3/train_better.py
Normal file
@@ -0,0 +1,108 @@
|
||||
"""DQN on CartPole-v0 with a smoothed-score stopping criterion and CSV log."""
import gym
import tensorflow as tf
from tensorflow.keras import models, layers
import numpy as np

env=gym.make("CartPole-v0")
env._max_episode_steps=200
nbr_action=2

# One "score:smoothed_score" line appended per episode.
fichier_log=open("log_critic.csv", "a")

gamma=0.98
max_episode=600
# Linear epsilon schedule between episodes start_epsilon and end_epsilon.
epsilon=1.
epsilon_min=0.10
start_epsilon=10
end_epsilon=max_episode
epsilon_decay_value=epsilon/(end_epsilon-start_epsilon)


def model():
    """Build the Q-network: 4 observation inputs, one Q-value per action."""
    entree=layers.Input(shape=(4), dtype='float32')
    result=layers.Dense(32, activation='relu')(entree)
    result=layers.Dense(32, activation='relu')(result)
    sortie=layers.Dense(nbr_action)(result)

    model=models.Model(inputs=entree, outputs=sortie)
    return model


def my_loss(target_q, predicted_q):
    """Mean squared error between target and predicted Q-values."""
    loss=tf.reduce_mean(tf.math.square(target_q-predicted_q))
    return loss


@tf.function
def train_step(reward, action, observation, next_observation, done):
    """One gradient step of Q-learning on a batch of transitions."""
    # Bootstrap targets: r + gamma * max_a' Q(s', a'), zeroed on terminal steps.
    next_Q_values=model(next_observation)
    best_next_actions=tf.math.argmax(next_Q_values, axis=1)
    next_mask=tf.one_hot(best_next_actions, nbr_action)
    next_best_Q_values=tf.reduce_sum(next_Q_values*next_mask, axis=1)
    target_Q_values=reward+(1-done)*gamma*next_best_Q_values
    target_Q_values=tf.reshape(target_Q_values, (-1, 1))
    # Only the Q-value of the action actually taken contributes to the loss.
    mask=tf.one_hot(action, nbr_action)
    with tf.GradientTape() as tape:
        all_Q_values=model(observation)
        Q_values=tf.reduce_sum(all_Q_values*mask, axis=1, keepdims=True)
        loss=my_loss(target_Q_values, Q_values)
    gradients=tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))
    train_loss(loss)


def train(debug=False):
    """Epsilon-greedy rollouts + per-episode updates; stop once the
    exponentially-smoothed score approaches the episode-length cap."""
    global epsilon
    m_reward=0
    for episode in range(max_episode):
        score=0
        tab_observations=[]
        tab_rewards=[]
        tab_actions=[]
        tab_next_observations=[]
        tab_done=[]

        observations=env.reset()
        score=0
        while True:
            tab_observations.append(observations)
            if np.random.random()>epsilon:
                valeurs_q=model(np.expand_dims(observations, axis=0))
                action=int(tf.argmax(valeurs_q[0], axis=-1))
            else:
                action=np.random.randint(0, nbr_action)
            observations, reward, done, info=env.step(action)
            score+=reward
            tab_actions.append(action)
            tab_next_observations.append(observations)
            tab_done.append(done)
            if done:
                # Penalize the terminal transition.
                tab_rewards.append(-10.)
                break
            tab_rewards.append(reward)

        # Convert the episode to batched arrays and do one training step.
        tab_rewards=np.array(tab_rewards, dtype=np.float32)
        tab_actions=np.array(tab_actions, dtype=np.int32)
        tab_observations=np.array(tab_observations, dtype=np.float32)
        tab_next_observations=np.array(tab_next_observations, dtype=np.float32)
        tab_done=np.array(tab_done, dtype=np.float32)
        train_step(tab_rewards, tab_actions, tab_observations, tab_next_observations, tab_done)
        train_loss.reset_states()

        epsilon-=epsilon_decay_value
        epsilon=max(epsilon, epsilon_min)

        # Exponential moving average of the score (alpha = 0.05).
        m_reward=0.05*score+(1-0.05)*m_reward
        message="Episode {:04d} score:{:6.1f} moyenne lissée: {:6.1f} (epsilon={:5.3f})"
        print(message.format(episode, score, m_reward, epsilon))

        fichier_log.write("{:f}:{:f}\n".format(score, m_reward))

        if m_reward>env._max_episode_steps-10:
            print("Fin de l'apprentissage".format(episode))
            break


model=model()
optimizer=tf.keras.optimizers.Adam(learning_rate=1E-2)
train_loss=tf.keras.metrics.Mean()
tab_s=[]

train()

fichier_log.close()
|
||||
7
Divers/renforcement4/README.md
Normal file
7
Divers/renforcement4/README.md
Normal file
@@ -0,0 +1,7 @@
|
||||
# Apprentissage par renforcement
|
||||
## Pacman en mode target !
|
||||
|
||||
La vidéo de ce tutoriel est disponible à l'adresse suivante:<br>
|
||||
https://www.youtube.com/watch?v=F-u9AOMt7zo
|
||||
|
||||

|
||||
BIN
Divers/renforcement4/img.png
Normal file
BIN
Divers/renforcement4/img.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 114 KiB |
72
Divers/renforcement4/joue.py
Normal file
72
Divers/renforcement4/joue.py
Normal file
@@ -0,0 +1,72 @@
|
||||
"""Replay a trained Ms. Pac-Man agent and display the recorded frames."""
import gym
import cv2
import tensorflow as tf
from tensorflow.keras import models, layers
import numpy as np
import time
import matplotlib.pyplot as plot
import os

env=gym.make("MsPacman-v0")

# Trained network; the v1 checkpoint can be swapped in instead.
#model=tf.keras.models.load_model('my_model_v1')
model=tf.keras.models.load_model('my_model_target')

# Number of frames skipped at the start of a game (intro animation).
decalage_debut=90
# Number of consecutive frames stacked as input channels for the network.
taille_sequence=6
|
||||
|
||||
def transform_img(image):
    """Crop a raw Atari frame to its top 170 rows, keep a single colour
    channel (index 0) and restore a trailing channel axis.

    Returns an array of shape (170, width, 1).
    """
    cropped=image[:170, :, 0]
    return cropped[..., np.newaxis]
|
||||
|
||||
def joue():
    """Play one greedy episode; return the recorded RGB frames and the score.

    Note the loop shape: env.step() is at the END of the loop body, so the
    `reward`/`done`/`info` consumed at the top of each iteration come from the
    PREVIOUS step (initially from the warm-up loop below).
    """
    ######
    # Warm-up: skip the intro, then prime the frame stack with no-op steps.
    observations=env.reset()
    vie=3
    for i in range(decalage_debut-taille_sequence):
        env.step(0)
    tab_sequence=[]
    for i in range(taille_sequence):
        observation, reward, done, info=env.step(0)
        img=transform_img(observation)
        tab_sequence.append(img)
    tab_sequence=np.array(tab_sequence, dtype=np.float32)
    ######

    tab_img=[]
    score=0
    vie=3
    while True:
        # Stack the frame sequence along the channel axis and act greedily.
        valeurs_q=model(np.expand_dims(np.concatenate(tab_sequence, axis=-1), axis=0))
        action=int(tf.argmax(valeurs_q[0], axis=-1))
        print(action+1, end=' ')
        # Rewards are clipped at 10 when accumulating the score.
        score+=min(reward, 10.)
        if info['ale.lives']<vie:
            print("XXX", end=" ")
            vie-=1
        if done:
            print("\nSCORE:", score)
            return tab_img, score

        # Actions are offset by 1 (action 0 is reserved for NOOP).
        observation, reward, done, info=env.step(action+1)
        if reward>10:
            print("MIAM", reward, end=" ")

        # Slide the frame window: drop the oldest frame, append the newest.
        img=transform_img(observation)
        tab_sequence[:-1]=tab_sequence[1:]
        tab_sequence[taille_sequence-1]=img
        tab_img.append(observation)


# Keep playing until a run reaches a score of 1400, then replay its frames.
score=0
while score<1400:
    start_time=time.time()
    tab_img, score=joue()
    print(time.time()-start_time)

for i in range(len(tab_img)):
    cv2.imshow("Pacman", tab_img[i])
    key=cv2.waitKey(20)
    if key==ord('q'):
        break
|
||||
|
||||
180
Divers/renforcement4/train_target.py
Normal file
180
Divers/renforcement4/train_target.py
Normal file
@@ -0,0 +1,180 @@
|
||||
"""DQN with a separate target network on Ms. Pac-Man (frame-stack input)."""
import gym
import cv2
import tensorflow as tf
from tensorflow.keras import models, layers
import numpy as np
import time
import matplotlib.pyplot as plot

env = gym.make("MsPacman-v0")
print("Liste des actions", env.unwrapped.get_action_meanings())
nbr_action=tf.constant(4)

file_model='my_model_target'
file_stats='tab_score_target'

gamma=tf.constant(0.999)
epoch=200
# Frames skipped at the start of a game (intro animation).
decalage_debut=90
# Frames stacked as network input channels.
taille_sequence=6
nbr_jeu=300
pourcentage_batch=0.20
best_score=0

# Linear epsilon schedule over the first quarter of the epochs.
epsilon=1.
epsilon_min=0.10
start_epsilon=1
end_epsilon=epoch//4
epsilon_decay_value=epsilon/(end_epsilon-start_epsilon)


def model(nbr_cc=8):
    """Build the convolutional Q-network; nbr_cc scales the channel counts."""
    entree=layers.Input(shape=(170, 160, taille_sequence), dtype='float32')
    # Inputs are rescaled from [0, 255] to roughly [-1, 1].
    result=layers.Conv2D( nbr_cc, 3, activation='relu', padding='same', strides=2)((entree/128)-1)
    result=layers.Conv2D(2*nbr_cc, 3, activation='relu', padding='same', strides=2)(result)
    result=layers.BatchNormalization()(result)
    result=layers.Conv2D(4*nbr_cc, 3, activation='relu', padding='same', strides=2)(result)
    result=layers.Conv2D(8*nbr_cc, 3, activation='relu', padding='same', strides=2)(result)
    result=layers.BatchNormalization()(result)

    result=layers.Flatten()(result)

    result=layers.Dense(512, activation='relu')(result)
    sortie=layers.Dense(nbr_action)(result)

    model=models.Model(inputs=entree, outputs=sortie)
    return model


def transform_img(image):
    """Crop to the top 170 rows, keep channel 0, restore a channel axis."""
    result=np.expand_dims(image[:170, :, 0], axis=-1)
    return result


def simulation(epsilon, debug=False):
    """Play one epsilon-greedy game; return ~10% of the transitions as arrays.

    NOTE(review): the original indentation was lost in this source; the
    nesting below (in particular which statements sit inside `if done:` and
    the trailing env.step block) is reconstructed — confirm against upstream.
    env.step() is at the END of the loop, so `reward`/`done`/`info` used at
    the top of an iteration come from the previous step (or the warm-up loop).
    """
    if debug:
        start_time=time.time()

    tab_observations=[]
    tab_rewards=[]
    tab_actions=[]
    tab_next_observations=[]
    tab_done=[]

    ######
    # Warm-up: skip the intro, then prime the frame stack with no-op steps.
    observations=env.reset()
    vie=3
    for i in range(decalage_debut-taille_sequence):
        env.step(0)
    tab_sequence=[]
    for i in range(taille_sequence):
        observation, reward, done, info=env.step(0)
        img=transform_img(observation)
        tab_sequence.append(img)
    tab_sequence=np.array(tab_sequence, dtype=np.float32)
    ######

    score=0
    while True:
        if np.random.random()>epsilon:
            # Exploit: greedy action from the primary network.
            valeurs_q=model_primaire(np.expand_dims(np.concatenate(tab_sequence, axis=-1), axis=0))
            action=int(tf.argmax(valeurs_q[0], axis=-1))
        else:
            action=np.random.randint(0, nbr_action)

        # Keep roughly 1 transition out of 10 (h==0) for training.
        h=np.random.randint(10)
        if h==0:
            tab_observations.append(np.concatenate(tab_sequence, axis=-1))
            tab_actions.append(action)
        score+=reward
        if info['ale.lives']<vie:
            # A lost life is treated as a strong negative terminal signal.
            reward=-50.
            vie=info['ale.lives']
            if h==0:
                tab_done.append(True)
        else:
            if h==0:
                tab_done.append(done)
        if h==0:
            tab_rewards.append(reward)
        if done:
            tab_s.append(score)
            if h==0:
                tab_sequence[:-1]=tab_sequence[1:]
                tab_sequence[taille_sequence-1]=img
                tab_next_observations.append(np.concatenate(tab_sequence, axis=-1))
            # Convert the sampled transitions to batched arrays.
            tab_done=np.array(tab_done, dtype=np.float32)
            tab_observations=np.array(tab_observations, dtype=np.float32)
            tab_next_observations=np.array(tab_next_observations, dtype=np.float32)
            tab_rewards=np.array(tab_rewards, dtype=np.float32)
            # Reward shaping: no-reward steps become -1, big rewards clip to 10.
            tab_rewards[tab_rewards==0]=-1.
            tab_rewards[tab_rewards>10]=10.
            tab_actions=np.array(tab_actions, dtype=np.int32)
            if debug:
                print(" Creation observations {:5.3f} seconde(s)".format(float(time.time()-start_time)))
                print(" score:{:5d} batch:{:4d}".format(int(score), len(tab_done)))
            return tab_observations,\
                   tab_rewards,\
                   tab_actions,\
                   tab_next_observations,\
                   tab_done
        # Actions are offset by 1 (action 0 is reserved for NOOP).
        observation, reward, done, info=env.step(action+1)
        img=transform_img(observation)
        tab_sequence[:-1]=tab_sequence[1:]
        tab_sequence[taille_sequence-1]=img
        if h==0:
            tab_next_observations.append(np.concatenate(tab_sequence, axis=-1))


def my_loss(y, q):
    """Mean squared error between target and predicted Q-values."""
    loss=tf.reduce_mean(tf.math.square(y-q))
    return loss


@tf.function
def train_step(reward, action, observation, next_observation, done):
    """One Q-learning step; targets come from the frozen target network."""
    next_Q_values=model_cible(next_observation)
    best_next_actions=tf.math.argmax(next_Q_values, axis=1)
    next_mask=tf.one_hot(best_next_actions, nbr_action)
    next_best_Q_values=tf.reduce_sum(next_Q_values*next_mask, axis=1)
    target_Q_values=reward+(1-done)*gamma*next_best_Q_values
    target_Q_values=tf.reshape(target_Q_values, (-1, 1))
    # Only the Q-value of the action actually taken contributes to the loss.
    mask=tf.one_hot(action, nbr_action)
    with tf.GradientTape() as tape:
        all_Q_values=model_primaire(observation)
        Q_values=tf.reduce_sum(all_Q_values*mask, axis=1, keepdims=True)
        loss=my_loss(target_Q_values, Q_values)
    gradients=tape.gradient(loss, model_primaire.trainable_variables)
    optimizer.apply_gradients(zip(gradients, model_primaire.trainable_variables))
    train_loss(loss)


def train(debug=False):
    """Alternate nbr_jeu game/update cycles with target-network syncs."""
    global epsilon, best_score
    for e in range(epoch):
        for i in range(nbr_jeu):
            print("Epoch {:04d}/{:05d} epsilon={:05.3f}".format(i, e, epsilon))
            tab_observations, tab_rewards, tab_actions, tab_next_observations, tab_done=simulation(epsilon, debug=True)
            if debug:
                start_time=time.time()
            train_step(tab_rewards, tab_actions, tab_observations, tab_next_observations, tab_done)
            if debug:
                print(" Entrainement {:5.3f} seconde(s)".format(float(time.time()-start_time)))
                print(" loss: {:6.4f}".format(train_loss.result()))
            train_loss.reset_states()

        # Sync: copy primary-network weights into the target network.
        print("Copie des poids primaire -> cible")
        for a, b in zip(model_cible.variables, model_primaire.variables):
            a.assign(b)

        epsilon-=epsilon_decay_value
        epsilon=max(epsilon, epsilon_min)
        np.save(file_stats, tab_s)
        if np.mean(tab_s[-200:])>best_score:
            print("Sauvegarde du modele")
            model_cible.save(file_model)
            best_score=np.mean(tab_s[-200:])


model_primaire=model(16)
model_cible=tf.keras.models.clone_model(model_primaire)
# Start with identical weights in both networks.
for a, b in zip(model_cible.variables, model_primaire.variables):
    a.assign(b)

optimizer=tf.keras.optimizers.Adam(learning_rate=1E-4)
train_loss=tf.keras.metrics.Mean()
tab_s=[]
train(debug=True)
|
||||
173
Divers/renforcement4/train_v1.py
Normal file
173
Divers/renforcement4/train_v1.py
Normal file
@@ -0,0 +1,173 @@
|
||||
"""Single-network DQN on Ms. Pac-Man (frame-stack input, sampled transitions)."""
import gym
import cv2
import tensorflow as tf
from tensorflow.keras import models, layers
import numpy as np
import time
import matplotlib.pyplot as plot

env = gym.make("MsPacman-v0")
print("Liste des actions", env.unwrapped.get_action_meanings())
nbr_action=tf.constant(4)

file_model='my_model_v1'
file_stats='tab_score_v1'

gamma=tf.constant(0.999)
epoch=1500
# Frames skipped at the start of a game (intro animation).
decalage_debut=90
# Frames stacked as network input channels.
taille_sequence=6
nbr_jeu=40
pourcentage_batch=0.20
best_score=0

# Linear epsilon schedule over the first quarter of the epochs.
epsilon=1.
epsilon_min=0.10
start_epsilon=1
end_epsilon=epoch//4
epsilon_decay_value=epsilon/(end_epsilon-start_epsilon)


def model(nbr_cc=8):
    """Build the convolutional Q-network; nbr_cc scales the channel counts."""
    entree=layers.Input(shape=(170, 160, taille_sequence), dtype='float32')
    # Inputs are rescaled from [0, 255] to roughly [-1, 1].
    result=layers.Conv2D( nbr_cc, 3, activation='relu', padding='same', strides=2)((entree/128)-1)
    result=layers.Conv2D(2*nbr_cc, 3, activation='relu', padding='same', strides=2)(result)
    result=layers.BatchNormalization()(result)
    result=layers.Conv2D(4*nbr_cc, 3, activation='relu', padding='same', strides=2)(result)
    result=layers.Conv2D(8*nbr_cc, 3, activation='relu', padding='same', strides=2)(result)
    result=layers.BatchNormalization()(result)

    result=layers.Flatten()(result)

    result=layers.Dense(512, activation='relu')(result)
    sortie=layers.Dense(nbr_action)(result)

    model=models.Model(inputs=entree, outputs=sortie)
    return model


def transform_img(image):
    """Crop to the top 170 rows, keep channel 0, restore a channel axis."""
    result=np.expand_dims(image[:170, :, 0], axis=-1)
    return result


def simulation(epsilon, debug=False):
    """Play one epsilon-greedy game; return ~10% of the transitions as arrays.

    NOTE(review): the original indentation was lost in this source; the
    nesting below (in particular which statements sit inside `if done:` and
    the trailing env.step block) is reconstructed — confirm against upstream.
    env.step() is at the END of the loop, so `reward`/`done`/`info` used at
    the top of an iteration come from the previous step (or the warm-up loop).
    """
    if debug:
        start_time=time.time()

    tab_observations=[]
    tab_rewards=[]
    tab_actions=[]
    tab_next_observations=[]
    tab_done=[]

    ######
    # Warm-up: skip the intro, then prime the frame stack with no-op steps.
    observations=env.reset()
    vie=3
    for i in range(decalage_debut-taille_sequence):
        env.step(0)
    tab_sequence=[]
    for i in range(taille_sequence):
        observation, reward, done, info=env.step(0)
        img=transform_img(observation)
        tab_sequence.append(img)
    tab_sequence=np.array(tab_sequence, dtype=np.float32)
    ######

    score=0
    while True:
        if np.random.random()>epsilon:
            valeurs_q=model(np.expand_dims(np.concatenate(tab_sequence, axis=-1), axis=0))
            action=int(tf.argmax(valeurs_q[0], axis=-1))
        else:
            action=np.random.randint(0, nbr_action)

        # Keep roughly 1 transition out of 10 (h==0) for training.
        h=np.random.randint(10)
        if h==0:
            tab_observations.append(np.concatenate(tab_sequence, axis=-1))
            tab_actions.append(action)
        score+=reward
        if info['ale.lives']<vie:
            # A lost life is treated as a strong negative terminal signal.
            reward=-50.
            vie=info['ale.lives']
            if h==0:
                tab_done.append(True)
        else:
            if h==0:
                tab_done.append(done)
        if h==0:
            tab_rewards.append(reward)
        if done:
            tab_s.append(score)
            if h==0:
                tab_sequence[:-1]=tab_sequence[1:]
                tab_sequence[taille_sequence-1]=img
                tab_next_observations.append(np.concatenate(tab_sequence, axis=-1))
            # Convert the sampled transitions to batched arrays.
            tab_done=np.array(tab_done, dtype=np.float32)
            tab_observations=np.array(tab_observations, dtype=np.float32)
            tab_next_observations=np.array(tab_next_observations, dtype=np.float32)
            tab_rewards=np.array(tab_rewards, dtype=np.float32)
            # Reward shaping: no-reward steps become -1, big rewards clip to 10.
            tab_rewards[tab_rewards==0]=-1.
            tab_rewards[tab_rewards>10]=10.
            tab_actions=np.array(tab_actions, dtype=np.int32)
            if debug:
                print(" Creation observations {:5.3f} seconde(s)".format(float(time.time()-start_time)))
                print(" score:{:5d} batch:{:4d}".format(int(score), len(tab_done)))
            return tab_observations,\
                   tab_rewards,\
                   tab_actions,\
                   tab_next_observations,\
                   tab_done
        # Actions are offset by 1 (action 0 is reserved for NOOP).
        observation, reward, done, info=env.step(action+1)
        img=transform_img(observation)
        tab_sequence[:-1]=tab_sequence[1:]
        tab_sequence[taille_sequence-1]=img
        if h==0:
            tab_next_observations.append(np.concatenate(tab_sequence, axis=-1))


def my_loss(y, q):
    """Mean squared error between target and predicted Q-values."""
    loss=tf.reduce_mean(tf.math.square(y-q))
    return loss


@tf.function
def train_step(reward, action, observation, next_observation, done):
    """One Q-learning step; the same network provides the bootstrap targets."""
    next_Q_values=model(next_observation)
    best_next_actions=tf.math.argmax(next_Q_values, axis=1)
    next_mask=tf.one_hot(best_next_actions, nbr_action)
    next_best_Q_values=tf.reduce_sum(next_Q_values*next_mask, axis=1)
    target_Q_values=reward+(1-done)*gamma*next_best_Q_values
    target_Q_values=tf.reshape(target_Q_values, (-1, 1))
    # Only the Q-value of the action actually taken contributes to the loss.
    mask=tf.one_hot(action, nbr_action)
    with tf.GradientTape() as tape:
        all_Q_values=model(observation)
        Q_values=tf.reduce_sum(all_Q_values*mask, axis=1, keepdims=True)
        loss=my_loss(target_Q_values, Q_values)
    gradients=tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))
    train_loss(loss)


def train(debug=False):
    """Run nbr_jeu game/update cycles per epoch; checkpoint on best average."""
    global epsilon, best_score
    for e in range(epoch):
        for i in range(nbr_jeu):
            print("Epoch {:04d}/{:05d} epsilon={:05.3f}".format(i, e, epsilon))
            tab_observations, tab_rewards, tab_actions, tab_next_observations, tab_done=simulation(epsilon, debug=True)
            if debug:
                start_time=time.time()
            train_step(tab_rewards, tab_actions, tab_observations, tab_next_observations, tab_done)
            if debug:
                print(" Entrainement {:5.3f} seconde(s)".format(float(time.time()-start_time)))
                print(" loss: {:6.4f}".format(train_loss.result()))
            train_loss.reset_states()

        epsilon-=epsilon_decay_value
        epsilon=max(epsilon, epsilon_min)
        np.save(file_stats, tab_s)
        if np.mean(tab_s[-200:])>best_score:
            print("Sauvegarde du modele")
            model.save(file_model)
            best_score=np.mean(tab_s[-200:])


model=model(16)

optimizer=tf.keras.optimizers.Adam(learning_rate=1E-4)
train_loss=tf.keras.metrics.Mean()
tab_s=[]
train(debug=True)
|
||||
11
Divers/renforcement5/README.md
Normal file
11
Divers/renforcement5/README.md
Normal file
@@ -0,0 +1,11 @@
|
||||
# Apprentissage par renforcement
|
||||
## Méthode 'acteur'
|
||||
|
||||
La vidéo de ce tutoriel est disponible à l'adresse suivante:<br>
|
||||
https://www.youtube.com/watch?v=LtRAgxRb5eQ
|
||||
|
||||
Ci-dessous, le graphe de l'apprentissage sur l'environnement CartPole (https://gym.openai.com/envs/CartPole-v0/)<br>
|
||||
En bleu: Méthode 'critique'<br>
|
||||
En orange: Méthode 'acteur'<br>
|
||||
|
||||

|
||||
83
Divers/renforcement5/cartpole_actor.py
Normal file
83
Divers/renforcement5/cartpole_actor.py
Normal file
@@ -0,0 +1,83 @@
|
||||
"""Policy-gradient (REINFORCE) agent on CartPole-v0 — setup and logging."""
import gym
import tensorflow as tf
from tensorflow.keras import models, layers
import numpy as np
import os

env=gym.make("CartPole-v0")
env._max_episode_steps=200
nbr_actions=2
gamma=0.99
max_episode=600

# Pick the first unused log file name: log_actor0.csv, log_actor1.csv, ...
prefix_log_file="log_actor"
id_file=0
while os.path.exists(prefix_log_file+str(id_file)+".csv"):
    id_file+=1
fichier_log=open(prefix_log_file+str(id_file)+".csv", "w")
print("Création du fichier de log", prefix_log_file+str(id_file)+".csv")
|
||||
|
||||
def model(nbr_inputs, nbr_hidden, nbr_actions):
    """Build the policy network: nbr_inputs observations in, a softmax
    distribution over nbr_actions out.

    Fix: nbr_hidden was previously ignored — both hidden layers were
    hard-coded to 32 units.  It now sizes the hidden layers; the only
    visible call site passes 32, so behaviour there is unchanged.
    """
    entree=layers.Input(shape=(nbr_inputs), dtype='float32')
    result=layers.Dense(nbr_hidden, activation='relu')(entree)
    result=layers.Dense(nbr_hidden, activation='relu')(result)
    sortie=layers.Dense(nbr_actions, activation='softmax')(result)

    my_model=models.Model(inputs=entree, outputs=sortie)
    return my_model
|
||||
|
||||
def calcul_discount_rate(rewards_history, gamma, normalize=False):
    """Return the discounted cumulative reward at each time step.

    result[t] = sum_{k>=t} gamma^(k-t) * rewards_history[k].
    With normalize=True the returns are standardised (zero mean, unit
    variance, with a small constant guarding against division by zero).
    """
    result=[]
    acc=0
    for reward in reversed(rewards_history):
        acc=reward+gamma*acc
        result.append(acc)
    result.reverse()

    # Normalisation
    if normalize is True:
        arr=np.array(result)
        arr=(arr-np.mean(arr))/(np.std(arr)+1E-7)
        result=list(arr)

    return result
|
||||
|
||||
def train():
    """REINFORCE training loop: sample one full episode under the current
    policy, then do one policy-gradient step on the discounted returns.

    NOTE(review): indentation was lost in this source; the extent of the
    GradientTape block (loss computed inside, gradient taken after) is
    reconstructed — confirm against upstream.
    """
    m_reward=0
    for episode in range(max_episode):
        tab_rewards=[]
        tab_prob_actions=[]

        observations=env.reset()
        with tf.GradientTape() as tape:
            while True:
                # Sample an action from the policy's softmax output.
                action_probs=my_model(np.expand_dims(observations, axis=0))
                action=np.random.choice(nbr_actions, p=np.squeeze(action_probs))
                tab_prob_actions.append(action_probs[0, action])
                observations, reward, done, info=env.step(action)
                tab_rewards.append(reward)
                if done:
                    break

            discount_rate=calcul_discount_rate(tab_rewards, gamma, normalize=True)

            # Policy-gradient loss: -log pi(a|s) weighted by the return.
            loss=-tf.math.log(tab_prob_actions)*discount_rate
        gradients=tape.gradient(loss, my_model.trainable_variables)
        optimizer.apply_gradients(zip(gradients, my_model.trainable_variables))

        # Exponential moving average of the episode score (alpha = 0.05).
        score=sum(tab_rewards)
        m_reward=0.05*score+(1-0.05)*m_reward
        message="Episode {:04d} score:{:6.1f} MPE: {:6.1f}"
        print(message.format(episode, score, m_reward))

        fichier_log.write("{:f}:{:f}\n".format(score, m_reward))

        if m_reward>env._max_episode_steps-10:
            print("Fin de l'apprentissage".format(episode))
            break


my_model=model(4, 32, nbr_actions)
optimizer=tf.keras.optimizers.Adam(learning_rate=1E-2)

train()

fichier_log.close()
|
||||
114
Divers/renforcement5/cartpole_critic.py
Normal file
114
Divers/renforcement5/cartpole_critic.py
Normal file
@@ -0,0 +1,114 @@
|
||||
"""Value-based ('critic') DQN on CartPole-v0 with auto-numbered CSV logging."""
import gym
import tensorflow as tf
from tensorflow.keras import models, layers
import numpy as np
import os

env=gym.make("CartPole-v0")
env._max_episode_steps=200
nbr_action=2

# Pick the first unused log file name: log_critic_0.csv, log_critic_1.csv, ...
prefix_log_file="log_critic_"
id_file=0
while os.path.exists(prefix_log_file+str(id_file)+".csv"):
    id_file+=1
fichier_log=open(prefix_log_file+str(id_file)+".csv", "w")
print("Création du fichier de log", prefix_log_file+str(id_file)+".csv")

gamma=0.98
max_episode=600
# Linear epsilon schedule between episodes start_epsilon and end_epsilon.
epsilon=1.
epsilon_min=0.10
start_epsilon=10
end_epsilon=max_episode
epsilon_decay_value=epsilon/(end_epsilon-start_epsilon)


def model():
    """Build the Q-network: 4 observation inputs, one Q-value per action."""
    entree=layers.Input(shape=(4), dtype='float32')
    result=layers.Dense(32, activation='relu')(entree)
    result=layers.Dense(32, activation='relu')(result)
    sortie=layers.Dense(nbr_action)(result)

    model=models.Model(inputs=entree, outputs=sortie)
    return model


def my_loss(target_q, predicted_q):
    """Mean squared error between target and predicted Q-values."""
    loss=tf.reduce_mean(tf.math.square(target_q-predicted_q))
    return loss


@tf.function
def train_step(reward, action, observation, next_observation, done):
    """One gradient step of Q-learning on a batch of transitions."""
    # Bootstrap targets: r + gamma * max_a' Q(s', a'), zeroed on terminal steps.
    next_Q_values=model(next_observation)
    best_next_actions=tf.math.argmax(next_Q_values, axis=1)
    next_mask=tf.one_hot(best_next_actions, nbr_action)
    next_best_Q_values=tf.reduce_sum(next_Q_values*next_mask, axis=1)
    target_Q_values=reward+(1-done)*gamma*next_best_Q_values
    target_Q_values=tf.reshape(target_Q_values, (-1, 1))
    # Only the Q-value of the action actually taken contributes to the loss.
    mask=tf.one_hot(action, nbr_action)
    with tf.GradientTape() as tape:
        all_Q_values=model(observation)
        Q_values=tf.reduce_sum(all_Q_values*mask, axis=1, keepdims=True)
        loss=my_loss(target_Q_values, Q_values)
    gradients=tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))
    train_loss(loss)


def train(debug=False):
    """Epsilon-greedy rollouts + per-episode updates; stop once the
    exponentially-smoothed score approaches the episode-length cap."""
    global epsilon
    m_reward=0
    for episode in range(max_episode):
        score=0
        tab_observations=[]
        tab_rewards=[]
        tab_actions=[]
        tab_next_observations=[]
        tab_done=[]

        observations=env.reset()
        score=0
        while True:
            tab_observations.append(observations)
            if np.random.random()>epsilon:
                valeurs_q=model(np.expand_dims(observations, axis=0))
                action=int(tf.argmax(valeurs_q[0], axis=-1))
            else:
                action=np.random.randint(0, nbr_action)
            observations, reward, done, info=env.step(action)
            score+=reward
            tab_actions.append(action)
            tab_next_observations.append(observations)
            tab_done.append(done)
            if done:
                # Penalize the terminal transition.
                tab_rewards.append(-10.)
                break
            tab_rewards.append(reward)

        # Convert the episode to batched arrays and do one training step.
        tab_rewards=np.array(tab_rewards, dtype=np.float32)
        tab_actions=np.array(tab_actions, dtype=np.int32)
        tab_observations=np.array(tab_observations, dtype=np.float32)
        tab_next_observations=np.array(tab_next_observations, dtype=np.float32)
        tab_done=np.array(tab_done, dtype=np.float32)
        train_step(tab_rewards, tab_actions, tab_observations, tab_next_observations, tab_done)
        train_loss.reset_states()

        epsilon-=epsilon_decay_value
        epsilon=max(epsilon, epsilon_min)

        # Exponential moving average of the score (alpha = 0.05).
        m_reward=0.05*score+(1-0.05)*m_reward
        message="Episode {:04d} score:{:6.1f} MPE: {:6.1f} (epsilon={:5.3f})"
        print(message.format(episode, score, m_reward, epsilon))

        fichier_log.write("{:f}:{:f}\n".format(score, m_reward))

        if m_reward>env._max_episode_steps-10:
            print("Fin de l'apprentissage".format(episode))
            break


model=model()
optimizer=tf.keras.optimizers.Adam(learning_rate=1E-2)
train_loss=tf.keras.metrics.Mean()
tab_s=[]

train()

fichier_log.close()
|
||||
BIN
Divers/renforcement5/mini.png
Normal file
BIN
Divers/renforcement5/mini.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 98 KiB |
12
Divers/renforcement6/README.md
Normal file
12
Divers/renforcement6/README.md
Normal file
@@ -0,0 +1,12 @@
|
||||
# Apprentissage par renforcement
|
||||
## Méthode 'acteur/critique'
|
||||
|
||||
La vidéo de ce tutoriel est disponible à l'adresse suivante:<br>
|
||||
https://www.youtube.com/watch?v=1okjkEMP79c
|
||||
|
||||
Ci dessous, le graph de l'apprentissage sur l'environnement CartPole (https://gym.openai.com/envs/CartPole-v0/)<br>
|
||||
En bleu: Méthode 'critique'<br>
|
||||
En orange: Méthode 'acteur'<br>
|
||||
En vert: Méthode 'acteur/critique'<br>
|
||||
|
||||

|
||||
83
Divers/renforcement6/cartpole_actor.py
Normal file
83
Divers/renforcement6/cartpole_actor.py
Normal file
@@ -0,0 +1,83 @@
|
||||
import gym
|
||||
import tensorflow as tf
|
||||
from tensorflow.keras import models, layers
|
||||
import numpy as np
|
||||
import os
|
||||
|
||||
# Environment and hyper-parameters for the REINFORCE ("actor") agent.
env=gym.make("CartPole-v0")
env._max_episode_steps=200  # episode cap used both by gym and the stop condition below
nbr_actions=2
gamma=0.99  # discount factor
max_episode=600

# Create a uniquely numbered CSV log file (log_actor0.csv, log_actor1.csv, ...).
prefix_log_file="log_actor"
id_file=0
while os.path.exists(prefix_log_file+str(id_file)+".csv"):
    id_file+=1
fichier_log=open(prefix_log_file+str(id_file)+".csv", "w")
print("Création du fichier de log", prefix_log_file+str(id_file)+".csv")
|
||||
|
||||
def model(nbr_inputs, nbr_hidden, nbr_actions):
    """Build the policy network for the REINFORCE agent.

    Args:
        nbr_inputs: size of the observation vector.
        nbr_hidden: width of each hidden layer.
        nbr_actions: number of discrete actions (softmax output size).

    Returns:
        An uncompiled keras Model mapping observations to action probabilities.
    """
    entree=layers.Input(shape=(nbr_inputs,), dtype='float32')
    # Bug fix: the original hard-coded 32 units and silently ignored nbr_hidden.
    # The only visible call site passes nbr_hidden=32, so behavior is unchanged there.
    result=layers.Dense(nbr_hidden, activation='relu')(entree)
    result=layers.Dense(nbr_hidden, activation='relu')(result)
    sortie=layers.Dense(nbr_actions, activation='softmax')(result)

    my_model=models.Model(inputs=entree, outputs=sortie)
    return my_model
|
||||
|
||||
def calcul_discount_rate(rewards_history, gamma, normalize=False):
    """Return the discounted cumulative return G_t for every timestep.

    Walks the reward history backwards so that each entry equals
    r_t + gamma * G_{t+1}; optionally normalizes to zero mean / unit std.
    """
    result=[]
    discounted_sum=0
    for reward in reversed(rewards_history):
        discounted_sum=reward+gamma*discounted_sum
        result.append(discounted_sum)
    result.reverse()

    # Optional normalization (zero mean, unit variance) for gradient stability.
    if normalize is True:
        arr=np.array(result)
        result=list((arr-np.mean(arr))/(np.std(arr)+1E-7))

    return result
|
||||
|
||||
def train():
    """REINFORCE training loop: one policy-gradient update per episode."""
    m_reward=0  # exponential moving average of episode scores
    for episode in range(max_episode):
        tab_rewards=[]
        tab_prob_actions=[]

        observations=env.reset()
        # Record the whole episode inside the tape so the chosen-action
        # probabilities stay differentiable for the update below.
        with tf.GradientTape() as tape:
            while True:
                action_probs=my_model(np.expand_dims(observations, axis=0))
                # Sample from the policy distribution (stochastic exploration).
                action=np.random.choice(nbr_actions, p=np.squeeze(action_probs))
                tab_prob_actions.append(action_probs[0, action])
                observations, reward, done, info=env.step(action)
                tab_rewards.append(reward)
                if done:
                    break

            discount_rate=calcul_discount_rate(tab_rewards, gamma, normalize=True)

            # Policy-gradient loss: -log pi(a_t|s_t) * G_t per step
            # (a vector; tape.gradient sums its elements).
            loss=-tf.math.log(tab_prob_actions)*discount_rate
        gradients=tape.gradient(loss, my_model.trainable_variables)
        optimizer.apply_gradients(zip(gradients, my_model.trainable_variables))

        # Bookkeeping: episode score, moving average, console + CSV log.
        score=sum(tab_rewards)
        m_reward=0.05*score+(1-0.05)*m_reward
        message="Episode {:04d} score:{:6.1f} MPE: {:6.1f}"
        print(message.format(episode, score, m_reward))

        fichier_log.write("{:f}:{:f}\n".format(score, m_reward))

        # Stop early once the moving average nears the episode cap.
        if m_reward>env._max_episode_steps-10:
            print("Fin de l'apprentissage".format(episode))
            break
|
||||
|
||||
# Build the policy network (4 observations, 32 hidden units) and train it.
my_model=model(4, 32, nbr_actions)
optimizer=tf.keras.optimizers.Adam(learning_rate=1E-2)

train()

fichier_log.close()
|
||||
103
Divers/renforcement6/cartpole_actor_critic.py
Normal file
103
Divers/renforcement6/cartpole_actor_critic.py
Normal file
@@ -0,0 +1,103 @@
|
||||
import gym
|
||||
import numpy as np
|
||||
import tensorflow as tf
|
||||
from tensorflow import keras
|
||||
from tensorflow.keras import layers
|
||||
import os
|
||||
|
||||
# Hyper-parameters and environment for the actor-critic agent.
gamma=0.99  # discount factor
max_steps_per_episode=10000
env=gym.make("CartPole-v0")
env._max_episode_steps=200

# Create a uniquely numbered CSV log file.
prefix_log_file="log_actor_critic_dsum_"
id_file=0
while os.path.exists(prefix_log_file+str(id_file)+".csv"):
    id_file+=1
fichier_log=open(prefix_log_file+str(id_file)+".csv", "w")
print("Création du fichier de log", prefix_log_file+str(id_file)+".csv")

nbr_actions=2
nbr_inputs=4  # CartPole observation vector size
||||
|
||||
def calcul_discount_rate(rewards_history, gamma, normalize=False):
    """Compute the discounted return G_t = r_t + gamma*G_{t+1} per timestep.

    Optionally standardizes the returns (zero mean, unit variance).
    """
    returns=[]
    running=0
    for r in reversed(rewards_history):
        running=r+gamma*running
        returns.append(running)
    returns.reverse()

    # Standardization improves gradient conditioning for policy updates.
    if normalize is True:
        data=np.array(returns)
        returns=list((data-np.mean(data))/(np.std(data)+1E-7))

    return returns
|
||||
|
||||
def my_model(nbr_inputs, nbr_hidden, nbr_actions):
    """Build a two-headed actor-critic network.

    One shared hidden layer feeds a softmax policy head (actor) and a
    single-unit value head (critic).
    """
    inputs=layers.Input(shape=(nbr_inputs,), dtype='float32')

    shared=layers.Dense(nbr_hidden, activation="relu")(inputs)
    policy_head=layers.Dense(nbr_actions, activation="softmax")(shared)
    value_head=layers.Dense(1)(shared)

    return keras.Model(inputs=inputs, outputs=[policy_head, value_head])
|
||||
|
||||
# Build the network, optimizer and critic loss, then train until solved.
model=my_model(nbr_inputs, 32, nbr_actions)

optimizer=keras.optimizers.Adam(learning_rate=1E-2)
huber_loss=keras.losses.Huber()  # robust loss for the critic head

m_reward=0  # exponential moving average of episode rewards
episode=0

while True:
    action_probs_history=[]   # pi(a_t|s_t) for the chosen actions
    critic_value_history=[]   # V(s_t) predictions
    rewards_history=[]        # raw rewards r_t

    state=env.reset()
    episode_reward=0
    with tf.GradientTape() as tape:

        # Data collection: play one episode with the current policy.
        for timestep in range(1, max_steps_per_episode):
            action_probs, critic_value=model(np.expand_dims(state, axis=0))
            critic_value_history.append(critic_value[0, 0])
            # Sample the action from the policy distribution.
            action=np.random.choice(nbr_actions, p=np.squeeze(action_probs))
            action_probs_history.append(action_probs[0, action])
            state, reward, done, infos=env.step(action)
            rewards_history.append(reward)
            episode_reward+=reward
            if done:
                break

        # Discounted, normalized returns G_t.
        discount_rate=calcul_discount_rate(rewards_history, gamma, normalize=True)

        history=zip(action_probs_history, critic_value_history, discount_rate)
        actor_losses=[]
        critic_losses=[]
        for action_prob, critic_value, discount_rate in history:
            # Actor: -log pi(a|s) weighted by the advantage (G_t - V(s_t)).
            actor_losses.append(-tf.math.log(action_prob)*(discount_rate-critic_value))
            # Critic: Huber loss between predicted value and realized return.
            critic_losses.append(huber_loss([critic_value], [discount_rate]))

        # Combined loss: list concatenation, then mean over all terms.
        loss_value=tf.reduce_mean(actor_losses+critic_losses)
    grads=tape.gradient(loss_value, model.trainable_variables)
    optimizer.apply_gradients(zip(grads, model.trainable_variables))

    episode+=1
    m_reward=0.05*episode_reward+(1-0.05)*m_reward

    message="Episode {:04d} score:{:6.1f} MPE: {:6.1f}"
    print(message.format(episode, episode_reward, m_reward))

    fichier_log.write("{:f}:{:f}\n".format(episode_reward, m_reward))

    # Stop once the moving average approaches the episode cap.
    if m_reward>env._max_episode_steps-10:
        print("Fin de l'apprentissage".format(episode))
        break

fichier_log.close()
model.save("my_model")
|
||||
114
Divers/renforcement6/cartpole_critic.py
Normal file
114
Divers/renforcement6/cartpole_critic.py
Normal file
@@ -0,0 +1,114 @@
|
||||
import gym
|
||||
import tensorflow as tf
|
||||
from tensorflow.keras import models, layers
|
||||
import numpy as np
|
||||
import os
|
||||
|
||||
# Environment for the DQN-style ("critic") agent.
env=gym.make("CartPole-v0")
env._max_episode_steps=200
nbr_action=2

# Uniquely numbered CSV log file.
prefix_log_file="log_critic_"
id_file=0
while os.path.exists(prefix_log_file+str(id_file)+".csv"):
    id_file+=1
fichier_log=open(prefix_log_file+str(id_file)+".csv", "w")
print("Création du fichier de log", prefix_log_file+str(id_file)+".csv")

# Hyper-parameters: discount, episode budget, and linear epsilon-greedy decay.
gamma=0.98
max_episode=600
epsilon=1.
epsilon_min=0.10
start_epsilon=10
end_epsilon=max_episode
epsilon_decay_value=epsilon/(end_epsilon-start_epsilon)
|
||||
|
||||
def model():
    """Build the Q-network: 4 observations in, one Q-value per action out."""
    state_input=layers.Input(shape=(4,), dtype='float32')
    hidden=layers.Dense(32, activation='relu')(state_input)
    hidden=layers.Dense(32, activation='relu')(hidden)
    q_values=layers.Dense(nbr_action)(hidden)

    return models.Model(inputs=state_input, outputs=q_values)
|
||||
|
||||
def my_loss(target_q, predicted_q):
    """Mean squared error between target and predicted Q-values."""
    return tf.reduce_mean(tf.math.square(target_q-predicted_q))
|
||||
|
||||
@tf.function
def train_step(reward, action, observation, next_observation, done):
    """One Q-learning update over a whole-episode batch of transitions.

    Targets are r + gamma * max_a' Q(s', a'), zeroed on terminal steps via
    the `done` mask; only the Q-value of the action actually taken is trained.
    NOTE(review): the same network produces targets and predictions here —
    there is no separate target network, as written.
    """
    next_Q_values=model(next_observation)
    best_next_actions=tf.math.argmax(next_Q_values, axis=1)
    next_mask=tf.one_hot(best_next_actions, nbr_action)
    # Select max_a' Q(s', a') for each transition.
    next_best_Q_values=tf.reduce_sum(next_Q_values*next_mask, axis=1)
    target_Q_values=reward+(1-done)*gamma*next_best_Q_values
    target_Q_values=tf.reshape(target_Q_values, (-1, 1))
    mask=tf.one_hot(action, nbr_action)
    with tf.GradientTape() as tape:
        all_Q_values=model(observation)
        # Keep only the Q-value of the action actually taken.
        Q_values=tf.reduce_sum(all_Q_values*mask, axis=1, keepdims=True)
        loss=my_loss(target_Q_values, Q_values)
    gradients=tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))
    train_loss(loss)
|
||||
|
||||
def train(debug=False):
    """Epsilon-greedy episode loop: play one episode, then one batch update."""
    global epsilon
    m_reward=0  # exponential moving average of scores
    for episode in range(max_episode):
        score=0
        tab_observations=[]
        tab_rewards=[]
        tab_actions=[]
        tab_next_observations=[]
        tab_done=[]

        observations=env.reset()
        score=0
        while True:
            tab_observations.append(observations)
            # Epsilon-greedy action selection.
            if np.random.random()>epsilon:
                valeurs_q=model(np.expand_dims(observations, axis=0))
                action=int(tf.argmax(valeurs_q[0], axis=-1))
            else:
                action=np.random.randint(0, nbr_action)
            observations, reward, done, info=env.step(action)
            score+=reward
            tab_actions.append(action)
            tab_next_observations.append(observations)
            tab_done.append(done)
            if done:
                # Penalize the terminal transition to discourage falling.
                tab_rewards.append(-10.)
                break
            tab_rewards.append(reward)

        # One gradient step over the whole episode as a batch.
        tab_rewards=np.array(tab_rewards, dtype=np.float32)
        tab_actions=np.array(tab_actions, dtype=np.int32)
        tab_observations=np.array(tab_observations, dtype=np.float32)
        tab_next_observations=np.array(tab_next_observations, dtype=np.float32)
        tab_done=np.array(tab_done, dtype=np.float32)
        train_step(tab_rewards, tab_actions, tab_observations, tab_next_observations, tab_done)
        train_loss.reset_states()

        # Linear epsilon decay with a floor.
        epsilon-=epsilon_decay_value
        epsilon=max(epsilon, epsilon_min)

        m_reward=0.05*score+(1-0.05)*m_reward
        message="Episode {:04d} score:{:6.1f} MPE: {:6.1f} (epsilon={:5.3f})"
        print(message.format(episode, score, m_reward, epsilon))

        fichier_log.write("{:f}:{:f}\n".format(score, m_reward))

        # Early stop when the moving average nears the episode cap.
        if m_reward>env._max_episode_steps-10:
            print("Fin de l'apprentissage".format(episode))
            break
|
||||
|
||||
# Build the Q-network and training state, then run.
model=model()  # NOTE(review): rebinds 'model' from factory function to instance
optimizer=tf.keras.optimizers.Adam(learning_rate=1E-2)
train_loss=tf.keras.metrics.Mean()
tab_s=[]  # NOTE(review): appears unused in the visible code

train()

fichier_log.close()
|
||||
BIN
Divers/renforcement6/graph.png
Normal file
BIN
Divers/renforcement6/graph.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 128 KiB |
4
Divers/tutoriel11/README.md
Normal file
4
Divers/tutoriel11/README.md
Normal file
@@ -0,0 +1,4 @@
|
||||
# Tutoriel 11
|
||||
## Utilisation de Labelimg et conversion des fichiers xml en np.array
|
||||
|
||||
La vidéo de ce tutoriel est disponible à l'adresse suivante: https://www.youtube.com/watch?v=VWXXFFDqBqA
|
||||
39
Divers/tutoriel11/xmltoarray.py
Normal file
39
Divers/tutoriel11/xmltoarray.py
Normal file
@@ -0,0 +1,39 @@
|
||||
import xmltodict
|
||||
import numpy as np
|
||||
import glob
|
||||
import cv2
|
||||
|
||||
def xml_to_dataset(dir, size=None):
    """Build an image-crop dataset from Pascal-VOC style XML annotations.

    Scans `dir` for *.xml files, reads each referenced image, crops every
    annotated bounding box (optionally resizing to `size`) and one-hot
    encodes the labels.

    Args:
        dir: directory containing the .xml annotation files and the images
             they reference. (NOTE(review): shadows the `dir` builtin; the
             name is kept for backward compatibility with keyword callers.)
        size: optional (width, height) passed to cv2.resize; None keeps each
              crop at its native size.

    Returns:
        (tab_image, labels, tab_one_hot): the crops, the label-name list, and
        one-hot vectors aligned with the label list.
        NOTE(review): label order comes from list(set(...)) and is therefore
        not stable across runs.
    """
    tab_image=[]
    tab_label=[]
    for fichier in glob.glob(dir+"/*.xml"):
        with open(fichier) as fd:
            doc=xmltodict.parse(fd.read())
        image=doc['annotation']['filename']
        img=cv2.imread(dir+"/"+image)
        # xmltodict yields a dict for a single <object>, a list for several.
        # Bug fix: use isinstance instead of type(...)==list.
        objects=doc['annotation']['object'] if isinstance(doc['annotation']['object'], list) else [doc['annotation']['object']]
        for obj in objects:
            xmin=int(obj['bndbox']['xmin'])
            xmax=int(obj['bndbox']['xmax'])
            ymin=int(obj['bndbox']['ymin'])
            ymax=int(obj['bndbox']['ymax'])
            if size is not None:
                tab_image.append(cv2.resize(img[ymin:ymax, xmin:xmax], size))
            else:
                tab_image.append(img[ymin:ymax, xmin:xmax])
            tab_label.append(obj['name'])

    # One-hot encode each label against the deduplicated label list.
    l=list(set(tab_label))
    tab_one_hot=[np.eye(len(l))[l.index(e)] for e in tab_label]

    return tab_image, l, tab_one_hot
|
||||
|
||||
# Quick visual check: show each crop with its decoded label.
tab_image, tab_label, tab_one_hot=xml_to_dataset("./", (32, 32))

for i in range(len(tab_image)):
    cv2.imshow('image', tab_image[i])
    print(tab_label[np.argmax(tab_one_hot[i])], tab_one_hot[i])
    cv2.waitKey()  # wait for a key press between images
|
||||
|
||||
6
Divers/tutoriel12/README.md
Normal file
6
Divers/tutoriel12/README.md
Normal file
@@ -0,0 +1,6 @@
|
||||
# Tutoriel 12
|
||||
## Utilisation basique de réseau de neurones dédié à la vision (modèle zoo)
|
||||
|
||||
La vidéo de ce tutoriel est disponible à l'adresse suivante: https://www.youtube.com/watch?v=r2U-ntB-RM4
|
||||
|
||||
Depuis la vidéo, la version tensorflow 2 est sortie, le programme 'detection_tf2.py' est la version en tensorflow 2 (testé avec la version 2.3 cpu)
|
||||
146
Divers/tutoriel12/detection.py
Normal file
146
Divers/tutoriel12/detection.py
Normal file
@@ -0,0 +1,146 @@
|
||||
import numpy as np
|
||||
import os
|
||||
import tensorflow as tf
|
||||
import cv2
|
||||
|
||||
labels={
|
||||
1: "person",
|
||||
2: "bicycle",
|
||||
3: "car",
|
||||
4: "motorcycle",
|
||||
5: "airplane",
|
||||
6: "bus",
|
||||
7: "train",
|
||||
8: "truck",
|
||||
9: "boat",
|
||||
10: "traffic light",
|
||||
11: "fire hydrant",
|
||||
13: "stop sign",
|
||||
14: "parking meter",
|
||||
15: "bench",
|
||||
16: "bird",
|
||||
17: "cat",
|
||||
18: "dog",
|
||||
19: "horse",
|
||||
20: "sheep",
|
||||
21: "cow",
|
||||
22: "elephant",
|
||||
23: "bear",
|
||||
24: "zebra",
|
||||
25: "giraffe",
|
||||
27: "backpack",
|
||||
28: "umbrella",
|
||||
31: "handbag",
|
||||
32: "tie",
|
||||
33: "suitcase",
|
||||
34: "frisbee",
|
||||
35: "skis",
|
||||
36: "snowboard",
|
||||
37: "sports ball",
|
||||
38: "kite",
|
||||
39: "baseball bat",
|
||||
40: "baseball glove",
|
||||
41: "skateboard",
|
||||
42: "surfboard",
|
||||
43: "tennis racket",
|
||||
44: "bottle",
|
||||
46: "wine glass",
|
||||
47: "cup",
|
||||
48: "fork",
|
||||
49: "knife",
|
||||
50: "spoon",
|
||||
51: "bowl",
|
||||
52: "banana",
|
||||
53: "apple",
|
||||
54: "sandwich",
|
||||
55: "orange",
|
||||
56: "broccoli",
|
||||
57: "carrot",
|
||||
58: "hot dog",
|
||||
59: "pizza",
|
||||
60: "donut",
|
||||
61: "cake",
|
||||
62: "chair",
|
||||
63: "couch",
|
||||
64: "potted plant",
|
||||
65: "bed",
|
||||
67: "dining table",
|
||||
70: "toilet",
|
||||
72: "tv",
|
||||
73: "laptop",
|
||||
74: "mouse",
|
||||
75: "remote",
|
||||
76: "keyboard",
|
||||
77: "cell phone",
|
||||
78: "microwave",
|
||||
79: "oven",
|
||||
80: "toaster",
|
||||
81: "sink",
|
||||
82: "refrigerator",
|
||||
84: "book",
|
||||
85: "clock",
|
||||
86: "vase",
|
||||
87: "scissors",
|
||||
88: "teddy bear",
|
||||
89: "hair drier",
|
||||
90: "toothbrush"
|
||||
}
|
||||
|
||||
# Model files: frozen TF1 inference graph downloaded from the TF model zoo.
MODEL_NAME='ssd_mobilenet_v2_coco_2018_03_29'
PATH_TO_FROZEN_GRAPH=MODEL_NAME+'/frozen_inference_graph.pb'
color_infos=(255, 255, 0)  # BGR color for boxes and text

# Load the frozen graph into a fresh tf.Graph.
detection_graph=tf.Graph()
with detection_graph.as_default():
    od_graph_def=tf.GraphDef()
    with tf.gfile.GFile(PATH_TO_FROZEN_GRAPH, 'rb') as fid:
        serialized_graph=fid.read()
        od_graph_def.ParseFromString(serialized_graph)
        tf.import_graph_def(od_graph_def, name='')

with detection_graph.as_default():
    with tf.Session() as sess:
        cap=cv2.VideoCapture(0)  # default webcam
        # Collect the output tensors this graph actually provides.
        ops=tf.get_default_graph().get_operations()
        all_tensor_names={output.name for op in ops for output in op.outputs}
        tensor_dict={}
        for key in [
                'num_detections', 'detection_boxes', 'detection_scores',
                'detection_classes', 'detection_masks']:
            tensor_name=key+':0'
            if tensor_name in all_tensor_names:
                tensor_dict[key]=tf.get_default_graph().get_tensor_by_name(tensor_name)
        if 'detection_masks' in tensor_dict:
            quit("Masque non géré")  # mask-producing models not supported here
        image_tensor=tf.get_default_graph().get_tensor_by_name('image_tensor:0')

        while True:
            ret, frame=cap.read()
            tickmark=cv2.getTickCount()  # for FPS measurement
            output_dict=sess.run(tensor_dict, feed_dict={image_tensor: np.expand_dims(frame, 0)})
            nbr_object=int(output_dict['num_detections'])
            classes=output_dict['detection_classes'][0].astype(np.uint8)
            boxes=output_dict['detection_boxes'][0]  # normalized [ymin, xmin, ymax, xmax]
            scores=output_dict['detection_scores'][0]
            for objet in range(nbr_object):
                ymin, xmin, ymax, xmax=boxes[objet]
                if scores[objet]>0.30:  # confidence threshold
                    height, width=frame.shape[:2]
                    # Convert normalized coordinates to pixel coordinates.
                    xmin=int(xmin*width)
                    xmax=int(xmax*width)
                    ymin=int(ymin*height)
                    ymax=int(ymax*height)
                    cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), color_infos, 1)
                    txt="{:s}:{:3.0%}".format(labels[classes[objet]], scores[objet])
                    cv2.putText(frame, txt, (xmin, ymin-5), cv2.FONT_HERSHEY_PLAIN, 1, color_infos, 2)
            fps=cv2.getTickFrequency()/(cv2.getTickCount()-tickmark)
            cv2.putText(frame, "FPS: {:05.2f}".format(fps), (10, 20), cv2.FONT_HERSHEY_PLAIN, 1, color_infos, 2)
            cv2.imshow('image', frame)
            # Keyboard: 'a' skips ahead 500 frames, 'q' quits.
            key=cv2.waitKey(1)&0xFF
            if key==ord('a'):
                for objet in range(500):
                    ret, frame=cap.read()
            if key==ord('q'):
                break
        cap.release()
        cv2.destroyAllWindows()
|
||||
148
Divers/tutoriel12/detection_tf2.py
Normal file
148
Divers/tutoriel12/detection_tf2.py
Normal file
@@ -0,0 +1,148 @@
|
||||
import numpy as np
|
||||
import os
|
||||
import tensorflow as tf
|
||||
import cv2
|
||||
|
||||
labels={
|
||||
1: "person",
|
||||
2: "bicycle",
|
||||
3: "car",
|
||||
4: "motorcycle",
|
||||
5: "airplane",
|
||||
6: "bus",
|
||||
7: "train",
|
||||
8: "truck",
|
||||
9: "boat",
|
||||
10: "traffic light",
|
||||
11: "fire hydrant",
|
||||
13: "stop sign",
|
||||
14: "parking meter",
|
||||
15: "bench",
|
||||
16: "bird",
|
||||
17: "cat",
|
||||
18: "dog",
|
||||
19: "horse",
|
||||
20: "sheep",
|
||||
21: "cow",
|
||||
22: "elephant",
|
||||
23: "bear",
|
||||
24: "zebra",
|
||||
25: "giraffe",
|
||||
27: "backpack",
|
||||
28: "umbrella",
|
||||
31: "handbag",
|
||||
32: "tie",
|
||||
33: "suitcase",
|
||||
34: "frisbee",
|
||||
35: "skis",
|
||||
36: "snowboard",
|
||||
37: "sports ball",
|
||||
38: "kite",
|
||||
39: "baseball bat",
|
||||
40: "baseball glove",
|
||||
41: "skateboard",
|
||||
42: "surfboard",
|
||||
43: "tennis racket",
|
||||
44: "bottle",
|
||||
46: "wine glass",
|
||||
47: "cup",
|
||||
48: "fork",
|
||||
49: "knife",
|
||||
50: "spoon",
|
||||
51: "bowl",
|
||||
52: "banana",
|
||||
53: "apple",
|
||||
54: "sandwich",
|
||||
55: "orange",
|
||||
56: "broccoli",
|
||||
57: "carrot",
|
||||
58: "hot dog",
|
||||
59: "pizza",
|
||||
60: "donut",
|
||||
61: "cake",
|
||||
62: "chair",
|
||||
63: "couch",
|
||||
64: "potted plant",
|
||||
65: "bed",
|
||||
67: "dining table",
|
||||
70: "toilet",
|
||||
72: "tv",
|
||||
73: "laptop",
|
||||
74: "mouse",
|
||||
75: "remote",
|
||||
76: "keyboard",
|
||||
77: "cell phone",
|
||||
78: "microwave",
|
||||
79: "oven",
|
||||
80: "toaster",
|
||||
81: "sink",
|
||||
82: "refrigerator",
|
||||
84: "book",
|
||||
85: "clock",
|
||||
86: "vase",
|
||||
87: "scissors",
|
||||
88: "teddy bear",
|
||||
89: "hair drier",
|
||||
90: "toothbrush"
|
||||
}
|
||||
|
||||
# Frozen TF1 inference graph loaded through the TF2 compat.v1 layer.
#MODEL_NAME='ssd_mobilenet_v2_coco_2018_03_29'
MODEL_NAME='faster_rcnn_inception_v2_coco_2018_01_28'
PATH_TO_FROZEN_GRAPH=MODEL_NAME+'/frozen_inference_graph.pb'
color_infos=(255, 255, 0)  # BGR color for boxes and text

# Load the frozen graph into a fresh graph object.
detection_graph=tf.compat.v1.Graph()
with detection_graph.as_default():
    od_graph_def=tf.compat.v1.GraphDef()
    with tf.io.gfile.GFile(PATH_TO_FROZEN_GRAPH, 'rb') as fid:
        serialized_graph=fid.read()
        od_graph_def.ParseFromString(serialized_graph)
        tf.import_graph_def(od_graph_def, name='')

with detection_graph.as_default():
    with tf.compat.v1.Session() as sess:
        #cap=cv2.VideoCapture('../../En_cours/tuto8-suite/Plan_9_from_Outer_Space_1959_512kb.mp4')
        cap=cv2.VideoCapture(0)  # default webcam
        # Collect the output tensors this graph actually provides.
        ops=tf.compat.v1.get_default_graph().get_operations()
        all_tensor_names={output.name for op in ops for output in op.outputs}
        tensor_dict={}
        for key in [
                'num_detections', 'detection_boxes', 'detection_scores',
                'detection_classes', 'detection_masks']:
            tensor_name=key+':0'
            if tensor_name in all_tensor_names:
                tensor_dict[key]=tf.compat.v1.get_default_graph().get_tensor_by_name(tensor_name)
        if 'detection_masks' in tensor_dict:
            quit("Masque non géré")  # mask-producing models not supported here
        image_tensor=tf.compat.v1.get_default_graph().get_tensor_by_name('image_tensor:0')

        while True:
            ret, frame=cap.read()
            tickmark=cv2.getTickCount()  # for FPS measurement
            output_dict=sess.run(tensor_dict, feed_dict={image_tensor: np.expand_dims(frame, 0)})
            nbr_object=int(output_dict['num_detections'])
            classes=output_dict['detection_classes'][0].astype(np.uint8)
            boxes=output_dict['detection_boxes'][0]  # normalized [ymin, xmin, ymax, xmax]
            scores=output_dict['detection_scores'][0]
            for objet in range(nbr_object):
                ymin, xmin, ymax, xmax=boxes[objet]
                if scores[objet]>0.30:  # confidence threshold
                    height, width=frame.shape[:2]
                    # Convert normalized coordinates to pixel coordinates.
                    xmin=int(xmin*width)
                    xmax=int(xmax*width)
                    ymin=int(ymin*height)
                    ymax=int(ymax*height)
                    cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), color_infos, 1)
                    txt="{:s}:{:3.0%}".format(labels[classes[objet]], scores[objet])
                    cv2.putText(frame, txt, (xmin, ymin-5), cv2.FONT_HERSHEY_PLAIN, 1, color_infos, 2)
            fps=cv2.getTickFrequency()/(cv2.getTickCount()-tickmark)
            cv2.putText(frame, "FPS: {:05.2f}".format(fps), (10, 20), cv2.FONT_HERSHEY_PLAIN, 1, color_infos, 2)
            cv2.imshow('image', frame)
            # Keyboard: 'a' skips ahead 500 frames, 'q' quits.
            key=cv2.waitKey(1)&0xFF
            if key==ord('a'):
                for objet in range(500):
                    ret, frame=cap.read()
            if key==ord('q'):
                break
        cap.release()
        cv2.destroyAllWindows()
|
||||
5
Divers/tutoriel18-1/README.md
Normal file
5
Divers/tutoriel18-1/README.md
Normal file
@@ -0,0 +1,5 @@
|
||||
# Tutoriel 18 partie 1
|
||||
## Sudoku
|
||||
|
||||
La vidéo de ce tutoriel est disponible à l'adresse suivante: https://www.youtube.com/watch?v=WwPHs1SJrec
|
||||
|
||||
69
Divers/tutoriel18-1/perspective.py
Normal file
69
Divers/tutoriel18-1/perspective.py
Normal file
@@ -0,0 +1,69 @@
|
||||
import cv2
|
||||
import numpy as np
|
||||
import operator
|
||||
|
||||
# Grid geometry: 9x9 cells of 28px digits with a margin on each side.
marge=4
case=28+2*marge
taille_grille=9*case

# Adaptive-threshold settings ('o' toggles method, 'p'/'m' adjust block size).
methode=cv2.ADAPTIVE_THRESH_GAUSSIAN_C
v1=9

cap=cv2.VideoCapture(0)
while True:
    ret, frame=cap.read()
    gray=cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray=cv2.GaussianBlur(gray, (5, 5), 0)
    thresh=cv2.adaptiveThreshold(gray, 255, methode, cv2.THRESH_BINARY_INV, v1, 2)
    #cv2.imshow("thresh", thresh)
    # Keep the largest 4-sided contour above 25000 px^2: assumed to be the grid.
    contours, hierarchy=cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    contour_grille=None
    maxArea=0
    for c in contours:
        area=cv2.contourArea(c)
        if area>25000:
            peri=cv2.arcLength(c, True)
            polygone=cv2.approxPolyDP(c, 0.01*peri, True)
            if area>maxArea and len(polygone)==4:
                contour_grille=polygone
                maxArea=area
    if contour_grille is not None:
        cv2.drawContours(frame, [contour_grille], 0, (0, 255, 0), 2)
        # Order the 4 corners (sorted by y, disambiguated by x) so the
        # perspective transform maps them to TL, TR, BL, BR.
        points=np.vstack(contour_grille).squeeze()
        points=sorted(points, key=operator.itemgetter(1))
        if points[0][0]<points[1][0]:
            if points[3][0]<points[2][0]:
                pts1=np.float32([points[0], points[1], points[3], points[2]])
            else:
                pts1=np.float32([points[0], points[1], points[2], points[3]])
        else:
            if points[3][0]<points[2][0]:
                pts1=np.float32([points[1], points[0], points[3], points[2]])
            else:
                pts1=np.float32([points[1], points[0], points[2], points[3]])
        pts2=np.float32([[0, 0], [taille_grille, 0], [0, taille_grille], [taille_grille, taille_grille]])
        # Warp the grid to a square, axis-aligned image.
        M=cv2.getPerspectiveTransform(pts1, pts2)
        grille=cv2.warpPerspective(frame, M, (taille_grille, taille_grille))
        # Debug: label the detected corners on the camera frame.
        cv2.putText(frame, "1", (points[0][0], points[0][1]), cv2.FONT_HERSHEY_COMPLEX_SMALL, 0.9, (0, 0, 255), 1)
        cv2.putText(frame, "2", (points[1][0], points[1][1]), cv2.FONT_HERSHEY_COMPLEX_SMALL, 0.9, (0, 0, 255), 1)
        cv2.putText(frame, "3", (points[2][0], points[2][1]), cv2.FONT_HERSHEY_COMPLEX_SMALL, 0.9, (0, 0, 255), 1)
        cv2.putText(frame, "4", (points[3][0], points[3][1]), cv2.FONT_HERSHEY_COMPLEX_SMALL, 0.9, (0, 0, 255), 1)
        cv2.imshow("grille", grille)
    txt="ADAPTIVE_THRESH_MEAN_C" if methode==cv2.ADAPTIVE_THRESH_MEAN_C else "ADAPTIVE_THRESH_GAUSSIAN_C"
    cv2.putText(frame, "[p|m]v1: {:2d} [o]methode: {}".format(v1, txt), (10, 20), cv2.FONT_HERSHEY_COMPLEX_SMALL, 0.9, (0, 0, 255), 1)
    cv2.imshow("frame", frame)
    # Keyboard: q=quit, p/m=block size +/- (kept odd, in 3..21), o=toggle method.
    key=cv2.waitKey(1)&0xFF
    if key==ord('q'):
        break
    if key==ord('p'):
        v1=min(21, v1+2)
    if key==ord('m'):
        v1=max(3, v1-2)
        print(v1)
    if key==ord('o'):
        if methode==cv2.ADAPTIVE_THRESH_GAUSSIAN_C:
            methode=cv2.ADAPTIVE_THRESH_MEAN_C
        else:
            methode=cv2.ADAPTIVE_THRESH_GAUSSIAN_C
cap.release()
cv2.destroyAllWindows()
|
||||
47
Divers/tutoriel18-1/poly.py
Normal file
47
Divers/tutoriel18-1/poly.py
Normal file
@@ -0,0 +1,47 @@
|
||||
import cv2
|
||||
import numpy as np
|
||||
import operator
|
||||
|
||||
# Adaptive-threshold settings ('o' toggles method, 'p'/'m' adjust block size).
methode=cv2.ADAPTIVE_THRESH_GAUSSIAN_C
v1=9

cap=cv2.VideoCapture(0)
while True:
    ret, frame=cap.read()
    gray=cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray=cv2.GaussianBlur(gray, (5, 5), 0)
    thresh=cv2.adaptiveThreshold(gray, 255, methode, cv2.THRESH_BINARY_INV, v1, 2)
    cv2.imshow("thresh", thresh)

    # Keep the largest 4-sided contour above 25000 px^2 and outline it.
    contours, hierarchy=cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    contour_grille=None
    maxArea=0
    for c in contours:
        area=cv2.contourArea(c)
        if area>25000:
            peri=cv2.arcLength(c, True)
            polygone=cv2.approxPolyDP(c, 0.01*peri, True)
            if area>maxArea and len(polygone)==4:
                contour_grille=polygone
                maxArea=area
    if contour_grille is not None:
        cv2.drawContours(frame, [contour_grille], 0, (0, 255, 0), 2)
    txt="ADAPTIVE_THRESH_MEAN_C" if methode==cv2.ADAPTIVE_THRESH_MEAN_C else "ADAPTIVE_THRESH_GAUSSIAN_C"
    cv2.putText(frame, "[p|m]v1: {:2d} [o]methode: {}".format(v1, txt), (10, 20), cv2.FONT_HERSHEY_COMPLEX_SMALL, 0.9, (0, 0, 255), 1)

    cv2.imshow("frame", frame)
    # Keyboard: q=quit, p/m=block size +/- (kept odd, in 3..21), o=toggle method.
    key=cv2.waitKey(1)&0xFF
    if key==ord('q'):
        break
    if key==ord('p'):
        v1=min(21, v1+2)
    if key==ord('m'):
        v1=max(3, v1-2)
        print(v1)
    if key==ord('o'):
        if methode==cv2.ADAPTIVE_THRESH_GAUSSIAN_C:
            methode=cv2.ADAPTIVE_THRESH_MEAN_C
        else:
            methode=cv2.ADAPTIVE_THRESH_GAUSSIAN_C
cap.release()
cv2.destroyAllWindows()
|
||||
31
Divers/tutoriel18-1/threshold.py
Normal file
31
Divers/tutoriel18-1/threshold.py
Normal file
@@ -0,0 +1,31 @@
|
||||
import cv2
|
||||
import numpy as np
|
||||
import operator
|
||||
|
||||
# Live demo of cv2.adaptiveThreshold on the webcam feed.
# 'o' toggles the method, 'p'/'m' adjust the (odd) block size, 'q' quits.
methode=cv2.ADAPTIVE_THRESH_GAUSSIAN_C
v1=9  # adaptive-threshold block size (kept odd, in 3..21)

cap=cv2.VideoCapture(0)
while True:
    ret, frame=cap.read()
    gray=cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray=cv2.GaussianBlur(gray, (5, 5), 0)
    thresh=cv2.adaptiveThreshold(gray, 255, methode, cv2.THRESH_BINARY_INV, v1, 2)
    cv2.imshow("thresh", thresh)
    txt="ADAPTIVE_THRESH_MEAN_C" if methode==cv2.ADAPTIVE_THRESH_MEAN_C else "ADAPTIVE_THRESH_GAUSSIAN_C"
    cv2.putText(frame, "[p|m]v1: {:2d} [o]methode: {}".format(v1, txt), (10, 20), cv2.FONT_HERSHEY_COMPLEX_SMALL, 0.9, (0, 0, 255), 1)
    cv2.imshow("frame", frame)
    key=cv2.waitKey(1)&0xFF
    if key==ord('q'):
        break
    if key==ord('p'):
        v1=min(21, v1+2)
    if key==ord('m'):
        v1=max(3, v1-2)
    if key==ord('o'):
        if methode==cv2.ADAPTIVE_THRESH_GAUSSIAN_C:
            methode=cv2.ADAPTIVE_THRESH_MEAN_C
        else:
            methode=cv2.ADAPTIVE_THRESH_GAUSSIAN_C
cap.release()
cv2.destroyAllWindows()
|
||||
5
Divers/tutoriel18-2/README.md
Normal file
5
Divers/tutoriel18-2/README.md
Normal file
@@ -0,0 +1,5 @@
|
||||
# Tutoriel 18 partie 2
|
||||
## Sudoku
|
||||
|
||||
La vidéo de ce tutoriel est disponible à l'adresse suivante: https://www.youtube.com/watch?v=XFNg8lXe-Tk
|
||||
|
||||
BIN
Divers/tutoriel18-2/din1451altG.ttf
Normal file
BIN
Divers/tutoriel18-2/din1451altG.ttf
Normal file
Binary file not shown.
18
Divers/tutoriel18-2/font.py
Normal file
18
Divers/tutoriel18-2/font.py
Normal file
@@ -0,0 +1,18 @@
|
||||
from PIL import ImageFont, ImageDraw, Image
|
||||
import cv2
|
||||
import numpy as np
|
||||
|
||||
# Render digits 1-9 as 28x28 grayscale images with a TrueType font
# (MNIST-like format) and preview each one with OpenCV; 'q' quits.
for i in range(1, 10):
    image=Image.new("L", (28, 28))  # single-channel (grayscale) canvas
    draw=ImageDraw.Draw(image)
    font=ImageFont.truetype("din1451altG.ttf", 27)
    text="{:d}".format(i)
    draw.text((10, 0), text, font=font, fill=(255))
    image=np.array(image).reshape(28, 28, 1)  # HxWxC layout for cv2
    cv2.imshow("image", image)
    key=cv2.waitKey()  # wait for a key press between digits
    if key&0xFF==ord('q'):
        quit()
|
||||
|
||||
|
||||
|
||||
100
Divers/tutoriel18-2/sudoku.py
Normal file
100
Divers/tutoriel18-2/sudoku.py
Normal file
@@ -0,0 +1,100 @@
|
||||
import cv2
|
||||
import numpy as np
|
||||
import tensorflow as tf
|
||||
import sudoku_solver as ss
|
||||
from time import sleep
|
||||
import operator
|
||||
|
||||
# Grid geometry: 9x9 cells of 28px digits plus margins.
marge=4
case=28+2*marge
taille_grille=9*case
flag=0  # 0 = grid not yet solved this detection; 1 = overlay the cached solution
cap=cv2.VideoCapture(0)
# Restore the trained digit-recognition model (TF1 checkpoint).
with tf.Session() as s:
    saver=tf.train.import_meta_graph('./mon_modele/modele.meta')
    saver.restore(s, tf.train.latest_checkpoint('./mon_modele/'))
    graph=tf.get_default_graph()
    images=graph.get_tensor_by_name("entree:0")
    sortie=graph.get_tensor_by_name("sortie:0")
    is_training=graph.get_tensor_by_name("is_training:0")
    maxArea=0
    while True:
        ret, frame=cap.read()
        if maxArea==0:
            cv2.imshow("frame", frame)
        # Locate the largest 4-sided contour (assumed to be the sudoku grid).
        gray=cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        gray=cv2.GaussianBlur(gray, (5, 5), 0)
        thresh=cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 9, 2)
        contours, hierarchy=cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        contour_grille=None
        maxArea=0
        for c in contours:
            area=cv2.contourArea(c)
            if area>25000:
                peri=cv2.arcLength(c, True)
                polygone=cv2.approxPolyDP(c, 0.02*peri, True)
                if area>maxArea and len(polygone)==4:
                    contour_grille=polygone
                    maxArea=area
        if contour_grille is not None:
            # Order the corners TL, TR, BL, BR and warp the grid to a square.
            points=np.vstack(contour_grille).squeeze()
            points=sorted(points, key=operator.itemgetter(1))
            if points[0][0]<points[1][0]:
                if points[3][0]<points[2][0]:
                    pts1=np.float32([points[0], points[1], points[3], points[2]])
                else:
                    pts1=np.float32([points[0], points[1], points[2], points[3]])
            else:
                if points[3][0]<points[2][0]:
                    pts1=np.float32([points[1], points[0], points[3], points[2]])
                else:
                    pts1=np.float32([points[1], points[0], points[2], points[3]])
            pts2=np.float32([[0, 0], [taille_grille, 0], [0, taille_grille], [taille_grille, taille_grille]])
            M=cv2.getPerspectiveTransform(pts1, pts2)
            grille=cv2.warpPerspective(frame, M, (taille_grille, taille_grille))
            grille=cv2.cvtColor(grille, cv2.COLOR_BGR2GRAY)
            grille=cv2.adaptiveThreshold(grille, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 9, 2)
            cv2.imshow("grille", grille)
            if flag==0:
                # Recognize each cell's digit ("0" = empty) and solve the grid.
                grille=grille/255
                grille_txt=[]
                for y in range(9):
                    ligne=""
                    for x in range(9):
                        y2min=y*case+marge
                        y2max=(y+1)*case-marge
                        x2min=x*case+marge
                        x2max=(x+1)*case-marge
                        prediction=s.run(sortie, feed_dict={images: [grille[y2min:y2max, x2min:x2max].reshape(28, 28, 1)], is_training: False})
                        ligne+="{:d}".format(np.argmax(prediction[0]))
                    grille_txt.append(ligne)
                result=ss.sudoku(grille_txt)
                print("Resultat:", result)
                #result=None
            if result is not None:
                flag=1  # reuse the cached solution on subsequent frames
                # Draw the solved digits on a black canvas, warp it back onto
                # the camera frame, and composite via a binary mask.
                fond=np.zeros(shape=(taille_grille, taille_grille, 3), dtype=np.float32)
                for y in range(len(result)):
                    for x in range(len(result[y])):
                        if grille_txt[y][x]=="0":
                            cv2.putText(fond, "{:d}".format(result[y][x]), ((x)*case+marge+3, (y+1)*case-marge-3), cv2.FONT_HERSHEY_SCRIPT_COMPLEX, 0.9, (0, 0, 255), 1)
                M=cv2.getPerspectiveTransform(pts2, pts1)
                h, w, c=frame.shape
                fondP=cv2.warpPerspective(fond, M, (w, h))
                img2gray=cv2.cvtColor(fondP, cv2.COLOR_BGR2GRAY)
                ret, mask=cv2.threshold(img2gray, 10, 255, cv2.THRESH_BINARY)
                mask=mask.astype('uint8')
                mask_inv=cv2.bitwise_not(mask)
                img1_bg=cv2.bitwise_and(frame, frame, mask=mask_inv)
                img2_fg=cv2.bitwise_and(fondP, fondP, mask=mask).astype('uint8')
                dst=cv2.add(img1_bg, img2_fg)
                cv2.imshow("frame", dst)
            else:
                cv2.imshow("frame", frame)
        else:
            flag=0  # grid lost: recognize and solve again next time
        key=cv2.waitKey(1)&0xFF
        if key==ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()
|
||||
89
Divers/tutoriel18-2/sudoku_solver.py
Normal file
89
Divers/tutoriel18-2/sudoku_solver.py
Normal file
@@ -0,0 +1,89 @@
|
||||
def sudoku(f):
    """Solve a 9x9 sudoku given as 9 strings of 9 digits ('0' = empty).

    Prints the input grid, validates its format and consistency, solves it
    by depth-first backtracking, prints the solved grid and returns it as a
    list of 9 lists of ints.  Returns None (after printing a French
    diagnostic, as the callers expect) when the input is malformed,
    contradictory or unsolvable.
    """
    P = print

    def af(g):
        # Pretty-print a grid: '.' for empty cells, '+' separators at the
        # 3x3 band/stack boundaries.
        for n, l in enumerate(g):
            for m, c in enumerate(l):
                P(str(c).replace("0", "."), end="")
                if m in {2, 5}:
                    P("+", end="")
            P()
            if n in {2, 5}:
                P("+" * 11)

    def cp(q, s):
        # Candidate digits for empty cell q=[row, col]: 1..9 minus the
        # digits already present in its row, column and 3x3 box.
        used = set(s[q[0]])
        used |= {s[i][q[1]] for i in range(9)}
        k = q[0] // 3, q[1] // 3
        for i in range(3):
            used |= set(s[k[0] * 3 + i][k[1] * 3:(k[1] + 1) * 3])
        return set(range(1, 10)) - used

    def ec(l):
        # True when a non-zero digit appears more than once in l,
        # i.e. the row/column/box is contradictory.
        for c in set(l) - {0}:
            if l.count(c) != 1:
                return True
        return False

    # --- Parse the input and check its format -------------------------------
    af(f)

    s = []    # working grid: 9 lists of 9 ints
    t = []    # [row, col] of every empty cell, in scan order
    nl = -1   # survives an empty input (the original raised NameError)
    for nl, l in enumerate(f):
        try:
            n = list(map(int, l))
        except (ValueError, TypeError):
            # Narrowed from a bare `except:` that hid unrelated errors.
            P("La ligne "+str(nl+1)+" contient autre chose qu'un chiffre.")
            return
        if len(n) != 9:
            P("La ligne "+str(nl+1)+" ne contient pas 9 chiffres.")
            return
        t += [[nl, i] for i in range(9) if n[i] == 0]
        s.append(n)
    if nl != 8:
        P("Le jeu contient "+str(nl+1)+" lignes au lieu de 9.")
        return

    # --- Consistency checks on rows, columns and boxes ----------------------
    for l in range(9):
        if ec(s[l]):
            P("La ligne "+str(l+1)+" est contradictoire.")
            return
    for c in range(9):
        k = [s[l][c] for l in range(9)]
        if ec(k):
            P("La colonne "+str(c+1)+" est contradictoire.")
            return
    for l in range(3):
        for c in range(3):
            q = []
            for i in range(3):
                q += s[l*3+i][c*3:(c+1)*3]
            if ec(q):
                P("La cellule ("+str(l+1)+";"+str(c+1)+") est contradictoire.")
                return

    # --- Depth-first backtracking over the empty cells ----------------------
    # p[cr] holds the candidates still untried for empty cell t[cr].
    p = [set() for _ in t]
    cr = 0
    while cr < len(t):
        # Candidates are recomputed each time a cell is reached forward.
        p[cr] = cp(t[cr], s)
        # Backtrack while the current cell has no candidate left.
        while not p[cr]:
            s[t[cr][0]][t[cr][1]] = 0
            cr -= 1
            if cr < 0:
                # Explicit dead-end test; the original relied on negative
                # indexing eventually raising IndexError, caught by a bare
                # `except:` — fragile and easy to break.
                P("Le sudoku n'a pas de solution.")
                return
        s[t[cr][0]][t[cr][1]] = p[cr].pop()
        cr += 1

    # --- Print and return the solved grid -----------------------------------
    af(s)
    return s
|
||||
147
Divers/tutoriel18-2/train.py
Normal file
147
Divers/tutoriel18-2/train.py
Normal file
@@ -0,0 +1,147 @@
|
||||
import tensorflow as tf
|
||||
import numpy as np
|
||||
from sklearn.utils import shuffle
|
||||
from sklearn.model_selection import train_test_split
|
||||
import cv2
|
||||
import os
|
||||
import numpy as np
|
||||
from PIL import ImageFont, ImageDraw, Image
|
||||
|
||||
taille_batch=100        # mini-batch size for training and evaluation
nbr_entrainement=100    # number of training epochs
nbr=2*42                # clean digit images generated per (digit, font) pair
|
||||
|
||||
def modif_image(image, seuil=1):
    """Return a copy of *image* sprinkled with salt-and-pepper noise.

    A 28x28 standard-normal field is drawn; pixels where the field exceeds
    *seuil* are forced to 255 (salt) and pixels below -seuil to 0 (pepper).
    A higher threshold therefore gives sparser noise.  The input array is
    left untouched.
    """
    champ = np.random.normal(0, 1, (28, 28))
    bruitee = image.copy()
    bruitee[champ > seuil] = 255
    bruitee[champ < -seuil] = 0
    return bruitee
|
||||
|
||||
def convolution(input, taille_noyau, nbr_noyau, stride, b_norm=False, f_activation=None, training=False):
    # 2-D convolution layer (TensorFlow 1.x graph mode): truncated-normal
    # weights, SAME padding, optional batch-norm and activation.
    #   taille_noyau : kernel side, nbr_noyau : number of output filters.
    # NOTE(review): b_filtre is a constant numpy zero vector, not a
    # tf.Variable, so the bias is never trained — confirm this is intended.
    w_filtre=tf.Variable(tf.random.truncated_normal(shape=(taille_noyau, taille_noyau, int(input.get_shape()[-1]), nbr_noyau)))
    b_filtre=np.zeros(nbr_noyau)
    result=tf.nn.conv2d(input, w_filtre, strides=[1, stride, stride, 1], padding='SAME')+b_filtre
    if b_norm is True:
        # `training` drives the batch-norm mode (batch stats vs moving averages).
        result=tf.layers.batch_normalization(result, training=training)
    if f_activation is not None:
        result=f_activation(result)
    return result
|
||||
|
||||
def fc(input, nbr_neurone, b_norm=False, f_activation=None, training=False):
    # Fully-connected layer (TF1): truncated-normal weights, zero-initialized
    # trainable bias, optional batch-norm and activation.
    w=tf.Variable(tf.random.truncated_normal(shape=(int(input.get_shape()[-1]), nbr_neurone), dtype=tf.float32))
    b=tf.Variable(np.zeros(shape=(nbr_neurone)), dtype=tf.float32)
    result=tf.matmul(input, w)+b
    if b_norm is True:
        result=tf.layers.batch_normalization(result, training=training)
    if f_activation is not None:
        result=f_activation(result)
    return result
|
||||
|
||||
def ia(nbr_classes, size, couche, learning_rate=1E-3):
    # Build the TF1 digit-classifier graph and return its handles:
    # (input placeholder, label placeholder, training flag, softmax output,
    #  train op, accuracy op, saver).
    # The tensor names 'entree', 'is_training' and 'sortie' form the
    # contract relied upon when the saved model is reloaded for inference.
    ph_images=tf.placeholder(shape=(None, size, size, couche), dtype=tf.float32, name='entree')
    ph_labels=tf.placeholder(shape=(None, nbr_classes), dtype=tf.float32)
    ph_is_training=tf.placeholder_with_default(False, (), name='is_training')

    # Two conv blocks (64 then 128 filters) with dropout, then 2x2 max-pool.
    result=convolution(ph_images, 3, 64, 1, True, tf.nn.relu, ph_is_training)
    result=tf.layers.dropout(result, 0.3, training=ph_is_training)
    result=convolution(result, 3, 128, 1, True, tf.nn.relu, ph_is_training)
    result=tf.layers.dropout(result, 0.4, training=ph_is_training)
    result=tf.nn.max_pool(result, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

    result=tf.contrib.layers.flatten(result)

    # Fully-connected head: 128 units, heavy dropout, then class logits.
    result=fc(result, 128, True, tf.nn.relu, ph_is_training)
    result=tf.layers.dropout(result, 0.5, training=ph_is_training)
    result=fc(result, nbr_classes)
    socs=tf.nn.softmax(result, name="sortie")

    # Cross-entropy on the raw logits; batch-norm update ops must run
    # before each optimizer step (hence the control dependency).
    loss=tf.nn.softmax_cross_entropy_with_logits_v2(labels=ph_labels, logits=result)
    extra_update_ops=tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(extra_update_ops):
        train=tf.train.AdamOptimizer(learning_rate).minimize(loss)
    accuracy=tf.reduce_mean(tf.cast(tf.equal(tf.argmax(socs, 1), tf.argmax(ph_labels, 1)), tf.float32))

    return ph_images, ph_labels, ph_is_training, socs, train, accuracy, tf.train.Saver()
|
||||
|
||||
# ---------------------------------------------------------------------------
# Dataset generation: digits 1-9 rendered with the system TTF fonts, plus
# noisy black images for class 0 ("empty cell").  Labels are one-hot.
# ---------------------------------------------------------------------------
tab_images=[]
tab_labels=[]

# NOTE(review): `dir` shadows the builtin; kept unchanged here.
for dir in ["/usr/share/fonts/truetype/ubuntu-font-family/", "/usr/share/fonts/truetype/freefont/"]:
    for root, dirs, files in os.walk(dir):
        for file in files:
            if file.endswith("ttf"):
                print(root+"/"+file)
                for i in range(1, 10):
                    for cpt in range(nbr):
                        # Draw the digit at a random size/position on a
                        # 28x28 grayscale canvas; keep one clean copy and
                        # one salt-and-pepper-noised copy.
                        image=Image.new("L", (28, 28))
                        draw=ImageDraw.Draw(image)
                        font=ImageFont.truetype(root+"/"+file, np.random.randint(26, 32))
                        text="{:d}".format(i)
                        draw.text((np.random.randint(1, 10), np.random.randint(-4, 0)), text, font=font, fill=(255))
                        image=np.array(image).reshape(28, 28, 1)
                        tab_images.append(image)
                        tab_labels.append(np.eye(10)[i])
                        image_m=modif_image(image, 1.05+np.random.rand())
                        tab_images.append(image_m)
                        tab_labels.append(np.eye(10)[i])
# Class 0 samples: black canvases with random noise only.
image=np.zeros((28, 28, 1))
for cpt in range(3*nbr):
    image_m=modif_image(image, 1.05+np.random.rand())
    tab_images.append(image_m)
    tab_labels.append(np.eye(10)[0])

tab_images=np.array(tab_images)
tab_labels=np.array(tab_labels)

tab_images=tab_images/255   # normalize pixels to [0, 1]

tab_images, tab_labels=shuffle(tab_images, tab_labels)

if False: # change to True to preview the generated images
    for i in range(len(tab_images)):
        cv2.imshow('chiffre', tab_images[i].reshape(28, 28, 1))
        print(tab_labels[i], np.argmax(tab_labels[i]))
        if cv2.waitKey()&0xFF==ord('q'):
            break

print("Nbr:", len(tab_images))

# 90/10 train/test split.
train_images, test_images, train_labels, test_labels=train_test_split(tab_images, tab_labels, test_size=0.10)

# Build the graph: 10 classes, 28x28 inputs, 1 channel.
images, labels, is_training, sortie, train, accuracy, saver=ia(10, 28, 1)

with tf.Session() as s:
    s.run(tf.global_variables_initializer())
    tab_train=[]
    tab_test=[]
    for id_entrainement in np.arange(nbr_entrainement):
        print("> Entrainement", id_entrainement)
        # One optimization pass over the training set, by mini-batches.
        for batch in np.arange(0, len(train_images), taille_batch):
            s.run(train, feed_dict={
                images: train_images[batch:batch+taille_batch],
                labels: train_labels[batch:batch+taille_batch],
                is_training: True
            })
        print(" entrainement OK")
        # Accuracy over the training set.
        # NOTE(review): is_training is fed True during evaluation here and
        # below, keeping dropout/batch-norm in training mode — confirm this
        # is intended (False would give the usual evaluation accuracy).
        tab_accuracy_train=[]
        for batch in np.arange(0, len(train_images), taille_batch):
            p=s.run(accuracy, feed_dict={
                images: train_images[batch:batch+taille_batch],
                labels: train_labels[batch:batch+taille_batch],
                is_training: True
            })
            tab_accuracy_train.append(p)
        print(" train:", np.mean(tab_accuracy_train))
        # Accuracy over the held-out test set.
        tab_accuracy_test=[]
        for batch in np.arange(0, len(test_images), taille_batch):
            p=s.run(accuracy, feed_dict={
                images: test_images[batch:batch+taille_batch],
                labels: test_labels[batch:batch+taille_batch],
                is_training: True
            })
            tab_accuracy_test.append(p)
        print(" test :", np.mean(tab_accuracy_test))
        # Error-rate history (collected but not used afterwards).
        tab_train.append(1-np.mean(tab_accuracy_train))
        tab_test.append(1-np.mean(tab_accuracy_test))
    # Checkpoint reloaded by the sudoku video reader.
    saver.save(s, './mon_modele/modele')
|
||||
7
Divers/tutoriel20/README.md
Normal file
7
Divers/tutoriel20/README.md
Normal file
@@ -0,0 +1,7 @@
|
||||
# Tutoriel 20
|
||||
## Dlib: Detection et évaluation de la position de la tête
|
||||
|
||||
La vidéo de ce tutoriel est disponible à l'adresse suivante: https://www.youtube.com/watch?v=ibuEFfpVWlU
|
||||
|
||||
|
||||

|
||||
113
Divers/tutoriel20/cube.py
Normal file
113
Divers/tutoriel20/cube.py
Normal file
@@ -0,0 +1,113 @@
|
||||
import cv2
|
||||
import numpy as np
|
||||
import dlib
|
||||
import math
|
||||
|
||||
# Video source: default webcam (or a recorded video, commented out below).
cap=cv2.VideoCapture(0)
#cap=cv2.VideoCapture("debat.webm")

# dlib HOG face detector and 68-point landmark model
# (the .dat file must be present in the working directory).
detector=dlib.get_frontal_face_detector()
predictor=dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
|
||||
|
||||
def tr(c, o, coeff):
    """Scale coordinate *c* away from origin *o* by *coeff*.

    The scaled offset is truncated toward zero before being re-anchored,
    matching int() semantics.
    """
    delta = (c - o) * coeff
    return int(delta) + o
|
||||
|
||||
def cube(image, pt1, pt2, a1, a2, a3):
    """Draw a wireframe cube on *image*, anchored on the eye segment pt1-pt2.

    The front face is the square built on the pt1-pt2 segment, inflated by
    *offset* around its centre (ox1, oy1); the back face is the same square
    inflated by *offset2* and shifted by (a2, a3) — the head yaw/pitch
    estimates — and the matching corners are then joined.

    NOTE(review): a1 (the roll estimate) is accepted but currently unused;
    it is kept so the caller's interface is unchanged.

    Fix: removed an unused d_eyes computation that read the global
    `landmarks` variable — dead code with a hidden global dependency.
    """
    color=(0, 255, 0)
    epaisseur=2
    offset=1.6    # inflation factor of the front face
    offset2=2     # inflation factor of the back face

    # Centre of the square built on the pt1-pt2 segment.
    ox1=int((-(pt2.y-pt1.y)+pt2.x-pt1.x)/2)+pt1.x
    oy1=int(((pt2.x-pt1.x+pt2.y)-pt1.y)/2)+pt1.y

    # Front face: four edges of the inflated square.
    cv2.line(image,
             (tr(pt1.x, ox1, offset), tr(pt1.y, oy1, offset)),
             (tr(pt2.x, ox1, offset), tr(pt2.y, oy1, offset)),
             color, epaisseur)
    cv2.line(image,
             (tr(pt2.x, ox1, offset), tr(pt2.y, oy1, offset)),
             (tr(-(pt2.y-pt1.y)+pt2.x, ox1, offset), tr(pt2.x-pt1.x+pt2.y, oy1, offset)),
             color, epaisseur)
    cv2.line(image,
             (tr(-(pt2.y-pt1.y)+pt2.x, ox1, offset), tr(pt2.x-pt1.x+pt2.y, oy1, offset)),
             (tr(-(pt2.y-pt1.y)+pt1.x, ox1, offset), tr(pt2.x-pt1.x+pt1.y, oy1, offset)),
             color, epaisseur)
    cv2.line(image,
             (tr(-(pt2.y-pt1.y)+pt1.x, ox1, offset), tr(pt2.x-pt1.x+pt1.y, oy1, offset)),
             (tr(pt1.x, ox1, offset), tr(pt1.y, oy1, offset)),
             color, epaisseur)

    # Centre of the back face: same square shifted by the pose offsets.
    ox2=int((-(pt2.y-pt1.y)+pt2.x-pt1.x)/2)+pt1.x+int(a2)
    oy2=int(((pt2.x-pt1.x+pt2.y)-pt1.y)/2)+pt1.y+int(a3)

    # Back face: four edges of the shifted, inflated square.
    cv2.line(image,
             (tr(pt1.x+a2, ox2, offset2), tr(pt1.y+a3, oy2, offset2)),
             (tr(pt2.x+a2, ox2, offset2), tr(pt2.y+a3, oy2, offset2)),
             color, epaisseur)
    cv2.line(image,
             (tr(pt2.x+a2, ox2, offset2), tr(pt2.y+a3, oy2, offset2)),
             (tr(-(pt2.y-pt1.y)+pt2.x+a2, ox2, offset2), tr(pt2.x-pt1.x+pt2.y+a3, oy2, offset2)),
             color, epaisseur)
    cv2.line(image,
             (tr(-(pt2.y-pt1.y)+pt2.x+a2, ox2, offset2), tr(pt2.x-pt1.x+pt2.y+a3, oy2, offset2)),
             (tr(-(pt2.y-pt1.y)+pt1.x+a2, ox2, offset2), tr(pt2.x-pt1.x+pt1.y+a3, oy2, offset2)),
             color, epaisseur)
    cv2.line(image,
             (tr(-(pt2.y-pt1.y)+pt1.x+a2, ox2, offset2), tr(pt2.x-pt1.x+pt1.y+a3, oy2, offset2)),
             (tr(pt1.x+a2, ox2, offset2), tr(pt1.y+a3, oy2, offset2)),
             color, epaisseur)

    # Connecting edges between the matching corners of the two faces.
    cv2.line(image,
             (tr(pt1.x, ox1, offset), tr(pt1.y, oy1, offset)),
             (tr(pt1.x+a2, ox2, offset2), tr(pt1.y+a3, oy2, offset2)),
             color, epaisseur)
    cv2.line(image,
             (tr(pt2.x, ox1, offset), tr(pt2.y, oy1, offset)),
             (tr(pt2.x+a2, ox2, offset2), tr(pt2.y+a3, oy2, offset2)),
             color, epaisseur)
    cv2.line(image,
             (tr(-(pt2.y-pt1.y)+pt2.x, ox1, offset), tr(pt2.x-pt1.x+pt2.y, oy1, offset)),
             (tr(-(pt2.y-pt1.y)+pt2.x+a2, ox2, offset2), tr(pt2.x-pt1.x+pt2.y+a3, oy2, offset2)),
             color, epaisseur)
    cv2.line(image,
             (tr(-(pt2.y-pt1.y)+pt1.x, ox1, offset), tr(pt2.x-pt1.x+pt1.y, oy1, offset)),
             (tr(-(pt2.y-pt1.y)+pt1.x+a2, ox2, offset2), tr(pt2.x-pt1.x+pt1.y+a3, oy2, offset2)),
             color, epaisseur)
|
||||
|
||||
# Main loop: detect faces, estimate head pose from three landmarks
# (eye corners 36/45 and nose tip 30) and draw the pose cube.
while True:
    ret, frame=cap.read()
    tickmark=cv2.getTickCount()
    gray=cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    faces=detector(gray)
    for face in faces:
        # NOTE(review): x1/y1/x2/y2 are computed but never used here.
        x1=face.left()
        y1=face.top()
        x2=face.right()
        y2=face.bottom()

        landmarks=predictor(gray, face)

        # Distances: between the outer eye corners, and from each eye
        # corner to the nose tip.  Their sum scales the pose estimates.
        d_eyes=math.sqrt(math.pow(landmarks.part(36).x-landmarks.part(45).x, 2)+math.pow(landmarks.part(36).y-landmarks.part(45).y, 2))
        d1=math.sqrt(math.pow(landmarks.part(36).x-landmarks.part(30).x, 2)+math.pow(landmarks.part(36).y-landmarks.part(30).y, 2))
        d2=math.sqrt(math.pow(landmarks.part(45).x-landmarks.part(30).x, 2)+math.pow(landmarks.part(45).y-landmarks.part(30).y, 2))
        coeff=d1+d2

        # a1: roll (eye-line tilt), a2: yaw (left/right), a3: pitch (up/down).
        a1=int(250*(landmarks.part(36).y-landmarks.part(45).y)/coeff)
        a2=int(250*(d1-d2)/coeff)
        # cos of the angle at landmark 45, clamped to 1 to keep acos valid.
        cosb=min((math.pow(d2, 2)-math.pow(d1, 2)+math.pow(d_eyes, 2))/(2*d2*d_eyes), 1)
        a3=int(250*(d2*math.sin(math.acos(cosb))-coeff/4)/coeff)

        cube(frame, landmarks.part(36), landmarks.part(45), a1, a2, a3)
    cv2.imshow("Frame", frame)

    key=cv2.waitKey(1)&0xFF
    if key==ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
|
||||
1
|
||||
BIN
Divers/tutoriel20/dlib-landmark-mean.png
Normal file
BIN
Divers/tutoriel20/dlib-landmark-mean.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 27 KiB |
66
Divers/tutoriel20/orientation.py
Normal file
66
Divers/tutoriel20/orientation.py
Normal file
@@ -0,0 +1,66 @@
|
||||
import cv2
|
||||
import numpy as np
|
||||
import dlib
|
||||
import math
|
||||
|
||||
cap=cv2.VideoCapture(0)   # default webcam

# dlib face detector and 68-point landmark model.
detector=dlib.get_frontal_face_detector()
predictor=dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")

# Main loop: estimate head orientation per face and overlay a description.
while True:
    ret, frame=cap.read()
    gray=cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    faces=detector(gray)
    # NOTE(review): detector() returns a (possibly empty) rectangles
    # container, never None — this guard is always True.
    if faces is not None:
        # Blank canvas for the landmark overlay (drawn but never shown here).
        i=np.zeros(shape=(frame.shape), dtype=np.uint8)
        for face in faces:
            landmarks=predictor(gray, face)

            # Distances between outer eye corners (36, 45) and nose tip (30).
            d_eyes=math.sqrt(math.pow(landmarks.part(36).x-landmarks.part(45).x, 2)+math.pow(landmarks.part(36).y-landmarks.part(45).y, 2))
            d1=math.sqrt(math.pow(landmarks.part(36).x-landmarks.part(30).x, 2)+math.pow(landmarks.part(36).y-landmarks.part(30).y, 2))
            d2=math.sqrt(math.pow(landmarks.part(45).x-landmarks.part(30).x, 2)+math.pow(landmarks.part(45).y-landmarks.part(30).y, 2))
            coeff=d1+d2

            # a1: roll, a2: yaw, a3: pitch (scaled integer estimates).
            a1=int(250*(landmarks.part(36).y-landmarks.part(45).y)/coeff)
            a2=int(250*(d1-d2)/coeff)
            # cos clamped to 1 to keep acos within its domain.
            cosb=min((math.pow(d2, 2)-math.pow(d1, 2)+math.pow(d_eyes, 2))/(2*d2*d_eyes), 1)
            a3=int(250*(d2*math.sin(math.acos(cosb))-coeff/4)/coeff)

            # Draw the 68 landmarks; the three pose landmarks in cyan.
            for n in range(0, 68):
                x=landmarks.part(n).x
                y=landmarks.part(n).y
                if n==30 or n==36 or n==45:
                    cv2.circle(i, (x, y), 3, (255, 255, 0), -1)
                else:
                    cv2.circle(i, (x, y), 3, (255, 0, 0), -1)
            print("{:+05d} {:+05d} {:+05d}".format(a1, a2, a3))
            # Build the textual description from thresholded estimates.
            flag=1
            txt="Laurent regarde "
            if a2<-40:
                txt+="a droite "
                flag=0
            if a2>40:
                txt+="a gauche "
                flag=0
            if a3<-10:
                txt+="en haut "
                flag=0
            if a3>10:
                txt+="en bas "
                flag=0
            if flag:
                txt+="la camera "
            if a1<-40:
                txt+="et incline la tete a gauche "
            if a1>40:
                txt+="et incline la tete a droite "
            cv2.putText(frame, txt, (10, 30), cv2.FONT_HERSHEY_PLAIN, 1.2, (255, 255, 255), 2)
    cv2.imshow("Frame", frame)
    key=cv2.waitKey(1)&0xFF
    if key==ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
|
||||
35
Divers/tutoriel20/visage_point.py
Normal file
35
Divers/tutoriel20/visage_point.py
Normal file
@@ -0,0 +1,35 @@
|
||||
import cv2
|
||||
import numpy as np
|
||||
import dlib
|
||||
import math
|
||||
|
||||
cap=cv2.VideoCapture(0)   # default webcam
detector=dlib.get_frontal_face_detector()
predictor=dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")

# Main loop: draw the 68 landmarks on the camera frame and, on a separate
# black canvas, highlight the three pose landmarks (30, 36, 45) in cyan.
while True:
    ret, frame=cap.read()
    tickmark=cv2.getTickCount()   # for the FPS estimate below
    gray=cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces=detector(gray)
    for face in faces:
        landmarks=predictor(gray, face)
        i=np.zeros(shape=(frame.shape), dtype=np.uint8)
        for n in range(0, 68):
            x=landmarks.part(n).x
            y=landmarks.part(n).y
            cv2.circle(frame, (x, y), 3, (255, 0, 0), -1)
            if n==30 or n==36 or n==45:
                cv2.circle(i, (x, y), 3, (255, 255, 0), -1)
            else:
                cv2.circle(i, (x, y), 3, (255, 0, 0), -1)
        cv2.imshow("i", i)
    fps=cv2.getTickFrequency()/(cv2.getTickCount()-tickmark)
    cv2.putText(frame, "FPS: {:05.2f}".format(fps), (10, 30), cv2.FONT_HERSHEY_PLAIN, 2, (255, 0, 0), 2)
    cv2.imshow("Frame", frame)
    key=cv2.waitKey(1)&0xFF
    if key==ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
||||
27
Divers/tutoriel20/visage_rectangle.py
Normal file
27
Divers/tutoriel20/visage_rectangle.py
Normal file
@@ -0,0 +1,27 @@
|
||||
import cv2
|
||||
import numpy as np
|
||||
import dlib
|
||||
|
||||
cap=cv2.VideoCapture(0)   # default webcam
detector=dlib.get_frontal_face_detector()

# Main loop: draw a bounding box around every detected face plus an
# FPS counter; 'q' quits.
while True:
    ret, frame=cap.read()
    tickmark=cv2.getTickCount()
    gray=cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces=detector(gray)
    for face in faces:
        x1=face.left()
        y1=face.top()
        x2=face.right()
        y2=face.bottom()
        cv2.rectangle(frame, (x1, y1), (x2, y2), (255, 0, 0), 2)
    fps=cv2.getTickFrequency()/(cv2.getTickCount()-tickmark)
    cv2.putText(frame, "FPS: {:05.2f}".format(fps), (10, 30), cv2.FONT_HERSHEY_PLAIN, 2, (255, 0, 0), 2)
    cv2.imshow("Frame", frame)
    key=cv2.waitKey(1)&0xFF
    if key==ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
|
||||
8
Divers/tutoriel25-2/README.md
Normal file
8
Divers/tutoriel25-2/README.md
Normal file
@@ -0,0 +1,8 @@
|
||||
# Tutoriel 25
|
||||
## Lecture des panneaux de limitation de vitesse
|
||||
|
||||
Les vidéos de ce tutoriel sont disponibles aux adresses suivantes:<br>
|
||||
partie 1: https://www.youtube.com/watch?v=PvD5POjXw8Q <br>
|
||||
partie 2: https://www.youtube.com/watch?v=TYDi0SNCUr0 <br>
|
||||
partie 3: https://www.youtube.com/watch?v=fBysd-Y17Tw
|
||||
|
||||
89
Divers/tutoriel25-2/common.py
Normal file
89
Divers/tutoriel25-2/common.py
Normal file
@@ -0,0 +1,89 @@
|
||||
import tensorflow as tf
|
||||
from tensorflow.keras import layers, models
|
||||
import os
|
||||
import cv2
|
||||
|
||||
size=42   # side (in px) of the square crops fed to both models
dir_images_panneaux="images_panneaux"                 # reference speed-limit signs (one png per class)
dir_images_autres_panneaux="images_autres_panneaux"   # other round signs (negatives for the binary model)
dir_images_sans_panneaux="images_sans_panneaux"       # random background crops (negatives)
|
||||
|
||||
def panneau_model(nbr_classes):
    # Keras CNN that classifies a (size, size, 3) crop into one of the
    # nbr_classes speed-limit signs (softmax output).
    model=tf.keras.Sequential()

    model.add(layers.Input(shape=(size, size, 3), dtype='float32'))

    # Two 128-filter conv blocks: conv -> dropout -> batch-norm -> relu.
    model.add(layers.Conv2D(128, 3, strides=1))
    model.add(layers.Dropout(0.2))
    model.add(layers.BatchNormalization())
    model.add(layers.Activation('relu'))

    model.add(layers.Conv2D(128, 3, strides=1))
    model.add(layers.Dropout(0.2))
    model.add(layers.BatchNormalization())
    model.add(layers.Activation('relu'))

    model.add(layers.MaxPool2D(pool_size=2, strides=2))

    # Two 256-filter conv blocks with increasing dropout.
    model.add(layers.Conv2D(256, 3, strides=1))
    model.add(layers.Dropout(0.3))
    model.add(layers.BatchNormalization())
    model.add(layers.Activation('relu'))

    model.add(layers.Conv2D(256, 3, strides=1))
    model.add(layers.Dropout(0.4))
    model.add(layers.BatchNormalization())
    model.add(layers.Activation('relu'))

    model.add(layers.MaxPool2D(pool_size=2, strides=2))

    # Dense head -> per-class probabilities.
    model.add(layers.Flatten())
    model.add(layers.Dense(512, activation='relu'))
    model.add(layers.Dropout(0.5))
    model.add(layers.BatchNormalization())
    model.add(layers.Dense(nbr_classes, activation='softmax'))

    return model
|
||||
|
||||
def is_panneau_model():
    # Keras binary CNN: sigmoid output giving the probability that a
    # (size, size, 3) crop contains a speed-limit sign.
    model=tf.keras.Sequential()

    model.add(layers.Input(shape=(size, size, 3), dtype='float32'))

    # conv(64) -> batch-norm -> relu -> 2x2 max-pool
    model.add(layers.Conv2D(64, 3, strides=1))
    model.add(layers.BatchNormalization())
    model.add(layers.Activation('relu'))
    model.add(layers.MaxPool2D(pool_size=2, strides=2))

    # conv(128) -> batch-norm -> relu -> 2x2 max-pool
    model.add(layers.Conv2D(128, 3, strides=1))
    model.add(layers.BatchNormalization())
    model.add(layers.Activation('relu'))
    model.add(layers.MaxPool2D(pool_size=2, strides=2))

    # Dense head with a single sigmoid unit.
    model.add(layers.Flatten())
    model.add(layers.Dense(256, activation='relu'))
    model.add(layers.BatchNormalization())
    model.add(layers.Dense(1, activation='sigmoid'))

    return model
|
||||
|
||||
def lire_images_panneaux(dir_images_panneaux, size=None):
    """Load the reference sign images from a directory.

    Every "*.png" file (sorted by name) yields one class: the label is the
    file name without extension, the image is read with OpenCV and, when
    *size* is given, resized to (size, size).

    Returns (tab_panneau, tab_image_panneau): the labels and the matching
    BGR images, in the same order.  Exits the program (quit) when the
    directory is missing or empty, as the calling scripts expect.
    """
    tab_panneau = []
    tab_image_panneau = []

    if not os.path.exists(dir_images_panneaux):
        quit("Le repertoire d'image n'existe pas: {}".format(dir_images_panneaux))

    files = os.listdir(dir_images_panneaux)
    # BUG FIX: os.listdir never returns None — an empty directory returns
    # [], which the original "is None" test let slip through.
    if not files:
        quit("Le repertoire d'image est vide: {}".format(dir_images_panneaux))

    for file in sorted(files):
        if file.endswith("png"):
            tab_panneau.append(file.split(".")[0])
            image = cv2.imread(os.path.join(dir_images_panneaux, file))
            if size is not None:
                # BUG FIX: the interpolation flag was passed positionally,
                # where cv2.resize expects the `dst` argument — it must be
                # given as the `interpolation` keyword.
                image = cv2.resize(image, (size, size), interpolation=cv2.INTER_LANCZOS4)
            tab_image_panneau.append(image)

    return tab_panneau, tab_image_panneau
|
||||
82
Divers/tutoriel25-2/dataset.py
Normal file
82
Divers/tutoriel25-2/dataset.py
Normal file
@@ -0,0 +1,82 @@
|
||||
import numpy as np
|
||||
import cv2
|
||||
from multiprocessing import Pool
|
||||
import multiprocessing
|
||||
import random
|
||||
|
||||
def bruit(image_orig):
    """Return a uint8 copy of the image with additive Gaussian noise.

    The noise standard deviation is drawn uniformly in [5, 30]; the noisy
    result is clipped back into the valid [0, 255] pixel range.
    """
    hauteur, largeur, canaux = image_orig.shape
    amplitude = random.randint(5, 30)
    gauss = np.random.randn(hauteur, largeur, canaux) * amplitude
    bruitee = np.clip(image_orig + gauss, 0, 255)
    return bruitee.astype(np.uint8)
|
||||
|
||||
def change_gamma(image, alpha=1.0, beta=0.0):
    """Apply the linear brightness/contrast map alpha*pixel + beta.

    The result is clipped to [0, 255] and returned as uint8; the input
    array is not modified.
    """
    ajuste = image * alpha + beta
    return np.clip(ajuste, 0, 255).astype(np.uint8)
|
||||
|
||||
def modif_img(img):
    # Random data-augmentation pipeline for one sign image (uint8 BGR).
    # Each step fires with its own probability; the RNG call order is part
    # of the behavior, so keep it unchanged.
    h, w, c=img.shape

    # Recolor the template's placeholder gray (142,142,142) with a random
    # color — presumably the background fill of the reference pngs; verify
    # against the images in dir_images_panneaux.
    r_color=[np.random.randint(255), np.random.randint(255), np.random.randint(255)]
    img=np.where(img==[142, 142, 142], r_color, img).astype(np.uint8)

    # 2/3 chance: Gaussian blur with a random odd kernel (1, 3 or 5).
    if np.random.randint(3):
        k_max=3
        kernel_blur=np.random.randint(k_max)*2+1
        img=cv2.GaussianBlur(img, (kernel_blur, kernel_blur), 0)

    # Always: rotation of +/-10 degrees around the image centre.
    M=cv2.getRotationMatrix2D((int(w/2), int(h/2)), random.randint(-10, 10), 1)
    img=cv2.warpAffine(img, M, (w, h))

    # 1/2 chance: random perspective jitter of the four corners.
    if np.random.randint(2):
        a=int(max(w, h)/5)+1
        pts1=np.float32([[0, 0], [w, 0], [0, h], [w, h]])
        pts2=np.float32([[0+random.randint(-a, a), 0+random.randint(-a, a)], [w-random.randint(-a, a), 0+random.randint(-a, a)], [0+random.randint(-a, a), h-random.randint(-a, a)], [w-random.randint(-a, a), h-random.randint(-a, a)]])
        M=cv2.getPerspectiveTransform(pts1,pts2)
        img=cv2.warpPerspective(img, M, (w, h))

    # 1/2 chance: crop to 90% anchored at one of the four corners
    # (r in 4..5 leaves the image uncropped), then resize back.
    # NOTE(review): rows are indexed with w2 and columns with h2 — harmless
    # for the square inputs used here, but worth confirming.
    if np.random.randint(2):
        r=random.randint(0, 5)
        h2=int(h*0.9)
        w2=int(w*0.9)
        if r==0:
            img=img[0:w2, 0:h2]
        elif r==1:
            img=img[w-w2:w, 0:h2]
        elif r==2:
            img=img[0:w2, h-h2:h]
        elif r==3:
            img=img[w-w2:w, h-h2:h]
        img=cv2.resize(img, (h, w))

    # 1/2 chance: shave a random border (up to 15%) and resize back.
    if np.random.randint(2):
        r=random.randint(1, int(max(w, h)*0.15))
        img=img[r:w-r, r:h-r]
        img=cv2.resize(img, (h, w))

    # 1/4 chance: multiply by a linear luminance gradient rotated by a
    # random multiple of 90 degrees (simulates uneven lighting).
    # NOTE(review): the triple Python loop fills a constant-per-row ramp
    # and is very slow; equivalent to a broadcasted arange — left as-is.
    if not np.random.randint(4):
        t=np.empty((h, w, c) , dtype=np.float32)
        for i in range(h):
            for j in range(w):
                for k in range(c):
                    t[i][j][k]=(i/h)
        M=cv2.getRotationMatrix2D((int(w/2), int(h/2)), np.random.randint(4)*90, 1)
        t=cv2.warpAffine(t, M, (w, h))
        img=(cv2.multiply((img/255).astype(np.float32), t)*255).astype(np.uint8)

    # Always: random darkening (gamma in [0.6, 1] and negative offset).
    img=change_gamma(img, random.uniform(0.6, 1.0), -np.random.randint(50))

    # 1/4 chance: crush the contrast toward gray and add a brightness
    # offset.  NOTE(review): the uint8 addition can wrap modulo 256 —
    # presumably acceptable as extra noise; confirm it is intended.
    if not np.random.randint(4):
        p=(15+np.random.randint(10))/100
        img=(img*p+50*(1-p)).astype(np.uint8)+np.random.randint(100)

    # Always: final additive Gaussian noise.
    img=bruit(img)

    return img
|
||||
|
||||
def create_lot_img(image, nbr, nbr_thread=None):
    """Generate *nbr* randomly augmented variants of *image*.

    The augmentation (modif_img) is fanned out over a multiprocessing pool
    of *nbr_thread* workers — despite the name these are processes, not
    threads — defaulting to the machine's CPU count.  Returns the list of
    augmented images.
    """
    workers = multiprocessing.cpu_count() if nbr_thread is None else nbr_thread
    copies = np.repeat([image], nbr, axis=0)
    with Pool(workers) as pool:
        variantes = pool.map(modif_img, copies)
        pool.close()
    return variantes
|
||||
40
Divers/tutoriel25-2/genere_fond.py
Normal file
40
Divers/tutoriel25-2/genere_fond.py
Normal file
@@ -0,0 +1,40 @@
|
||||
import cv2
|
||||
import numpy as np
|
||||
import random
|
||||
import os
|
||||
import common
|
||||
|
||||
video="videos/France_Motorway.mp4"   # source dashcam footage

# Ensure the output directory for background (negative) crops exists.
if not os.path.isdir(common.dir_images_sans_panneaux):
    os.mkdir(common.dir_images_sans_panneaux)

if not os.path.exists(video):
    print("Vidéo non présente:", video)
    quit()

cap=cv2.VideoCapture(video)

# NOTE(review): `id` shadows the builtin; kept unchanged here.
id=0
nbr_image=100000   # total number of crops to produce

# Spread the quota evenly over the video's frames.  The hard-coded 100000
# duplicates nbr_image — keep the two values in sync.
nbr_image_par_frame=int(100000/cap.get(cv2.CAP_PROP_FRAME_COUNT))+1

while True:
    ret, frame=cap.read()
    if ret is False:
        quit()   # end of video: stop the whole script
    h, w, c=frame.shape

    # Save nbr_image_par_frame random size x size crops from this frame.
    for cpt in range(nbr_image_par_frame):
        x=random.randint(0, w-common.size)
        y=random.randint(0, h-common.size)
        img=frame[y:y+common.size, x:x+common.size]
        cv2.imwrite(common.dir_images_sans_panneaux+"/{:d}.png".format(id), img)
        id+=1
        if id==nbr_image:
            quit()   # quota reached
|
||||
|
||||
|
||||
|
||||
|
||||
29
Divers/tutoriel25-2/genere_panneaux.py
Normal file
29
Divers/tutoriel25-2/genere_panneaux.py
Normal file
@@ -0,0 +1,29 @@
|
||||
import numpy as np
|
||||
from sklearn.utils import shuffle
|
||||
import cv2
|
||||
import common
|
||||
import dataset
|
||||
|
||||
# Load the reference signs, then preview 1000 random augmentations of each
# so the dataset can be inspected visually ('q' quits).
tab_panneau, tab_image_panneau=common.lire_images_panneaux(common.dir_images_panneaux, common.size)

tab_images=np.array([]).reshape(0, common.size, common.size, 3)
tab_labels=[]

# NOTE(review): `id` shadows the builtin; kept unchanged here.
id=0
for image in tab_image_panneau:
    # 1000 augmented variants per class, all labelled with the class index.
    lot=dataset.create_lot_img(image, 1000)
    tab_images=np.concatenate([tab_images, lot])
    tab_labels=np.concatenate([tab_labels, np.full(len(lot), id)])
    id+=1

tab_panneau=np.array(tab_panneau)
tab_images=np.array(tab_images, dtype=np.float32)/255   # normalize to [0, 1]
tab_labels=np.array(tab_labels).reshape([-1, 1])

tab_images, tab_labels=shuffle(tab_images, tab_labels)

# Interactive preview of the shuffled dataset.
for i in range(len(tab_images)):
    cv2.imshow("panneau", tab_images[i])
    print("label", tab_labels[i], "panneau", tab_panneau[int(tab_labels[i])])
    if cv2.waitKey()&0xFF==ord('q'):
        quit()
|
||||
36
Divers/tutoriel25-2/houghcircles.py
Normal file
36
Divers/tutoriel25-2/houghcircles.py
Normal file
@@ -0,0 +1,36 @@
|
||||
import cv2
|
||||
import numpy as np
|
||||
|
||||
# Interactive HoughCircles tuner on the webcam feed: detected circles are
# drawn in green; the i/k, o/l, p/m keys adjust the parameters live.
param1=30   # Canny high threshold used by HoughCircles
param2=55   # accumulator threshold (lower -> more circles)
dp=1.0      # inverse accumulator resolution ratio

cap=cv2.VideoCapture(0)

while True:
    ret, frame=cap.read()
    gray=cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    circles=cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, dp, 20, param1=param1, param2=param2, minRadius=10, maxRadius=50)
    if circles is not None:
        circles=np.around(circles).astype(np.int32)
        # Each detection is (x, y, radius); skip degenerate zero radii.
        for i in circles[0, :]:
            if i[2]!=0:
                cv2.circle(frame, (i[0], i[1]), i[2], (0, 255, 0), 4)
    cv2.putText(frame, "[i|k]dp: {:4.2f} [o|l]param1: {:d} [p|m]param2: {:d}".format(dp, param1, param2), (10, 40), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (255, 0, 0), 1)
    cv2.imshow("Video", frame)
    key=cv2.waitKey(1)&0xFF
    if key==ord('q'):
        quit()
    # Clamped parameter adjustments.
    if key==ord('i'):
        dp=min(10, dp+0.1)
    if key==ord('k'):
        dp=max(0.1, dp-0.1)
    if key==ord('o'):
        param1=min(255, param1+1)
    if key==ord('l'):
        param1=max(1, param1-1)
    if key==ord('p'):
        param2=min(255, param2+1)
    if key==ord('m'):
        param2=max(1, param2-1)
# NOTE(review): unreachable — the loop only exits via quit() above.
cv2.destroyAllWindows()
|
||||
70
Divers/tutoriel25-2/lire_panneau.py
Normal file
70
Divers/tutoriel25-2/lire_panneau.py
Normal file
@@ -0,0 +1,70 @@
|
||||
import tensorflow as tf
|
||||
import cv2
|
||||
import os
|
||||
import numpy as np
|
||||
import random
|
||||
import common
|
||||
|
||||
th1=30   # HoughCircles param1 (Canny high threshold)
th2=55   # HoughCircles param2 (accumulator threshold)

video_dir="dashcam Cedric"   # directory of dashcam mp4 clips

# Reference sign images (full resolution) and their labels.
tab_panneau, tab_image_panneau=common.lire_images_panneaux(common.dir_images_panneaux)

# Binary "is it a sign?" model, restored from its checkpoint.
model_is_panneau=common.is_panneau_model()
checkpoint=tf.train.Checkpoint(model_is_panneau=model_is_panneau)
checkpoint.restore(tf.train.latest_checkpoint("./training_is_panneau/"))

# Multi-class sign classifier, restored from its checkpoint.
model_panneau=common.panneau_model(len(tab_panneau))
checkpoint=tf.train.Checkpoint(model_panneau=model_panneau)
checkpoint.restore(tf.train.latest_checkpoint("./training_panneau/"))

l=os.listdir(video_dir)
random.shuffle(l)   # play the clips in a random order

for video in l:
    if not video.endswith("mp4"):
        continue
    cap=cv2.VideoCapture(video_dir+"/"+video)

    print("video:", video)
    id_panneau=-1   # index of the last recognized sign (-1 = none yet)
    while True:
        ret, frame=cap.read()
        if ret is False:
            break
        f_w, f_h, f_c=frame.shape
        frame=cv2.resize(frame, (int(f_h/1.5), int(f_w/1.5)))

        # Region of interest where signs are expected (right of the road).
        image=frame[200:400, 700:1000]
        cv2.rectangle(frame, (700, 200), (1000, 400), (255, 255, 255), 1)
        gray=cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        circles=cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, 1, 20, param1=th1, param2=th2, minRadius=5, maxRadius=45)
        if circles is not None:
            circles=np.int16(np.around(circles))
            for i in circles[0,:]:
                if i[2]!=0:
                    # Square crop around the circle, resized and normalized
                    # for the models.
                    panneau=cv2.resize(image[max(0, i[1]-i[2]):i[1]+i[2], max(0, i[0]-i[2]):i[0]+i[2]], (common.size, common.size))/255
                    cv2.imshow("panneau", panneau)
                    prediction=model_is_panneau(np.array([panneau]), training=False)
                    print("prediction", prediction)
                    # Only classify crops confidently flagged as signs.
                    if prediction[0][0]>0.9:
                        prediction=model_panneau(np.array([panneau]), training=False)
                        id_panneau=np.argmax(prediction[0])
                        print("panneau", prediction, id_panneau, tab_panneau[id_panneau])
                        w, h, c=tab_image_panneau[id_panneau].shape
        # Overlay the last recognized sign in the top-left corner.
        # (w, h, c are only defined once a sign has been recognized; the
        # id_panneau guard covers that.)
        if id_panneau!=-1:
            frame[0:h, 0:w, :]=tab_image_panneau[id_panneau]
        cv2.putText(frame, "fichier:"+video, (30, 30), cv2.FONT_HERSHEY_DUPLEX, 1, (0, 255, 0), 1, cv2.LINE_AA)
        cv2.imshow("Video", frame)
        key=cv2.waitKey(1)&0xFF
        if key==ord('q'):
            quit()
        if key==ord('a'):
            # Skip ahead 100 frames.
            for cpt in range(100):
                ret, frame=cap.read()
        if key==ord('f'):
            break   # next video

cv2.destroyAllWindows()
|
||||
135
Divers/tutoriel25-2/train_is_panneau.py
Normal file
135
Divers/tutoriel25-2/train_is_panneau.py
Normal file
@@ -0,0 +1,135 @@
|
||||
import tensorflow as tf
|
||||
from tensorflow.keras import layers, models
|
||||
import numpy as np
|
||||
from sklearn.utils import shuffle
|
||||
from sklearn.model_selection import train_test_split
|
||||
import cv2
|
||||
import os
|
||||
import common
|
||||
import time
|
||||
import dataset
|
||||
|
||||
# Hyper-parameters and dataset construction for the binary classifier
# "is this crop a speed-limit sign?" (label 1 = sign, label 0 = anything else).
batch_size=64
nbr_entrainement=20

# Accumulator of images (N, size, size, 3), empty to start with.
tab_images=np.array([]).reshape(0, common.size, common.size, 3)

tab_panneau, tab_image_panneau=common.lire_images_panneaux(common.dir_images_panneaux, common.size)

if not os.path.exists(common.dir_images_autres_panneaux):
    quit("Le repertoire d'image n'existe pas: {}".format(common.dir_images_autres_panneaux))

if not os.path.exists(common.dir_images_sans_panneaux):
    # FIX: the format string was missing its {} placeholder, so the path
    # never appeared in the message.
    quit("Le repertoire d'image n'existe pas: {}".format(common.dir_images_sans_panneaux))

# Positive class (label 1): augmented variants of each reference sign.
nbr=0
for image in tab_image_panneau:
    lot=dataset.create_lot_img(image, 12000)
    tab_images=np.concatenate([tab_images, lot])
    nbr+=len(lot)

tab_labels=np.full(nbr, 1)

print("Image panneaux:", nbr)

# Negative class (label 0), part 1: other kinds of signs.
files=os.listdir(common.dir_images_autres_panneaux)
# FIX: os.listdir never returns None; test for an empty listing instead.
if not files:
    # FIX: missing {} placeholder here as well.
    quit("Le repertoire d'image est vide: {}".format(common.dir_images_autres_panneaux))

nbr=0
for file in files:
    if file.endswith("png"):
        path=os.path.join(common.dir_images_autres_panneaux, file)
        # FIX: cv2.resize's third positional argument is `dst`, not the
        # interpolation flag; pass it by keyword.
        image=cv2.resize(cv2.imread(path), (common.size, common.size), interpolation=cv2.INTER_LANCZOS4)
        lot=dataset.create_lot_img(image, 700)
        tab_images=np.concatenate([tab_images, lot])
        nbr+=len(lot)

tab_labels=np.concatenate([tab_labels, np.full(nbr, 0)])

print("Image autres panneaux:", nbr)

# Negative class, part 2: background crops, at most half of the current total.
nbr_np=int(len(tab_images)/2)
print("nbr_np", nbr_np)

id=1
nbr=0
tab=[]
for cpt in range(nbr_np):
    file=common.dir_images_sans_panneaux+"/{:d}.png".format(id)
    if not os.path.isfile(file):
        break
    image=cv2.resize(cv2.imread(file), (common.size, common.size))
    tab.append(image)
    id+=1
    nbr+=1

tab_images=np.concatenate([tab_images, tab])
tab_labels=np.concatenate([tab_labels, np.full(nbr, 0)])
print("Image sans panneaux:", nbr)

# Normalise to [0, 1]; labels as a column for BinaryCrossentropy.
tab_images=np.array(tab_images, dtype=np.float32)/255
tab_labels=np.array(tab_labels, dtype=np.float32).reshape([-1, 1])

tab_images, tab_labels=shuffle(tab_images, tab_labels)
train_images, test_images, train_labels, test_labels=train_test_split(tab_images, tab_labels, test_size=0.10)

train_ds=tf.data.Dataset.from_tensor_slices((train_images, train_labels)).batch(batch_size)
test_ds=tf.data.Dataset.from_tensor_slices((test_images, test_labels)).batch(batch_size)

print("train_images", len(train_images))
print("test_images", len(test_images))
# FIX: the old expression counted labels == 0 (the NON-sign class) while the
# message says "nbr panneau"; count the positive class instead.
print("nbr panneau", len(np.where(train_labels==1.)[0]), train_labels.shape)
|
||||
|
||||
@tf.function
def train_step(images, labels):
    """One optimisation step: forward pass, loss, backprop, metric update."""
    with tf.GradientTape() as tape:
        outputs=model_is_panneau(images)
        step_loss=loss_object(labels, outputs)
    grads=tape.gradient(step_loss, model_is_panneau.trainable_variables)
    optimizer.apply_gradients(zip(grads, model_is_panneau.trainable_variables))
    train_loss(step_loss)
    train_accuracy(labels, outputs)
|
||||
|
||||
def train(train_ds, nbr_entrainement):
    """Run nbr_entrainement epochs over train_ds, printing loss/accuracy and
    evaluating on the test set after each epoch."""
    for epoch in range(nbr_entrainement):
        t0=time.time()
        for images, labels in train_ds:
            train_step(images, labels)
        print('Entrainement {:04d}, loss: {:6.4f}, accuracy: {:7.4f}%, temps: {:7.4f}'.format(
            epoch+1, train_loss.result(), train_accuracy.result()*100, time.time()-t0))
        train_loss.reset_states()
        train_accuracy.reset_states()
        test(test_ds)
|
||||
|
||||
def test(test_ds):
    """Evaluate the model over test_ds and report average loss/accuracy."""
    t0=time.time()
    for test_images, test_labels in test_ds:
        outputs=model_is_panneau(test_images)
        test_loss(loss_object(test_labels, outputs))
        test_accuracy(test_labels, outputs)
    print(' >>> Test: loss: {:6.4f}, accuracy: {:7.4f}%, temps: {:7.4f}'.format(
        test_loss.result(), test_accuracy.result()*100, time.time()-t0))
    test_loss.reset_states()
    test_accuracy.reset_states()
|
||||
|
||||
# Optimiser, loss and running metrics for the binary "is it a sign?" classifier.
optimizer=tf.keras.optimizers.Adam()
loss_object=tf.keras.losses.BinaryCrossentropy()
train_loss=tf.keras.metrics.Mean()
train_accuracy=tf.keras.metrics.BinaryAccuracy()
test_loss=tf.keras.metrics.Mean()
test_accuracy=tf.keras.metrics.BinaryAccuracy()
model_is_panneau=common.is_panneau_model()
checkpoint=tf.train.Checkpoint(model_is_panneau=model_is_panneau)

print("Entrainement")
train(train_ds, nbr_entrainement)
test(test_ds)

# Persist the trained weights so the detection script can restore them.
checkpoint.save(file_prefix="./training_is_panneau/is_panneau")
|
||||
95
Divers/tutoriel25-2/train_panneau.py
Normal file
95
Divers/tutoriel25-2/train_panneau.py
Normal file
@@ -0,0 +1,95 @@
|
||||
import tensorflow as tf
|
||||
from tensorflow.keras import layers, models
|
||||
import numpy as np
|
||||
from sklearn.utils import shuffle
|
||||
from sklearn.model_selection import train_test_split
|
||||
import cv2
|
||||
import os
|
||||
import time
|
||||
import common
|
||||
import dataset
|
||||
|
||||
# Hyper-parameters.
batch_size=128
nbr_entrainement=20

# Images (N, size, size, 3) and one integer class id per image.
tab_images=np.array([]).reshape(0, common.size, common.size, 3)
tab_labels=[]

tab_panneau, tab_image_panneau=common.lire_images_panneaux(common.dir_images_panneaux, common.size)

# 12000 augmented variants per reference sign, labelled by its index.
id=0
for image in tab_image_panneau:
    lot=dataset.create_lot_img(image, 12000)
    tab_images=np.concatenate((tab_images, lot))
    tab_labels=np.concatenate([tab_labels, np.full(len(lot), id)])
    id+=1

# Normalise to [0, 1]; labels as a column for the sparse categorical loss.
tab_panneau=np.array(tab_panneau)
tab_images=np.array(tab_images, dtype=np.float32)/255
tab_labels=np.array(tab_labels, dtype=np.float32).reshape([-1, 1])

train_images, test_images, train_labels, test_labels=train_test_split(tab_images, tab_labels, test_size=0.10)

train_ds=tf.data.Dataset.from_tensor_slices((train_images, train_labels)).batch(batch_size)
test_ds=tf.data.Dataset.from_tensor_slices((test_images, test_labels)).batch(batch_size)

print("train_images", len(train_images))
print("test_images", len(test_images))
|
||||
|
||||
@tf.function
def train_step(images, labels):
    """One optimisation step: forward pass, loss, backprop, metric update."""
    with tf.GradientTape() as tape:
        outputs=model_panneau(images)
        step_loss=loss_object(labels, outputs)
    grads=tape.gradient(step_loss, model_panneau.trainable_variables)
    optimizer.apply_gradients(zip(grads, model_panneau.trainable_variables))
    train_loss(step_loss)
    train_accuracy(labels, outputs)
|
||||
|
||||
def train(train_ds, nbr_entrainement):
    """Run nbr_entrainement epochs over train_ds, printing loss/accuracy and
    evaluating on the test set after each epoch."""
    for epoch in range(nbr_entrainement):
        t0=time.time()
        for images, labels in train_ds:
            train_step(images, labels)
        print('Entrainement {:04d}: loss: {:6.4f}, accuracy: {:7.4f}%, temps: {:7.4f}'.format(
            epoch+1, train_loss.result(), train_accuracy.result()*100, time.time()-t0))
        train_loss.reset_states()
        train_accuracy.reset_states()
        test(test_ds)
|
||||
|
||||
def test(test_ds):
    """Evaluate the model over test_ds and report average loss/accuracy."""
    t0=time.time()
    for test_images, test_labels in test_ds:
        outputs=model_panneau(test_images)
        test_loss(loss_object(test_labels, outputs))
        test_accuracy(test_labels, outputs)
    print(' >>> Test: loss: {:6.4f}, accuracy: {:7.4f}%, temps: {:7.4f}'.format(
        test_loss.result(), test_accuracy.result()*100, time.time()-t0))
    test_loss.reset_states()
    test_accuracy.reset_states()
|
||||
|
||||
# Optimiser, loss and metrics: sparse categorical (one integer class id per image).
optimizer=tf.keras.optimizers.Adam()
loss_object=tf.keras.losses.SparseCategoricalCrossentropy()
train_loss=tf.keras.metrics.Mean()
train_accuracy=tf.keras.metrics.SparseCategoricalAccuracy()
test_loss=tf.keras.metrics.Mean()
test_accuracy=tf.keras.metrics.SparseCategoricalAccuracy()
model_panneau=common.panneau_model(len(tab_panneau))
checkpoint=tf.train.Checkpoint(model_panneau=model_panneau)

print("Entrainement")
train(train_ds, nbr_entrainement)
checkpoint.save(file_prefix="./training_panneau/panneau")

# Interactive review of the test set: show each image with its predicted
# class until 'q' is pressed.
for i in range(len(test_images)):
    prediction=model_panneau(np.array([test_images[i]]))
    print("prediction", prediction, tab_panneau[np.argmax(prediction[0])])
    cv2.imshow("image", test_images[i])
    if cv2.waitKey()&0xFF==ord('q'):
        break
|
||||
8
Divers/tutoriel25-3/README.md
Normal file
8
Divers/tutoriel25-3/README.md
Normal file
@@ -0,0 +1,8 @@
|
||||
# Tutoriel 25
|
||||
## Lecture des panneaux de limitation de vitesse
|
||||
|
||||
Les vidéos de ce tutoriel sont disponibles aux adresses suivantes:<br>
|
||||
partie 1: https://www.youtube.com/watch?v=PvD5POjXw8Q <br>
|
||||
partie 2: https://www.youtube.com/watch?v=TYDi0SNCUr0 <br>
|
||||
partie 3: https://www.youtube.com/watch?v=fBysd-Y17Tw
|
||||
|
||||
67
Divers/tutoriel25-3/common.py
Normal file
67
Divers/tutoriel25-3/common.py
Normal file
@@ -0,0 +1,67 @@
|
||||
import tensorflow as tf
|
||||
from tensorflow.keras import layers, models
|
||||
import os
|
||||
import cv2
|
||||
|
||||
size=42
|
||||
dir_images_panneaux="images_panneaux"
|
||||
dir_images_autres_panneaux="images_autres_panneaux"
|
||||
dir_images_sans_panneaux="images_sans_panneaux"
|
||||
|
||||
def panneau_model(nbr_classes):
    """Speed-limit-sign classifier: two 128-filter conv blocks, pooling, two
    256-filter conv blocks, pooling, then a dense head.

    The output layer is 'sigmoid' (one independent score per class), not
    softmax: each class acts as its own binary detection.
    """
    def conv_block(filters, drop):
        # Conv -> Dropout -> BatchNorm -> ReLU, matching the original layout.
        return [layers.Conv2D(filters, 3, strides=1),
                layers.Dropout(drop),
                layers.BatchNormalization(),
                layers.Activation('relu')]

    stack=[layers.Input(shape=(size, size, 3), dtype='float32')]
    stack+=conv_block(128, 0.2)
    stack+=conv_block(128, 0.2)
    stack.append(layers.MaxPool2D(pool_size=2, strides=2))
    stack+=conv_block(256, 0.3)
    stack+=conv_block(256, 0.4)
    stack.append(layers.MaxPool2D(pool_size=2, strides=2))
    stack+=[layers.Flatten(),
            layers.Dense(512, activation='relu'),
            layers.Dropout(0.5),
            layers.BatchNormalization(),
            layers.Dense(nbr_classes, activation='sigmoid')]
    return tf.keras.Sequential(stack)
|
||||
|
||||
def lire_images_panneaux(dir_images_panneaux, size=None):
    """Load the reference sign images from a directory.

    Args:
        dir_images_panneaux: directory holding one ``<label>.png`` per sign.
        size: optional edge length; when given, images are resized to (size, size).

    Returns:
        (tab_panneau, tab_image_panneau): parallel lists of labels (file name
        without extension) and BGR images, in sorted file-name order.
    """
    tab_panneau=[]
    tab_image_panneau=[]

    if not os.path.exists(dir_images_panneaux):
        quit("Le repertoire d'image n'existe pas: {}".format(dir_images_panneaux))

    files=os.listdir(dir_images_panneaux)
    # FIX: os.listdir never returns None; test for an empty listing instead.
    if not files:
        quit("Le repertoire d'image est vide: {}".format(dir_images_panneaux))

    for file in sorted(files):
        if file.endswith("png"):
            tab_panneau.append(file.split(".")[0])
            image=cv2.imread(dir_images_panneaux+"/"+file)
            if size is not None:
                # FIX: cv2.resize's third positional argument is `dst`, not
                # the interpolation flag; pass it by keyword.
                image=cv2.resize(image, (size, size), interpolation=cv2.INTER_LANCZOS4)
            tab_image_panneau.append(image)

    return tab_panneau, tab_image_panneau
|
||||
82
Divers/tutoriel25-3/dataset.py
Normal file
82
Divers/tutoriel25-3/dataset.py
Normal file
@@ -0,0 +1,82 @@
|
||||
import numpy as np
|
||||
import cv2
|
||||
from multiprocessing import Pool
|
||||
import multiprocessing
|
||||
import random
|
||||
|
||||
def bruit(image_orig):
    """Add Gaussian noise of random strength; returns uint8 values in [0, 255]."""
    h, w, c=image_orig.shape
    amplitude=random.randint(5, 30)
    noisy=image_orig+np.random.randn(h, w, c)*amplitude
    return np.clip(noisy, 0, 255).astype(np.uint8)
|
||||
|
||||
def change_gamma(image, alpha=1.0, beta=0.0):
    """Apply the affine brightness/contrast map alpha*image+beta, clipped to uint8."""
    adjusted=image*alpha+beta
    return np.clip(adjusted, 0, 255).astype(np.uint8)
|
||||
|
||||
def modif_img(img):
    """Return a randomly augmented copy of a sign image.

    Applies, each with some probability: background recolor, Gaussian blur,
    small rotation, perspective warp, corner/border crops, an oriented
    luminance ramp, gamma change, contrast flattening and noise.
    """
    h, w, c=img.shape

    # Replace the flat grey background (exact value 142,142,142) by a random color.
    r_color=[np.random.randint(255), np.random.randint(255), np.random.randint(255)]
    img=np.where(img==[142, 142, 142], r_color, img).astype(np.uint8)

    # 2 chances in 3: Gaussian blur with a random odd kernel (1, 3 or 5).
    if np.random.randint(3):
        k_max=3
        kernel_blur=np.random.randint(k_max)*2+1
        img=cv2.GaussianBlur(img, (kernel_blur, kernel_blur), 0)

    # Small random rotation around the image center (-10..10 degrees).
    M=cv2.getRotationMatrix2D((int(w/2), int(h/2)), random.randint(-10, 10), 1)
    img=cv2.warpAffine(img, M, (w, h))

    # 1 chance in 2: random perspective distortion of the four corners.
    if np.random.randint(2):
        a=int(max(w, h)/5)+1
        pts1=np.float32([[0, 0], [w, 0], [0, h], [w, h]])
        pts2=np.float32([[0+random.randint(-a, a), 0+random.randint(-a, a)], [w-random.randint(-a, a), 0+random.randint(-a, a)], [0+random.randint(-a, a), h-random.randint(-a, a)], [w-random.randint(-a, a), h-random.randint(-a, a)]])
        M=cv2.getPerspectiveTransform(pts1,pts2)
        img=cv2.warpPerspective(img, M, (w, h))

    # 1 chance in 2: crop a corner (r in 0..3; r in 4..5 keeps the full image)
    # then resize back.
    # NOTE(review): rows are sliced with w2 and columns with h2 — axes look
    # swapped. Harmless when inputs are square (h == w); confirm callers.
    if np.random.randint(2):
        r=random.randint(0, 5)
        h2=int(h*0.9)
        w2=int(w*0.9)
        if r==0:
            img=img[0:w2, 0:h2]
        elif r==1:
            img=img[w-w2:w, 0:h2]
        elif r==2:
            img=img[0:w2, h-h2:h]
        elif r==3:
            img=img[w-w2:w, h-h2:h]
        img=cv2.resize(img, (h, w))

    # 1 chance in 2: symmetric border crop (zoom) then resize back.
    if np.random.randint(2):
        r=random.randint(1, int(max(w, h)*0.15))
        img=img[r:w-r, r:h-r]
        img=cv2.resize(img, (h, w))

    # 1 chance in 4: multiply by a vertical luminance ramp rotated by a random
    # multiple of 90 degrees (simulates uneven lighting).
    if not np.random.randint(4):
        t=np.empty((h, w, c) , dtype=np.float32)
        for i in range(h):
            for j in range(w):
                for k in range(c):
                    t[i][j][k]=(i/h)
        M=cv2.getRotationMatrix2D((int(w/2), int(h/2)), np.random.randint(4)*90, 1)
        t=cv2.warpAffine(t, M, (w, h))
        img=(cv2.multiply((img/255).astype(np.float32), t)*255).astype(np.uint8)

    # Random darkening: gamma in [0.6, 1.0] and a negative offset.
    img=change_gamma(img, random.uniform(0.6, 1.0), -np.random.randint(50))

    # 1 chance in 4: flatten contrast toward grey then add a random offset.
    # NOTE(review): the uint8 addition can wrap around (overflow) — presumably
    # accepted as extra augmentation noise; confirm this is intended.
    if not np.random.randint(4):
        p=(15+np.random.randint(10))/100
        img=(img*p+50*(1-p)).astype(np.uint8)+np.random.randint(100)

    # Final Gaussian noise.
    img=bruit(img)

    return img
|
||||
|
||||
def create_lot_img(image, nbr, nbr_thread=None):
    """Build `nbr` augmented variants of `image` with a process pool.

    nbr_thread defaults to the machine's CPU count.
    """
    if nbr_thread is None:
        nbr_thread=multiprocessing.cpu_count()
    copies=np.repeat([image], nbr, axis=0)
    with Pool(nbr_thread) as pool:
        results=pool.map(modif_img, copies)
        pool.close()
    return results
|
||||
40
Divers/tutoriel25-3/genere_fond.py
Normal file
40
Divers/tutoriel25-3/genere_fond.py
Normal file
@@ -0,0 +1,40 @@
|
||||
import cv2
import numpy as np
import random
import os
import common

# Source video from which random background ("no sign") crops are extracted.
video="videos/France_Motorway.mp4"

if not os.path.isdir(common.dir_images_sans_panneaux):
    os.mkdir(common.dir_images_sans_panneaux)

if not os.path.exists(video):
    print("Vidéo non présente:", video)
    quit()

cap=cv2.VideoCapture(video)

id=0
nbr_image=100000

# FIX: the per-frame quota was computed from a hard-coded 100000 instead of
# nbr_image; use the variable so changing nbr_image keeps both consistent.
nbr_image_par_frame=int(nbr_image/cap.get(cv2.CAP_PROP_FRAME_COUNT))+1

while True:
    ret, frame=cap.read()
    if ret is False:
        quit()
    h, w, c=frame.shape

    # Save nbr_image_par_frame random size x size crops of this frame.
    for cpt in range(nbr_image_par_frame):
        x=random.randint(0, w-common.size)
        y=random.randint(0, h-common.size)
        img=frame[y:y+common.size, x:x+common.size]
        cv2.imwrite(common.dir_images_sans_panneaux+"/{:d}.png".format(id), img)
        id+=1
        if id==nbr_image:
            quit()
|
||||
|
||||
|
||||
|
||||
|
||||
29
Divers/tutoriel25-3/genere_panneaux.py
Normal file
29
Divers/tutoriel25-3/genere_panneaux.py
Normal file
@@ -0,0 +1,29 @@
|
||||
import numpy as np
from sklearn.utils import shuffle
import cv2
import common
import dataset

# Visual sanity check of the augmentation pipeline: generate augmented
# variants of each reference sign and display them one by one with their label.

tab_panneau, tab_image_panneau=common.lire_images_panneaux(common.dir_images_panneaux, common.size)

tab_images=np.array([]).reshape(0, common.size, common.size, 3)
tab_labels=[]

# One integer label per reference sign, in load order.
id=0
for image in tab_image_panneau:
    lot=dataset.create_lot_img(image, 1000)
    tab_images=np.concatenate([tab_images, lot])
    tab_labels=np.concatenate([tab_labels, np.full(len(lot), id)])
    id+=1

tab_panneau=np.array(tab_panneau)
tab_images=np.array(tab_images, dtype=np.float32)/255
tab_labels=np.array(tab_labels).reshape([-1, 1])

tab_images, tab_labels=shuffle(tab_images, tab_labels)

# Show each image until 'q' is pressed.
for i in range(len(tab_images)):
    cv2.imshow("panneau", tab_images[i])
    print("label", tab_labels[i], "panneau", tab_panneau[int(tab_labels[i])])
    if cv2.waitKey()&0xFF==ord('q'):
        quit()
|
||||
69
Divers/tutoriel25-3/lire_panneau.py
Normal file
69
Divers/tutoriel25-3/lire_panneau.py
Normal file
@@ -0,0 +1,69 @@
|
||||
import tensorflow as tf
import cv2
import os
import numpy as np
import random
import common

# HoughCircles thresholds (Canny high threshold / accumulator threshold).
th1=30
th2=55

video_dir="d:/dashcam Cedric"

# Reference sign labels and full-resolution images (no resize here: the
# originals are pasted onto the video frame as an overlay).
tab_panneau, tab_image_panneau=common.lire_images_panneaux(common.dir_images_panneaux)

# Restore the trained classifier.
model_panneau=common.panneau_model(len(tab_panneau))
checkpoint=tf.train.Checkpoint(model_panneau=model_panneau)
checkpoint.restore(tf.train.latest_checkpoint("./training_panneau/"))

l=os.listdir(video_dir)
random.shuffle(l)

for video in l:
    if not video.endswith("mp4"):
        continue
    cap=cv2.VideoCapture(video_dir+"/"+video)

    print("video:", video)
    id_panneau=-1
    while True:
        ret, frame=cap.read()
        if ret is False:
            break
        # NOTE: frame.shape is (height, width, channels); these names are
        # swapped but used consistently below.
        f_w, f_h, f_c=frame.shape
        frame=cv2.resize(frame, (int(f_h/1.5), int(f_w/1.5)))

        # Region of interest where signs are expected.
        image=frame[200:400, 700:1000]
        cv2.rectangle(frame, (700, 200), (1000, 400), (255, 255, 255), 1)

        gray=cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

        # Candidate round signs in the ROI.
        circles=cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, 1, 20, param1=th1, param2=th2, minRadius=5, maxRadius=45)
        if circles is not None:
            circles=np.int16(np.around(circles))
            for i in circles[0,:]:
                if i[2]!=0:
                    # Crop the circle's bounding square (clamped to the ROI),
                    # normalised to [0, 1].
                    panneau=cv2.resize(image[max(0, i[1]-i[2]):i[1]+i[2], max(0, i[0]-i[2]):i[0]+i[2]], (common.size, common.size))/255
                    cv2.imshow("panneau", panneau)
                    prediction=model_panneau(np.array([panneau]), training=False)
                    print("Prediction:", prediction)
                    # Per-class scores: accept when any class exceeds 0.6.
                    if np.any(np.greater(prediction[0], 0.6)):
                        id_panneau=np.argmax(prediction[0])
                        print(" -> C'est un panneau:", tab_panneau[id_panneau], "KM/H")
                        # Overlay size; w/h only exist once a sign was accepted,
                        # which the id_panneau!=-1 guard below relies on.
                        w, h, c=tab_image_panneau[id_panneau].shape
                    else:
                        print(" -> Ce n'est pas un panneau")
        # Keep showing the last recognised sign in the top-left corner.
        if id_panneau!=-1:
            frame[0:h, 0:w, :]=tab_image_panneau[id_panneau]
        cv2.putText(frame, "fichier:"+video, (30, 30), cv2.FONT_HERSHEY_DUPLEX, 1, (0, 255, 0), 1, cv2.LINE_AA)
        cv2.imshow("Video", frame)
        # q: quit, a: skip 100 frames, f: next video.
        key=cv2.waitKey(1)&0xFF
        if key==ord('q'):
            quit()
        if key==ord('a'):
            for cpt in range(100):
                ret, frame=cap.read()
        if key==ord('f'):
            break

cv2.destroyAllWindows()
|
||||
140
Divers/tutoriel25-3/train_panneau.py
Normal file
140
Divers/tutoriel25-3/train_panneau.py
Normal file
@@ -0,0 +1,140 @@
|
||||
import tensorflow as tf
|
||||
from tensorflow.keras import layers, models
|
||||
import numpy as np
|
||||
from sklearn.utils import shuffle
|
||||
from sklearn.model_selection import train_test_split
|
||||
import cv2
|
||||
import os
|
||||
import time
|
||||
import common
|
||||
import dataset
|
||||
|
||||
# Hyper-parameters and dataset construction.
batch_size=128
nbr_entrainement=20

tab_panneau, tab_image_panneau=common.lire_images_panneaux(common.dir_images_panneaux, common.size)

# Images (N, size, size, 3) and one-hot labels (N, nbr_classes); an all-zero
# label row means "not a speed-limit sign".
tab_images=np.array([]).reshape(0, common.size, common.size, 3)
tab_labels=np.array([]).reshape(0, len(tab_image_panneau))

# Positive examples: augmented variants of each reference sign, one-hot labelled.
id=0
for image in tab_image_panneau:
    lot=dataset.create_lot_img(image, 12000)
    tab_images=np.concatenate((tab_images, lot))
    tab_labels=np.concatenate([tab_labels, np.repeat([np.eye(len(tab_image_panneau))[id]], len(lot), axis=0)])
    id+=1

# Negative examples, part 1: other kinds of signs (all-zero label rows).
files=os.listdir(common.dir_images_autres_panneaux)
# FIX: os.listdir never returns None; test for an empty listing instead.
if not files:
    # FIX: the format string was missing its {} placeholder, so the path
    # never appeared in the message.
    quit("Le repertoire d'image est vide: {}".format(common.dir_images_autres_panneaux))

nbr=0
for file in files:
    if file.endswith("png"):
        path=os.path.join(common.dir_images_autres_panneaux, file)
        # FIX: cv2.resize's third positional argument is `dst`, not the
        # interpolation flag; pass it by keyword.
        image=cv2.resize(cv2.imread(path), (common.size, common.size), interpolation=cv2.INTER_LANCZOS4)
        lot=dataset.create_lot_img(image, 700)
        tab_images=np.concatenate([tab_images, lot])
        nbr+=len(lot)

tab_labels=np.concatenate([tab_labels, np.repeat([np.full(len(tab_image_panneau), 0)], nbr, axis=0)])

# Negative examples, part 2: background crops, at most half of the current total.
nbr_np=int(len(tab_images)/2)

id=1
nbr=0
tab=[]
for cpt in range(nbr_np):
    file=common.dir_images_sans_panneaux+"/{:d}.png".format(id)
    if not os.path.isfile(file):
        break
    image=cv2.resize(cv2.imread(file), (common.size, common.size))
    tab.append(image)
    id+=1
    nbr+=1

tab_images=np.concatenate([tab_images, tab])
tab_labels=np.concatenate([tab_labels, np.repeat([np.full(len(tab_image_panneau), 0)], nbr, axis=0)])

# Normalise to [0, 1]; labels stay (N, nbr_classes) for per-class BCE.
tab_panneau=np.array(tab_panneau)
tab_images=np.array(tab_images, dtype=np.float32)/255
tab_labels=np.array(tab_labels, dtype=np.float32)

train_images, test_images, train_labels, test_labels=train_test_split(tab_images, tab_labels, test_size=0.10)

train_ds=tf.data.Dataset.from_tensor_slices((train_images, train_labels)).batch(batch_size)
test_ds=tf.data.Dataset.from_tensor_slices((test_images, test_labels)).batch(batch_size)

print("train_images", len(train_images))
print("test_images", len(test_images))
|
||||
|
||||
@tf.function
def train_step(images, labels):
    """One optimisation step: forward pass, loss, backprop, metric update."""
    with tf.GradientTape() as tape:
        outputs=model_panneau(images)
        step_loss=my_loss(labels, outputs)
    grads=tape.gradient(step_loss, model_panneau.trainable_variables)
    optimizer.apply_gradients(zip(grads, model_panneau.trainable_variables))
    train_loss(step_loss)
    train_accuracy(labels, outputs)
|
||||
|
||||
def train(train_ds, nbr_entrainement):
    """Run nbr_entrainement epochs over train_ds, printing loss/accuracy and
    evaluating on the test set after each epoch."""
    for epoch in range(nbr_entrainement):
        t0=time.time()
        for images, labels in train_ds:
            train_step(images, labels)
        print('Entrainement {:04d}: loss: {:6.4f}, accuracy: {:7.4f}%, temps: {:7.4f}'.format(
            epoch+1, train_loss.result(), train_accuracy.result()*100, time.time()-t0))
        train_loss.reset_states()
        train_accuracy.reset_states()
        test(test_ds)
|
||||
|
||||
def my_loss(labels, preds):
    """Flatten labels and predictions to single columns and apply the binary
    cross-entropy element-wise (one binary decision per class)."""
    return loss_object(tf.reshape(labels, (-1, 1)), tf.reshape(preds, (-1, 1)))
|
||||
|
||||
def test(test_ds):
    """Evaluate the model over test_ds and report average loss/accuracy."""
    t0=time.time()
    for test_images, test_labels in test_ds:
        outputs=model_panneau(test_images)
        test_loss(my_loss(test_labels, outputs))
        test_accuracy(test_labels, outputs)
    print(' >>> Test: loss: {:6.4f}, accuracy: {:7.4f}%, temps: {:7.4f}'.format(
        test_loss.result(), test_accuracy.result()*100, time.time()-t0))
    test_loss.reset_states()
    test_accuracy.reset_states()
|
||||
|
||||
# One binary cross-entropy per class (multi-label style): an all-zero target
# row means "none of the speed-limit signs".
optimizer=tf.keras.optimizers.Adam()
loss_object=tf.keras.losses.BinaryCrossentropy()
train_loss=tf.keras.metrics.Mean()
train_accuracy=tf.keras.metrics.BinaryAccuracy()
test_loss=tf.keras.metrics.Mean()
test_accuracy=tf.keras.metrics.BinaryAccuracy()
model_panneau=common.panneau_model(len(tab_panneau))
checkpoint=tf.train.Checkpoint(model_panneau=model_panneau)

print("Entrainement")
train(train_ds, nbr_entrainement)
checkpoint.save(file_prefix="./training_panneau/panneau")

# NOTE(review): this quit() makes the review loop below unreachable (dead
# code); remove it to re-enable the interactive inspection.
quit()

# Interactive review of the test set until 'q' is pressed.
for i in range(len(test_images)):
    prediction=model_panneau(np.array([test_images[i]]))
    print("prediction", prediction[0])
    if np.sum(prediction[0])<0.6:
        print("Ce n'est pas un panneau")
    else:
        print("C'est un panneau:", tab_panneau[np.argmax(prediction[0])])
    cv2.imshow("image", test_images[i])
    if cv2.waitKey()&0xFF==ord('q'):
        break
|
||||
8
Divers/tutoriel25/README.md
Normal file
8
Divers/tutoriel25/README.md
Normal file
@@ -0,0 +1,8 @@
|
||||
# Tutoriel 25
|
||||
## Lecture des panneaux de limitation de vitesse
|
||||
|
||||
Les vidéos de ce tutoriel sont disponibles aux adresses suivantes:<br>
|
||||
partie 1: https://www.youtube.com/watch?v=PvD5POjXw8Q <br>
|
||||
partie 2: https://www.youtube.com/watch?v=TYDi0SNCUr0 <br>
|
||||
partie 3: https://www.youtube.com/watch?v=fBysd-Y17Tw
|
||||
|
||||
30
Divers/tutoriel25/common.py
Normal file
30
Divers/tutoriel25/common.py
Normal file
@@ -0,0 +1,30 @@
|
||||
import tensorflow as tf
|
||||
from tensorflow.keras import layers, models
|
||||
import os
|
||||
import cv2
|
||||
|
||||
size=42
|
||||
dir_images_panneaux="images_panneaux"
|
||||
dir_images_autres_panneaux="images_autres_panneaux"
|
||||
dir_images_sans_panneaux="images_sans_panneaux"
|
||||
|
||||
def lire_images_panneaux(dir_images_panneaux, size=None):
    """Load the reference sign images from a directory.

    Args:
        dir_images_panneaux: directory holding one ``<label>.png`` per sign.
        size: optional edge length; when given, images are resized to (size, size).

    Returns:
        (tab_panneau, tab_image_panneau): parallel lists of labels (file name
        without extension) and BGR images, in sorted file-name order.
    """
    tab_panneau=[]
    tab_image_panneau=[]

    if not os.path.exists(dir_images_panneaux):
        quit("Le repertoire d'image n'existe pas: {}".format(dir_images_panneaux))

    files=os.listdir(dir_images_panneaux)
    # FIX: os.listdir never returns None; test for an empty listing instead.
    if not files:
        quit("Le repertoire d'image est vide: {}".format(dir_images_panneaux))

    for file in sorted(files):
        if file.endswith("png"):
            tab_panneau.append(file.split(".")[0])
            image=cv2.imread(dir_images_panneaux+"/"+file)
            if size is not None:
                # FIX: cv2.resize's third positional argument is `dst`, not
                # the interpolation flag; pass it by keyword.
                image=cv2.resize(image, (size, size), interpolation=cv2.INTER_LANCZOS4)
            tab_image_panneau.append(image)

    return tab_panneau, tab_image_panneau
|
||||
82
Divers/tutoriel25/dataset.py
Normal file
82
Divers/tutoriel25/dataset.py
Normal file
@@ -0,0 +1,82 @@
|
||||
import numpy as np
|
||||
import cv2
|
||||
from multiprocessing import Pool
|
||||
import multiprocessing
|
||||
import random
|
||||
|
||||
def bruit(image_orig):
    """Add Gaussian noise of random strength; returns uint8 values in [0, 255]."""
    h, w, c=image_orig.shape
    amplitude=random.randint(5, 30)
    noisy=image_orig+np.random.randn(h, w, c)*amplitude
    return np.clip(noisy, 0, 255).astype(np.uint8)
|
||||
|
||||
def change_gamma(image, alpha=1.0, beta=0.0):
    """Apply the affine brightness/contrast map alpha*image+beta, clipped to uint8."""
    adjusted=image*alpha+beta
    return np.clip(adjusted, 0, 255).astype(np.uint8)
|
||||
|
||||
def modif_img(img):
    """Return a randomly augmented copy of a sign image.

    Applies, each with some probability: background recolor, Gaussian blur,
    small rotation, perspective warp, corner/border crops, an oriented
    luminance ramp, gamma change, contrast flattening and noise.
    """
    h, w, c=img.shape

    # Replace the flat grey background (exact value 142,142,142) by a random color.
    r_color=[np.random.randint(255), np.random.randint(255), np.random.randint(255)]
    img=np.where(img==[142, 142, 142], r_color, img).astype(np.uint8)

    # 2 chances in 3: Gaussian blur with a random odd kernel (1, 3 or 5).
    if np.random.randint(3):
        k_max=3
        kernel_blur=np.random.randint(k_max)*2+1
        img=cv2.GaussianBlur(img, (kernel_blur, kernel_blur), 0)

    # Small random rotation around the image center (-10..10 degrees).
    M=cv2.getRotationMatrix2D((int(w/2), int(h/2)), random.randint(-10, 10), 1)
    img=cv2.warpAffine(img, M, (w, h))

    # 1 chance in 2: random perspective distortion of the four corners.
    if np.random.randint(2):
        a=int(max(w, h)/5)+1
        pts1=np.float32([[0, 0], [w, 0], [0, h], [w, h]])
        pts2=np.float32([[0+random.randint(-a, a), 0+random.randint(-a, a)], [w-random.randint(-a, a), 0+random.randint(-a, a)], [0+random.randint(-a, a), h-random.randint(-a, a)], [w-random.randint(-a, a), h-random.randint(-a, a)]])
        M=cv2.getPerspectiveTransform(pts1,pts2)
        img=cv2.warpPerspective(img, M, (w, h))

    # 1 chance in 2: crop a corner (r in 0..3; r in 4..5 keeps the full image)
    # then resize back.
    # NOTE(review): rows are sliced with w2 and columns with h2 — axes look
    # swapped. Harmless when inputs are square (h == w); confirm callers.
    if np.random.randint(2):
        r=random.randint(0, 5)
        h2=int(h*0.9)
        w2=int(w*0.9)
        if r==0:
            img=img[0:w2, 0:h2]
        elif r==1:
            img=img[w-w2:w, 0:h2]
        elif r==2:
            img=img[0:w2, h-h2:h]
        elif r==3:
            img=img[w-w2:w, h-h2:h]
        img=cv2.resize(img, (h, w))

    # 1 chance in 2: symmetric border crop (zoom) then resize back.
    if np.random.randint(2):
        r=random.randint(1, int(max(w, h)*0.15))
        img=img[r:w-r, r:h-r]
        img=cv2.resize(img, (h, w))

    # 1 chance in 4: multiply by a vertical luminance ramp rotated by a random
    # multiple of 90 degrees (simulates uneven lighting).
    if not np.random.randint(4):
        t=np.empty((h, w, c) , dtype=np.float32)
        for i in range(h):
            for j in range(w):
                for k in range(c):
                    t[i][j][k]=(i/h)
        M=cv2.getRotationMatrix2D((int(w/2), int(h/2)), np.random.randint(4)*90, 1)
        t=cv2.warpAffine(t, M, (w, h))
        img=(cv2.multiply((img/255).astype(np.float32), t)*255).astype(np.uint8)

    # Random darkening: gamma in [0.6, 1.0] and a negative offset.
    img=change_gamma(img, random.uniform(0.6, 1.0), -np.random.randint(50))

    # 1 chance in 4: flatten contrast toward grey then add a random offset.
    # NOTE(review): the uint8 addition can wrap around (overflow) — presumably
    # accepted as extra augmentation noise; confirm this is intended.
    if not np.random.randint(4):
        p=(15+np.random.randint(10))/100
        img=(img*p+50*(1-p)).astype(np.uint8)+np.random.randint(100)

    # Final Gaussian noise.
    img=bruit(img)

    return img
|
||||
|
||||
def create_lot_img(image, nbr, nbr_thread=None):
    """Build `nbr` augmented variants of `image` with a process pool.

    nbr_thread defaults to the machine's CPU count.
    """
    if nbr_thread is None:
        nbr_thread=multiprocessing.cpu_count()
    copies=np.repeat([image], nbr, axis=0)
    with Pool(nbr_thread) as pool:
        results=pool.map(modif_img, copies)
        pool.close()
    return results
|
||||
46
Divers/tutoriel25/extract_panneau.py
Normal file
46
Divers/tutoriel25/extract_panneau.py
Normal file
@@ -0,0 +1,46 @@
|
||||
import cv2
import os
import numpy as np
import random
import common

# Directory of dashcam recordings to scan for circular signs.
# FIX: raw string — in "D:\dashcam Cedric" the "\d" is an invalid escape
# sequence (kept literally today, but deprecated and fragile).
video_dir=r"D:\dashcam Cedric"

l=os.listdir(video_dir)

for video in l:
    if not video.endswith("mp4"):
        continue
    cap=cv2.VideoCapture(video_dir+"/"+video)

    print("video:", video)
    while True:
        ret, frame=cap.read()
        if ret is False:
            break
        # NOTE: frame.shape is (height, width, channels); these names are
        # swapped but used consistently below.
        f_w, f_h, f_c=frame.shape
        frame=cv2.resize(frame, (int(f_h/1.5), int(f_w/1.5)))

        # Region of interest where signs are expected.
        image=frame[200:400, 700:1000]
        cv2.rectangle(frame, (700, 200), (1000, 400), (255, 255, 255), 1)

        # Detect circles (candidate round signs) in the ROI.
        gray=cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        circles=cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, 1, 20, param1=30, param2=60, minRadius=5, maxRadius=45)
        if circles is not None:
            circles=np.int16(np.around(circles))
            for i in circles[0,:]:
                if i[2]!=0:
                    # Crop the circle's bounding square, clamped to the ROI.
                    panneau=cv2.resize(image[max(0, i[1]-i[2]):i[1]+i[2], max(0, i[0]-i[2]):i[0]+i[2]], (common.size, common.size))/255
                    cv2.imshow("panneau", panneau)
        cv2.putText(frame, "fichier:"+video, (30, 30), cv2.FONT_HERSHEY_DUPLEX, 1, (0, 255, 0), 1, cv2.LINE_AA)
        cv2.imshow("Video", frame)
        # q: quit, a: skip 100 frames, f: next video.
        key=cv2.waitKey(1)&0xFF
        if key==ord('q'):
            quit()
        if key==ord('a'):
            for cpt in range(100):
                ret, frame=cap.read()
        if key==ord('f'):
            break

cv2.destroyAllWindows()
|
||||
29
Divers/tutoriel25/genere_panneaux.py
Normal file
29
Divers/tutoriel25/genere_panneaux.py
Normal file
@@ -0,0 +1,29 @@
|
||||
import numpy as np
from sklearn.utils import shuffle
import cv2
import common
import dataset

# Visual sanity check of the augmentation pipeline: generate augmented
# variants of each reference sign and display them one by one with their label.

tab_panneau, tab_image_panneau=common.lire_images_panneaux(common.dir_images_panneaux, common.size)

tab_images=np.array([]).reshape(0, common.size, common.size, 3)
tab_labels=[]

# One integer label per reference sign, in load order.
id=0
for image in tab_image_panneau:
    lot=dataset.create_lot_img(image, 1000)
    tab_images=np.concatenate([tab_images, lot])
    tab_labels=np.concatenate([tab_labels, np.full(len(lot), id)])
    id+=1

tab_panneau=np.array(tab_panneau)
tab_images=np.array(tab_images, dtype=np.float32)/255
tab_labels=np.array(tab_labels).reshape([-1, 1])

tab_images, tab_labels=shuffle(tab_images, tab_labels)

# Show each image until 'q' is pressed.
for i in range(len(tab_images)):
    cv2.imshow("panneau", tab_images[i])
    print("label", tab_labels[i], "panneau", tab_panneau[int(tab_labels[i])])
    if cv2.waitKey()&0xFF==ord('q'):
        quit()
|
||||
36
Divers/tutoriel25/houghcircles.py
Normal file
36
Divers/tutoriel25/houghcircles.py
Normal file
@@ -0,0 +1,36 @@
|
||||
import cv2
import numpy as np

# Hough-circle parameters, tunable live from the keyboard.
param1 = 30   # Canny high threshold
param2 = 55   # accumulator threshold: lower -> more (false) detections
dp = 1.0      # inverse ratio of the accumulator resolution

cap = cv2.VideoCapture(0)

while True:
    ret, frame = cap.read()
    if not ret:
        # Camera read failed: avoid crashing in cvtColor below.
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    circles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, dp, 20, param1=param1, param2=param2, minRadius=10, maxRadius=50)
    if circles is not None:
        circles = np.around(circles).astype(np.int32)
        for x, y, r in circles[0, :]:
            if r != 0:
                cv2.circle(frame, (x, y), r, (0, 255, 0), 4)
    cv2.putText(frame, "[i|k]dp: {:4.2f} [o|l]param1: {:d} [p|m]param2: {:d}".format(dp, param1, param2), (10, 40), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (255, 0, 0), 1)
    cv2.imshow("Video", frame)
    key = cv2.waitKey(1) & 0xFF
    if key == ord('q'):
        # break instead of quit() so the cleanup below actually runs.
        break
    elif key == ord('i'):
        dp = min(10, dp + 0.1)
    elif key == ord('k'):
        dp = max(0.1, dp - 0.1)
    elif key == ord('o'):
        param1 = min(255, param1 + 1)
    elif key == ord('l'):
        param1 = max(1, param1 - 1)
    elif key == ord('p'):
        param2 = min(255, param2 + 1)
    elif key == ord('m'):
        param2 = max(1, param2 - 1)

cap.release()
cv2.destroyAllWindows()
|
||||
7
Divers/tutoriel29/README.md
Normal file
7
Divers/tutoriel29/README.md
Normal file
@@ -0,0 +1,7 @@
|
||||
# Tutoriel 29
|
||||
## Segmentation d'image avec l'algorithme des K-moyennes
|
||||
|
||||
La vidéo de ce tutoriel est disponible à l'adresse suivante:<br>
|
||||
https://www.youtube.com/watch?v=ytii3XvapRY
|
||||
|
||||
|
||||
46
Divers/tutoriel29/ishihara.py
Normal file
46
Divers/tutoriel29/ishihara.py
Normal file
@@ -0,0 +1,46 @@
|
||||
import numpy as np
from matplotlib import pyplot as plt
from sklearn.cluster import KMeans
import cv2
import glob

k = 2            # number of K-means clusters
ESPACE = "HSV"   # color space used for the segmentation
CH = [0, 2]      # channels of that color space fed to K-means
size = 200       # working resolution (pixels)

# Forward slashes work on Windows too and avoid the invalid "\i"/"\*"
# escape sequences of the original '.\images\*.png' pattern.
for image in glob.glob('./images/*.png'):
    print("Image:", image)

    # Read and display the image.
    img = cv2.imread(image)
    img = cv2.resize(img, (size, size))
    cv2.imshow("image", img)

    # Color-space conversion: getattr instead of eval — same effect,
    # no arbitrary code evaluation.
    img = cv2.cvtColor(img, getattr(cv2, "COLOR_BGR2" + ESPACE))
    X = img[:, :, CH].reshape(img.shape[0] * img.shape[1], len(CH))

    # 2-D scatter plot of the two selected channels.
    if len(CH) == 2:
        plt.scatter(X[:, 0], X[:, 1], s=3)
        plt.show()

    # K-means clustering on the selected channels.
    kmeans = KMeans(n_clusters=k)
    pred = kmeans.fit_predict(X)

    # Same scatter plot, colored by cluster, with the cluster centers.
    if len(CH) == 2:
        plt.scatter(X[:, 0], X[:, 1], c=pred, s=3)
        plt.scatter(kmeans.cluster_centers_[:, 0], kmeans.cluster_centers_[:, 1], s=50, c='red')
        plt.show()

    # Show the segmentation as a grayscale image normalized to [0, 1].
    pred = pred.reshape(img.shape[0], img.shape[1])
    pred = pred / (k - 1)
    cv2.imshow("kmeans", pred)

    if cv2.waitKey() & 0xFF == ord('q'):
        break
|
||||
35
Divers/tutoriel29/ishihara2.py
Normal file
35
Divers/tutoriel29/ishihara2.py
Normal file
@@ -0,0 +1,35 @@
|
||||
import numpy as np
from matplotlib import pyplot as plt
from sklearn.cluster import KMeans
import cv2
import sys
import glob

min_clusters = 2
max_clusters = 4
# One mosaic row per color space, using only the listed channels of each.
ESPACES = ["YCrCb", "HSV", "LAB"]
COUCHES = [[1], [0, 2], [1, 2]]
size = 200

# Forward slashes work on Windows too and avoid the invalid "\i"/"\*"
# escape sequences of the original '.\images\*.png' pattern.
for image in glob.glob('./images/*.png'):
    print("Image: {} ".format(image), end='')
    # Mosaic of every (color space, k) segmentation result.
    tab = np.zeros([(len(ESPACES)) * size, (max_clusters - min_clusters + 1) * size], dtype=np.float32)
    img = cv2.imread(image)
    cv2.imshow("image", cv2.resize(img, (2 * size, 2 * size)))
    img = cv2.resize(img, (size, size))

    for index in range(len(ESPACES)):
        # getattr instead of eval — same effect, no arbitrary code evaluation.
        img2 = cv2.cvtColor(img, getattr(cv2, "COLOR_BGR2" + ESPACES[index]))
        X = img2[:, :, COUCHES[index]].reshape(img2.shape[0] * img2.shape[1], len(COUCHES[index]))
        for k in range(min_clusters, max_clusters + 1):
            # One progress dot per (space, k) pair.
            sys.stdout.write('.')
            sys.stdout.flush()
            kmeans = KMeans(n_clusters=k)
            pred = kmeans.fit_predict(X)
            pred = pred.reshape(img2.shape[0], img2.shape[1])
            pred = pred / (k - 1)  # normalize labels to [0, 1] for display
            tab[index * size:(index + 1) * size, (k - min_clusters) * size:(k - min_clusters) * size + size] = pred
    sys.stdout.write('\n')
    cv2.imshow("kmeans", tab)
    if cv2.waitKey() & 0xFF == ord('q'):
        break
|
||||
5
Divers/tutoriel31/README.md
Normal file
5
Divers/tutoriel31/README.md
Normal file
@@ -0,0 +1,5 @@
|
||||
# Tutoriel 31
|
||||
## K-moyennes: coefficient silhouette
|
||||
|
||||
La vidéo de ce tutoriel est disponible à l'adresse suivante: https://www.youtube.com/watch?v=H_AW_lwvdDk
|
||||
|
||||
42
Divers/tutoriel31/silouhette.py
Normal file
42
Divers/tutoriel31/silouhette.py
Normal file
@@ -0,0 +1,42 @@
|
||||
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvas
from sklearn.cluster import KMeans
# sklearn.datasets.samples_generator was removed in scikit-learn 0.24;
# make_blobs is importable directly from sklearn.datasets.
from sklearn.datasets import make_blobs
import cv2

cluster_std = 1.30
n_samples = 300
X, y = make_blobs(n_samples=n_samples, centers=5, cluster_std=cluster_std)

fig, (ax1, ax2) = plt.subplots(1, 2)
canvas = FigureCanvas(fig)
fig.set_size_inches(11, 6)

k = 2
while 1:
    # Left plot: raw data.
    ax1.cla()
    ax1.scatter(X[:, 0], X[:, 1], marker='+', c="#FF0000")

    kmeans = KMeans(n_clusters=k)
    pred_y = kmeans.fit_predict(X)

    # Right plot: data colored by cluster, plus the cluster centers.
    ax2.cla()
    ax2.scatter(X[:, 0], X[:, 1], c=pred_y, marker='+')
    ax2.scatter(kmeans.cluster_centers_[:, 0], kmeans.cluster_centers_[:, 1], s=50, c='#0000FF')
    canvas.draw()

    # Render the matplotlib canvas into an image shown through OpenCV.
    img = np.array(canvas.renderer.buffer_rgba())
    cv2.putText(img, "Nbr cluster={:02d} [p|m] nbr clusters [r] reset [q] quit".format(k), (250, 50), cv2.FONT_HERSHEY_PLAIN, 1, (0, 0, 255), 2)

    cv2.imshow("plot", img)
    key = cv2.waitKey() & 0xFF
    if key == ord('p'):
        k = min(99, k + 1)
    if key == ord('m'):
        k = max(2, k - 1)
    if key == ord('r'):
        # Regenerate a fresh random dataset.
        X, y = make_blobs(n_samples=n_samples, centers=5, cluster_std=cluster_std)
    if key == ord('q'):
        quit()
|
||||
43
Divers/tutoriel31/silouhette2.py
Normal file
43
Divers/tutoriel31/silouhette2.py
Normal file
@@ -0,0 +1,43 @@
|
||||
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvas
from sklearn.cluster import KMeans
# sklearn.datasets.samples_generator was removed in scikit-learn 0.24;
# make_blobs is importable directly from sklearn.datasets.
from sklearn.datasets import make_blobs
import cv2
import glob

k = 5
cluster_std = 1.30
n_samples = 300
X, y = make_blobs(n_samples=n_samples, centers=k, cluster_std=cluster_std)

fig, (ax1, ax2) = plt.subplots(1, 2)
canvas = FigureCanvas(fig)
fig.set_size_inches(10, 6)

while 1:
    # Left plot: raw data.
    ax1.cla()
    ax1.scatter(X[:, 0], X[:, 1], marker='+', c="#FF0000")
    ax1.set_title('Données')

    # WCSS (inertia) for k = 1..10, for the elbow method.
    wcss = []
    for i in range(1, 11):
        kmeans = KMeans(n_clusters=i)
        kmeans.fit(X)
        wcss.append(kmeans.inertia_)

    # Right plot: the elbow curve.
    ax2.cla()
    ax2.plot(range(1, 11), wcss, c="#FF0000")
    ax2.set_title('WCSS pour "elbow method"')

    # Render the matplotlib canvas into an image shown through OpenCV.
    canvas.draw()
    img = np.array(canvas.renderer.buffer_rgba())
    cv2.putText(img, "[r] reset [q] quit", (450, 40), cv2.FONT_HERSHEY_PLAIN, 1, (0, 0, 255), 2)

    cv2.imshow("plot", img)
    key = cv2.waitKey() & 0xFF
    if key == ord('r'):
        # Regenerate a fresh random dataset.
        X, y = make_blobs(n_samples=n_samples, centers=k, cluster_std=cluster_std)
    if key == ord('q'):
        quit()
|
||||
57
Divers/tutoriel31/silouhette3.py
Normal file
57
Divers/tutoriel31/silouhette3.py
Normal file
@@ -0,0 +1,57 @@
|
||||
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvas
from sklearn.cluster import KMeans
# sklearn.datasets.samples_generator was removed in scikit-learn 0.24;
# make_blobs is importable directly from sklearn.datasets.
from sklearn.datasets import make_blobs
from sklearn.metrics import silhouette_score
import cv2
import glob

k = 5
cluster_std = 1.30
n_samples = 300
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2)
canvas = FigureCanvas(fig)
fig.set_size_inches(12, 8)

X, y = make_blobs(n_samples=n_samples, centers=k, cluster_std=cluster_std)

while 1:
    # Top-left: raw data.
    ax1.cla()
    ax1.plot(X[:, 0], X[:, 1], "+", c="#FF0000")
    ax1.set_title('Données')

    # WCSS (elbow method) and silhouette score for k = 2..10.
    wcss = []
    tab_silhouette = []
    for i in range(2, 11):
        kmeans = KMeans(n_clusters=i)
        cluster_labels = kmeans.fit_predict(X)
        wcss.append(kmeans.inertia_)
        tab_silhouette.append(silhouette_score(X, cluster_labels))

    # Top-right: the elbow curve.
    ax2.cla()
    ax2.plot(range(2, 11), wcss, c="#FF0000")
    ax2.set_title('WCSS pour "elbow method"')

    # Bottom-left: the silhouette curve.
    ax3.cla()
    ax3.plot(range(2, 11), tab_silhouette, c="#FF0000")
    ax3.set_title('Coefficient silhouette')

    # Bottom-right: re-cluster with the k that maximizes the silhouette
    # score (+2 because the scan above starts at k=2).
    kmeans = KMeans(n_clusters=np.argmax(tab_silhouette) + 2)
    pred_y = kmeans.fit_predict(X)
    ax4.cla()
    ax4.scatter(X[:, 0], X[:, 1], c=pred_y, marker='+')
    ax4.set_title('Données + centre clusters')
    ax4.scatter(kmeans.cluster_centers_[:, 0], kmeans.cluster_centers_[:, 1], s=100, c="#0000FF")

    # Render the matplotlib canvas into an image shown through OpenCV.
    canvas.draw()
    img = np.array(canvas.renderer.buffer_rgba())
    cv2.putText(img, "[r] reset [q] quit", (450, 40), cv2.FONT_HERSHEY_PLAIN, 1, (0, 0, 255), 2)

    cv2.imshow("plot", img)
    key = cv2.waitKey() & 0xFF
    if key == ord('r'):
        # Regenerate a fresh random dataset.
        X, y = make_blobs(n_samples=n_samples, centers=k, cluster_std=cluster_std)
    if key == ord('q'):
        quit()
|
||||
115
Divers/tutoriel36-2/KalmanFilter.py
Normal file
115
Divers/tutoriel36-2/KalmanFilter.py
Normal file
@@ -0,0 +1,115 @@
|
||||
import numpy as np
|
||||
|
||||
class KalmanFilter(object):
    """Linear Kalman filter tracking a 2-D point with its bounding box.

    State vector E = [x, y, vx, vy, w, h]^T: position, velocity, box size.
    Only (x, y, w, h) are observed; the velocities are estimated.
    Rewritten with plain ndarrays and the @ operator: np.matrix is
    deprecated in NumPy and its use is discouraged.
    """

    def __init__(self, dt, point, box):
        # Time step between two predictions.
        self.dt = dt

        # Initial state: measured position and box size, zero velocity.
        self.E = np.array([[point[0]], [point[1]], [0], [0], [box[0]], [box[1]]], dtype=float)

        # Transition matrix: constant-velocity model for (x, y), static (w, h).
        self.A = np.array([[1, 0, self.dt, 0, 0, 0],
                           [0, 1, 0, self.dt, 0, 0],
                           [0, 0, 1, 0, 0, 0],
                           [0, 0, 0, 1, 0, 0],
                           [0, 0, 0, 0, 1, 0],
                           [0, 0, 0, 0, 0, 1]], dtype=float)

        # Observation matrix: only x, y, w and h are observed.
        self.H = np.array([[1, 0, 0, 0, 0, 0],
                           [0, 1, 0, 0, 0, 0],
                           [0, 0, 0, 0, 1, 0],
                           [0, 0, 0, 0, 0, 1]], dtype=float)

        # Process noise covariance (small: the motion model is trusted).
        v = 1E-5
        self.Q = v * np.eye(6)

        # Measurement noise covariance.
        v = 1E-5
        self.R = v * np.eye(4)

        # Error covariance estimate.
        self.P = np.eye(self.A.shape[1])

    def predict(self):
        """Propagate the state one time step; return the predicted state (6x1)."""
        self.E = self.A @ self.E
        # Propagate the error covariance.
        self.P = self.A @ self.P @ self.A.T + self.Q
        return self.E

    def update(self, z):
        """Correct the state with measurement z = [[x], [y], [w], [h]]; return it."""
        # Kalman gain.
        S = self.H @ self.P @ self.H.T + self.R
        K = self.P @ self.H.T @ np.linalg.inv(S)

        # Correction / innovation (rounded to whole pixels, as before).
        self.E = np.round(self.E + K @ (z - self.H @ self.E))
        I = np.eye(self.H.shape[1])
        self.P = (I - K @ self.H) @ self.P

        return self.E
|
||||
|
||||
class KalmanFilter_old(object):
    """Previous filter version, kept for reference: the box size also has a velocity.

    State vector E = [x, y, vx, vy, w, h, vw, vh]^T; only (x, y, w, h)
    are observed. Rewritten with plain ndarrays and the @ operator:
    np.matrix is deprecated in NumPy and its use is discouraged.
    """

    def __init__(self, dt, point, box):
        # Time step between two predictions.
        self.dt = dt

        # Initial state: measured position and box size, zero velocities.
        self.E = np.array([[point[0]], [point[1]], [0], [0], [box[0]], [box[1]], [0], [0]], dtype=float)

        # Transition matrix: constant velocity for the point AND the box size.
        self.A = np.array([[1, 0, self.dt, 0, 0, 0, 0, 0],
                           [0, 1, 0, self.dt, 0, 0, 0, 0],
                           [0, 0, 1, 0, 0, 0, 0, 0],
                           [0, 0, 0, 1, 0, 0, 0, 0],
                           [0, 0, 0, 0, 1, 0, self.dt, 0],
                           [0, 0, 0, 0, 0, 1, 0, self.dt],
                           [0, 0, 0, 0, 0, 0, 1, 0],
                           [0, 0, 0, 0, 0, 0, 0, 1]], dtype=float)

        # Observation matrix: only x, y, w and h are observed.
        self.H = np.array([[1, 0, 0, 0, 0, 0, 0, 0],
                           [0, 1, 0, 0, 0, 0, 0, 0],
                           [0, 0, 0, 0, 1, 0, 0, 0],
                           [0, 0, 0, 0, 0, 1, 0, 0]], dtype=float)

        # Process noise covariance (identity, unlike the newer KalmanFilter).
        self.Q = np.eye(8)

        # Measurement noise covariance (identity).
        self.R = np.eye(4)

        # Error covariance estimate.
        self.P = np.eye(self.A.shape[1])

    def predict(self):
        """Propagate the state one time step; return the predicted state (8x1)."""
        self.E = self.A @ self.E
        # Propagate the error covariance.
        self.P = self.A @ self.P @ self.A.T + self.Q
        return self.E

    def update(self, z):
        """Correct the state with measurement z = [[x], [y], [w], [h]]; return it."""
        # Kalman gain.
        S = self.H @ self.P @ self.H.T + self.R
        K = self.P @ self.H.T @ np.linalg.inv(S)

        # Correction / innovation (rounded to whole pixels, as before).
        self.E = np.round(self.E + K @ (z - self.H @ self.E))
        I = np.eye(self.H.shape[1])
        self.P = (I - K @ self.H) @ self.P

        return self.E
|
||||
4
Divers/tutoriel36-2/README.md
Normal file
4
Divers/tutoriel36-2/README.md
Normal file
@@ -0,0 +1,4 @@
|
||||
# Tutoriel 36
|
||||
## Filtre de Kalman partie 2
|
||||
|
||||
La vidéo de ce tutoriel est disponible à l'adresse suivante: https://www.youtube.com/watch?v=pR0TAFWnDdU
|
||||
33
Divers/tutoriel36-2/affiche_video_label.py
Normal file
33
Divers/tutoriel36-2/affiche_video_label.py
Normal file
@@ -0,0 +1,33 @@
|
||||
import cv2
import numpy as np
from numpy import genfromtxt
import os
import glob

datasets = "2DMOT2015Labels/train/"
dataset = "PETS09-S2L1"

dir_images = datasets + "/" + dataset + "/img1/"
fichier_label = datasets + "/" + dataset + "/gt/gt.txt"

if not os.path.exists(fichier_label):
    # Fix: the original passed the undefined name "fichier" (NameError).
    print("Le fichier de label n'existe pas ...", fichier_label)
    quit()

# gt.txt columns (MOT format): frame, id, bb_left, bb_top, bb_width, bb_height, ...
data = genfromtxt(fichier_label, delimiter=',')
# MOT ground-truth frame numbers are 1-based: the first image is frame 1,
# so starting at 0 left the first frame without its boxes.
id_frame = 1

# sorted(): glob.glob guarantees no ordering, and frames must play in order.
for image in sorted(glob.glob(dir_images + "*.jpg")):
    frame = cv2.imread(image)

    # Draw every ground-truth box belonging to the current frame.
    mask = data[:, 0] == id_frame
    for d in data[mask, :]:
        cv2.rectangle(frame, (int(d[2]), int(d[3])), (int(d[2] + d[4]), int(d[3] + d[5])), (0, 255, 0), 2)

    cv2.imshow("frame", frame)

    key = cv2.waitKey(70) & 0xFF
    if key == ord('q'):
        quit()
    id_frame += 1
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user