Initial commit

This commit is contained in:
2026-03-31 13:28:59 +02:00
commit 7ec43ca17d
314 changed files with 189852 additions and 0 deletions

View File

@@ -0,0 +1,7 @@
# Tutoriel 20
## Dlib: Détection et évaluation de la position de la tête
La vidéo de ce tutoriel est disponible à l'adresse suivante: https://www.youtube.com/watch?v=ibuEFfpVWlU
![image](https://github.com/L42Project/Tutoriels/blob/master/Divers/tutoriel20/dlib-landmark-mean.png)

113
Divers/tutoriel20/cube.py Normal file
View File

@@ -0,0 +1,113 @@
# Head-pose cube demo: dependencies, video capture and dlib model setup.
import cv2
import numpy as np
import dlib
import math
# Capture from the default webcam (device 0).
cap=cv2.VideoCapture(0)
#cap=cv2.VideoCapture("debat.webm")
# HOG-based frontal face detector and the 68-point facial-landmark model
# (the .dat file must be present in the working directory).
detector=dlib.get_frontal_face_detector()
predictor=dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
def tr(c, o, coeff):
    """Scale coordinate ``c`` about origin ``o`` by ``coeff``.

    Returns ``o + int((c - o) * coeff)``: the point moved away from (or
    toward) ``o`` by the given factor, truncated to an int so it can be
    used directly as a pixel coordinate.
    """
    # int() truncates toward zero, matching the original behaviour.
    return int((c - o) * coeff) + o
def cube(image, pt1, pt2, a1, a2, a3):
    """Draw a green wireframe cube on ``image`` anchored on two landmarks.

    ``pt1``/``pt2`` are dlib points (the outer eye corners); the front face
    is the square built on the pt1-pt2 segment, scaled by ``offset`` around
    its centre, and the back face is the same square translated by
    ``(a2, a3)`` and scaled by ``offset2``, giving a perspective effect.

    NOTE(review): ``a1`` (the roll estimate) is accepted for interface
    compatibility with the caller but is not used by the drawing code.
    """
    color=(0, 255, 0)
    epaisseur=2   # line thickness
    offset=1.6    # scale factor of the front face
    offset2=2     # scale factor of the back face
    # (Removed a dead statement that recomputed d_eyes from the *global*
    # `landmarks` variable: its result was never used here.)
    # Centre of the front-face square, used as the scaling origin.
    ox1=int((-(pt2.y-pt1.y)+pt2.x-pt1.x)/2)+pt1.x
    oy1=int(((pt2.x-pt1.x+pt2.y)-pt1.y)/2)+pt1.y
    # Front face: pt1 -> pt2 -> pt2 rotated 90° -> pt1 rotated 90° -> pt1.
    cv2.line(image,
             (tr(pt1.x, ox1, offset), tr(pt1.y, oy1, offset)),
             (tr(pt2.x, ox1, offset), tr(pt2.y, oy1, offset)),
             color, epaisseur)
    cv2.line(image,
             (tr(pt2.x, ox1, offset), tr(pt2.y, oy1, offset)),
             (tr(-(pt2.y-pt1.y)+pt2.x, ox1, offset), tr(pt2.x-pt1.x+pt2.y, oy1, offset)),
             color, epaisseur)
    cv2.line(image,
             (tr(-(pt2.y-pt1.y)+pt2.x, ox1, offset), tr(pt2.x-pt1.x+pt2.y, oy1, offset)),
             (tr(-(pt2.y-pt1.y)+pt1.x, ox1, offset), tr(pt2.x-pt1.x+pt1.y, oy1, offset)),
             color, epaisseur)
    cv2.line(image,
             (tr(-(pt2.y-pt1.y)+pt1.x, ox1, offset), tr(pt2.x-pt1.x+pt1.y, oy1, offset)),
             (tr(pt1.x, ox1, offset), tr(pt1.y, oy1, offset)),
             color, epaisseur)
    # Centre of the back-face square (front centre shifted by the pose offsets).
    ox2=int((-(pt2.y-pt1.y)+pt2.x-pt1.x)/2)+pt1.x+int(a2)
    oy2=int(((pt2.x-pt1.x+pt2.y)-pt1.y)/2)+pt1.y+int(a3)
    # Back face: same square translated by (a2, a3), scaled by offset2.
    cv2.line(image,
             (tr(pt1.x+a2, ox2, offset2), tr(pt1.y+a3, oy2, offset2)),
             (tr(pt2.x+a2, ox2, offset2), tr(pt2.y+a3, oy2, offset2)),
             color, epaisseur)
    cv2.line(image,
             (tr(pt2.x+a2, ox2, offset2), tr(pt2.y+a3, oy2, offset2)),
             (tr(-(pt2.y-pt1.y)+pt2.x+a2, ox2, offset2), tr(pt2.x-pt1.x+pt2.y+a3, oy2, offset2)),
             color, epaisseur)
    cv2.line(image,
             (tr(-(pt2.y-pt1.y)+pt2.x+a2, ox2, offset2), tr(pt2.x-pt1.x+pt2.y+a3, oy2, offset2)),
             (tr(-(pt2.y-pt1.y)+pt1.x+a2, ox2, offset2), tr(pt2.x-pt1.x+pt1.y+a3, oy2, offset2)),
             color, epaisseur)
    cv2.line(image,
             (tr(-(pt2.y-pt1.y)+pt1.x+a2, ox2, offset2), tr(pt2.x-pt1.x+pt1.y+a3, oy2, offset2)),
             (tr(pt1.x+a2, ox2, offset2), tr(pt1.y+a3, oy2, offset2)),
             color, epaisseur)
    # Four edges joining the front face to the back face.
    cv2.line(image,
             (tr(pt1.x, ox1, offset), tr(pt1.y, oy1, offset)),
             (tr(pt1.x+a2, ox2, offset2), tr(pt1.y+a3, oy2, offset2)),
             color, epaisseur)
    cv2.line(image,
             (tr(pt2.x, ox1, offset), tr(pt2.y, oy1, offset)),
             (tr(pt2.x+a2, ox2, offset2), tr(pt2.y+a3, oy2, offset2)),
             color, epaisseur)
    cv2.line(image,
             (tr(-(pt2.y-pt1.y)+pt2.x, ox1, offset), tr(pt2.x-pt1.x+pt2.y, oy1, offset)),
             (tr(-(pt2.y-pt1.y)+pt2.x+a2, ox2, offset2), tr(pt2.x-pt1.x+pt2.y+a3, oy2, offset2)),
             color, epaisseur)
    cv2.line(image,
             (tr(-(pt2.y-pt1.y)+pt1.x, ox1, offset), tr(pt2.x-pt1.x+pt1.y, oy1, offset)),
             (tr(-(pt2.y-pt1.y)+pt1.x+a2, ox2, offset2), tr(pt2.x-pt1.x+pt1.y+a3, oy2, offset2)),
             color, epaisseur)
# Main loop: detect a face, estimate head pose from three landmarks, draw cube.
while True:
    ret, frame=cap.read()
    # Stop cleanly if the capture fails (camera unplugged / end of video)
    # instead of crashing in cvtColor on a None frame.
    if not ret:
        break
    gray=cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces=detector(gray)
    for face in faces:
        landmarks=predictor(gray, face)
        # Distances between the outer eye corners (points 36, 45) and from
        # each of them to the nose tip (point 30).
        d_eyes=math.sqrt(math.pow(landmarks.part(36).x-landmarks.part(45).x, 2)+math.pow(landmarks.part(36).y-landmarks.part(45).y, 2))
        d1=math.sqrt(math.pow(landmarks.part(36).x-landmarks.part(30).x, 2)+math.pow(landmarks.part(36).y-landmarks.part(30).y, 2))
        d2=math.sqrt(math.pow(landmarks.part(45).x-landmarks.part(30).x, 2)+math.pow(landmarks.part(45).y-landmarks.part(30).y, 2))
        coeff=d1+d2  # normalisation factor: sum of nose-to-eye distances
        # Scaled pose estimates: a1 ~ roll (eye-line tilt), a2 ~ yaw
        # (left/right), a3 ~ pitch (up/down); arbitrary units.
        a1=int(250*(landmarks.part(36).y-landmarks.part(45).y)/coeff)
        a2=int(250*(d1-d2)/coeff)
        # Clamp the cosine to [-1, 1]: floating-point rounding can push the
        # law-of-cosines ratio outside acos's domain (only the upper bound
        # was clamped before, so acos could raise a ValueError).
        cosb=max(min((math.pow(d2, 2)-math.pow(d1, 2)+math.pow(d_eyes, 2))/(2*d2*d_eyes), 1), -1)
        a3=int(250*(d2*math.sin(math.acos(cosb))-coeff/4)/coeff)
        cube(frame, landmarks.part(36), landmarks.part(45), a1, a2, a3)
    cv2.imshow("Frame", frame)
    key=cv2.waitKey(1)&0xFF
    if key==ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
1

Binary file not shown.

After

Width:  |  Height:  |  Size: 27 KiB

View File

@@ -0,0 +1,66 @@
# Gaze/head-direction demo: dependencies, video capture and dlib model setup.
import cv2
import numpy as np
import dlib
import math
# Capture from the default webcam (device 0).
cap=cv2.VideoCapture(0)
# HOG-based frontal face detector and the 68-point facial-landmark model.
detector=dlib.get_frontal_face_detector()
predictor=dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
# Main loop: estimate head pose per face and overlay a French status line.
while True:
    ret, frame=cap.read()
    # Stop cleanly if the capture fails instead of crashing in cvtColor.
    if not ret:
        break
    gray=cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces=detector(gray)
    # NOTE(review): detector() returns a (possibly empty) rectangles object,
    # never None, so this guard is always true; kept for safety.
    if faces is not None:
        # Offscreen canvas for the landmark dots. It is never displayed in
        # this script — presumably leftover from a debugging step.
        i=np.zeros(shape=(frame.shape), dtype=np.uint8)
        for face in faces:
            landmarks=predictor(gray, face)
            # Distances between the outer eye corners (36, 45) and from each
            # of them to the nose tip (30).
            d_eyes=math.sqrt(math.pow(landmarks.part(36).x-landmarks.part(45).x, 2)+math.pow(landmarks.part(36).y-landmarks.part(45).y, 2))
            d1=math.sqrt(math.pow(landmarks.part(36).x-landmarks.part(30).x, 2)+math.pow(landmarks.part(36).y-landmarks.part(30).y, 2))
            d2=math.sqrt(math.pow(landmarks.part(45).x-landmarks.part(30).x, 2)+math.pow(landmarks.part(45).y-landmarks.part(30).y, 2))
            coeff=d1+d2  # normalisation factor
            # a1 ~ roll, a2 ~ yaw, a3 ~ pitch (scaled, arbitrary units).
            a1=int(250*(landmarks.part(36).y-landmarks.part(45).y)/coeff)
            a2=int(250*(d1-d2)/coeff)
            # Clamp to [-1, 1]: rounding can push the ratio outside acos's
            # domain (only the upper bound was clamped before).
            cosb=max(min((math.pow(d2, 2)-math.pow(d1, 2)+math.pow(d_eyes, 2))/(2*d2*d_eyes), 1), -1)
            a3=int(250*(d2*math.sin(math.acos(cosb))-coeff/4)/coeff)
            # Draw the 68 landmarks; highlight the three reference points.
            for n in range(0, 68):
                x=landmarks.part(n).x
                y=landmarks.part(n).y
                if n==30 or n==36 or n==45:
                    cv2.circle(i, (x, y), 3, (255, 255, 0), -1)
                else:
                    cv2.circle(i, (x, y), 3, (255, 0, 0), -1)
            print("{:+05d} {:+05d} {:+05d}".format(a1, a2, a3))
            # Build the status sentence from thresholded pose values.
            flag=1
            txt="Laurent regarde "
            if a2<-40:
                txt+="a droite "
                flag=0
            if a2>40:
                txt+="a gauche "
                flag=0
            if a3<-10:
                txt+="en haut "
                flag=0
            if a3>10:
                txt+="en bas "
                flag=0
            if flag:
                txt+="la camera "
            if a1<-40:
                txt+="et incline la tete a gauche "
            if a1>40:
                txt+="et incline la tete a droite "
            cv2.putText(frame, txt, (10, 30), cv2.FONT_HERSHEY_PLAIN, 1.2, (255, 255, 255), 2)
    cv2.imshow("Frame", frame)
    key=cv2.waitKey(1)&0xFF
    if key==ord('q'):
        break
cap.release()
cv2.destroyAllWindows()

View File

@@ -0,0 +1,35 @@
# Landmark-visualisation demo: dependencies, video capture and model setup.
import cv2
import numpy as np
import dlib
import math
# Capture from the default webcam (device 0).
cap=cv2.VideoCapture(0)
# HOG-based frontal face detector and the 68-point facial-landmark model.
detector=dlib.get_frontal_face_detector()
predictor=dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
# Main loop: draw the 68 landmarks on the frame and on a black canvas.
while True:
    ret, frame=cap.read()
    # Stop cleanly if the capture fails instead of crashing in cvtColor.
    if not ret:
        break
    tickmark=cv2.getTickCount()  # start of the per-frame FPS timer
    gray=cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces=detector(gray)
    # One canvas per frame: it was previously recreated (and shown) inside
    # the per-face loop, so with several faces only the last face's
    # landmarks were visible in the "i" window.
    i=np.zeros(shape=(frame.shape), dtype=np.uint8)
    for face in faces:
        landmarks=predictor(gray, face)
        for n in range(0, 68):
            x=landmarks.part(n).x
            y=landmarks.part(n).y
            cv2.circle(frame, (x, y), 3, (255, 0, 0), -1)
            # Highlight the nose tip (30) and outer eye corners (36, 45).
            if n==30 or n==36 or n==45:
                cv2.circle(i, (x, y), 3, (255, 255, 0), -1)
            else:
                cv2.circle(i, (x, y), 3, (255, 0, 0), -1)
    cv2.imshow("i", i)
    fps=cv2.getTickFrequency()/(cv2.getTickCount()-tickmark)
    cv2.putText(frame, "FPS: {:05.2f}".format(fps), (10, 30), cv2.FONT_HERSHEY_PLAIN, 2, (255, 0, 0), 2)
    cv2.imshow("Frame", frame)
    key=cv2.waitKey(1)&0xFF
    if key==ord('q'):
        break
cap.release()
cv2.destroyAllWindows()

View File

@@ -0,0 +1,27 @@
# Face-detection demo: dependencies, video capture and detector setup.
import cv2
import numpy as np
import dlib
# Capture from the default webcam (device 0).
cap=cv2.VideoCapture(0)
# HOG-based frontal face detector (no landmark model needed here).
detector=dlib.get_frontal_face_detector()
# Main loop: detect faces and draw their bounding boxes with an FPS counter.
while True:
    ret, frame=cap.read()
    # Stop cleanly if the capture fails instead of crashing in cvtColor.
    if not ret:
        break
    tickmark=cv2.getTickCount()  # start of the per-frame FPS timer
    gray=cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces=detector(gray)
    for face in faces:
        # dlib rectangle -> corner coordinates for OpenCV drawing.
        x1=face.left()
        y1=face.top()
        x2=face.right()
        y2=face.bottom()
        cv2.rectangle(frame, (x1, y1), (x2, y2), (255, 0, 0), 2)
    fps=cv2.getTickFrequency()/(cv2.getTickCount()-tickmark)
    cv2.putText(frame, "FPS: {:05.2f}".format(fps), (10, 30), cv2.FONT_HERSHEY_PLAIN, 2, (255, 0, 0), 2)
    cv2.imshow("Frame", frame)
    key=cv2.waitKey(1)&0xFF
    if key==ord('q'):
        break
cap.release()
cv2.destroyAllWindows()