Initial commit

This commit is contained in:
2026-03-31 13:28:59 +02:00
commit 7ec43ca17d
314 changed files with 189852 additions and 0 deletions

View File

@@ -0,0 +1,6 @@
# Tutoriel OpenCV
## Détection d'objet par soustraction
La vidéo du tutoriel est à l'adresse:
https://www.youtube.com/watch?v=pkzT9MlICPE

Binary file not shown.

View File

@@ -0,0 +1,48 @@
# Tutorial: vehicle detection on a highway video by background subtraction.
# The background is the mean of the first 100 frames; moving blobs are the
# pixels that differ from it by more than `seuil`.
# NOTE(review): indentation was lost in this extraction; the control-flow
# structure below is flattened and must be restored before running.
import cv2
import numpy as np
import common
video='autoroute.mp4'
image_fond="img-0.png"
color_infos=(0, 0, 255)  # BGR red used for all overlays
nbr_old=0  # blob count of the previous frame
vehicule=0  # running vehicle count
seuil=10  # per-pixel difference threshold for the motion mask
# Background estimate: mean of the first 100 grayscale frames (float array).
fond=common.moyenne_image(video, 100)
cv2.imshow('fond', fond.astype(np.uint8))
cap=cv2.VideoCapture(video)
while True:
ret, frame=cap.read()
# NOTE(review): `ret` is not checked here; `frame` is None once the video
# ends — confirm intended behavior before reuse.
tickmark=cv2.getTickCount()  # start of FPS measurement
mask=common.calcul_mask(frame, fond, seuil)
# [-2] keeps the contour list across the OpenCV 3/4 return-signature change.
elements=cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
nbr=0
for e in elements:
((x, y), rayon)=cv2.minEnclosingCircle(e)
if rayon>20:  # ignore small blobs (noise)
cv2.circle(frame, (int(x), int(y)), 5, color_infos, 10)
nbr+=1
# A vehicle is counted when the number of blobs increases.
if nbr>nbr_old:
vehicule+=1
nbr_old=nbr
fps=cv2.getTickFrequency()/(cv2.getTickCount()-tickmark)
cv2.putText(frame, "FPS: {:05.2f} Seuil: {:d}".format(fps, seuil), (10, 30), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, color_infos, 1)
cv2.imshow('video', frame)
cv2.imshow('mask', mask)
key=cv2.waitKey(1)&0xFF
if key==ord('q'):  # quit
break
if key==ord('p'):  # raise the mask threshold
seuil+=1
if key==ord('m'):  # lower the mask threshold
seuil-=1
if key==ord('a'):  # skip ahead 20 frames
for cpt in range(20):
ret, frame=cap.read()
cap.release()
cv2.destroyAllWindows()

View File

@@ -0,0 +1,29 @@
import cv2
import numpy as np
def moyenne_image(video, nbr):
    """Return the per-pixel mean of the first `nbr` grayscale frames of `video`.

    Parameters:
        video: path of the video file to sample.
        nbr: maximum number of frames to average; fewer are used if the
             video is shorter.

    Returns:
        A float64 2-D array (height x width) — the background estimate.
    """
    cap = cv2.VideoCapture(video)
    tab_image = []
    for _ in range(nbr):
        ret, frame = cap.read()
        if not ret:  # video shorter than `nbr` frames
            break
        tab_image.append(cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY))
    cap.release()
    # Stack frames and average along the frame axis.
    return np.mean(np.array(tab_image), axis=0)
def calcul_mask(image, fond, seuil):
    """Binary motion mask: 255 where |fond - frame| > seuil, else 0.

    Parameters:
        image: BGR frame (uint8).
        fond: grayscale background estimate (float or int array, same
              height/width as `image`).
        seuil: per-pixel absolute-difference threshold.

    Returns:
        A uint8 mask (values 0/255) cleaned by one 5x5 erosion and three
        5x5 dilations.
    """
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY).astype(np.int32)
    # Vectorized threshold — replaces the original per-pixel Python double
    # loop with an identical but orders-of-magnitude faster numpy expression.
    mask = (np.abs(fond - gray) > seuil).astype(np.uint8) * 255
    kernel = np.ones((5, 5), np.uint8)
    # Erode once to drop isolated noise pixels, then dilate to merge blobs.
    mask = cv2.erode(mask, kernel, iterations=1)
    mask = cv2.dilate(mask, kernel, iterations=3)
    return mask

View File

@@ -0,0 +1,52 @@
# Tutorial: vehicle counting by background subtraction inside a fixed region
# of interest (ROI), with the running count drawn in a box next to the ROI.
# NOTE(review): indentation was lost in this extraction; restore the
# control-flow structure before running.
import cv2
import numpy as np
import common
color_infos=(0, 0, 255)
# ROI bounds (pixels) covering the traffic lanes.
xmin=90
xmax=510
ymin=315
ymax=360
video='autoroute.mp4'
nbr_old=0  # blob count of the previous frame
vehicule=0  # running vehicle count
seuil=10  # per-pixel difference threshold
# Background: mean of the first 500 frames, cropped to the ROI.
fond=common.moyenne_image(video, 500)
fond=fond[ymin:ymax, xmin:xmax]
cv2.imshow('fond', fond.astype(np.uint8))
fond=fond.astype(np.int32)
cap=cv2.VideoCapture(video)
while True:
ret, frame=cap.read()
# NOTE(review): `ret` is not checked; `frame` is None at end of video.
tickmark=cv2.getTickCount()
mask=common.calcul_mask(frame[ymin:ymax, xmin:xmax], fond, seuil)
# [-2] keeps the contour list across the OpenCV 3/4 signature change.
elements=cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
nbr=0
for e in elements:
((x, y), rayon)=cv2.minEnclosingCircle(e)
if rayon>20:  # ignore small blobs (noise)
# Blob coordinates are ROI-relative; shift back into frame coordinates.
cv2.circle(frame, (int(x)+xmin, int(y)+ymin), 5, color_infos, 10)
nbr+=1
# A vehicle is counted when the number of blobs increases.
if nbr>nbr_old:
vehicule+=1
nbr_old=nbr
fps=cv2.getTickFrequency()/(cv2.getTickCount()-tickmark)
cv2.putText(frame, "FPS: {:05.2f} Seuil: {:d}".format(fps, seuil), (10, 30), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, color_infos, 1)
# ROI outline plus a filled box that shows the vehicle counter.
cv2.rectangle(frame, (xmin, ymin), (xmax+120, ymax), (255, 0, 0), 5)
cv2.rectangle(frame, (xmax, ymin), (xmax+120, ymax), (255, 0, 0), cv2.FILLED)
cv2.putText(frame, "{:04d}".format(vehicule), (xmax+10, ymin+35), cv2.FONT_HERSHEY_COMPLEX_SMALL, 2, (255, 255, 255), 2)
cv2.imshow('video', frame)
cv2.imshow('mask', mask)
key=cv2.waitKey(1)&0xFF
if key==ord('q'):  # quit
break
if key==ord('p'):  # raise the mask threshold
seuil+=1
if key==ord('m'):  # lower the mask threshold
seuil-=1
cap.release()
cv2.destroyAllWindows()

View File

@@ -0,0 +1,87 @@
# Tutorial: per-lane vehicle counting — three "sensor" zones, one per lane,
# each counted on its own occupied/free transition.
# NOTE(review): indentation of this file was lost in extraction.
import cv2
import numpy as np
import common
color_infos=(0, 0, 255)
# Common vertical band shared by the three sensor zones.
ymin=315
ymax=360
# Horizontal bounds of lane sensors 1..3.
xmin1=110
xmax1=190
xmin2=250
xmax2=330
xmin3=380
xmax3=460
video='autoroute.mp4'
vehicule1=0
vehicule2=0
vehicule3=0
seuil=10  # per-pixel difference threshold for the motion mask
seuil2=100  # mean mask level above which a zone counts as "occupied"
# Background: mean of the first 500 frames, cropped to the sensor band.
fond=common.moyenne_image(video, 500)
fond=fond[ymin:ymax, xmin1:xmax3]
cv2.imshow('fond', fond.astype(np.uint8))
fond=fond.astype(np.int32)
cap=cv2.VideoCapture(video)
def calcul_mean(image):
    """Return the mean pixel value of a 2-D image as a float.

    Replaces the original O(h*w) Python double loop with the equivalent
    — and much faster — numpy reduction; the result is identical.
    """
    return float(np.mean(image))
# Occupancy latches: 1 while the matching zone is occupied, 0 otherwise.
# A vehicle is counted on the 0 -> 1 transition of its zone.
old_1=0
old_2=0
old_3=0
# NOTE(review): indentation of the loop below was lost in extraction.
while True:
ret, frame=cap.read()
# NOTE(review): `ret` is not checked; `frame` is None at end of video.
tickmark=cv2.getTickCount()
mask=common.calcul_mask(frame[ymin:ymax, xmin1:xmax3], fond, seuil)
# Zone 1: mean mask intensity above seuil2 means a vehicle is present.
if calcul_mean(mask[0:ymax-ymin, 0:xmax1-xmin1])> seuil2:
if old_1==0:
vehicule1+=1
old_1=1
else:
old_1=0
# Zone 2 (x offsets are relative to the cropped mask, hence -xmin1).
if calcul_mean(mask[0:ymax-ymin, xmin2-xmin1:xmax2-xmin1])> seuil2:
if old_2==0:
vehicule2+=1
old_2=1
else:
old_2=0
# Zone 3.
if calcul_mean(mask[0:ymax-ymin, xmin3-xmin1:xmax3-xmin1])> seuil2:
if old_3==0:
vehicule3+=1
old_3=1
else:
old_3=0
fps=cv2.getTickFrequency()/(cv2.getTickCount()-tickmark)
cv2.putText(frame, "FPS: {:05.2f} Seuil: {:d}".format(fps, seuil), (10, 30), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, color_infos, 1)
cv2.putText(frame, "{:04d} {:04d} {:04d}".format(vehicule1, vehicule2, vehicule3), (xmin1, ymin-10), cv2.FONT_HERSHEY_COMPLEX_SMALL, 2, (255, 255, 255), 2)
# Occupied zones are drawn red, free zones blue.
cv2.rectangle(frame, (xmin1, ymin), (xmax1, ymax), (0, 0, 255) if old_1 else (255, 0, 0), 3)
cv2.rectangle(frame, (xmin2, ymin), (xmax2, ymax), (0, 0, 255) if old_2 else (255, 0, 0), 3)
cv2.rectangle(frame, (xmin3, ymin), (xmax3, ymax), (0, 0, 255) if old_3 else (255, 0, 0), 3)
cv2.imshow('video', frame)
cv2.imshow('mask', mask)
key=cv2.waitKey(1)&0xFF
if key==ord('q'):  # quit
break
if key==ord('p'):  # raise the mask threshold
seuil+=1
if key==ord('m'):  # lower the mask threshold
seuil-=1
cap.release()
cv2.destroyAllWindows()

View File

@@ -0,0 +1,9 @@
# Preview the averaged background: mean of the first 100 frames of the video,
# shown until any key is pressed.
import cv2
import numpy as np
import common
image=common.moyenne_image('autoroute.mp4', 100)
cv2.imshow('fond', image.astype(np.uint8))
cv2.waitKey()  # block until any key before closing the window
# Bug fix: the original called cap.release() here, but `cap` is never defined
# in this script (the capture is opened and released inside
# common.moyenne_image), so the call raised a NameError on exit.
cv2.destroyAllWindows()

View File

@@ -0,0 +1,7 @@
# Tutoriel OpenCV
## Détection de ligne avec Canny
Vous devez récupérer une vidéo car je ne peux pas la mettre sur le github.
La vidéo du tutoriel est à l'adresse:
https://www.youtube.com/watch?v=bCElBNENCy4

View File

@@ -0,0 +1,53 @@
# Tutorial: interactive Canny edge detection on a video, with keyboard
# control of both Canny thresholds and of the blur kernel size.
# NOTE(review): indentation was lost in this extraction; restore the
# control-flow structure before running.
import cv2
import numpy as np
th1=75  # Canny low threshold
th2=150  # Canny high threshold
stop=0  # pause flag
k=3  # blur kernel size (kept odd; 1 = no blur)
cap=cv2.VideoCapture("autoroute.mp4")
while True:
if not stop:
ret, frame=cap.read()
if ret is False:
quit()
image=frame.copy()
cv2.putText(image, "[u|j]th1: {:d} [i|k]th2: {:d} [y|h]blur: {:d}".format(th1, th2, k), (10, 40), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 255, 255), 1)
cv2.putText(image, "[a]>> [s]stop [q]quit", (10, 70), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 255, 255), 1)
cv2.imshow("image", image)
gray=cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
if k!=1:
gray=cv2.blur(gray, (k, k))
gray_canny=cv2.Canny(gray, th1, th2)
cv2.imshow("blur", gray)
cv2.imshow("canny", gray_canny)
# When paused, block until a key is pressed instead of polling.
if not stop:
key=cv2.waitKey(10)&0xFF
else:
key=cv2.waitKey()
image=frame.copy()
if key==ord('q'):  # quit
break
if key==ord('y'):  # bigger blur kernel (step 2 keeps it odd)
k=min(255, k+2)
if key==ord('h'):  # smaller blur kernel
k=max(1, k-2)
if key==ord('u'):  # raise low threshold
th1=min(255, th1+1)
if key==ord('j'):  # lower low threshold
th1=max(0, th1-1)
if key==ord('i'):  # raise high threshold
th2=min(255, th2+1)
if key==ord('k'):  # lower high threshold
th2=max(0, th2-1)
if key==ord('s'):  # toggle pause
stop=not stop
if key==ord('a'):  # fast-forward 200 frames
for cpt in range(200):
ret, frame=cap.read()
image=frame.copy()
cap.release()
cv2.destroyAllWindows()

View File

@@ -0,0 +1,145 @@
# Tutorial: lane-keeping demo — two one-pixel-high Canny "sensor" strips
# locate the lane markings left and right of the car; an arrow shows the
# correction to apply.
import cv2
import numpy as np
import time
# Vertical band of the sensor strips (one pixel high: ymax = ymin + 1).
ymin=320
ymax=321
# Left sensor strip x-bounds.
xmin1=80
xmax1=170
# Right sensor strip x-bounds.
xmin2=410
xmax2=500
def point(capteur):
    """Return the midpoint index of the non-zero run in a 1-D edge row.

    `capteur` is one row of a Canny output (0 where there is no edge).
    Finds the first and last non-zero indices and returns their integer
    midpoint, or -1 when the row contains no edge at all.

    Bug fix: the original used `len(capteur) - 1` as its "not found"
    sentinel, so an edge located only at the very last index was wrongly
    reported as "no edge" (-1). Using a real sentinel (-1) removes that
    ambiguity while keeping every other result identical.
    """
    premier = -1
    for i, v in enumerate(capteur):
        if v != 0:
            premier = i
            break
    if premier == -1:  # no edge pixel in this row
        return -1
    dernier = premier
    # Scan backwards for the last edge pixel at or after `premier`.
    for i in range(len(capteur) - 1, premier - 1, -1):
        if capteur[i] != 0:
            dernier = i
            break
    return (premier + dernier) // 2
# Last known marking positions and the time they were seen; a stale position
# is reused (drawn yellow) for up to one second after detection is lost.
s1_old=0
s2_old=0
s1=0
s2=0
s1_time=0
s2_time=0
th1=75  # Canny low threshold
th2=150  # Canny high threshold
stop=0  # pause flag
k=1  # blur kernel size (1 = no blur)
cap=cv2.VideoCapture("autoroute.mp4")
# NOTE(review): indentation of the loop below was lost in extraction;
# restore the structure before running.
while True:
if not stop:
ret, frame=cap.read()
if ret is False:
quit()
image=frame.copy()
# Left sensor strip: grayscale, optional blur, Canny edges.
gray1=cv2.cvtColor(image[ymin:ymax, xmin1:xmax1], cv2.COLOR_BGR2GRAY)
if k!=1:
gray1=cv2.blur(gray1, (k, k))
capteur1=cv2.Canny(gray1, th1, th2)
# Right sensor strip, same processing.
gray2=cv2.cvtColor(image[ymin:ymax, xmin2:xmax2], cv2.COLOR_BGR2GRAY)
if k!=1:
gray2=cv2.blur(gray2, (k, k))
capteur2=cv2.Canny(gray2, th1, th2)
cv2.rectangle(image, (xmin1, ymin), (xmax1, ymax), (0, 0, 255), 1)
cv2.rectangle(image, (xmin2, ymin), (xmax2, ymax), (0, 0, 255), 1)
# Left marking: green dot when detected now, yellow when reusing a
# position seen less than one second ago, otherwise lost (-1).
s1=point(capteur1[0])
if s1!=-1:
cv2.circle(image, (s1+xmin1, ymin), 3, (0, 255, 0), 3)
s1_old=s1
s1_time=time.time()
else:
if time.time()-s1_time<1:
cv2.circle(image, (s1_old+xmin1, ymin), 3, (100, 255, 255), 3)
s1=s1_old
else:
s1=-1
# Right marking, same logic.
s2=point(capteur2[0])
if s2!=-1:
cv2.circle(image, (s2+xmin2, ymin), 3, (0, 255, 0), 3)
s2_old=s2
s2_time=time.time()
else:
if time.time()-s2_time<1:
cv2.circle(image, (s2_old+xmin2, ymin), 3, (100, 255, 255), 3)
s2=s2_old
else:
s2=-1
# Both markings visible: compare left offset s1 with the mirrored right
# offset s2_; draw "OK" when centered, otherwise a colored arrow whose
# color shifts green -> red with the size of the error.
if s1!=-1 and s2!=-1:
s2_=abs(xmax2-xmin2-s2)
if abs(s2_-s1)>20:
c=(0, max(0, 255-10*int(abs(s1-s2_)/2)), min(255, 10*int(abs(s1-s2_)/2)))
cv2.circle(image, (int((xmax2-xmin1)/2)+xmin1, ymax-25), 5, c, 7)
cv2.arrowedLine(image, (int((xmax2-xmin1)/2)+xmin1, ymax-25), (int((xmax2-xmin1)/2)+xmin1+2*int((s1-s2_)/2), ymax-25), c, 3, tipLength=0.4)
else:
cv2.putText(image, "OK", (int((xmax2-xmin1)/2)+xmin1-15, ymax-16), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 255, 0), 1)
cv2.putText(image, "[u|j]th1: {:d} [i|k]th2: {:d} [y|h]blur: {:d}".format(th1, th2, k), (10, 40), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 255, 255), 1)
cv2.putText(image, "[a]>> [s]stop [q]quit", (10, 70), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 255, 255), 1)
cv2.imshow("image", image)
# Debug views: full-frame blur and Canny output.
gray=cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
if k!=1:
gray=cv2.blur(gray, (k, k))
cv2.imshow("blur", gray)
gray_canny=cv2.Canny(gray, th1, th2)
cv2.imshow("canny", gray_canny)
if not stop:
key=cv2.waitKey(20)&0xFF
else:
key=cv2.waitKey()
image=frame.copy()
if key==ord('q'):  # quit
break
if key==ord('m'):  # move the sensor band down
ymin+=1
ymax+=1
if key==ord('p'):  # move the sensor band up
ymin-=1
ymax-=1
if key==ord('o'):  # shift both sensors right
xmin1+=1
xmax1+=1
xmin2+=1
xmax2+=1
if key==ord('l'):  # shift both sensors left
xmin1-=1
xmax1-=1
xmin2-=1
xmax2-=1
if key==ord('y'):  # bigger blur kernel (kept odd)
k=min(255, k+2)
if key==ord('h'):  # smaller blur kernel
k=max(1, k-2)
if key==ord('u'):
th1=min(255, th1+1)
if key==ord('j'):
th1=max(0, th1-1)
if key==ord('i'):
th2=min(255, th2+1)
if key==ord('k'):
th2=max(0, th2-1)
if key==ord('s'):  # toggle pause
stop=not stop
if key==ord('a'):  # fast-forward 200 frames
for cpt in range(200):
ret, frame=cap.read()
image=frame.copy()
cap.release()
cv2.destroyAllWindows()

View File

@@ -0,0 +1,37 @@
# Tutorial: Sobel gradients (x and y) displayed live on a video.
# NOTE(review): indentation was lost in this extraction; restore it before
# running.
import cv2
import numpy as np
stop=0  # pause flag
cap=cv2.VideoCapture("autoroute.mp4")
while True:
if not stop:
ret, frame=cap.read()
if ret is False:
quit()
image=frame.copy()
cv2.imshow("image", image)
gray=cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# Horizontal derivative (dx=1): responds to vertical edges.
grad_x=cv2.Sobel(gray, cv2.CV_8U, 1, 0, ksize=5)
cv2.imshow("grad x", grad_x)
# Vertical derivative (dy=1): responds to horizontal edges.
grad_y=cv2.Sobel(gray, cv2.CV_8U, 0, 1, ksize=5)
cv2.imshow("grad y", grad_y)
# When paused, block until a key is pressed instead of polling.
if not stop:
key=cv2.waitKey(10)&0xFF
else:
key=cv2.waitKey()
image=frame.copy()
if key==ord('q'):  # quit
break
if key==ord('s'):  # toggle pause
stop=not stop
if key==ord('a'):  # fast-forward 200 frames
for cpt in range(200):
ret, frame=cap.read()
image=frame.copy()
cap.release()
cv2.destroyAllWindows()

View File

@@ -0,0 +1,6 @@
# Tutoriel OpenCV
## Détection de mouvement
La vidéo du tutoriel est à l'adresse:
https://www.youtube.com/watch?v=731tP7t1BOc

View File

@@ -0,0 +1,66 @@
# Tutorial: motion detection by consecutive-frame differencing, with a
# latched "ALARME" banner and per-frame "INTRUS" labels on detected objects.
# NOTE(review): indentation was lost in this extraction; restore it before
# running.
import os
import sys
import time
import numpy as np
import cv2
cap=cv2.VideoCapture("chien.mp4") # Use your own video or webcam!
kernel_blur=3  # Gaussian blur kernel size (kept odd)
seuil=15  # binarization threshold on the frame difference
surface=6000  # minimum contour area considered an intrusion
ret, originale=cap.read()
if ret is False:
quit()
# Reference frame: grayscale + blur (replaced every iteration below, so the
# difference is always against the previous frame).
originale=cv2.cvtColor(originale, cv2.COLOR_BGR2GRAY)
originale=cv2.GaussianBlur(originale, (kernel_blur, kernel_blur), 0)
kernel_dilate=np.ones((5, 5), np.uint8)
alarme=0  # latched until 'a' is pressed
intrus=0  # per-frame flag, reset at the bottom of the loop
while True:
ret, frame=cap.read()
if ret is False:
quit()
gray=cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
gray_blur=cv2.GaussianBlur(gray, (kernel_blur, kernel_blur), 0)
# Absolute difference with the previous frame, thresholded then dilated.
mask=cv2.absdiff(originale, gray_blur)
mask=cv2.threshold(mask, seuil, 255, cv2.THRESH_BINARY)[1]
mask=cv2.dilate(mask, kernel_dilate, iterations=3)
contours, nada=cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
frame_contour=frame.copy()
for c in contours:
if cv2.contourArea(c)<surface:  # too small: ignore
continue
cv2.drawContours(frame_contour, [c], 0, (0, 255, 0), 2)
x, y, w, h=cv2.boundingRect(c)
alarme=1
intrus=1
cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 0, 255), 2)
# (x, y) here come from the last large contour of the loop above.
if intrus:
cv2.putText(frame, "INTRUS", (x, y-20), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 255), 2)
originale=gray_blur  # next difference is against the current frame
if alarme:
cv2.putText(frame, "ALARME", (10, 60), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 255), 2)
cv2.putText(frame, "[o|l]seuil: {:d} [p|m]blur: {:d} [i|k]surface: {:d}".format(seuil, kernel_blur, surface), (10, 30), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 255, 255), 2)
cv2.imshow("frame", frame)
cv2.imshow("Mask", mask)
intrus=0
key=cv2.waitKey(30)&0xFF
if key==ord('q'):  # quit
break
if key==ord('p'):  # bigger blur kernel (step 2 keeps it odd)
kernel_blur=min(43, kernel_blur+2)
if key==ord('m'):  # smaller blur kernel
kernel_blur=max(1, kernel_blur-2)
if key==ord('o'):  # raise difference threshold
seuil=min(255, seuil+1)
if key==ord('l'):  # lower difference threshold
seuil=max(1, seuil-1)
if key==ord('i'):  # raise minimum area
surface+=1000
if key==ord('k'):  # lower minimum area
surface=max(1000, surface-1000)
if key==ord('a'):  # acknowledge / reset the alarm
alarme=0
cap.release()
cv2.destroyAllWindows()

View File

@@ -0,0 +1,55 @@
# Tutorial: basic motion detection on the webcam by consecutive-frame
# differencing; large moving contours get a red bounding box.
# NOTE(review): indentation was lost in this extraction; restore it before
# running.
import os
import sys
import time
import numpy as np
import cv2
cap=cv2.VideoCapture(0)
#cap=cv2.VideoCapture("chien.mp4")
kernel_blur=5  # Gaussian blur kernel size (kept odd)
seuil=15  # binarization threshold on the frame difference
surface=1000  # minimum contour area to get a bounding box
ret, originale=cap.read()
# NOTE(review): `ret` is not checked here or in the loop; `frame` is None if
# the capture fails.
originale=cv2.cvtColor(originale, cv2.COLOR_BGR2GRAY)
originale=cv2.GaussianBlur(originale, (kernel_blur, kernel_blur), 0)
kernel_dilate=np.ones((5, 5), np.uint8)
while True:
ret, frame=cap.read()
gray=cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
gray=cv2.GaussianBlur(gray, (kernel_blur, kernel_blur), 0)
# Difference with the previous frame, thresholded then dilated.
mask=cv2.absdiff(originale, gray)
mask=cv2.threshold(mask, seuil, 255, cv2.THRESH_BINARY)[1]
mask=cv2.dilate(mask, kernel_dilate, iterations=3)
contours, nada=cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
frame_contour=frame.copy()
for c in contours:
# All contours are drawn on the debug view; only large ones get a box.
cv2.drawContours(frame_contour, [c], 0, (0, 255, 0), 5)
if cv2.contourArea(c)<surface:
continue
x, y, w, h=cv2.boundingRect(c)
cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 0, 255), 2)
originale=gray  # next difference is against the current frame
cv2.putText(frame, "[o|l]seuil: {:d} [p|m]blur: {:d} [i|k]surface: {:d}".format(seuil, kernel_blur, surface), (10, 30), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 255, 255), 2)
cv2.imshow("frame", frame)
cv2.imshow("contour", frame_contour)
cv2.imshow("mask", mask)
intrus=0
key=cv2.waitKey(30)&0xFF
if key==ord('q'):  # quit
break
if key==ord('p'):  # bigger blur kernel
kernel_blur=min(43, kernel_blur+2)
if key==ord('m'):  # smaller blur kernel
kernel_blur=max(1, kernel_blur-2)
if key==ord('i'):  # raise minimum area
surface+=1000
if key==ord('k'):  # lower minimum area
surface=max(1000, surface-1000)
if key==ord('o'):  # raise difference threshold
seuil=min(255, seuil+1)
if key==ord('l'):  # lower difference threshold
seuil=max(1, seuil-1)
cap.release()
cv2.destroyAllWindows()

View File

@@ -0,0 +1,6 @@
# Tutoriel OpenCV
## Détection d'objet avec calcBackProject
La vidéo du tutoriel est à l'adresse:
https://www.youtube.com/watch?v=nMJertuuO08

View File

@@ -0,0 +1,63 @@
# Tutorial: object detection with calcBackProject — double-click the Camera
# window to select a ROI; its hue histogram is then back-projected on every
# frame and the largest matching contour is boxed.
# NOTE(review): indentation was lost in this extraction; restore it before
# running.
import cv2
import numpy as np
from matplotlib import pyplot as plt
nbr_classes=180  # hue bins (OpenCV hue range is 0..179)
seuil=30  # threshold on the back-projection
objet=0  # set once a ROI has been selected
def click(event, x, y, flags, param):
# Mouse callback: on double-click, let the user select a ROI and build
# its normalized hue histogram for back-projection.
global roi_hist, frame, objet
if event==cv2.EVENT_LBUTTONDBLCLK:
roi_x, roi_y, roi_w, roi_h=cv2.selectROI('ROI', frame, False, False)
roi=frame[roi_y: roi_y + roi_h, roi_x: roi_x + roi_w]
hsv_roi=cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
roi_hist=cv2.calcHist([hsv_roi], [0], None, [nbr_classes], [0, nbr_classes])
cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)
cv2.destroyWindow('ROI')
# Show the selected histogram without blocking the video loop.
plt.clf()
plt.plot(roi_hist)
plt.show(block=False)
plt.pause(0.01)
objet=1
video=cv2.VideoCapture(0)
cv2.namedWindow('Camera')
cv2.setMouseCallback('Camera', click)
while True:
ret, frame=video.read()
if ret is False:
quit()
if objet:
hsv=cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
# Probability map of each pixel matching the ROI hue histogram.
mask=cv2.calcBackProject([hsv], [0], roi_hist, [0, nbr_classes], 1)
_, mask2=cv2.threshold(mask, seuil, 255, cv2.THRESH_BINARY)
mask2=cv2.erode(mask2, None, iterations=3)
mask2=cv2.dilate(mask2, None, iterations=3)
cv2.imshow("Mask2", mask2)
image2=cv2.bitwise_and(frame, frame, mask=mask2)
cv2.imshow("Image2", image2)
# [-2] keeps the contour list across the OpenCV 3/4 signature change.
elements=cv2.findContours(mask2, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
if len(elements) > 0:
# Box the largest matching contour.
c=max(elements, key=cv2.contourArea)
x,y,w,h=cv2.boundingRect(c)
cv2.rectangle(frame, (x,y), (x+w,y+h), (0,255,0), 2)
cv2.imshow("Mask", mask)
cv2.putText(frame, "seuil[p|m]: {:d}".format(seuil), (10, 40), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 0), 1)
cv2.imshow("Camera", frame)
key=cv2.waitKey(1)
if key==ord('q'):  # quit
quit()
if key==ord('p'):  # raise back-projection threshold
seuil=min(250, seuil+1)
if key==ord('m'):  # lower back-projection threshold
seuil=max(1, seuil-1)
video.release()
cv2.destroyAllWindows()

View File

@@ -0,0 +1,69 @@
# Tutorial: back-projection on two channels (hue + saturation) — double-click
# to select a ROI; a 2-D H/S histogram is back-projected on every frame.
# NOTE(review): indentation was lost in this extraction; restore it before
# running.
import cv2
import numpy as np
from matplotlib import pyplot as plt
objet=0  # set once a ROI has been selected
nbr_classes=180  # hue bins (OpenCV hue range is 0..179)
seuil=30  # threshold on the back-projection
v1=0  # lower bound of the saturation range used for back-projection
def click(event, x, y, flags, param):
# Mouse callback: on double-click, select a ROI and build its 2-D
# hue/saturation histogram for back-projection.
global roi_hist, frame, objet
if event==cv2.EVENT_LBUTTONDBLCLK:
roi_x, roi_y, roi_w, roi_h=cv2.selectROI('ROI', frame, False, False)
roi=frame[roi_y: roi_y + roi_h, roi_x: roi_x + roi_w]
hsv_roi=cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
roi_hist=cv2.calcHist([hsv_roi], [0, 1], None, [nbr_classes, 256], [0, nbr_classes, 0, 256])
# Normalize the first two rows of the 2-D histogram for display.
cv2.normalize(roi_hist[0, :], roi_hist[0, :], 0, 255, cv2.NORM_MINMAX)
cv2.normalize(roi_hist[1, :], roi_hist[1, :], 0, 255, cv2.NORM_MINMAX)
cv2.destroyWindow('ROI')
plt.clf()
plt.plot(roi_hist[0, :])
plt.plot(roi_hist[1, :])
plt.show(block=False)
plt.pause(0.01)
objet=1
video=cv2.VideoCapture(0)
cv2.namedWindow('Camera')
cv2.setMouseCallback('Camera', click)
while True:
ret, frame=video.read()
if ret is False:
quit()
if objet:
hsv=cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
# v1 raises the lower saturation bound to reject washed-out pixels.
mask=cv2.calcBackProject([hsv], [0, 1], roi_hist, [0, nbr_classes, v1, 256], 1)
_, mask2=cv2.threshold(mask, seuil, 255, cv2.THRESH_BINARY)
mask2=cv2.erode(mask2, None, iterations=3)
mask2=cv2.dilate(mask2, None, iterations=5)
# [-2] keeps the contour list across the OpenCV 3/4 signature change.
elements=cv2.findContours(mask2, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
if len(elements) > 0:
c=max(elements, key=cv2.contourArea)
x,y,w,h=cv2.boundingRect(c)
cv2.rectangle(frame, (x,y), (x+w,y+h), (0, 255, 0), 2)
cv2.imshow("Mask", mask)
cv2.imshow("Mask2", mask2)
image2=cv2.bitwise_and(frame, frame, mask=mask2)
cv2.imshow("Image2", image2)
cv2.putText(frame, "seuil[p|m]: {:d} v1[o|l]: {:d}".format(seuil, v1), (10, 40), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 0), 1)
cv2.imshow("Camera", frame)
key=cv2.waitKey(5)
if key==ord('q'):  # quit
quit()
if key==ord('p'):  # raise back-projection threshold
seuil=min(250, seuil+1)
if key==ord('m'):  # lower back-projection threshold
seuil=max(1, seuil-1)
if key==ord('o'):  # raise saturation lower bound
v1=min(250, v1+1)
if key==ord('l'):  # lower saturation lower bound
v1=max(0, v1-1)
video.release()
cv2.destroyAllWindows()

View File

@@ -0,0 +1,44 @@
# Tutorial: live histogram of the webcam — either grayscale intensity or HSV
# hue, plotted with matplotlib alongside the video window.
# NOTE(review): indentation was lost in this extraction; restore it before
# running.
import cv2
import numpy as np
from matplotlib import pyplot as plt
video=cv2.VideoCapture(0)
mode=0  # 0 = hue histogram, 1 = intensity histogram
nbr_classes=180  # number of histogram bins
while True:
ret, frame=video.read()
if ret is False:
quit()
if mode:
frame=cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
hist=cv2.calcHist([frame], [0], None, [nbr_classes], [0, 256])
label="Intensité"
else:
hsv=cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
# OpenCV hue values span 0..179.
hist=cv2.calcHist([hsv], [0], None, [nbr_classes], [0, 180])
label="Couleur"
cv2.normalize(hist, hist, 0, 255, cv2.NORM_MINMAX)
cv2.putText(frame, "Mode[o] {:d} Nbr classes[p|m]: {:d}".format(mode, nbr_classes), (10, 30), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 255), 1)
plt.clf()
plt.plot(hist)
plt.title(label)
plt.show(block=False)
cv2.imshow("image", frame)
plt.pause(0.001)
key=cv2.waitKey(5)&0xFF
if key==ord('q'):  # quit
quit()
if key==ord('o'):  # toggle intensity/hue mode; reset bin count
mode=not mode
# NOTE(review): 255 looks like it should be 256 for intensity bins —
# confirm against the tutorial.
nbr_classes=(255 if mode else 180)
if key==ord('p'):  # double the bin count
nbr_classes*=2
if key==ord('m'):  # halve the bin count
nbr_classes//=2
video.release()
cv2.destroyAllWindows()

View File

@@ -0,0 +1,6 @@
# Tutoriel OpenCV
## Suivi d'objet avec meanShift et CamShift
La vidéo du tutoriel est à l'adresse:
https://www.youtube.com/watch?v=c4msQJACeX0

View File

@@ -0,0 +1,62 @@
# Tutorial: object tracking with meanShift and CamShift — double-click to
# select a ROI; its hue back-projection drives the tracker. Key 'o' toggles
# between the two algorithms.
# NOTE(review): indentation was lost in this extraction; restore it before
# running.
import cv2
import numpy as np
from matplotlib import pyplot as plt
objet=0  # set once a ROI has been selected
nbr_classes=180  # hue bins (OpenCV hue range is 0..179)
seuil=30  # threshold on the back-projection
# Stop after 100 iterations or when the window moves by less than 1 pixel.
term_criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 100, 1.0)
mode=0  # 0 = meanShift, 1 = CamShift
def click(event, x, y, flags, param):
# Mouse callback: on double-click, select a ROI and build its normalized
# hue histogram; the ROI box becomes the tracker's initial window.
global roi_x, roi_y, roi_w, roi_h, roi_hist, frame, objet
if event==cv2.EVENT_LBUTTONDBLCLK:
roi_x, roi_y, roi_w, roi_h=cv2.selectROI('ROI', frame, False, False)
roi=frame[roi_y: roi_y + roi_h, roi_x: roi_x + roi_w]
hsv_roi=cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
roi_hist=cv2.calcHist([hsv_roi], [0], None, [nbr_classes], [0, nbr_classes])
cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)
cv2.destroyWindow('ROI')
plt.clf()
plt.plot(roi_hist)
plt.show(block=False)
plt.pause(0.01)
objet=1
video=cv2.VideoCapture(0)
cv2.namedWindow('Camera')
cv2.setMouseCallback('Camera', click)
while True:
ret, frame=video.read()
# NOTE(review): `ret` is not checked; `frame` is None if the capture fails.
if objet:
hsv=cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
mask=cv2.calcBackProject([hsv], [0], roi_hist, [0, nbr_classes], 1)
_, mask=cv2.threshold(mask, seuil, 255, cv2.THRESH_BINARY)
mask=cv2.erode(mask, None, iterations=3)
mask=cv2.dilate(mask, None, iterations=3)
if mode:
# CamShift returns a rotated rectangle (here `_`) plus the new window.
_, rect=cv2.CamShift(mask, (roi_x, roi_y, roi_w, roi_h), term_criteria)
pts=cv2.boxPoints(_)
pts=np.int0(pts)
img2=cv2.polylines(frame, [pts], True, (255, 255, 255), 2)
else:
_, rect=cv2.meanShift(mask, (roi_x, roi_y, roi_w, roi_h), term_criteria)
roi_x, roi_y, w, h=rect
cv2.rectangle(frame, (roi_x, roi_y), (roi_x + w, roi_y + h), (255, 255, 255), 2)
cv2.imshow("Mask", mask)
cv2.putText(frame, "seuil[p|m]: {:d} mode[o]:{}".format(seuil, "CamShift" if mode else "meanshift"), (10, 40), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 0), 1)
cv2.imshow("Camera", frame)
key=cv2.waitKey(10)&0xFF
if key==ord('q'):  # quit
quit()
if key==ord('p'):  # raise back-projection threshold
seuil=min(250, seuil+1)
if key==ord('m'):  # lower back-projection threshold
seuil=max(1, seuil-1)
if key==ord('o'):  # toggle meanShift / CamShift
mode=not mode
video.release()
cv2.destroyAllWindows()

View File

@@ -0,0 +1,53 @@
# Tutorial: object tracking with meanShift — double-click to select a ROI;
# its hue back-projection drives the tracking window.
# NOTE(review): indentation was lost in this extraction; restore it before
# running.
import cv2
import numpy as np
from matplotlib import pyplot as plt
objet=0  # set once a ROI has been selected
nbr_classes=180  # hue bins (OpenCV hue range is 0..179)
seuil=30  # threshold on the back-projection
# Stop after 10 iterations or when the window moves by less than 1 pixel.
term_criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1.0)
def click(event, x, y, flags, param):
# Mouse callback: on double-click, select a ROI and build its normalized
# hue histogram; the ROI box becomes the tracker's initial window.
global roi_x, roi_y, roi_w, roi_h, roi_hist, frame, objet
if event==cv2.EVENT_LBUTTONDBLCLK:
roi_x, roi_y, roi_w, roi_h=cv2.selectROI('ROI', frame, False, False)
roi=frame[roi_y: roi_y + roi_h, roi_x: roi_x + roi_w]
hsv_roi=cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
roi_hist=cv2.calcHist([hsv_roi], [0], None, [nbr_classes], [0, nbr_classes])
cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)
cv2.destroyWindow('ROI')
plt.clf()
plt.plot(roi_hist)
plt.show(block=False)
plt.pause(0.01)
objet=1
video=cv2.VideoCapture(0)
cv2.namedWindow('Camera')
cv2.setMouseCallback('Camera', click)
while True:
ret, frame=video.read()
# NOTE(review): `ret` is not checked; `frame` is None if the capture fails.
if objet:
hsv=cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
mask=cv2.calcBackProject([hsv], [0], roi_hist, [0, nbr_classes], 1)
_, mask=cv2.threshold(mask, seuil, 255, cv2.THRESH_BINARY)
mask=cv2.erode(mask, None, iterations=3)
mask=cv2.dilate(mask, None, iterations=3)
# meanShift moves the window to the densest back-projection area.
_, rect=cv2.meanShift(mask, (roi_x, roi_y, roi_w, roi_h), term_criteria)
roi_x, roi_y, w, h=rect
cv2.rectangle(frame, (roi_x, roi_y), (roi_x + w, roi_y + h), (255, 255, 255), 2)
cv2.imshow("Mask", mask)
cv2.putText(frame, "seuil[p|m]: {:d}".format(seuil), (10, 40), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 0), 1)
cv2.imshow("Camera", frame)
key=cv2.waitKey(10)&0xFF
if key==ord('q'):  # quit
quit()
if key==ord('p'):  # raise back-projection threshold
seuil=min(250, seuil+1)
if key==ord('m'):  # lower back-projection threshold
seuil=max(1, seuil-1)
video.release()
cv2.destroyAllWindows()

View File

@@ -0,0 +1,70 @@
# Tutorial: tracking a football player with meanShift — double-click to
# select the player; the window is enlarged by a 30-pixel margin and the
# track is dropped when the back-projection under it becomes empty.
# NOTE(review): indentation was lost in this extraction; restore it before
# running.
import cv2
import numpy as np
from matplotlib import pyplot as plt
objet=0  # set once a ROI has been selected
nbr_classes=180  # hue bins (OpenCV hue range is 0..179)
seuil=30  # threshold on the back-projection
term_criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 100, 1.0)
def click(event, x, y, flags, param):
# Mouse callback: select the player. The histogram is computed from the
# tight selection (roi is cropped first); the tracking window is then
# enlarged by a 30-pixel margin on every side.
global roi_x, roi_y, roi_w, roi_h, roi_hist, frame, objet
if event==cv2.EVENT_LBUTTONDBLCLK:
roi_x, roi_y, roi_w, roi_h=cv2.selectROI('ROI', frame, False, False)
roi=frame[roi_y: roi_y + roi_h, roi_x: roi_x + roi_w]
roi_x-=30
roi_y-=30
roi_w+=60
roi_h+=60
hsv_roi=cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
roi_hist=cv2.calcHist([hsv_roi], [0], None, [nbr_classes], [0, nbr_classes])
cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)
cv2.destroyWindow('ROI')
plt.clf()
plt.plot(roi_hist)
plt.show(block=False)
plt.pause(0.01)
objet=1
video=cv2.VideoCapture("foot.webm")
cv2.namedWindow('Camera')
cv2.setMouseCallback('Camera', click)
while True:
ret, frame=video.read()
# NOTE(review): `ret` is not checked; `frame` is None at end of video.
frame=cv2.resize(frame, (frame.shape[1]*3, frame.shape[0]*3))[200:1000, 400:2400] # Adapt to your video, or remove this line!
if objet:
hsv=cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
mask=cv2.calcBackProject([hsv], [0], roi_hist, [0, nbr_classes], 1)
_, mask=cv2.threshold(mask, seuil, 255, cv2.THRESH_BINARY)
mask=cv2.erode(mask, None, iterations=3)
mask=cv2.dilate(mask, None, iterations=3)
_, rect=cv2.meanShift(mask, (roi_x, roi_y, roi_w, roi_h), term_criteria)
roi_x, roi_y, w, h=rect
# Empty back-projection under the window: the target is lost.
if np.sum(mask[roi_y: roi_y + roi_h, roi_x: roi_x + roi_w])==0:
objet=0
cv2.putText(frame, "Marty McFly", (roi_x-40, roi_y-20), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 255, 0), 1)
cv2.rectangle(mask, (roi_x, roi_y), (roi_x + w, roi_y + h), (255, 255, 255), 2)
cv2.imshow("Mask", mask)
cv2.putText(frame, "seuil[p|m]: {:d}".format(seuil), (10, 40), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 0), 1)
cv2.imshow("Camera", frame)
key=cv2.waitKey(50)&0xFF
if key==ord('q'):  # quit
quit()
if key==ord('p'):  # raise back-projection threshold
seuil=min(250, seuil+1)
if key==ord('m'):  # lower back-projection threshold
seuil=max(1, seuil-1)
if key==ord('a'):  # fast-forward 1800 frames
for cpt in range(1800):
ret, frame=video.read()
video.release()
cv2.destroyAllWindows()

View File

@@ -0,0 +1,5 @@
# Tutoriel OpenCV
## Caméra de surveillance
La vidéo du tutoriel est à l'adresse:
https://www.youtube.com/watch?v=2JaXsL0pUzo

108
OpenCV/tutoriel26/camera.py Normal file
View File

@@ -0,0 +1,108 @@
# Tutorial: surveillance camera — motion detection by frame differencing,
# with an activity graph at the bottom of the window and automatic video
# recording while motion lasts (plus a short tail after it stops).
# NOTE(review): indentation was lost in this extraction; restore it before
# running.
import os
import sys
import time
import numpy as np
import cv2
# mode global=1 local=0
mode=1  # 1: count all changed pixels; 0: area of the largest contour
label_mode=["local", "global"]
kernel_blur=3  # Gaussian blur kernel size (kept odd)
seuil=15  # binarization threshold on the frame difference
seuil_nbr_pixel=5000  # pixel-count level that triggers the alarm
dir_videos="d:\\enregistrements\\"
couleur_fond =(100, 100, 100)
couleur_infos =(255, 255, 255)
couleur_alarme =( 0, 0, 255)
couleur_fin_alarme=( 0, 128, 255)
cap=cv2.VideoCapture(1)
ret, originale=cap.read()
if ret is False:
quit()
hauteur, largeur, nbr_couche=originale.shape
tab_hauteur=50  # height (px) of the activity graph strip
# Reference frame: grayscale + blur, replaced on every iteration.
originale=cv2.cvtColor(originale, cv2.COLOR_BGR2GRAY)
originale=cv2.GaussianBlur(originale, (kernel_blur, kernel_blur), 0)
kernel_dilate=np.ones((3, 3), np.uint8)
tab=np.zeros((largeur), dtype=np.int32)  # rolling activity history
image_finale=np.zeros((hauteur+tab_hauteur, largeur, nbr_couche), dtype=np.uint8)
fichier_video=None  # path of the recording in progress, None when idle
if not os.path.isdir(dir_videos):
os.mkdir(dir_videos)
fin_mouvement=40  # frames recorded after motion stops
cpt_fin_mouvement=0
while True:
alarme=0
ret, frame=cap.read()
if ret is False:
quit()
image_finale[:hauteur, :, :]=frame
gray=cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
gray_blur=cv2.GaussianBlur(gray, (kernel_blur, kernel_blur), 0)
# Difference with the previous frame, thresholded.
mask=cv2.absdiff(originale, gray_blur)
mask=cv2.threshold(mask, seuil, 255, cv2.THRESH_BINARY)[1]
if mode==0:
# Local mode: dilate, then measure the largest moving contour only.
mask=cv2.dilate(mask, kernel_dilate, iterations=2)
contours, hierarchy=cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
if hierarchy is not None:
c=max(contours, key=cv2.contourArea)
nbr_pixel=int(cv2.contourArea(c))
couleur=(0, 0, 255) if nbr_pixel>seuil_nbr_pixel else (0, 255, 0)
cv2.drawContours(image_finale, [c], 0, couleur, 3)
if nbr_pixel>seuil_nbr_pixel:
alarme=1
# NOTE(review): if no contour is found on the very first local-mode
# frame, nbr_pixel is never assigned before its use below — confirm.
else:
# Global mode: count every changed pixel in the mask.
nbr_pixel=int(np.sum(mask)/255)
if nbr_pixel>seuil_nbr_pixel:
alarme=1
cv2.rectangle(image_finale, (0, 0), (image_finale.shape[1], 30), couleur_fond, cv2.FILLED)
if alarme:
cv2.putText(image_finale, "ALARME", (image_finale.shape[1]-80, 20), cv2.FONT_HERSHEY_PLAIN, 1, couleur_alarme, 2)
# Start a new timestamped recording if none is running.
if fichier_video is None:
fichier_video=dir_videos+time.strftime("%Y_%m_%d_%H_%M_%S")+".avi"
video=cv2.VideoWriter(fichier_video, cv2.VideoWriter_fourcc(*'DIVX'), 15, (largeur, hauteur))
video.write(frame)
cpt_fin_mouvement=fin_mouvement  # re-arm the post-motion tail
else:
cpt_fin_mouvement=cpt_fin_mouvement-1
if fichier_video is not None:
# Keep recording the tail; close the file when the tail runs out.
if cpt_fin_mouvement==0:
video.release()
fichier_video=None
else:
cv2.putText(image_finale, "ALARME", (image_finale.shape[1]-80, 20), cv2.FONT_HERSHEY_PLAIN, 1, couleur_fin_alarme, 2)
video.write(frame)
txt="[p|m]Nbr pixel: {:d} [o]Mode:{} nbr pixel: {:06d}".format(seuil_nbr_pixel, label_mode[mode], nbr_pixel)
# Activity graph: shift history right, insert the new (scaled) sample.
tab=np.roll(tab, 1)
tab[0]=int(nbr_pixel/300)
tab_image=np.full((tab_hauteur, largeur, 3), couleur_fond, dtype=np.float32)
ligne_seuil=int(seuil_nbr_pixel/300)
for i in range(largeur):
couleur=(0, 0, 255) if tab[i]>ligne_seuil else (0, 255, 0)
cv2.line(tab_image, (i, tab_hauteur), (i, tab_hauteur-tab[i]), couleur, 1)
cv2.line(tab_image, (0, tab_hauteur-ligne_seuil), (largeur, tab_hauteur-ligne_seuil), (0, 0, 255), 1)
image_finale[hauteur:, :, :]=tab_image
cv2.putText(image_finale, txt, (5, 20), cv2.FONT_HERSHEY_PLAIN, 1, couleur_infos, 2)
originale=gray_blur  # next difference is against the current frame
cv2.imshow("Camera", image_finale)
cv2.imshow("Mask", mask)
key=cv2.waitKey(50)&0xFF
if key==ord('q'):  # quit
break
if key==ord('p'):  # raise the alarm pixel-count threshold
seuil_nbr_pixel+=100
if key==ord('m'):  # lower the alarm pixel-count threshold
seuil_nbr_pixel=max(100, seuil_nbr_pixel-100)
if key==ord('o'):  # toggle local/global mode
mode=not mode
cap.release()
cv2.destroyAllWindows()

View File

@@ -0,0 +1,7 @@
# Tutoriel OpenCV
## Laisser une trace
La vidéo du tutoriel est à l'adresse:
https://www.youtube.com/watch?v=358qiW8nXpw

View File

@@ -0,0 +1,48 @@
# Tutorial: color-based object tracking — double-click picks the target hue
# from the image; the mouse wheel fine-tunes it; the HSV range [lo, hi]
# follows the chosen hue with a fixed margin.
# NOTE(review): indentation was lost in this extraction; restore it before
# running.
import cv2
import numpy as np
marge=15  # half-width of the tracked hue band
def souris(event, x, y, flags, param):
# Mouse callback: pick the hue under the cursor on double-click, or
# nudge it with the wheel, then refresh the hue bounds.
global lo, hi, color
if event==cv2.EVENT_LBUTTONDBLCLK:
color=image[y, x][0]
if event==cv2.EVENT_MOUSEWHEEL:
if flags<0:  # wheel down: decrease hue
if color>5:
color-=1
else:  # wheel up: increase hue
if color<250:
color+=1
lo[0]=color-marge
hi[0]=color+marge
color=90  # initial hue (OpenCV hue range is 0..179)
lo=np.array([color-marge, 50, 50])
hi=np.array([color+marge, 255,255])
color_info=(0, 255, 0)
cap=cv2.VideoCapture(0)
cv2.namedWindow('Camera')
cv2.setMouseCallback('Camera', souris)
while True:
ret, frame=cap.read()
# NOTE(review): `ret` is not checked; `frame` is None if the capture fails.
image=cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
image=cv2.blur(image, (5, 5))
# Keep pixels whose HSV value lies in [lo, hi], then clean the mask.
mask=cv2.inRange(image, lo, hi)
mask=cv2.erode(mask, None, iterations=2)
mask=cv2.dilate(mask, None, iterations=8)
# [-2] keeps the contour list across the OpenCV 3/4 signature change.
elements=cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
if len(elements) > 0:
c=max(elements, key=cv2.contourArea)
((x, y), radius)=cv2.minEnclosingCircle(c)
if radius>10:  # ignore small blobs
cv2.circle(frame, (int(x), int(y)), 5, color_info, 10)
cv2.line(frame, (int(x), int(y)), (int(x)+150, int(y)), color_info, 2)
cv2.putText(frame, "Objet !!!", (int(x)+10, int(y) -10), cv2.FONT_HERSHEY_DUPLEX, 1, color_info, 1, cv2.LINE_AA)
cv2.putText(frame, "Couleur: {:d}".format(color), (10, 30), cv2.FONT_HERSHEY_DUPLEX, 1, color_info, 1, cv2.LINE_AA)
cv2.imshow('Camera', frame)
cv2.imshow('Mask', mask)
if cv2.waitKey(1)&0xFF==ord('q'):  # quit
break
cap.release()
cv2.destroyAllWindows()

View File

@@ -0,0 +1,75 @@
# Tutorial: leave a trail behind a tracked colored object — the last
# `nbr_point` positions are redrawn each frame either as points or as a
# polyline, optionally with a fading green gradient.
# NOTE(review): indentation was lost in this extraction; restore it before
# running.
import cv2
import numpy as np
# HSV bounds of the tracked color (green band).
lo=np.array([60, 30, 30])
hi=np.array([100, 255, 255])
cap=cv2.VideoCapture(0)
width=cap.get(3)  # CAP_PROP_FRAME_WIDTH
height=cap.get(4)  # CAP_PROP_FRAME_HEIGHT
nbr_point=100  # length of the trail
# Position history; [-1, -1] marks "no detection" for that slot.
tab_point=np.full((nbr_point, 2), -1, dtype=np.int32)
# mode 0 : point
# mode 1 : ligne
mode=1
degrade=1  # 1: fade older trail segments toward dark green
taille_objet=15  # minimum enclosing-circle radius to accept a detection
def dessine_point(tab_point):
# Draw the trail as individual dots, oldest first.
for i in range(len(tab_point)):
if tab_point[nbr_point-i-1, 0]!=-1:
if degrade:
couleur=(0, 255-2*(nbr_point-i-1), 0)
else:
couleur=(0, 255, 0)
cv2.circle(frame, (tab_point[nbr_point-i-1, 0], tab_point[nbr_point-i-1, 1]), 5, couleur, 10)
def dessine_ligne(tab_point):
# Draw the trail as segments joining consecutive valid positions.
old_x, old_y=(-1, -1)
for i in range(nbr_point):
if tab_point[nbr_point-i-1, 0]!=-1:
if old_x!=-1:
if degrade:
couleur=(0, 255-2*(nbr_point-i-1), 0)
else:
couleur=(0, 255, 0)
cv2.line(frame, (old_x, old_y), (tab_point[nbr_point-i-1, 0], tab_point[nbr_point-i-1, 1]), couleur, 10)
old_x, old_y=(tab_point[nbr_point-i-1, 0], tab_point[nbr_point-i-1, 1])
while True:
ret, frame=cap.read()
# NOTE(review): `ret` is not checked; `frame` is None if the capture fails.
image=cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
image=cv2.blur(image, (5, 5))
mask=cv2.inRange(image, lo, hi)
mask=cv2.erode(mask, None, iterations=2)
mask=cv2.dilate(mask, None, iterations=4)
# [-2] keeps the contour list across the OpenCV 3/4 signature change.
elements=cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
# Shift the history and insert this frame's detection (or the marker).
tab_point=np.roll(tab_point, 1, axis=0)
tab_point[0]=[-1, -1]
if len(elements) > 0:
c=max(elements, key=cv2.contourArea)
((x, y), rayon)=cv2.minEnclosingCircle(c)
if rayon>taille_objet:
tab_point[0]=[int(x), int(y)]
if mode:
dessine_ligne(tab_point)
else:
dessine_point(tab_point)
cv2.rectangle(frame, (0, 0), (int(width), 30), (100, 100, 100), cv2.FILLED)
cv2.putText(frame, "Mode[m]: {:d} Degrade[p]: {:d}".format(mode, degrade), (10, 20), cv2.FONT_HERSHEY_PLAIN, 1, (255, 255, 255), 2)
cv2.imshow('Camera', frame)
cv2.imshow('Mask', mask)
key=cv2.waitKey(1)&0xFF
if key==ord('q'):  # quit
break
if key==ord('m'):  # toggle point/line trail
mode=not mode
if key==ord('p'):  # toggle fading gradient
degrade=not degrade
cap.release()
cv2.destroyAllWindows()

View File

@@ -0,0 +1,41 @@
import cv2
import numpy as np
# Track a coloured object from the webcam and draw its trail as line segments.
# HSV bounds of the tracked colour (green hue band).
lo=np.array([60, 30, 30])
hi=np.array([100, 255, 255])
cap=cv2.VideoCapture(0)
# Minimum enclosing-circle radius for a detection to count.
taille_objet=15
# Ring buffer of the last nbr_point positions; [-1, -1] marks an empty slot.
nbr_point=100
tab_point=np.full((nbr_point, 2), -1, dtype=np.int32)
while True:
    ret, frame=cap.read()
    image=cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    image=cv2.blur(image, (5, 5))
    mask=cv2.inRange(image, lo, hi)
    # Clean the mask: erode away noise, dilate to re-fill the blob.
    mask=cv2.erode(mask, None, iterations=2)
    mask=cv2.dilate(mask, None, iterations=4)
    elements=cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
    # Shift history; slot 0 receives this frame's detection (or stays -1).
    tab_point=np.roll(tab_point, 1, axis=0)
    tab_point[0]=[-1, -1]
    if len(elements) > 0:
        c=max(elements, key=cv2.contourArea)
        ((x, y), rayon)=cv2.minEnclosingCircle(c)
        if rayon>taille_objet:
            tab_point[0]=[int(x), int(y)]
    # Join consecutive recorded positions, oldest first, fading older segments.
    old_x, old_y=(-1, -1)
    for i in range(nbr_point):
        if tab_point[nbr_point-i-1, 0]!=-1:
            if old_x!=-1:
                cv2.line(frame, (old_x, old_y), (tab_point[nbr_point-i-1, 0], tab_point[nbr_point-i-1, 1]), (0, 255-2*(nbr_point-i-1), 0), 10)
                # Alternative: constant-colour trail (no fading).
                #cv2.line(frame, (old_x, old_y), (tab_point[nbr_point-i-1, 0], tab_point[nbr_point-i-1, 1]), (0, 255, 0), 10)
            old_x, old_y=(tab_point[nbr_point-i-1, 0], tab_point[nbr_point-i-1, 1])
    cv2.imshow('Camera', frame)
    cv2.imshow('Mask', mask)
    if cv2.waitKey(1)&0xFF==ord('q'):
        break
cap.release()
cv2.destroyAllWindows()

View File

@@ -0,0 +1,38 @@
import cv2
import numpy as np
# Track a coloured object from the webcam and draw its trail as points.
# HSV bounds of the tracked colour (green hue band).
lo=np.array([60, 30, 30])
hi=np.array([100, 255, 255])
cap=cv2.VideoCapture(0)
# Minimum enclosing-circle radius for a detection to count.
taille_objet=15
# Ring buffer of the last nbr_point positions; [-1, -1] marks an empty slot.
nbr_point=100
tab_point=np.full((nbr_point, 2), -1, dtype=np.int32)
while True:
    ret, frame=cap.read()
    image=cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    image=cv2.blur(image, (5, 5))
    mask=cv2.inRange(image, lo, hi)
    # Clean the mask: erode away noise, dilate to re-fill the blob.
    mask=cv2.erode(mask, None, iterations=2)
    mask=cv2.dilate(mask, None, iterations=4)
    elements=cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
    # Shift history; slot 0 receives this frame's detection (or stays -1).
    tab_point=np.roll(tab_point, 1, axis=0)
    tab_point[0]=[-1, -1]
    if len(elements) > 0:
        c=max(elements, key=cv2.contourArea)
        ((x, y), rayon)=cv2.minEnclosingCircle(c)
        if rayon>taille_objet:
            tab_point[0]=[int(x), int(y)]
    # Draw each recorded position, oldest slots first.
    for i in range(nbr_point):
        if tab_point[nbr_point-i-1, 0]!=-1:
            cv2.circle(frame, (tab_point[nbr_point-i-1, 0], tab_point[nbr_point-i-1, 1]), 5, (0, 255, 0), 10)
            # Alternative: fade older points toward darker green.
            #cv2.circle(frame, (tab_point[nbr_point-i-1, 0], tab_point[nbr_point-i-1, 1]), 5, (0, 255-2*(nbr_point-i-1), 0), 10)
    cv2.imshow('Camera', frame)
    cv2.imshow('Mask', mask)
    if cv2.waitKey(1)&0xFF==ord('q'):
        break
cap.release()
cv2.destroyAllWindows()

View File

@@ -0,0 +1,17 @@
# Tutoriel OpenCV
## Détection d'objet avec la fonction detectMultiScale
Si vous souhaitez me soutenir: <https://fr.tipeee.com/l42-project>
Pour utiliser OpenCV, il suffit d'installer le package suivant:
`# pip install opencv-python`
Je vous conseille aussi d'installer:
`# pip install opencv-contrib-python`
La vidéo du tutoriel est à l'adresse:
https://www.youtube.com/watch?v=-3xbAkCWJCc

View File

@@ -0,0 +1,19 @@
import cv2
# Full-body Haar-cascade detection on the webcam, with an FPS overlay.
face_cascade=cv2.CascadeClassifier("./haarcascade_fullbody.xml")
cap=cv2.VideoCapture(0)
while True:
    ret, frame=cap.read()
    # Start of the per-frame timing window used for the FPS display.
    tickmark=cv2.getTickCount()
    gray=cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    face=face_cascade.detectMultiScale(gray, scaleFactor=1.2, minNeighbors=3)
    for x, y, w, h in face:
        cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 0, 0), 2)
    if cv2.waitKey(1)&0xFF==ord('q'):
        break
    fps=cv2.getTickFrequency()/(cv2.getTickCount()-tickmark)
    cv2.putText(frame, "FPS: {:05.2f}".format(fps), (10, 30), cv2.FONT_HERSHEY_PLAIN, 2, (255, 0, 0), 2)
    cv2.imshow('video', frame)
cap.release()
cv2.destroyAllWindows()

View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:57b6dc1aa21d50310d46a63a8cbd4ac47843c9195fac1be8259d6e1e604c4372
size 81001739

20
OpenCV/tutoriel3/cars.py Normal file
View File

@@ -0,0 +1,20 @@
import cv2
import numpy as np
# Haar-cascade vehicle detection on a video file, with an FPS overlay.
object_cascade=cv2.CascadeClassifier("./cars.xml")
cap=cv2.VideoCapture('cars.mp4')
while True:
    ret, frame=cap.read()
    if ret is False:
        # End of the video: stop cleanly instead of crashing on an empty frame.
        break
    # Start of the per-frame timing window used for the FPS display.
    tickmark=cv2.getTickCount()
    gray=cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # Renamed from 'object' to avoid shadowing the builtin.
    detections=object_cascade.detectMultiScale(gray, scaleFactor=1.10, minNeighbors=3)
    for x, y, w, h in detections:
        cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 0, 0), 2)
    fps=cv2.getTickFrequency()/(cv2.getTickCount()-tickmark)
    cv2.putText(frame, "FPS: {:05.2f}".format(fps), (10, 30), cv2.FONT_HERSHEY_PLAIN, 2, (255, 0, 0), 2)
    cv2.imshow('video', frame)
    if cv2.waitKey(1)&0xFF==ord('q'):
        break
cap.release()
cv2.destroyAllWindows()

3654
OpenCV/tutoriel3/cars.xml Normal file

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,19 @@
import cv2
# Frontal-face Haar-cascade detection on the webcam, with an FPS overlay.
face_cascade=cv2.CascadeClassifier("./haarcascade_frontalface_alt2.xml")
cap=cv2.VideoCapture(0)
while True:
    ret, frame=cap.read()
    # Start of the per-frame timing window used for the FPS display.
    tickmark=cv2.getTickCount()
    gray=cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    face=face_cascade.detectMultiScale(gray, scaleFactor=1.2, minNeighbors=3)
    for x, y, w, h in face:
        cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 0, 0), 2)
    if cv2.waitKey(1)&0xFF==ord('q'):
        break
    fps=cv2.getTickFrequency()/(cv2.getTickCount()-tickmark)
    cv2.putText(frame, "FPS: {:05.2f}".format(fps), (10, 30), cv2.FONT_HERSHEY_PLAIN, 2, (255, 0, 0), 2)
    cv2.imshow('video', frame)
cap.release()
cv2.destroyAllWindows()

View File

@@ -0,0 +1,27 @@
import numpy as np
import cv2
import picamera
import picamera.array
# Raspberry Pi variant: frontal-face detection on frames from the Pi camera.
WIDTH=640
HEIGHT=480
face_cascade=cv2.CascadeClassifier("./haarcascade_frontalface_alt2.xml")
with picamera.PiCamera() as camera:
    with picamera.array.PiRGBArray(camera) as stream:
        camera.resolution=(WIDTH, HEIGHT)
        while True:
            # Capture straight into the reusable BGR array buffer.
            camera.capture(stream, 'bgr', use_video_port=True)
            tickmark=cv2.getTickCount()
            gray=cv2.cvtColor(stream.array, cv2.COLOR_BGR2GRAY)
            face=face_cascade.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=4)
            for x, y, w, h in face:
                cv2.rectangle(stream.array, (x, y), (x+w, y+h), (255, 0, 0), 2)
            if cv2.waitKey(1)&0xFF==ord('q'):
                break
            fps=cv2.getTickFrequency()/(cv2.getTickCount()-tickmark)
            cv2.putText(stream.array, "FPS: {:05.2f}".format(fps), (10, 30), cv2.FONT_HERSHEY_PLAIN, 2, (255, 0, 0), 2)
            cv2.imshow('video', stream.array)
            # Rewind and truncate so the next capture reuses the same buffer.
            stream.seek(0)
            stream.truncate()
cv2.destroyAllWindows()

View File

@@ -0,0 +1,37 @@
import cv2
import operator
# Detect faces from the webcam using three passes: frontal faces, left profiles,
# and right profiles (found by mirroring the image), then de-duplicate.
face_cascade=cv2.CascadeClassifier("./haarcascade_frontalface_alt2.xml")
profile_cascade=cv2.CascadeClassifier("./haarcascade_profileface.xml")
cap=cv2.VideoCapture(0)
width=int(cap.get(3))
# Two detections closer than this (pixels) are treated as the same face.
marge=70
while True:
    ret, frame=cap.read()
    tab_face=[]
    tickmark=cv2.getTickCount()
    gray=cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # Pass 1: frontal faces.
    face=face_cascade.detectMultiScale(gray, scaleFactor=1.2, minNeighbors=4, minSize=(5, 5))
    for x, y, w, h in face:
        tab_face.append([x, y, x+w, y+h])
    # Pass 2: profiles in the original orientation.
    face=profile_cascade.detectMultiScale(gray, scaleFactor=1.2, minNeighbors=4)
    for x, y, w, h in face:
        tab_face.append([x, y, x+w, y+h])
    # Pass 3: mirror horizontally to catch the opposite profile, map x back.
    gray2=cv2.flip(gray, 1)
    face=profile_cascade.detectMultiScale(gray2, scaleFactor=1.2, minNeighbors=4)
    for x, y, w, h in face:
        tab_face.append([width-x, y, width-(x+w), y+h])
    # Sort by position; draw a box only when far enough from the previous one.
    tab_face=sorted(tab_face, key=operator.itemgetter(0, 1))
    index=0
    for x, y, x2, y2 in tab_face:
        if not index or (x-tab_face[index-1][0]>marge or y-tab_face[index-1][1]>marge):
            cv2.rectangle(frame, (x, y), (x2, y2), (0, 0, 255), 2)
        index+=1
    if cv2.waitKey(1)&0xFF==ord('q'):
        break
    fps=cv2.getTickFrequency()/(cv2.getTickCount()-tickmark)
    cv2.putText(frame, "FPS: {:05.2f}".format(fps), (10, 30), cv2.FONT_HERSHEY_PLAIN, 2, (255, 0, 0), 2)
    cv2.imshow('video', frame)
cap.release()
cv2.destroyAllWindows()

View File

@@ -0,0 +1,43 @@
import cv2
import operator
import picamera
import picamera.array
# Raspberry Pi variant of the frontal+profile face detector (see webcam version):
# three cascade passes per frame, then positional de-duplication.
face_cascade=cv2.CascadeClassifier("./haarcascade_frontalface_alt2.xml")
profile_cascade=cv2.CascadeClassifier("./haarcascade_profileface.xml")
# Two detections closer than this (pixels) are treated as the same face.
marge=70
WIDTH=640
HEIGHT=480
with picamera.PiCamera() as camera:
    with picamera.array.PiRGBArray(camera) as stream:
        camera.resolution=(WIDTH, HEIGHT)
        while True:
            camera.capture(stream, 'bgr', use_video_port=True)
            tab_face=[]
            tickmark=cv2.getTickCount()
            gray=cv2.cvtColor(stream.array, cv2.COLOR_BGR2GRAY)
            # Pass 1: frontal faces.
            face=face_cascade.detectMultiScale(gray, scaleFactor=1.2, minNeighbors=4, minSize=(5, 5))
            for x, y, w, h in face:
                tab_face.append([x, y, x+w, y+h])
            # Pass 2: profiles in the original orientation.
            face=profile_cascade.detectMultiScale(gray, scaleFactor=1.2, minNeighbors=4)
            for x, y, w, h in face:
                tab_face.append([x, y, x+w, y+h])
            # Pass 3: mirror horizontally for the opposite profile, map x back.
            gray2=cv2.flip(gray, 1)
            face=profile_cascade.detectMultiScale(gray2, scaleFactor=1.2, minNeighbors=4)
            for x, y, w, h in face:
                tab_face.append([WIDTH-x, y, WIDTH-(x+w), y+h])
            # Sort by position; draw only detections far from the previous one.
            tab_face=sorted(tab_face, key=operator.itemgetter(0, 1))
            index=0
            for x, y, x2, y2 in tab_face:
                if not index or (x-tab_face[index-1][0]>marge or y-tab_face[index-1][1]>marge):
                    cv2.rectangle(stream.array, (x, y), (x2, y2), (0, 0, 255), 2)
                index+=1
            if cv2.waitKey(1)&0xFF==ord('q'):
                break
            fps=cv2.getTickFrequency()/(cv2.getTickCount()-tickmark)
            cv2.putText(stream.array, "FPS: {:05.2f}".format(fps), (10, 30), cv2.FONT_HERSHEY_PLAIN, 2, (255, 0, 0), 2)
            cv2.imshow('video', stream.array)
            # Rewind and truncate so the next capture reuses the same buffer.
            stream.seek(0)
            stream.truncate()
cv2.destroyAllWindows()

View File

@@ -0,0 +1,7 @@
# Tutoriel OpenCV
## Détecter des œufs
La vidéo du tutoriel est à l'adresse:
https://www.youtube.com/watch?v=iRqlGfReGG8

View File

@@ -0,0 +1,15 @@
import cv2
import numpy as np
import glob
# Show inverted Canny edges for every .jfif image in the current directory.
fichiers=glob.glob("*.jfif")
for fichier in fichiers:
    img=cv2.imread(fichier, 1)
    # ~Canny inverts the edge map: white background, black contours.
    edges=~cv2.Canny(img, 80, 150)
    # Erode then dilate to thicken/clean the inverted edge regions.
    edges=cv2.erode(edges, None, iterations=3)
    edges=cv2.dilate(edges, None, iterations=2)
    cv2.imshow("image", img)
    cv2.imshow("edges", edges)
    # Wait for a key press before moving to the next image.
    cv2.waitKey()

View File

@@ -0,0 +1,28 @@
import cv2
import numpy as np
import glob
# Flag each .jfif image OK/KO depending on whether a large enough contour
# (presumably an egg — TODO confirm with the dataset) is found in its edge map.
fichiers=glob.glob("*.jfif")
for fichier in fichiers:
    img=cv2.imread(fichier, 1)
    # Inverted Canny edges, then erode/dilate to clean the result.
    edges=~cv2.Canny(img, 80, 150)
    edges=cv2.erode(edges, None, iterations=3)
    edges=cv2.dilate(edges, None, iterations=2)
    presence=0
    elements=cv2.findContours(edges, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
    for e in elements:
        c=cv2.contourArea(e)
        # Contours above this area count as a detection and are outlined.
        if c>150:
            cv2.drawContours(img, [e], 0, (255, 0, 0), 2)
            presence=1
    if presence:
        cv2.putText(img, "OK", (50, 50), cv2.FONT_HERSHEY_PLAIN, 5, (0, 255, 0), 2, cv2.LINE_AA)
    else:
        cv2.putText(img, "KO", (50, 50), cv2.FONT_HERSHEY_PLAIN, 5, (0, 0, 255), 2, cv2.LINE_AA)
    cv2.imshow("image", img)
    cv2.imshow("image2", edges)
    # Wait for a key press before moving to the next image.
    cv2.waitKey()

View File

@@ -0,0 +1,34 @@
import cv2
import numpy as np
import glob
# Inspect a fixed region of interest in henhouse photos and flag OK/KO
# depending on whether a large enough contour is found there.
# ROI coordinates (presumably the nest area — TODO confirm against the photos).
x_min=600
y_min=350
x_max=750
y_max=500
fichiers=glob.glob("poulailler*.jpg")
for fichier in fichiers:
    img=cv2.imread(fichier, 1)
    # Crop to the ROI before edge detection so only the nest area is analysed.
    img_cible=img[y_min:y_max, x_min:x_max]
    edges=~cv2.Canny(img_cible, 30, 80)
    edges=cv2.erode(edges, None, iterations=3)
    edges=cv2.dilate(edges, None, iterations=2)
    presence=0
    elements=cv2.findContours(edges, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
    for e in elements:
        c=cv2.contourArea(e)
        if c>150:
            presence=1
    if presence:
        cv2.putText(img, "OK", (50, 100), cv2.FONT_HERSHEY_PLAIN, 5, (0, 255, 0), 2, cv2.LINE_AA)
    else:
        cv2.putText(img, "KO", (50, 100), cv2.FONT_HERSHEY_PLAIN, 5, (0, 0, 255), 2, cv2.LINE_AA)
    # Outline the inspected region on the full image.
    cv2.rectangle(img, (x_min, y_min), (x_max, y_max), (0, 255, 0), 1)
    cv2.imshow("image", img)
    cv2.imshow("image2", edges)
    cv2.waitKey()

View File

@@ -0,0 +1,15 @@
# Tutoriel OpenCV
## Détection de couleur/objet avec la fonction inRange
Si vous souhaitez me soutenir: <https://fr.tipeee.com/l42-project>
Pour utiliser OpenCV, il suffit d'installer le package suivant:
`# pip install opencv-python`
Je vous conseille aussi d'installer:
`# pip install opencv-contrib-python`
La vidéo du tutoriel est à l'adresse:
https://www.youtube.com/watch?v=VKSHZGLg4zk

View File

@@ -0,0 +1,32 @@
import cv2
import numpy as np
# Detect the largest object in a narrow HSV hue band (95-105) on the webcam.
lo=np.array([95, 100, 50])
hi=np.array([105, 255, 255])
color_infos=(0, 255, 255)
cap=cv2.VideoCapture(0)
while True:
    ret, frame=cap.read()
    image=cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    # NOTE(review): the mask is computed before the blur, so the blur below
    # only affects 'image' afterwards, not the thresholding input.
    mask=cv2.inRange(image, lo, hi)
    image=cv2.blur(image, (7, 7))
    mask=cv2.erode(mask, None, iterations=4)
    mask=cv2.dilate(mask, None, iterations=4)
    # Keep only the original-frame pixels covered by the mask.
    image2=cv2.bitwise_and(frame, frame, mask=mask)
    elements=cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
    if len(elements) > 0:
        # Largest contour only; ignore it if its enclosing circle is too small.
        c=max(elements, key=cv2.contourArea)
        ((x, y), rayon)=cv2.minEnclosingCircle(c)
        if rayon>30:
            cv2.circle(image2, (int(x), int(y)), int(rayon), color_infos, 2)
            cv2.circle(frame, (int(x), int(y)), 5, color_infos, 10)
            cv2.line(frame, (int(x), int(y)), (int(x)+150, int(y)), color_infos, 2)
            cv2.putText(frame, "Objet !!!", (int(x)+10, int(y) -10), cv2.FONT_HERSHEY_DUPLEX, 1, color_infos, 1, cv2.LINE_AA)
    cv2.imshow('Camera', frame)
    cv2.imshow('image2', image2)
    cv2.imshow('Mask', mask)
    if cv2.waitKey(1)&0xFF==ord('q'):
        break
cap.release()
cv2.destroyAllWindows()

View File

@@ -0,0 +1,39 @@
import cv2
import numpy as np
import picamera
import picamera.array
# Raspberry Pi variant: detect the largest object in an HSV hue band (95-105).
lo=np.array([95, 100, 50])
hi=np.array([105, 255, 255])
color_infos=(0, 255, 255)
WIDTH=640
HEIGHT=480
with picamera.PiCamera() as camera:
    with picamera.array.PiRGBArray(camera) as stream:
        camera.resolution=(WIDTH, HEIGHT)
        while True:
            camera.capture(stream, 'bgr', use_video_port=True)
            image=cv2.cvtColor(stream.array, cv2.COLOR_BGR2HSV)
            # NOTE(review): the mask is computed before the blur, so the blur
            # below does not affect the thresholding input.
            mask=cv2.inRange(image, lo, hi)
            image=cv2.blur(image, (7, 7))
            mask=cv2.erode(mask, None, iterations=4)
            mask=cv2.dilate(mask, None, iterations=4)
            # Keep only the original-frame pixels covered by the mask.
            image2=cv2.bitwise_and(stream.array, stream.array, mask=mask)
            elements=cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
            if len(elements) > 0:
                c=max(elements, key=cv2.contourArea)
                ((x, y), rayon)=cv2.minEnclosingCircle(c)
                if rayon>30:
                    cv2.circle(image2, (int(x), int(y)), int(rayon), color_infos, 2)
                    cv2.circle(stream.array, (int(x), int(y)), 5, color_infos, 10)
                    cv2.line(stream.array, (int(x), int(y)), (int(x)+150, int(y)), color_infos, 2)
                    cv2.putText(stream.array, "Objet !!!", (int(x)+10, int(y) -10), cv2.FONT_HERSHEY_DUPLEX, 1, color_infos, 1, cv2.LINE_AA)
            cv2.imshow('Camera', stream.array)
            cv2.imshow('image2', image2)
            cv2.imshow('Mask', mask)
            if cv2.waitKey(1)&0xFF==ord('q'):
                break
            # Rewind and truncate so the next capture reuses the same buffer.
            stream.seek(0)
            stream.truncate()
cv2.destroyAllWindows()

View File

@@ -0,0 +1,7 @@
# Tutoriel OpenCV
## Gestion des évènements souris
La vidéo du tutoriel est à l'adresse:
https://www.youtube.com/watch?v=jLPSnlaAnb4

View File

@@ -0,0 +1,48 @@
import cv2
import numpy as np
def souris(event, x, y, flags, param):
    """Mouse callback: double-click samples the hue under the cursor, the wheel nudges it."""
    global lo, hi, color
    if event==cv2.EVENT_LBUTTONDBLCLK:
        # Pick the hue channel of the (HSV) image at the clicked pixel.
        color=image[y, x][0]
    elif event==cv2.EVENT_MOUSEWHEEL:
        # Wheel down decreases the hue, wheel up increases it, within [5, 250].
        if flags<0 and color>5:
            color-=1
        elif flags>=0 and color<250:
            color+=1
    # Re-centre the HSV detection window (+/-5) on the current hue.
    lo[0]=color-5
    hi[0]=color+5
# Track an object whose hue is chosen interactively with the mouse (see souris).
color=100
lo=np.array([color-5, 100, 50])
hi=np.array([color+5, 255,255])
color_info=(0, 0, 255)
cap=cv2.VideoCapture(0)
# The named window must exist before a mouse callback can be attached to it.
cv2.namedWindow('Camera')
cv2.setMouseCallback('Camera', souris)
while True:
    ret, frame=cap.read()
    image=cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    image=cv2.blur(image, (5, 5))
    mask=cv2.inRange(image, lo, hi)
    mask=cv2.erode(mask, None, iterations=1)
    mask=cv2.dilate(mask, None, iterations=1)
    # Keep only the original-frame pixels covered by the mask.
    image2=cv2.bitwise_and(frame, frame, mask= mask)
    elements=cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
    if len(elements) > 0:
        c=max(elements, key=cv2.contourArea)
        ((x, y), radius)=cv2.minEnclosingCircle(c)
        if radius>30:
            cv2.circle(frame, (int(x), int(y)), 5, color_info, 10)
            cv2.line(frame, (int(x), int(y)), (int(x)+150, int(y)), color_info, 2)
            cv2.putText(frame, "Objet !!!", (int(x)+10, int(y) -10), cv2.FONT_HERSHEY_DUPLEX, 1, color_info, 1, cv2.LINE_AA)
    # Display the currently selected hue value.
    cv2.putText(frame, "Couleur: {:d}".format(color), (10, 30), cv2.FONT_HERSHEY_DUPLEX, 1, color_info, 1, cv2.LINE_AA)
    cv2.imshow('Camera', frame)
    cv2.imshow('image2', image2)
    cv2.imshow('Mask', mask)
    if cv2.waitKey(1)&0xFF==ord('q'):
        break
cap.release()
cv2.destroyAllWindows()

View File

@@ -0,0 +1,6 @@
# Tutoriel OpenCV
## Identification avec face.LBPHFaceRecognizer partie 2
La vidéo du tutoriel est à l'adresse:
https://www.youtube.com/watch?v=UNZ06RZRTUQ

View File

@@ -0,0 +1,38 @@
import cv2
import os
import numpy as np
import pickle
import common as c
# Train an LBPH face recognizer from photos/<label>/*.png, excluding blurry
# photos, and persist the model plus the label->id mapping.
image_dir="./photos/"
current_id=0
label_ids={}
x_train=[]
y_labels=[]
# Each sub-directory name of photos/ becomes a label mapped to a numeric id.
for root, dirs, files in os.walk(image_dir):
    if len(files):
        label=root.split("/")[-1]
        for file in files:
            if file.endswith("png"):
                path=os.path.join(root, file)
                if not label in label_ids:
                    label_ids[label]=current_id
                    current_id+=1
                id_=label_ids[label]
                # Normalise every photo to the shared training size.
                image=cv2.resize(cv2.imread(path, cv2.IMREAD_GRAYSCALE), (c.min_size, c.min_size))
                # Variance of the Laplacian as a sharpness score: low variance
                # means a blurry photo, which is excluded from training.
                fm=cv2.Laplacian(image, cv2.CV_64F).var()
                if fm<250:
                    print("Photo exclue:", path, fm)
                else:
                    x_train.append(image)
                    y_labels.append(id_)
# Persist the label->id mapping for the recognition script.
with open("labels.pickle", "wb") as f:
    pickle.dump(label_ids, f)
x_train=np.array(x_train)
y_labels=np.array(y_labels)
recognizer=cv2.face.LBPHFaceRecognizer_create()
recognizer.train(x_train, y_labels)
recognizer.save("trainner.yml")

View File

@@ -0,0 +1 @@
# Side length (pixels) shared by the capture and training scripts: used both
# as the cascade's minimum detection size and as the training photo size.
min_size=70

View File

@@ -0,0 +1,28 @@
import cv2
import operator
import common as c
# Extract face crops from a movie into non-classees/ for later manual labelling.
face_cascade=cv2.CascadeClassifier("./haarcascade_frontalface_alt2.xml")
#cap=cv2.VideoCapture("Plan 9 from Outer Space Charles Burg, J. Edward Reynolds, Hu.mp4")
cap=cv2.VideoCapture("Plan_9_from_Outer_Space_1959_512kb.mp4")
# NOTE(review): 'id' shadows the Python builtin; harmless here but worth renaming.
id=0
while True:
    ret, frame=cap.read()
    gray=cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    face=face_cascade.detectMultiScale(gray, scaleFactor=1.2, minNeighbors=4, minSize=(c.min_size, c.min_size))
    for x, y, w, h in face:
        # Save each detected face crop with a sequential file name.
        cv2.imwrite("non-classees/p-{:d}.png".format(id), frame[y:y+h, x:x+w])
        cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 0, 255), 2)
        id+=1
    cv2.imshow('video', frame)
    # q: quit; a: jump ahead 100 frames.
    key=cv2.waitKey(1)&0xFF
    if key==ord('q'):
        break
    if key==ord('a'):
        for cpt in range(100):
            ret, frame=cap.read()
    # Skip 4 frames per iteration to sample the video instead of every frame.
    for cpt in range(4):
        ret, frame=cap.read()
cap.release()
cv2.destroyAllWindows()

View File

@@ -0,0 +1,46 @@
import cv2
import operator
import os
import common as c
# Extract face crops from a movie into non-classees/ for later manual labelling.
# Hardened variant: checks inputs, creates the output directory, stops at EOF.
video="Plan_9_from_Outer_Space_1959_512kb.mp4"
cascade="./haarcascade_frontalface_alt2.xml"
img_non_classees='non-classees'
# Fail fast with a clear message if an input file is missing.
if not os.path.exists(video):
    print("Le fichier video n'existe pas", video)
    quit()
if not os.path.exists(cascade):
    print("Le fichier cascade n'existe pas", cascade)
    quit()
face_cascade=cv2.CascadeClassifier(cascade)
cap=cv2.VideoCapture(video)
if not os.path.isdir(img_non_classees):
    os.mkdir(img_non_classees)
# NOTE(review): 'id' shadows the Python builtin; harmless here but worth renaming.
id=0
while True:
    ret, frame=cap.read()
    if ret is False:
        # End of the video stream.
        break
    gray=cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    face=face_cascade.detectMultiScale(gray, scaleFactor=1.2, minNeighbors=4, minSize=(c.min_size, c.min_size))
    for x, y, w, h in face:
        # Save each detected face crop with a sequential file name.
        cv2.imwrite("{}/p-{:d}.png".format(img_non_classees, id), frame[y:y+h, x:x+w])
        cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 0, 255), 2)
        id+=1
    cv2.imshow('video', frame)
    # q: quit; a: jump ahead 100 frames.
    key=cv2.waitKey(1)&0xFF
    if key==ord('q'):
        break
    if key==ord('a'):
        for cpt in range(100):
            ret, frame=cap.read()
    # Skip 4 frames per iteration to sample the video instead of every frame.
    for cpt in range(4):
        ret, frame=cap.read()
cap.release()
cv2.destroyAllWindows()

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,48 @@
#!/usr/bin/env python
import cv2
import pickle
import numpy as np
import common as c
# Recognise faces in a movie with the trained LBPH model: detect, resize each
# face to the training size, predict, and label it on screen.
face_cascade= cv2.CascadeClassifier('haarcascade_frontalface_alt2.xml')
recognizer=cv2.face.LBPHFaceRecognizer_create()
recognizer.read("trainner.yml")
id_image=0
color_info=(255, 255, 255)
color_ko=(0, 0, 255)
color_ok=(0, 255, 0)
# Invert the saved label->id mapping to recover names from predicted ids.
with open("labels.pickle", "rb") as f:
    og_labels=pickle.load(f)
    labels={v:k for k, v in og_labels.items()}
cap=cv2.VideoCapture("Plan 9 from Outer Space Charles Burg, J. Edward Reynolds, Hu.mp4")
while True:
    ret, frame=cap.read()
    tickmark=cv2.getTickCount()
    gray=cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces=face_cascade.detectMultiScale(gray, scaleFactor=1.2,minNeighbors=4, minSize=(c.min_size, c.min_size))
    for (x, y, w, h) in faces:
        # Resize the face crop to the size the model was trained on.
        roi_gray=cv2.resize(gray[y:y+h, x:x+w], (c.min_size, c.min_size))
        # predict() returns (label id, confidence); conf<=95 is treated as a match.
        id_, conf=recognizer.predict(roi_gray)
        if conf<=95:
            color=color_ok
            name=labels[id_]
        else:
            color=color_ko
            name="Inconnu"
        label=name+" "+'{:5.2f}'.format(conf)
        cv2.putText(frame, label, (x, y-10), cv2.FONT_HERSHEY_DUPLEX, 1, color_info, 1, cv2.LINE_AA)
        cv2.rectangle(frame, (x, y), (x+w, y+h), color, 2)
    fps=cv2.getTickFrequency()/(cv2.getTickCount()-tickmark)
    cv2.putText(frame, "FPS: {:05.2f}".format(fps), (10, 30), cv2.FONT_HERSHEY_PLAIN, 2, color_info, 2)
    cv2.imshow('L42Project', frame)
    # q: quit; a: jump ahead 100 frames.
    key=cv2.waitKey(1)&0xFF
    if key==ord('q'):
        break
    if key==ord('a'):
        for cpt in range(100):
            ret, frame=cap.read()
cv2.destroyAllWindows()
print("Fin")

View File

@@ -0,0 +1,7 @@
# Tutoriel OpenCV
## Identification avec face.LBPHFaceRecognizer partie 1
La vidéo du tutoriel est à l'adresse:
https://www.youtube.com/watch?v=tsiy3DgAKHk

View File

@@ -0,0 +1,33 @@
import cv2
import os
import numpy as np
import pickle
# Train an LBPH face recognizer from photos/<label>/*.png and persist the
# model plus the label->id mapping.
image_dir="./photos/"
current_id=0
label_ids={}
x_train=[]
y_labels=[]
# Each sub-directory name of photos/ becomes a label mapped to a numeric id.
for root, dirs, files in os.walk(image_dir):
    if len(files):
        label=root.split("/")[-1]
        for file in files:
            if file.endswith("png"):
                path=os.path.join(root, file)
                if not label in label_ids:
                    label_ids[label]=current_id
                    current_id+=1
                id_=label_ids[label]
                image=cv2.imread(path, cv2.IMREAD_GRAYSCALE)
                x_train.append(image)
                y_labels.append(id_)
# Persist the label->id mapping for the recognition script.
with open("labels.pickle", "wb") as f:
    pickle.dump(label_ids, f)
x_train=np.array(x_train)
y_labels=np.array(y_labels)
recognizer=cv2.face.LBPHFaceRecognizer_create()
recognizer.train(x_train, y_labels)
recognizer.save("trainner.yml")

View File

@@ -0,0 +1 @@
# Side length (pixels) shared by the capture and recognition scripts: used as
# the cascade's minimum detection size.
min_size=50

View File

@@ -0,0 +1,26 @@
import cv2
import operator
import common as c
# Extract face crops from a movie into non-classees/ for later manual labelling.
face_cascade=cv2.CascadeClassifier("./haarcascade_frontalface_alt2.xml")
cap=cv2.VideoCapture("Plan 9 from Outer Space Charles Burg, J. Edward Reynolds, Hu.mp4")
# NOTE(review): 'id' shadows the Python builtin; harmless here but worth renaming.
id=0
while True:
    ret, frame=cap.read()
    gray=cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    face=face_cascade.detectMultiScale(gray, scaleFactor=1.2, minNeighbors=4, minSize=(c.min_size, c.min_size))
    for x, y, w, h in face:
        # Save each detected face crop with a sequential file name.
        cv2.imwrite("non-classees/p-{:d}.png".format(id), frame[y:y+h, x:x+w])
        cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 0, 255), 2)
        id+=1
    cv2.imshow('video', frame)
    # q: quit; a: jump ahead 100 frames.
    key=cv2.waitKey(1)&0xFF
    if key==ord('q'):
        break
    if key==ord('a'):
        for cpt in range(100):
            ret, frame=cap.read()
cap.release()
cv2.destroyAllWindows()

View File

@@ -0,0 +1,44 @@
import cv2
import operator
import os
import common as c
# Extract face crops from a movie into non-classees/ for later manual labelling.
# Hardened variant: checks inputs, creates the output directory, stops at EOF.
video="Plan_9_from_Outer_Space_1959_512kb.mp4"
cascade="./haarcascade_frontalface_alt2.xml"
img_non_classees='non-classees'
# Fail fast with a clear message if an input file is missing.
if not os.path.exists(video):
    print("Le fichier video n'existe pas", video)
    quit()
if not os.path.exists(cascade):
    print("Le fichier cascade n'existe pas", cascade)
    quit()
if not os.path.isdir(img_non_classees):
    os.mkdir(img_non_classees)
face_cascade=cv2.CascadeClassifier(cascade)
cap=cv2.VideoCapture(video)
# NOTE(review): 'id' shadows the Python builtin; harmless here but worth renaming.
id=0
while True:
    ret, frame=cap.read()
    if ret is False:
        # End of the video stream.
        break
    gray=cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    face=face_cascade.detectMultiScale(gray, scaleFactor=1.2, minNeighbors=4, minSize=(c.min_size, c.min_size))
    for x, y, w, h in face:
        # Save each detected face crop with a sequential file name.
        cv2.imwrite("{}/p-{:d}.png".format(img_non_classees, id), frame[y:y+h, x:x+w])
        cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 0, 255), 2)
        id+=1
    cv2.imshow('video', frame)
    # q: quit; a: jump ahead 100 frames.
    key=cv2.waitKey(1)&0xFF
    if key==ord('q'):
        break
    if key==ord('a'):
        for cpt in range(100):
            ret, frame=cap.read()
cap.release()
cv2.destroyAllWindows()

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,49 @@
#!/usr/bin/env python
import cv2
import pickle
import numpy as np
import common as c
# Recognise faces in a movie with the trained LBPH model: detect each face,
# predict its label, and draw the name and confidence on screen.
face_cascade= cv2.CascadeClassifier('haarcascade_frontalface_alt2.xml')
recognizer=cv2.face.LBPHFaceRecognizer_create()
recognizer.read("trainner.yml")
id_image=0
color_info=(255, 255, 255)
color_ko=(0, 0, 255)
color_ok=(0, 255, 0)
# Invert the saved label->id mapping to recover names from predicted ids.
with open("labels.pickle", "rb") as f:
    og_labels=pickle.load(f)
    labels={v:k for k, v in og_labels.items()}
cap=cv2.VideoCapture("Plan 9 from Outer Space Charles Burg, J. Edward Reynolds, Hu.mp4")
while True:
    ret, frame=cap.read()
    tickmark=cv2.getTickCount()
    gray=cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces=face_cascade.detectMultiScale(gray, scaleFactor=1.2,minNeighbors=4, minSize=(c.min_size, c.min_size))
    for (x, y, w, h) in faces:
        roi_gray=gray[y:y+h, x:x+w]
        # Alternative: resize the crop to the training size before predicting.
        #id_, conf=recognizer.predict(cv2.resize(roi_gray, (c.min_size, c.min_size)))
        # predict() returns (label id, confidence); conf<=95 is treated as a match.
        id_, conf=recognizer.predict(roi_gray)
        if conf<=95:
            color=color_ok
            name=labels[id_]
        else:
            color=color_ko
            name="Inconnu"
        label=name+" "+'{:5.2f}'.format(conf)
        cv2.putText(frame, label, (x, y-10), cv2.FONT_HERSHEY_DUPLEX, 1, color_info, 1, cv2.LINE_AA)
        cv2.rectangle(frame, (x, y), (x+w, y+h), color, 2)
    fps=cv2.getTickFrequency()/(cv2.getTickCount()-tickmark)
    cv2.putText(frame, "FPS: {:05.2f}".format(fps), (10, 30), cv2.FONT_HERSHEY_PLAIN, 2, color_info, 2)
    cv2.imshow('L42Project', frame)
    # q: quit; a: jump ahead 100 frames.
    key=cv2.waitKey(1)&0xFF
    if key==ord('q'):
        break
    if key==ord('a'):
        for cpt in range(100):
            ret, frame=cap.read()
cv2.destroyAllWindows()
print("Fin")