realtime_face_landmarks.py
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 8 15:26:36 2023
@author: slbouknight
"""
import face_recognition
from PIL import Image, ImageDraw
import numpy as np
import cv2
# capture the video from the default camera
webcam_video_stream = cv2.VideoCapture(0)
# webcam_video_stream = cv2.VideoCapture('images/testing/modi.mp4')

# loop through every frame in the video
while True:
    # get the current frame from the video stream as an image
    ret, current_frame = webcam_video_stream.read()
    if not ret:
        break
    # face_recognition expects RGB images, while OpenCV delivers BGR
    rgb_frame = cv2.cvtColor(current_frame, cv2.COLOR_BGR2RGB)
    # get the face landmarks list for every face in the frame
    face_landmarks_list = face_recognition.face_landmarks(rgb_frame)
    # print(len(face_landmarks_list))
    # convert the numpy array image into a PIL image object
    pil_image = Image.fromarray(rgb_frame)
    # create a draw object for the PIL image
    d = ImageDraw.Draw(pil_image)
    # loop through every detected face
    for face_landmarks in face_landmarks_list:
        # join each set of face landmark points with lines
        d.line(face_landmarks['chin'], fill=(255, 255, 255), width=2)
        d.line(face_landmarks['left_eyebrow'], fill=(255, 255, 255), width=2)
        d.line(face_landmarks['right_eyebrow'], fill=(255, 255, 255), width=2)
        d.line(face_landmarks['nose_bridge'], fill=(255, 255, 255), width=2)
        d.line(face_landmarks['nose_tip'], fill=(255, 255, 255), width=2)
        d.line(face_landmarks['left_eye'], fill=(255, 255, 255), width=2)
        d.line(face_landmarks['right_eye'], fill=(255, 255, 255), width=2)
        d.line(face_landmarks['top_lip'], fill=(255, 255, 255), width=2)
        d.line(face_landmarks['bottom_lip'], fill=(255, 255, 255), width=2)
    # convert the PIL image back to a numpy array (RGB), then to BGR for OpenCV
    rgb_open_cv_image = np.array(pil_image)
    bgr_open_cv_image = cv2.cvtColor(rgb_open_cv_image, cv2.COLOR_RGB2BGR)
    # show the current frame with the landmarks drawn
    cv2.imshow("Webcam Video", bgr_open_cv_image)
    # press 'q' to quit
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# release the stream and close all open OpenCV windows
webcam_video_stream.release()
cv2.destroyAllWindows()
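
For reference, the same landmark drawing can be applied to a single still image instead of a webcam feed. The minimal sketch below assumes a hypothetical image path ('images/testing/sample.jpg'); face_recognition.load_image_file already returns an RGB array, so no colour conversion is needed before calling face_landmarks.

import face_recognition
from PIL import Image, ImageDraw

# load the image as an RGB numpy array (path is a placeholder)
image = face_recognition.load_image_file('images/testing/sample.jpg')
# detect facial landmarks for every face in the image
face_landmarks_list = face_recognition.face_landmarks(image)

pil_image = Image.fromarray(image)
d = ImageDraw.Draw(pil_image)
for face_landmarks in face_landmarks_list:
    # trace each landmark group (chin, eyebrows, nose, eyes, lips)
    for feature_points in face_landmarks.values():
        d.line(feature_points, fill=(255, 255, 255), width=2)

pil_image.show()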