import face_recognition
import os, sys
import cv2
import numpy as np
import math


def face_confidence(face_distance, face_match_threshold=0.6):
    range = (1.0 - face_match_threshold)
    linear_valu = (1.0 - face_distance) / (range * 2.0)
    if face_distance > face_match_threshold:
        return str(round(linear_valu * 100 / 2)) + '%'
    else:
        value = (linear_valu + ((1 - linear_valu) * math.pow((linear_valu - 0.5) * 2, 0.2))) * 100
        return str(round(value, 2)) + "%"


class FaceRecognition:
    face_location = []
    face_encodings = []
    face_names = []
    known_face_encodings = []
    known_face_names = []
    process_current_frame = True

    def __init__(self):
        self.encode_faces()

    def encode_faces(self):
        for image in os.listdir('faces'):
            face_image = face_recognition.load_image_file(f'faces/{image}')
            face_encoding = face_recognition.face_encodings(face_image)[0]
            self.known_face_encodings.append(face_encoding)
            self.known_face_encodings.append(image)
        print(self.known_face_names)

    def run_recognition(self):
        video_capture = cv2.VideoCapture(0)
        if not video_capture.isOpened():
            sys.exit("Video source not found")
        while True:
            ret, frame = video_capture.read()
            if self.process_current_frame:
                small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
                rgb_small_frame = small_frame[:, :, ::-1]
                # find all faces in frame
                self.face_location = face_recognition.face_locations(rgb_small_frame)
                self.face_encodings = face_recognition.face_encodings(rgb_small_frame, self.face_location)
                self.face_names = []
                for face_encoding in self.face_encodings:
                    matches = face_recognition.compare_faces(self.known_face_encodings, face_encoding)
                    name = 'Unknown'
                    confidence = 'Unknown'
                    face_distances = face_recognition.face_distance(self.known_face_encodings, face_encoding)
                    best_match_index = np.argmin(face_distances)
                    if matches[best_match_index]:
                        name = self.known_face_names[best_match_index]
                        confidence = face_confidence(face_distances[best_match_index])
                    self.face_names.append(f'{name}({confidence}')
            self.process_current_frame = not self.process_current_frame
            # display annotations
            for (top, right, bottom, left), name in zip(self.face_location, self.face_names):
                top *= 4
                right *= 4
                bottom *= 4
                left *= 4
                cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
                cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), -1)
                cv2.putText(frame, name, (left + 6, bottom - 6), cv2.FONT_HERSHEY_DUPLEX, 0.8, (255, 255, 255), 1)
            cv2.imshow("face recognition", frame)
            if cv2.waitKey(1) == ord('q'):
                break
        video_capture.release()
        cv2.destroyAllWindows()


if __name__ == '__main__':
    fr = FaceRecognition()
    fr.run_recognition()
I really don't know why this isn't working. I've tried saving different pictures to the faces folder, but it still just crashes the moment it sees a face. Does anyone know what could be the cause?
Traceback (most recent call last):
File "/Users/youssefmajdalani/PycharmProjects/faceproj/main.py", line 96, in <module>
fr.run_recognition()
File "/Users/youssefmajdalani/PycharmProjects/faceproj/main.py", line 56, in run_recognition
self.face_encodings=face_recognition.face_encodings(rgb_small_frame, self.face_location)
File "/Users/youssefmajdalani/PycharmProjects/faceproj/venv/lib/python3.9/site-packages/face_recognition/api.py", line 214, in face_encodings
return [np.array(face_encoder.compute_face_descriptor(face_image, raw_landmark_set, num_jitters)) for raw_landmark_set in raw_landmarks]
File "/Users/youssefmajdalani/PycharmProjects/faceproj/venv/lib/python3.9/site-packages/face_recognition/api.py", line 214, in <listcomp>
return [np.array(face_encoder.compute_face_descriptor(face_image, raw_landmark_set, num_jitters)) for raw_landmark_set in raw_landmarks]
TypeError: compute_face_descriptor(): incompatible function arguments. The following argument types are supported:
1. (self: _dlib_pybind11.face_recognition_model_v1, img: numpy.ndarray[(rows,cols,3),numpy.uint8], face: _dlib_pybind11.full_object_detection, num_jitters: int = 0, padding: float = 0.25) -> _dlib_pybind11.vector
2. (self: _dlib_pybind11.face_recognition_model_v1, img: numpy.ndarray[(rows,cols,3),numpy.uint8], num_jitters: int = 0) -> _dlib_pybind11.vector
3. (self: _dlib_pybind11.face_recognition_model_v1, img: numpy.ndarray[(rows,cols,3),numpy.uint8], faces: _dlib_pybind11.full_object_detections, num_jitters: int = 0, padding: float = 0.25) -> _dlib_pybind11.vectors
4. (self: _dlib_pybind11.face_recognition_model_v1, batch_img: List[numpy.ndarray[(rows,cols,3),numpy.uint8]], batch_faces: List[_dlib_pybind11.full_object_detections], num_jitters: int = 0, padding: float = 0.25) -> _dlib_pybind11.vectorss
5. (self: _dlib_pybind11.face_recognition_model_v1, batch_img: List[numpy.ndarray[(rows,cols,3),numpy.uint8]], num_jitters: int = 0) -> _dlib_pybind11.vectors
Invoked with: <_dlib_pybind11.face_recognition_model_v1 object at 0x10533b830>, array([[[134, 105, 82],
[135, 106, 83],
[136, 107, 82],
...,
[132, 111, 81],
[133, 111, 82],
[132, 110, 81]],
[[131, 102, 79],
[136, 107, 84],
[136, 106, 81],
...,
[131, 111, 81],
[133, 112, 82],
[133, 111, 81]],
[[127, 100, 76],
[131, 104, 81],
[133, 104, 80],
...,
[132, 112, 82],
[132, 111, 81],
[134, 112, 80]],
...,
[[138, 116, 89],
[139, 117, 89],
[139, 116, 87],
...,
[168, 140, 121],
[ 37, 18, 8],
[ 44, 30, 17]],
[[137, 115, 86],
[138, 116, 89],
[140, 117, 90],
...,
[146, 105, 97],
[ 39, 17, 7],
[ 42, 24, 10]],
[[134, 115, 85],
[137, 115, 86],
[138, 116, 87],
...,
[129, 92, 88],
[ 45, 24, 12],
[ 42, 23, 9]]], dtype=uint8), <_dlib_pybind11.full_object_detection object at 0x1078d3d30>, 1
Process finished with exit code 1
This is the error output I get.
At first glance, my guess is that your problem is here:
self.known_face_encodings.append(face_encoding)
self.known_face_encodings.append(image)
You are appending both the face encoding and the image filename (a str) to the known_face_encodings list, and then further on you use known_face_encodings as if it were a list of encodings only.
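A minimal sketch of how encode_faces could look with that fixed, assuming the filename (minus its extension) is meant to act as the person's display name:

    def encode_faces(self):
        for image in os.listdir('faces'):
            face_image = face_recognition.load_image_file(f'faces/{image}')
            face_encoding = face_recognition.face_encodings(face_image)[0]
            # Encoding goes into known_face_encodings; the name goes into
            # known_face_names (filename without extension, as an assumed label).
            self.known_face_encodings.append(face_encoding)
            self.known_face_names.append(os.path.splitext(image)[0])
        print(self.known_face_names)

With that change, known_face_encodings holds only encodings, so compare_faces and face_distance receive the kind of input they expect, and known_face_names lines up index-for-index with the encodings when you look up best_match_index.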