I am trying to build a Sphereface model on the LFW dataset, using MTCNN for face detection, and to report evaluation metrics such as precision, recall, and F1 score without using the dlib library.
import os
import cv2
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, precision_score, recall_score, f1_score, average_precision_score
from sklearn.metrics import roc_curve, auc
from sklearn.preprocessing import label_binarize
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import SVC
from sklearn.pipeline import make_pipeline
from joblib import dump
from mtcnn.mtcnn import MTCNN
import torch
from torchvision import transforms
from torch.utils.data import DataLoader, Dataset
from tqdm import tqdm


# Load the LFW dataset
def load_lfw_dataset(data_folder):
    images = []
    labels = []
    for person_name in os.listdir(data_folder):
        person_folder = os.path.join(data_folder, person_name)
        if os.path.isdir(person_folder):
            for filename in os.listdir(person_folder):
                if filename.endswith(".jpg"):
                    image_path = os.path.join(person_folder, filename)
                    images.append(cv2.imread(image_path, cv2.IMREAD_GRAYSCALE))
                    labels.append(person_name)
    return images, labels


# Detect and align faces using MTCNN
def detect_and_align_faces(images):
    detector = MTCNN()
    aligned_faces = []
    for img in images:
        faces = detector.detect_faces(img)
        if faces:
            x, y, w, h = faces[0]['box']
            face = img[y:y + h, x:x + w]
            aligned_faces.append(face)
    return aligned_faces


# Sphereface-like model architecture (example, adjust as needed)
class SphereFaceModel(torch.nn.Module):
    def __init__(self, num_classes):
        super(SphereFaceModel, self).__init__()
        # Example: Add Sphereface model layers

    def forward(self, x):
        # Example: Forward pass
        return x


# Extract features using Sphereface
def extract_features(model, aligned_faces):
    transform = transforms.Compose([
        transforms.ToPILImage(),
        transforms.Resize((112, 96)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.5], std=[0.5]),
    ])
    features = []
    for face in aligned_faces:
        face = transform(face)
        face = face.unsqueeze(0)  # Add batch dimension
        # Example: Forward pass through the Sphereface model
        output = model(face)
        features.append(output.detach().numpy())
    return np.array(features)


# Train the Sphereface model (example, adjust as needed)
def train_sphereface_model(model, train_loader, optimizer, criterion, num_epochs=5):
    for epoch in range(num_epochs):
        for inputs, labels in tqdm(train_loader):
            optimizer.zero_grad()
            outputs = model(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()


# Main function
def main():
    # Load LFW dataset
    data_folder = "lfw"  # Change this to the actual path of your LFW dataset
    images, labels = load_lfw_dataset(data_folder)

    # Split the dataset into training and testing sets
    X_train, X_test, y_train, y_test = train_test_split(images, labels, test_size=0.2, random_state=42)

    # Detect and align faces
    X_train_aligned = detect_and_align_faces(X_train)
    X_test_aligned = detect_and_align_faces(X_test)

    # Example: Initialize Sphereface model (adjust as needed)
    num_classes = len(set(y_train))
    model = SphereFaceModel(num_classes)

    # Extract features
    X_train_features = extract_features(model, X_train_aligned)
    X_test_features = extract_features(model, X_test_aligned)

    # Train SVM classifier on extracted features
    classifier = make_pipeline(OneVsRestClassifier(SVC(probability=True)))
    classifier.fit(X_train_features.reshape(X_train_features.shape[0], -1), y_train)

    # Save the trained model
    model_filename = 'sphereface_model.joblib'
    dump(classifier, model_filename)  # Adjust the filename as needed

    # Evaluate the model
    y_pred = classifier.predict(X_test_features.reshape(X_test_features.shape[0], -1))

    # Confusion matrix
    tn, fp, fn, tp = confusion_matrix(y_test, y_pred).ravel()

    # Precision, Recall, F1 Score
    precision = precision_score(y_test, y_pred, average='weighted')
    recall = recall_score(y_test, y_pred, average='weighted')
    f1 = f1_score(y_test, y_pred, average='weighted')

    # ROC Curve and AUC
    y_test_bin = label_binarize(y_test, classes=np.unique(y_test))
    y_pred_prob = classifier.predict_proba(X_test_features.reshape(X_test_features.shape[0], -1))
    fpr, tpr, _ = roc_curve(y_test_bin.ravel(), y_pred_prob.ravel())
    roc_auc = auc(fpr, tpr)

    # Mean Average Precision
    average_precision = average_precision_score(y_test_bin, y_pred_prob, average='micro')

    # Equal Error Rate (EER)
    eer = 0.5 * (1 - recall_score(y_test, y_pred, pos_label=0))

    # Print the results
    print(f'True Positive: {tp}')
    print(f'True Negative: {tn}')
    print(f'False Positive: {fp}')
    print(f'False Negative: {fn}')
    print(f'Precision: {precision}')
    print(f'Recall: {recall}')
    print(f'F1 Score: {f1}')
    print(f'ROC AUC: {roc_auc}')
    print(f'Mean Average Precision: {average_precision}')
    print(f'Equal Error Rate: {eer}')


if __name__ == "__main__":
    main()
Traceback (most recent call last):
  File "c:\Users\EN\Desktop\Face Recognition Models\Sphereface\lfw_main.py", line 156, in <module>
    main()
  File "c:\Users\EN\Desktop\Face Recognition Models\Sphereface\lfw_main.py", line 100, in main
    X_train_aligned = detect_and_align_faces(X_train)
                      ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "c:\Users\EN\Desktop\Face Recognition Models\Sphereface\lfw_main.py", line 40, in detect_and_align_faces
    faces = detector.detect_faces(img)
            ^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "C:\Users\EN\Desktop\Face Recognition Models\Sphereface\.venv\Lib\site-packages\mtcnn\mtcnn.py", line 287, in detect_faces
    height, width, _ = img.shape
    ^^^^^^^^^^^^^^^^^
ValueError: not enough values to unpack (expected 3, got 2)

How can I make this work, and how can I apply it to other face recognition algorithms?
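For context (this snippet is not from the original post): the line MTCNN trips on, height, width, _ = img.shape, only succeeds for 3-D arrays. A grayscale image loaded by cv2 is a 2-D array, so the unpack fails exactly as in the traceback. The shapes below are illustrative, not taken from the dataset:

import numpy as np

color = np.zeros((250, 250, 3), dtype=np.uint8)  # 3-channel image: shape has three values
gray = np.zeros((250, 250), dtype=np.uint8)      # cv2.IMREAD_GRAYSCALE result: only two

height, width, _ = color.shape  # unpacks fine
height, width, _ = gray.shape   # ValueError: not enough values to unpack (expected 3, got 2)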
The only cause of the error I can see is that your function detect_and_align_faces() expects a list of cv2 images but received something else (check what train_test_split() returns). Then check that img in

for img in images:
    faces = detector.detect_faces(img)

really is such an image.
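Building on that check: detect_faces() unpacks three values from img.shape, but load_lfw_dataset() reads images with cv2.IMREAD_GRAYSCALE, which produces 2-D arrays. Below is a minimal sketch (not the original code) of a detect_and_align_faces() that validates and converts its input first. The helper name to_rgb_image is made up for illustration, and the sketch assumes the pip mtcnn package, whose examples feed detect_faces() an RGB array of shape (H, W, 3).

import cv2
import numpy as np
from mtcnn.mtcnn import MTCNN


def to_rgb_image(img):
    # Hypothetical helper: turn whatever came out of load_lfw_dataset()
    # into the 3-channel array that detect_faces() expects.
    if img is None:
        # cv2.imread() returns None when a file could not be read
        raise ValueError("not a cv2 image (imread probably failed)")
    img = np.asarray(img)
    if img.ndim == 2:
        # grayscale (H, W) -> (H, W, 3)
        return cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
    # cv2 loads color images as BGR; the mtcnn examples convert to RGB first
    return cv2.cvtColor(img, cv2.COLOR_BGR2RGB)


def detect_and_align_faces(images):
    detector = MTCNN()
    aligned_faces = []
    for img in images:
        rgb = to_rgb_image(img)
        faces = detector.detect_faces(rgb)
        if faces:
            x, y, w, h = faces[0]['box']
            # MTCNN can return slightly negative coordinates; clamp before slicing
            x, y = max(x, 0), max(y, 0)
            aligned_faces.append(rgb[y:y + h, x:x + w])
    return aligned_faces

Alternatively, the smallest change is to load the images with cv2.IMREAD_COLOR in load_lfw_dataset(), so that 2-D arrays never reach MTCNN in the first place.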