It is possible! 🚀 (better late than never)
I used MediaPipe together with the canonical face model, which can be found here (I exported it as .OBJ for later use). I then used Blender's Python API to update the vertices of that canonical face model in real time.
I used the implementation below and ran it with this VS Code extension. The landmarker model can be fetched as in the sketch right after this paragraph, and mediapipe installed as described in the comments at the top of the script:
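A minimal download sketch, run once outside of Blender; the URL is my assumption (taken from the MediaPipe models listing), not something from the original post, and the local filename just matches what the script below expects:

# Sketch only: fetch the face landmarker model that the script loads below.
# The URL is an assumption; verify it against the MediaPipe models page.
import urllib.request

MODEL_URL = (
    "https://storage.googleapis.com/mediapipe-models/face_landmarker/"
    "face_landmarker/float16/1/face_landmarker.task"
)
urllib.request.urlretrieve(MODEL_URL, "face_landmarker_v2_with_blendshapes.task")

The script itself: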
# Install mediapipe into Blender's bundled Python, e.g.:
#   blender-4.2.1-macos-arm64/Blender.app/Contents/Resources/4.2/python/bin/python3.11 -m pip install mediapipe
# Then run from VS Code with the Blender extension: "Blender: Start", followed by "Blender: Run Script"
import mediapipe as mp
from mediapipe.tasks import python
from mediapipe.tasks.python import vision
import cv2
import bpy
import bmesh
import threading
import queue
image_queue = queue.Queue()

class FaceMask:
    def __init__(self):
        self._face_obj = None

    def clean_scene(self):
        print("Cleaning scene, context:", bpy.context.mode)
        print("poll", bpy.ops.object.mode_set.poll())
        if bpy.context.mode != "OBJECT":
            bpy.ops.object.mode_set(mode="OBJECT")
            print("Context changed to OBJECT")
        bpy.ops.object.select_all(action="SELECT")
        print("All objects selected")
        bpy.ops.object.delete()
        print("Scene cleaned")

    def load_face(self, file_path):
        bpy.ops.wm.obj_import(filepath=file_path)
        self._face_obj = bpy.context.selected_objects[0]
        self._face_obj.name = "face_mask"
        self._face_obj.location = (0, 0, 0)
        self._face_obj.scale = (1, 1, 1)
        self._face_obj.rotation_euler = (0, 0, 0)
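        # Note (my assumption, not in the original): depending on how the .OBJ was
        # exported, the imported mesh may come in rotated. The OBJ importer accepts
        # axis overrides if that happens, e.g.:
        #   bpy.ops.wm.obj_import(filepath=file_path, forward_axis='NEGATIVE_Z', up_axis='Y')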

    def change_vertices(self, mp_vertices):
        # Make face_obj the active object so we can enter edit mode on it
        self._face_obj.select_set(True)
        bpy.context.view_layer.objects.active = self._face_obj
        bpy.ops.object.mode_set(mode="EDIT")
        bm = bmesh.from_edit_mesh(self._face_obj.data)
        # Ensure proper vertex lookup and deselect all vertices
        bm.verts.ensure_lookup_table()
        for vert in bm.verts:
            vert.select = False
        # MediaPipe landmark i corresponds to vertex i of the canonical face model;
        # the coordinates are normalized (x, y in [0, 1], z on a similar scale)
        for mp_index, vertex in enumerate(bm.verts):
            vertex.co.x = mp_vertices[mp_index][0]
            vertex.co.y = mp_vertices[mp_index][1]
            vertex.co.z = mp_vertices[mp_index][2]
        bmesh.update_edit_mesh(
            self._face_obj.data, loop_triangles=True, destructive=False
        )
        bpy.ops.object.mode_set(mode="OBJECT")
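    # Untested alternative (a sketch, not part of the original script): writing the
    # coordinates straight into the mesh datablock avoids toggling edit mode on
    # every frame, which tends to be much cheaper per update:
    #
    #   import numpy as np
    #   def change_vertices_fast(self, mp_vertices):
    #       n = len(self._face_obj.data.vertices)
    #       flat = np.asarray(mp_vertices[:n], dtype=np.float32).ravel()
    #       self._face_obj.data.vertices.foreach_set("co", flat)
    #       self._face_obj.data.update()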

def capture_webcam_frames():
    video_capture = cv2.VideoCapture(0)
    try:
        while True:
            _, image = video_capture.read()
            if image is not None:
                image_queue.put(image)
    finally:
        video_capture.release()
        cv2.destroyAllWindows()
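# Note (my addition, not in the original): image_queue grows without bound whenever
# the webcam produces frames faster than the timer below drains them. One way to cap
# memory is a bounded queue that drops frames instead of blocking the capture thread:
#
#   image_queue = queue.Queue(maxsize=2)
#   try:
#       image_queue.put_nowait(image)
#   except queue.Full:
#       pass  # drop the frame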

def process_frames(face_mask: FaceMask, detector):
    # Runs as a bpy.app.timers callback: drain whatever the capture thread queued,
    # then ask Blender to call us again in 0.1 s.
    while not image_queue.empty():
        image = image_queue.get()
        cv2.imshow("Image", image)
        if cv2.waitKey(1) == ord("q"):
            return None  # returning None unregisters the timer
        # OpenCV delivers BGR; MediaPipe expects RGB
        rgb_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        mp_image_data = mp.Image(image_format=mp.ImageFormat.SRGB, data=rgb_image)
        detection_result = detector.detect(mp_image_data)
        # Extract and apply face landmarks
        detections = detection_result.face_landmarks
        if not detections:
            continue
        coords = [[landmark.x, landmark.y, landmark.z] for landmark in detections[0]]
        face_mask.change_vertices(coords)
    return 0.1

if __name__ == "__main__" or __name__ == "<run_path>":
    # "<run_path>" covers the case where the VS Code Blender extension executes the script
    base_options = python.BaseOptions(
        model_asset_path="face_landmarker_v2_with_blendshapes.task"
    )
    options = vision.FaceLandmarkerOptions(
        base_options=base_options,
        output_face_blendshapes=True,
        output_facial_transformation_matrixes=True,
        num_faces=1,
        running_mode=vision.RunningMode.IMAGE,
    )
    detector = vision.FaceLandmarker.create_from_options(options)

    face_mask = FaceMask()
    face_mask.clean_scene()
    face_mask.load_face("3d-models/canonical_face_model.obj")

    # Grab webcam frames on a background thread; daemon=True so the thread
    # does not keep Blender alive on exit
    capture_thread = threading.Thread(target=capture_webcam_frames, daemon=True)
    capture_thread.start()

    # Register a timer to process frames on Blender's main thread
    bpy.app.timers.register(lambda: process_frames(face_mask, detector))
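To shut things down without closing Blender, something along these lines should work (a rough sketch; stop_event and stop() are names I am introducing here, they are not part of the script above):

# Hypothetical cleanup sketch: share a threading.Event between the capture loop
# and the timer so both can exit cleanly.
stop_event = threading.Event()

# in capture_webcam_frames(): loop with `while not stop_event.is_set():`
# in process_frames(): start with `if stop_event.is_set(): return None`

def stop():
    stop_event.set()          # ends the capture loop and, next tick, the timer
    cv2.destroyAllWindows()   # close the preview window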