我想通过使用Vision检测脸部点来绘制脸部的矢量图像。我创建了一个代码,其中我得到了一些点,并且我已经下载了绘制线条的代码,但我无法理解它,也无法自定义线条。请帮助我理解代码;如果有人可以帮助我如何使用我拥有的人脸检测代码在脸上绘制点,我检测人脸的代码是:
/// Draws the detected face's bounding box (green) and landmark lines (red)
/// on top of `source`, returning the composited image.
///
/// - Parameters:
///   - source: The photo the face was detected in.
///   - boundingRect: The face bounding box in Vision's normalized
///     coordinates (origin at bottom-left, all values in 0...1).
///   - faceLandmarkRegions: Landmark regions whose points are normalized
///     relative to `boundingRect`.
/// - Returns: A copy of `source` with the overlays drawn on it.
func drawOnImage(source: UIImage,
                 boundingRect: CGRect,
                 faceLandmarkRegions: [VNFaceLandmarkRegion2D]) -> UIImage {
    // Use the source image's scale (not 1) so the output keeps full
    // retina resolution instead of being downsampled to 1x.
    UIGraphicsBeginImageContextWithOptions(source.size, false, source.scale)
    let context = UIGraphicsGetCurrentContext()!
    // Guarantee the context is balanced on every exit path.
    defer { UIGraphicsEndImageContext() }

    // Flip the context vertically: Vision/CoreGraphics use a bottom-left
    // origin, while UIKit images use a top-left origin.
    context.translateBy(x: 0, y: source.size.height)
    context.scaleBy(x: 1.0, y: -1.0)

    context.setLineJoin(.round)
    context.setLineCap(.round)
    context.setShouldAntialias(true)
    context.setAllowsAntialiasing(true)

    // Draw the photo FIRST with normal blending. (The original set
    // .colorBurn before this draw, which darkened the photo itself.)
    let rect = CGRect(x: 0, y: 0, width: source.size.width, height: source.size.height)
    context.draw(source.cgImage!, in: rect)

    // Overlays blend with the photo using colorBurn, as before.
    context.setBlendMode(.colorBurn)

    // Convert the normalized bounding box to image-pixel coordinates.
    let rectWidth = source.size.width * boundingRect.size.width
    let rectHeight = source.size.height * boundingRect.size.height
    // The box is STROKED, so the color must be set as the stroke color.
    // (The original called setFill(), so the green was never applied.)
    UIColor.green.setStroke()
    context.addRect(CGRect(x: boundingRect.origin.x * source.size.width,
                           y: boundingRect.origin.y * source.size.height,
                           width: rectWidth,
                           height: rectHeight))
    context.drawPath(using: .stroke)

    // Landmark points are normalized to the bounding box, so map
    // box-relative coordinates into image-pixel coordinates.
    UIColor.red.setStroke()
    context.setLineWidth(2.0)
    for region in faceLandmarkRegions {
        let mappedPoints = region.normalizedPoints.map { point in
            CGPoint(x: boundingRect.origin.x * source.size.width + CGFloat(point.x) * rectWidth,
                    y: boundingRect.origin.y * source.size.height + CGFloat(point.y) * rectHeight)
        }
        context.addLines(between: mappedPoints)
        context.drawPath(using: .stroke)
    }

    return UIGraphicsGetImageFromCurrentImageContext()!
}
我想绘制矢量图像,脸上有点和线
伙计们,我已经使用 ARKit 实现了我想要的输出,它提供了 Face Mask 的功能。我现在可以在检测到的脸上添加面罩,我在这里分享代码。
import ARKit
在类中声明一个属性
// The AR view that renders the camera feed plus the face-mesh overlay.
let sceneView = ARSCNView()
将以下代码放入类的 viewDidLoad() 方法中
// Size the AR view: width from the root view, height from cameraView.
// NOTE(review): width and height come from two different views — confirm
// this asymmetry is intentional.
let frame = CGRect(x:0, y: 0, width: self.view.frame.size.width,
height: self.cameraView.bounds.height)
sceneView.frame = frame
self.view.addSubview(sceneView)
sceneView.delegate = self
// Face tracking requires TrueDepth hardware; bail out silently if absent.
guard ARFaceTrackingConfiguration.isSupported else { return }
let configuration = ARFaceTrackingConfiguration()
configuration.isLightEstimationEnabled = true
// Restart tracking from scratch, discarding any previous anchors.
sceneView.session.run(configuration, options: [.resetTracking,
.removeExistingAnchors])
在您的类中添加以下两个委托方法并从ARSCNViewDelegate
继承您的类:

/// Supplies the node shown for a newly detected anchor: a face-mesh
/// wireframe that ARKit will keep aligned with the user's face.
func renderer(_ renderer: SCNSceneRenderer, nodeFor anchor: ARAnchor) -> SCNNode? {
    // A Metal device is required to build ARSCNFaceGeometry.
    guard let device = sceneView.device else {
        return nil
    }
    // Face mesh geometry that ARKit updates as the face moves.
    let faceGeometry = ARSCNFaceGeometry(device: device)
    let node = SCNNode(geometry: faceGeometry)
    // Render as a wireframe so the mask appears as vector lines.
    node.geometry?.firstMaterial?.fillMode = .lines
    return node
}
/// Keeps the wireframe mesh in sync with the tracked face on each update.
func renderer(_ renderer: SCNSceneRenderer, didUpdate node: SCNNode, for anchor: ARAnchor) {
    // Only face anchors carry face geometry; ignore everything else.
    guard let faceAnchor = anchor as? ARFaceAnchor,
          let faceGeometry = node.geometry as? ARSCNFaceGeometry else {
        return
    }
    // Refresh the mesh so the lines follow the face every frame.
    faceGeometry.update(from: faceAnchor.geometry)
}
现在,当您运行代码时,您将能够看到检测到的脸部有一个矢量线面罩