In Swift on iOS, I have an array of AVURLAssets. I pass it through a function that stitches/merges the video assets together into one final video. For each sub-video, my goal is to overlay text centered in the video frame. So far I have a semi-working version of this based on my previous post, but I've run into the following issues that I can't make sense of.

The first issue is orientation: I thought that setting the AVMutableVideoCompositionLayerInstruction's transform to the .preferredTransform of the corresponding video AVAssetTrack would preserve each clip's orientation, but it doesn't. The video comes out rotated by -90 degrees, and the bottom half of the screen is black.
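To show what I mean by the orientation being off, this is a small inspection helper I've been using while debugging. It's my own sketch, not part of the merge function below, and orientationInfo(for:) is just a name I made up:

    import AVFoundation
    import CoreGraphics

    // Sketch only: read the rotation encoded in a track's preferredTransform and
    // the upright size it implies. Note that CGSize.applying(_:) ignores the
    // transform's translation (tx/ty), which camera footage often carries in
    // addition to the rotation.
    func orientationInfo(for track: AVAssetTrack) -> (degrees: Double, displaySize: CGSize) {
        let t = track.preferredTransform
        let degrees = atan2(Double(t.b), Double(t.a)) * 180 / .pi  // e.g. -90 for my failing clips
        let transformed = track.naturalSize.applying(t)
        let displaySize = CGSize(width: abs(transformed.width), height: abs(transformed.height))
        return (degrees, displaySize)
    }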
The second issue is the text overlay: it only shows up when I create a new AVAsset from the .outputURL property of the AVAssetExportSession and play that, which means the export has to fully complete before the video can be played. Previously I played the final video from the .asset property of the AVAssetExportSession, and playback would start immediately with whatever content had loaded, without waiting for the entire export (but, again, with no text overlay). Is there any way to get back to that original behavior while still rendering the text overlays?
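To make the two playback paths concrete (illustration only; exporter and play(video:) refer to the export session and my AVPlayer helper used in the function below):

    // Old behavior: playback starts immediately while the export is still
    // running, but the CATextLayer overlay never appears.
    play(video: exporter.asset)

    // Current behavior: the overlay renders, but only once the export completes.
    exporter.exportAsynchronously {
        DispatchQueue.main.async {
            if exporter.status == .completed, let url = exporter.outputURL {
                self.play(video: AVAsset(url: url))
            }
        }
    }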
Any help is much appreciated.
func merge(videos: [AVURLAsset], completion: @escaping (_ url: URL, _ asset: AVAssetExportSession) -> ()) {
    let videoComposition = AVMutableComposition()
    var lastTime: CMTime = .zero
    var maxVideoSize = CGSize.zero

    // One video track and one audio track that every source segment is appended to.
    guard let videoCompositionTrack = videoComposition.addMutableTrack(withMediaType: .video, preferredTrackID: Int32(kCMPersistentTrackID_Invalid)),
          let audioCompositionTrack = videoComposition.addMutableTrack(withMediaType: .audio, preferredTrackID: Int32(kCMPersistentTrackID_Invalid)) else { return }

    let mainComposition = AVMutableVideoComposition()

    // Parent layer hosts the rendered video layer plus one text layer per clip.
    // Note: maxVideoSize is still .zero here; the frames are reassigned after the loop.
    let mainParentLayer = CALayer()
    let mainVideoLayer = CALayer()
    mainParentLayer.frame = CGRect(x: 0, y: 0, width: maxVideoSize.width, height: maxVideoSize.height)
    mainVideoLayer.frame = CGRect(x: 0, y: 0, width: maxVideoSize.width, height: maxVideoSize.height)
    mainParentLayer.addSublayer(mainVideoLayer)

    var instructions = [AVMutableVideoCompositionInstruction]()

    for video in videos {
        if let videoTrack = video.tracks(withMediaType: .video)[safe: 0], let text = savedTexts[video.url] {
            videoCompositionTrack.preferredTransform = videoTrack.preferredTransform
            do {
                // Append this clip's video (and audio, if present) at the end of the composition.
                try videoCompositionTrack.insertTimeRange(CMTimeRangeMake(start: .zero, duration: video.duration), of: videoTrack, at: lastTime)
                if let audioTrack = video.tracks(withMediaType: .audio)[safe: 0] {
                    try audioCompositionTrack.insertTimeRange(CMTimeRangeMake(start: .zero, duration: video.duration), of: audioTrack, at: lastTime)
                }
                lastTime = CMTimeAdd(lastTime, video.duration)

                // Oriented display size of this clip; track the largest for the render size.
                let videoSize = videoTrack.naturalSize.applying(videoTrack.preferredTransform)
                let videoRect = CGRect(x: 0, y: 0, width: abs(videoSize.width), height: abs(videoSize.height))
                maxVideoSize = CGSize(width: max(maxVideoSize.width, videoRect.width), height: max(maxVideoSize.height, videoRect.height))

                // Text layer centered in this clip's frame.
                let textLayer = CATextLayer()
                textLayer.string = text
                textLayer.foregroundColor = UIColor.white.cgColor
                textLayer.font = UIFont(name: "Helvetica-Bold", size: min(videoRect.height / 10, 100))
                textLayer.shadowOpacity = 0.5
                textLayer.alignmentMode = .center
                textLayer.contentsScale = UIScreen.main.scale
                textLayer.isWrapped = true
                let textHeight: CGFloat = min(videoRect.height / 10, 120)
                let textWidth: CGFloat = videoRect.width
                let xPos = (videoRect.width - textWidth) / 2
                let yPos = (videoRect.height - textHeight) / 2
                textLayer.frame = CGRect(x: xPos, y: yPos, width: textWidth, height: textHeight)
                textLayer.zPosition = 1

                let parentLayer = CALayer()
                parentLayer.backgroundColor = UIColor.clear.cgColor
                parentLayer.frame = videoRect
                parentLayer.addSublayer(textLayer)

                // Instruction covering just this clip's time range, carrying its transform.
                let videoCompositionInstruction = AVMutableVideoCompositionInstruction()
                videoCompositionInstruction.timeRange = CMTimeRangeMake(start: lastTime - video.duration, duration: video.duration)
                let layerInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: videoTrack)
                layerInstruction.setTransform(videoTrack.preferredTransform, at: lastTime)
                videoCompositionInstruction.layerInstructions = [layerInstruction]
                instructions.append(videoCompositionInstruction)

                parentLayer.zPosition = 0
                mainParentLayer.addSublayer(parentLayer)
            } catch {
                print("Failed to insert track: \(error.localizedDescription)")
                return
            }
        }
    }

    // Now that maxVideoSize is known, size the layers and finish the video composition.
    mainParentLayer.frame = CGRect(x: 0, y: 0, width: maxVideoSize.width, height: maxVideoSize.height)
    mainVideoLayer.frame = mainParentLayer.frame
    mainComposition.renderSize = maxVideoSize
    mainComposition.instructions = instructions
    mainComposition.frameDuration = CMTime(value: 1, timescale: 30)
    mainComposition.animationTool = AVVideoCompositionCoreAnimationTool(postProcessingAsVideoLayer: mainVideoLayer, in: mainParentLayer)

    let outputUrl = NSURL.fileURL(withPath: NSTemporaryDirectory() + "merged" + ".mp4")
    guard let exporter = AVAssetExportSession(asset: videoComposition, presetName: AVAssetExportPresetHighestQuality) else { return }
    exporter.videoComposition = mainComposition
    exporter.outputURL = outputUrl
    exporter.outputFileType = .mp4
    exporter.shouldOptimizeForNetworkUse = true

    exporter.exportAsynchronously {
        DispatchQueue.main.async {
            if let outputUrl = exporter.outputURL {
                if exporter.status == .completed {
                    self.play(video: AVAsset(url: outputUrl))
                    completion(outputUrl, exporter)
                } else if let error = exporter.error {
                    print("Export failed: \(error.localizedDescription)")
                } else {
                    print("Export status:", exporter.status)
                }
            }
        }
    }

    // Originally played the video here via AVPlayer, which played back immediately:
    // play(video: exporter.asset)
}
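For completeness: [safe:] is a custom Collection extension (it is not in the standard library), and savedTexts is a [URL: String] dictionary on the same class mapping each clip's URL to its caption text. The extension I'm using looks like this, followed by a hypothetical call site (videoUrls is a placeholder):

    extension Collection {
        // Returns nil instead of trapping when the index is out of bounds.
        subscript(safe index: Index) -> Element? {
            indices.contains(index) ? self[index] : nil
        }
    }

    // Example call site:
    let assets = videoUrls.map { AVURLAsset(url: $0) }
    merge(videos: assets) { url, exporter in
        print("Merged video written to \(url)")
    }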