使用 AVMutableComposition 转换问题合并视频

问题描述 投票:0回答:1

我有一个

AVURLAsset
数组。它包含使用后置摄像头、前置摄像头(未镜像)拍摄的视频以及转换为视频的图像。全部为纵向。

我创建了一个函数来返回单个合并视频进行播放。

我的问题是最终播放时不支持每个视频的原始演示。视频可以旋转、拉伸/压缩或镜像。

我尝试关注现有的 SO 帖子,但没有任何效果

以下功能是我迄今为止所实现的。为了添加上下文,如果我为单个

AVURLAsset
数组项运行该函数,输出视频将正确返回。任何指导都将非常感激。

func merge(assets: [AVURLAsset], completion: @escaping (URL?, AVAssetExportSession?) -> Void) {

let mainComposition = AVMutableComposition()
var lastTime = CMTime.zero

guard let videoCompositionTrack = mainComposition.addMutableTrack(withMediaType: .video, preferredTrackID: Int32(kCMPersistentTrackID_Invalid)) else { return }
guard let audioCompositionTrack = mainComposition.addMutableTrack(withMediaType: .audio, preferredTrackID: Int32(kCMPersistentTrackID_Invalid)) else { return }

let layerInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: videoCompositionTrack)

for asset in assets {
                                    
    if let videoTrack = asset.tracks(withMediaType: .video)[safe: 0] {
        
        let t = videoTrack.preferredTransform
        layerInstruction.setTransform(t, at: lastTime)
        
        if let audioTrack = asset.tracks(withMediaType: .audio)[safe: 0] {
            
            do {
                
                try videoCompositionTrack.insertTimeRange(CMTimeRangeMake(start: .zero, duration: asset.duration), of: videoTrack, at: lastTime)
                try audioCompositionTrack.insertTimeRange(CMTimeRangeMake(start: .zero, duration: asset.duration), of: audioTrack, at: lastTime)
                
                print("Inserted audio + video track")
                
            } catch {
                print("Failed to insert audio or video track")
                return
            }
            
            lastTime = CMTimeAdd(lastTime, asset.duration)
            
        } else {
            
            do {
                
                try videoCompositionTrack.insertTimeRange(CMTimeRangeMake(start: .zero, duration: asset.duration), of: videoTrack, at: lastTime)
                
                print("Inserted just video track")
                
            } catch {
                
                print("Failed to insert just video track")
                return
            }
            
            lastTime = CMTimeAdd(lastTime, asset.duration)
        }
    }
}

let outputUrl = NSURL.fileURL(withPath: NSTemporaryDirectory() + "test" + ".mp4")

let videoComposition = AVMutableVideoComposition()
let instruction = AVMutableVideoCompositionInstruction()
instruction.layerInstructions = [layerInstruction]
instruction.timeRange = videoCompositionTrack.timeRange
videoComposition.instructions = [instruction]
videoComposition.frameDuration = videoCompositionTrack.minFrameDuration
videoComposition.renderSize = CGSize(width: 720, height: 1280) // Adjust as per your video dimensions

guard let exporter = AVAssetExportSession(asset: mainComposition, presetName: AVAssetExportPresetHighestQuality) else { return }

exporter.outputURL = outputUrl
exporter.outputFileType = .mp4
exporter.shouldOptimizeForNetworkUse = true
exporter.videoComposition = videoComposition

exporter.exportAsynchronously {
    
    if let outputUrl = exporter.outputURL {
        completion(outputUrl, exporter)
    }
}

play(video: exporter.asset)
}
ios swift avfoundation avmutablecomposition avmutablevideocomposition
1个回答
0
投票
let orientations: [UIInterfaceOrientation] = [.landscapeRight, .landscapeLeft, .portraitUpsideDown, .portrait]

func rotateVideo(to orientation: UIInterfaceOrientation) {
    guard let videoURL = "YOUR VIDEO URL" else { return }
    let asset = AVAsset(url: videoURL)
    let composition = AVMutableComposition()
    
    guard let videoTrack = asset.tracks(withMediaType: .video).first else {
        print("Error: No video track found")
        return
    }
    
    guard let videoCompositionTrack = composition.addMutableTrack(withMediaType: .video, preferredTrackID: kCMPersistentTrackID_Invalid) else {
        print("Error: Unable to add video track")
        return
    }
    
    do {
        try videoCompositionTrack.insertTimeRange(CMTimeRange(start: .zero, duration: asset.duration), of: videoTrack, at: .zero)
    } catch {
        print("Error: \(error.localizedDescription)")
        return
    }
    
    let videoComposition = AVMutableVideoComposition()
    videoComposition.renderSize = CGSize(width: videoTrack.naturalSize.width, height: videoTrack.naturalSize.height)
    videoComposition.frameDuration = CMTimeMake(value: 1, timescale: 30)
    
    let instruction = AVMutableVideoCompositionInstruction()
    instruction.timeRange = CMTimeRange(start: .zero, duration: asset.duration)
    
    let layerInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: videoCompositionTrack)
    
    var transform = CGAffineTransform.identity
    switch orientation {
    case .portrait:
        print("portrait")
        transform = CGAffineTransform.identity
        videoComposition.renderSize = videoTrack.naturalSize
    case .portraitUpsideDown:
        print("portraitUpsideDown")
        transform = CGAffineTransform(rotationAngle: .pi)
        transform = transform.concatenating(CGAffineTransform(translationX: videoTrack.naturalSize.width, y: videoTrack.naturalSize.height))
        videoComposition.renderSize = videoTrack.naturalSize
    case .landscapeLeft:
        print("landscapeLeft")
        transform = CGAffineTransform(rotationAngle: .pi / 2)
        transform = transform.concatenating(CGAffineTransform(translationX: videoTrack.naturalSize.height, y: 0))
        videoComposition.renderSize = CGSize(width: videoTrack.naturalSize.height, height: videoTrack.naturalSize.width)
    case .landscapeRight:
        print("landscapeRight")
        transform = CGAffineTransform(rotationAngle: -.pi / 2)
        transform = transform.concatenating(CGAffineTransform(translationX: 0, y: videoTrack.naturalSize.width))
        videoComposition.renderSize = CGSize(width: videoTrack.naturalSize.height, height: videoTrack.naturalSize.width)
    default:
        break
    }
    
    layerInstruction.setTransform(transform, at: .zero)
    instruction.layerInstructions = [layerInstruction]
    videoComposition.instructions = [instruction]
    
    let filemanager = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask)[0]
    let outputURL = filemanager.appendingPathComponent(UUID().uuidString + ".mp4")
    
    guard let exportSession = AVAssetExportSession(asset: composition, presetName: AVAssetExportPresetHighestQuality) else {
        print("Error: Failed to create export session")
        return
    }
    
    exportSession.outputURL = outputURL
    exportSession.outputFileType = .mp4
    exportSession.videoComposition = videoComposition
    
    exportSession.exportAsynchronously {
        switch exportSession.status {
        case .completed:
            print(outputURL)
        case .failed, .cancelled:
            print("Error: \(exportSession.error?.localizedDescription ?? "Unknown error")")
        default:
            break
        }
    }
}

你能尝试一下这个解决方案吗?它之前对我有用。

我认为您不想分离

audio
video
轨道进行变换。

© www.soinside.com 2019 - 2024. All rights reserved.