My entire code:
import AVFoundation
import AVKit      // AVPlayerView
import Cocoa      // NSView, NSLayoutConstraint
import Metal
import MetalKit
func createGradientVideoComposition() -> (AVMutableComposition, AVMutableVideoComposition) {
    let composition = AVMutableComposition()
    guard let videoTrack = composition.addMutableTrack(withMediaType: .video,
                                                       preferredTrackID: kCMPersistentTrackID_Invalid) else {
        fatalError("Unable to add video track")
    }
    let duration = CMTime(seconds: 20, preferredTimescale: 600)
    try? videoTrack.insertEmptyTimeRange(CMTimeRange(start: .zero, duration: duration))

    // Create an AVMutableVideoComposition with a 60 fps frame rate and the desired render size.
    let instruction = AVMutableVideoCompositionInstruction()
    instruction.timeRange = CMTimeRange(start: .zero, duration: duration)

    let videoComposition = AVMutableVideoComposition()
    videoComposition.frameDuration = CMTime(value: 1, timescale: 60)
    videoComposition.renderSize = CGSize(width: 1920, height: 1080)
    videoComposition.instructions = [instruction]

    // Set the custom video compositor class that uses our Metal compute shader.
    videoComposition.customVideoCompositorClass = GradientVideoCompositor.self

    return (composition, videoComposition)
}
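The same pair can also be rendered offline; here is a minimal export sketch (AVAssetExportSession drives the same custom compositor; the output URL and preset choice are placeholders, not part of the code above):

func exportGradientVideo(to outputURL: URL, completion: @escaping (Error?) -> Void) {
    let (composition, videoComposition) = createGradientVideoComposition()
    guard let session = AVAssetExportSession(asset: composition,
                                             presetName: AVAssetExportPresetHighestQuality) else {
        completion(NSError(domain: "com.example", code: -1, userInfo: nil))
        return
    }
    session.outputURL = outputURL
    session.outputFileType = .mp4
    // The export session invokes GradientVideoCompositor for every frame.
    session.videoComposition = videoComposition
    session.exportAsynchronously {
        completion(session.error)
    }
}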
class GradientVideoCompositor: NSObject, AVVideoCompositing {
    // MARK: - AVVideoCompositing Protocol Properties
    var sourcePixelBufferAttributes: [String: Any]? = [
        kCVPixelBufferPixelFormatTypeKey as String: Int(kCVPixelFormatType_32BGRA)
    ]
    var requiredPixelBufferAttributesForRenderContext: [String: Any] = [
        kCVPixelBufferPixelFormatTypeKey as String: Int(kCVPixelFormatType_32BGRA)
    ]

    // MARK: - Metal Properties
    private let device: MTLDevice
    private let commandQueue: MTLCommandQueue
    private var pipelineState: MTLComputePipelineState!
    private var textureCache: CVMetalTextureCache?

    // MARK: - Initialization
    override init() {
        guard let device = MTLCreateSystemDefaultDevice() else {
            fatalError("Metal is not supported on this device")
        }
        self.device = device
        self.commandQueue = device.makeCommandQueue()!
        super.init()

        // Create a Metal texture cache for efficient texture creation.
        CVMetalTextureCacheCreate(kCFAllocatorDefault, nil, device, nil, &textureCache)

        // Load the default Metal library and get the compute function.
        let defaultLibrary = device.makeDefaultLibrary()!
        guard let kernelFunction = defaultLibrary.makeFunction(name: "gradientKernel") else {
            fatalError("Could not find gradientKernel function")
        }
        do {
            pipelineState = try device.makeComputePipelineState(function: kernelFunction)
        } catch {
            fatalError("Failed to create pipeline state: \(error)")
        }
    }
    // MARK: - AVVideoCompositing Protocol Methods
    func renderContextChanged(_ newRenderContext: AVVideoCompositionRenderContext) {
        // Handle changes to the render context here if needed.
    }

    func startRequest(_ asyncVideoCompositionRequest: AVAsynchronousVideoCompositionRequest) {
        autoreleasepool {
            // Obtain a new pixel buffer for the output frame.
            guard let dstBuffer = asyncVideoCompositionRequest.renderContext.newPixelBuffer() else {
                asyncVideoCompositionRequest.finish(with: NSError(domain: "com.example", code: 0, userInfo: nil))
                return
            }

            // Get the output image dimensions.
            let width = CVPixelBufferGetWidth(dstBuffer)
            let height = CVPixelBufferGetHeight(dstBuffer)

            // Create a Metal texture from the pixel buffer.
            guard let textureCache = self.textureCache else {
                asyncVideoCompositionRequest.finish(with: NSError(domain: "com.example", code: 0, userInfo: nil))
                return
            }
            var cvTextureOut: CVMetalTexture?
            let result = CVMetalTextureCacheCreateTextureFromImage(kCFAllocatorDefault,
                                                                   textureCache,
                                                                   dstBuffer,
                                                                   nil,
                                                                   .bgra8Unorm,
                                                                   width,
                                                                   height,
                                                                   0,
                                                                   &cvTextureOut)
            if result != kCVReturnSuccess {
                asyncVideoCompositionRequest.finish(with: NSError(domain: "com.example", code: 0, userInfo: nil))
                return
            }
            guard let cvTexture = cvTextureOut,
                  let outputTexture = CVMetalTextureGetTexture(cvTexture) else {
                asyncVideoCompositionRequest.finish(with: NSError(domain: "com.example", code: 0, userInfo: nil))
                return
            }

            // Calculate the current presentation time (in seconds) to animate the gradient.
            let presentationTime = asyncVideoCompositionRequest.compositionTime.seconds

            // Create a command buffer and compute command encoder.
            guard let commandBuffer = commandQueue.makeCommandBuffer(),
                  let computeEncoder = commandBuffer.makeComputeCommandEncoder() else {
                asyncVideoCompositionRequest.finish(with: NSError(domain: "com.example", code: 0, userInfo: nil))
                return
            }
            computeEncoder.setComputePipelineState(pipelineState)
            computeEncoder.setTexture(outputTexture, index: 0)

            // Pass the current time to the shader as a constant buffer.
            var time = Float(presentationTime)
            computeEncoder.setBytes(&time, length: MemoryLayout<Float>.size, index: 0)

            // Determine threadgroup sizes based on the output dimensions.
            let threadGroupSize = MTLSize(width: 8, height: 8, depth: 1)
            let threadGroups = MTLSize(width: (width + threadGroupSize.width - 1) / threadGroupSize.width,
                                       height: (height + threadGroupSize.height - 1) / threadGroupSize.height,
                                       depth: 1)
            computeEncoder.dispatchThreadgroups(threadGroups, threadsPerThreadgroup: threadGroupSize)
            computeEncoder.endEncoding()

            // Commit the command buffer and wait for the GPU to finish.
            commandBuffer.commit()
            commandBuffer.waitUntilCompleted()

            // Finish the composition request with the rendered pixel buffer.
            asyncVideoCompositionRequest.finish(withComposedVideoFrame: dstBuffer)
        }
    }
    func cancelAllPendingVideoCompositionRequests() {
        // No-op: startRequest renders synchronously, so no requests remain pending.
    }
}
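One design note: commandBuffer.waitUntilCompleted() blocks the compositor's thread for every frame. A non-blocking alternative is to finish the request from the command buffer's completion handler; a sketch (finishWhenGPUCompletes is a hypothetical helper, not part of the code above, standing in for the tail of startRequest):

func finishWhenGPUCompletes(_ commandBuffer: MTLCommandBuffer,
                            _ request: AVAsynchronousVideoCompositionRequest,
                            _ dstBuffer: CVPixelBuffer) {
    commandBuffer.addCompletedHandler { _ in
        // Runs on a Metal-owned thread once GPU execution completes.
        request.finish(withComposedVideoFrame: dstBuffer)
    }
    commandBuffer.commit()
}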
class PreviewView: NSView {
    let playerView: AVPlayerView

    init() {
        playerView = AVPlayerView(frame: .zero)
        super.init(frame: .zero)
        playerView.translatesAutoresizingMaskIntoConstraints = false
        addSubview(playerView)
        NSLayoutConstraint.activate([
            playerView.widthAnchor.constraint(equalToConstant: 500),
            playerView.heightAnchor.constraint(equalToConstant: 300),
            playerView.centerXAnchor.constraint(equalTo: centerXAnchor),
            playerView.centerYAnchor.constraint(equalTo: centerYAnchor)
        ])

        let (composition, videoComposition) = createGradientVideoComposition()
        let playerItem = AVPlayerItem(asset: composition)
        playerItem.videoComposition = videoComposition
        let player = AVPlayer(playerItem: playerItem)
        playerView.player = player
        player.play()
    }

    // Required for NSCoding compliance.
    required init?(coder: NSCoder) {
        fatalError("init(coder:) has not been implemented")
    }
}
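A minimal way to host PreviewView, as a sketch (assuming an AppKit app whose window's contentViewController is set to this controller; it relies on the imports at the top of the file):

final class PreviewViewController: NSViewController {
    override func loadView() {
        // PreviewView builds its own player and starts playback in init().
        view = PreviewView()
    }
}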
The Metal shader:

#include <metal_stdlib>
using namespace metal;

// Writes a vertical two-color gradient whose endpoint colors animate over time.
kernel void gradientKernel(texture2d<float, access::write> output [[texture(0)]],
                           constant float &time [[buffer(0)]],
                           uint2 id [[thread_position_in_grid]]) {
    // Guard against the rounded-up dispatch landing outside the texture.
    if (id.x >= output.get_width() || id.y >= output.get_height()) {
        return;
    }
    float2 uv = float2(id) / float2(output.get_width(), output.get_height());

    // Animated colors based on time.
    float3 color1 = float3(sin(time) * 0.8 + 0.1, 0.6, 1.0);
    float3 color2 = float3(0.12, 0.99, cos(time) * 0.9 + 0.3);

    // Linear interpolation for the gradient.
    float3 gradientColor = mix(color1, color2, uv.y);
    output.write(float4(gradientColor, 1.0), id);
}
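Note that makeFunction(name:) returns nil on any spelling mismatch between the kernel name and the Swift lookup (the listing originally had "gradientShader" on the Metal side and "gradientKernel" on the Swift side). Printing the compiled library's function names is a quick sanity check, as a sketch:

if let library = MTLCreateSystemDefaultDevice()?.makeDefaultLibrary() {
    // Should list "gradientKernel" if the .metal file is compiled into the target.
    print(library.functionNames)
}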
When I run this, the console logs:

Visual isTranslatable: NO; reason: observation failure: noObservations
That log comes from the visual text analysis (Live Text) that is enabled by default on many views; you can disable it by setting the corresponding property to false. I think the log is just a warning that the VisionKit subsystem could not detect any OCR output (i.e., text on screen). I believe it can be safely ignored and is completely unrelated to your rendering code, which looks correct at first glance. Please share the AVMutableComposition code, not just the instructions; the creation of the composition is missing, and I suspect the timing is wrong, since your AVPlayerView shows a range of 00:00:00 to 00:00:00.
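Two quick checks, as a sketch (the first assumes macOS 13+; allowsVideoFrameAnalysis is my reading of which property the log points at):

let preview = PreviewView()

// Disable Live Text / VisionKit frame analysis on the player view (macOS 13+).
// Assumption: this is the analysis the "Visual isTranslatable" log refers to.
if #available(macOS 13.0, *) {
    preview.playerView.allowsVideoFrameAnalysis = false
}

// Sanity-check the composition's duration: insertEmptyTimeRange can throw, and
// the try? in createGradientVideoComposition swallows the error, which would
// leave the duration at zero and match the 00:00:00 you are seeing.
let (composition, _) = createGradientVideoComposition()
print(composition.duration.seconds)   // expected: 20.0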