/// Convert an RTCVideoFrame to a CVPixelBuffer.
func getCVPixelBuffer(frame: RTCVideoFrame?) -> CVPixelBuffer? {
    guard let inputBuffer = frame?.buffer else { return nil }

    // I420 frames (e.g. decoded remote video) need an explicit conversion.
    if let i420Buffer = inputBuffer as? RTCI420Buffer {
        return i420Buffer.convertToCVPixelBuffer()
    }

    // Locally captured frames usually already wrap a CVPixelBuffer.
    return (inputBuffer as? RTCCVPixelBuffer)?.pixelBuffer
}
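For anyone wiring this up, here is a minimal sketch of a call site, assuming a custom RTCVideoRenderer implementation; PixelBufferRenderer and onPixelBuffer are made-up names for illustration, not WebRTC API:

import WebRTC
import CoreVideo

final class PixelBufferRenderer: NSObject, RTCVideoRenderer {
    // Hypothetical callback that receives each converted frame.
    var onPixelBuffer: ((CVPixelBuffer) -> Void)?

    func setSize(_ size: CGSize) {}

    func renderFrame(_ frame: RTCVideoFrame?) {
        // Reuse the helper above to unwrap or convert the frame.
        if let pixelBuffer = getCVPixelBuffer(frame: frame) {
            onPixelBuffer?(pixelBuffer)
        }
    }
}

Attach it to a track with videoTrack.add(renderer) as usual.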
The getCVPixelBuffer function above is the code from the question, but it doesn't show what convertToCVPixelBuffer actually does, so I had no idea how convertToCVPixelBuffer works! I've now found the answer, and I'm sharing it here.
You need to import libyuv, which provides the I420ToNV12 call used below:
- (CVPixelBufferRef)convertToCVPixelBufferWithI420Buffer:(RTCI420Buffer *)buffer {
    CVPixelBufferRef pixelBuffer = NULL;
    NSDictionary *pixelBufferAttributes = [NSDictionary dictionaryWithObjectsAndKeys:
                                           [NSDictionary dictionary], (id)kCVPixelBufferIOSurfacePropertiesKey, nil];
    CVReturn result = CVPixelBufferCreate(kCFAllocatorDefault,
                                          buffer.width, buffer.height,
                                          kCVPixelFormatType_420YpCbCr8BiPlanarFullRange,
                                          (__bridge CFDictionaryRef)pixelBufferAttributes,
                                          &pixelBuffer);
    if (result != kCVReturnSuccess) {
        return NULL;
    }

    result = CVPixelBufferLockBaseAddress(pixelBuffer, 0);
    if (result != kCVReturnSuccess) {
        NSLog(@"convertToCVPixelBufferWithI420Buffer result = %d", result);
        CFRelease(pixelBuffer);
        return NULL;
    }

    // Destination planes of the NV12 pixel buffer: Y (plane 0) and interleaved UV (plane 1).
    uint8_t *dstY = (uint8_t *)CVPixelBufferGetBaseAddressOfPlane(pixelBuffer, 0);
    int dstStrideY = (int)CVPixelBufferGetBytesPerRowOfPlane(pixelBuffer, 0);
    uint8_t *dstUV = (uint8_t *)CVPixelBufferGetBaseAddressOfPlane(pixelBuffer, 1);
    int dstStrideUV = (int)CVPixelBufferGetBytesPerRowOfPlane(pixelBuffer, 1);

    // libyuv copies the Y plane and interleaves U/V into NV12.
    int ret = I420ToNV12(buffer.dataY, buffer.strideY,
                         buffer.dataU, buffer.strideU,
                         buffer.dataV, buffer.strideV,
                         dstY, dstStrideY,
                         dstUV, dstStrideUV,
                         buffer.width, buffer.height);

    CVPixelBufferUnlockBaseAddress(pixelBuffer, 0);
    if (ret) {
        NSLog(@"I420ToNV12 ret = %d", ret);
        CFRelease(pixelBuffer);
        return NULL;
    }
    return pixelBuffer;
}
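Since the question was in Swift, here is a rough Swift port of the same idea. It's an untested sketch that skips libyuv and interleaves the chroma planes by hand, so it is slower; makeNV12PixelBuffer is my own name, and it assumes RTCI420Buffer exposes the dataY/dataU/dataV plane pointers, which the WebRTC ObjC bindings do:

import WebRTC
import CoreVideo
import Foundation

func makeNV12PixelBuffer(from buffer: RTCI420Buffer) -> CVPixelBuffer? {
    let width = Int(buffer.width)
    let height = Int(buffer.height)
    let attributes = [kCVPixelBufferIOSurfacePropertiesKey as String: [String: Any]()] as CFDictionary

    var pixelBufferOut: CVPixelBuffer?
    guard CVPixelBufferCreate(kCFAllocatorDefault, width, height,
                              kCVPixelFormatType_420YpCbCr8BiPlanarFullRange,
                              attributes, &pixelBufferOut) == kCVReturnSuccess,
          let pixelBuffer = pixelBufferOut else { return nil }

    guard CVPixelBufferLockBaseAddress(pixelBuffer, []) == kCVReturnSuccess else { return nil }
    defer { CVPixelBufferUnlockBaseAddress(pixelBuffer, []) }

    guard let dstYRaw = CVPixelBufferGetBaseAddressOfPlane(pixelBuffer, 0),
          let dstUVRaw = CVPixelBufferGetBaseAddressOfPlane(pixelBuffer, 1) else { return nil }

    // Plane 0: copy luma row by row, honoring possibly different strides.
    let dstStrideY = CVPixelBufferGetBytesPerRowOfPlane(pixelBuffer, 0)
    let srcStrideY = Int(buffer.strideY)
    for row in 0..<height {
        memcpy(dstYRaw + row * dstStrideY, buffer.dataY + row * srcStrideY, width)
    }

    // Plane 1: interleave U and V bytes into NV12's UV plane.
    let dstStrideUV = CVPixelBufferGetBytesPerRowOfPlane(pixelBuffer, 1)
    let dstUV = dstUVRaw.assumingMemoryBound(to: UInt8.self)
    let srcStrideU = Int(buffer.strideU)
    let srcStrideV = Int(buffer.strideV)
    for row in 0..<(height / 2) {
        for col in 0..<(width / 2) {
            dstUV[row * dstStrideUV + 2 * col] = buffer.dataU[row * srcStrideU + col]
            dstUV[row * dstStrideUV + 2 * col + 1] = buffer.dataV[row * srcStrideV + col]
        }
    }
    return pixelBuffer
}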