Getting only noise when playing a PCM audio stream received over WebSocket in React Native and Swift


I am building a React Native app that receives PCM audio data over a WebSocket, decodes it from a Base64 string, and tries to play it in real time using AVAudioEngine on the Swift side. The audio data is arriving intact (confirmed by writing it to a file and playing it in Audacity), yet playback inside the app is nothing but noise. I cannot work out why.

JavaScript code:

const handleAudioOutput = async (event) => {
    const arrayBuffer = event.data;
    // Encode the binary frame as base64 so it can cross the native bridge
    const base64Data = Buffer.from(arrayBuffer).toString('base64');
    // Used to dump the raw stream to a file for verification in Audacity:
    // await RNFS.appendFile(`${RNFS.DocumentDirectoryPath}/audioData.raw`, base64Data, 'base64');
    audioPlayer.current.playAudioData(base64Data);
};

useEffect(() => {
    audioOutputWs.current = new WebSocket(audioOutputUrl);
    audioOutputWs.current.binaryType = 'arraybuffer';
    audioOutputWs.current.onopen = () => console.log("Audio output ws open");
    audioOutputWs.current.onmessage = handleAudioOutput;
}, []);

On the Swift side we buffer the received audio and play it from the buffer on a separate thread.

Swift-side implementation:

import Foundation
import AVFoundation

@objc(AudioPlayer)
class AudioPlayer: NSObject {
  
  private var audioEngine: AVAudioEngine
  private var audioPlayerNode: AVAudioPlayerNode
  private var audioFormat: AVAudioFormat
  private var audioQueue: [Data]
  private let queueLock = NSLock()
  private var isProcessing = false
  
  override init() {
    audioEngine = AVAudioEngine()
    audioPlayerNode = AVAudioPlayerNode()
    
    // Try using a common sample rate and format
    let sampleRate: Double = 24000.0
    let channelCount: AVAudioChannelCount = 1
    let commonFormat: AVAudioCommonFormat = .pcmFormatInt16
      
    guard let format = AVAudioFormat(commonFormat: commonFormat, sampleRate: sampleRate, channels: channelCount, interleaved: false) else {
        fatalError("Failed to create AVAudioFormat")
    }

    audioFormat = format
    audioQueue = []
    
    super.init()
    
    audioEngine.attach(audioPlayerNode)
    // Connecting with `format: audioFormat` (the Int16 format) crashed, so the
    // node is connected with the engine's standard format instead; this
    // workaround came from some corner of the internet.
    audioEngine.connect(audioPlayerNode, to: audioEngine.outputNode, format: AVAudioFormat(standardFormatWithSampleRate: 24000.0, channels: 1)!)

    do {
      try audioEngine.start()
    } catch {
      print("Failed to start AVAudioEngine: \(error)")
    }

    DispatchQueue.global(qos: .background).async {
      self.processAudioQueue()
    }
  }

  @objc(multiply:withB:withResolver:withRejecter:)
  func multiply(a: Float, b: Float, resolve: RCTPromiseResolveBlock, reject: RCTPromiseRejectBlock) -> Void {
    resolve(a * b)
  }

  @objc(add:withB:withResolver:withRejecter:)
  func add(a: Float, b: Float, resolve: RCTPromiseResolveBlock, reject: RCTPromiseRejectBlock) -> Void {
    resolve(a + b)
  }

  @objc(playAudioData:withResolver:withRejecter:)
  func playAudioData(base64String: String, resolve: RCTPromiseResolveBlock, reject: RCTPromiseRejectBlock) -> Void {
        guard let data = Data(base64Encoded: base64String) else {
            reject("error", "Invalid base64 string", nil)
            return
        }

        queueLock.lock()
        audioQueue.append(data)
        queueLock.unlock()
        
        resolve(true) // Immediately resolve to not block the JS thread
  }

  private func processAudioQueue() {
      isProcessing = true
      while isProcessing {
          queueLock.lock()
          if !audioQueue.isEmpty {
              let data: Data = audioQueue.removeFirst()
              queueLock.unlock()
              
              let frameCount = AVAudioFrameCount(data.count / MemoryLayout<Int16>.size)
              guard let audioBuffer = AVAudioPCMBuffer(pcmFormat: audioFormat, frameCapacity: frameCount) else {
                  continue
              }
              audioBuffer.frameLength = frameCount
              
              // Safe memory transfer using buffer pointers
              data.withUnsafeBytes { rawBufferPointer in
                  guard let bufferPointer = rawBufferPointer.bindMemory(to: Int16.self).baseAddress else {
                      return
                  }
                  guard let channelData = audioBuffer.int16ChannelData else {
                      return
                  }
                  
                  for frameIndex in 0..<Int(frameCount) {
                      channelData[0][frameIndex] = bufferPointer[frameIndex]
                  }
              }

              audioPlayerNode.scheduleBuffer(audioBuffer, at: nil, options: [], completionHandler: nil)
              
              if !audioPlayerNode.isPlaying {
                  audioPlayerNode.play()
              }
          } else {
              queueLock.unlock()
              usleep(10000) // Sleep for 10ms to avoid busy waiting
          }
      }
  }

  deinit {
    // Note: the processAudioQueue closure captures self strongly, so this
    // deinit will not run while the loop is alive and isProcessing stays true.
    isProcessing = false
  }
}
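
One mismatch worth noting in the code above: the buffers scheduled on the player node are created with .pcmFormatInt16, while the node itself is connected to the output using the engine's standard format, which is Float32. Scheduling buffers whose format differs from the node's connection format can produce exactly this kind of static. A minimal sketch of one workaround, assuming the stream really is 16-bit mono PCM at 24 kHz: convert each chunk with AVAudioConverter before scheduling (makeFloatBuffer is an illustrative helper, not part of the module above).

import AVFoundation

// Formats: the wire format of the stream (Int16) and the Float32 format the
// player node is connected with.
let int16Format = AVAudioFormat(commonFormat: .pcmFormatInt16, sampleRate: 24000.0,
                                channels: 1, interleaved: false)!
let floatFormat = AVAudioFormat(standardFormatWithSampleRate: 24000.0, channels: 1)!
let converter = AVAudioConverter(from: int16Format, to: floatFormat)!

// Wrap the raw Int16 bytes in a buffer, then convert to Float32.
func makeFloatBuffer(from data: Data) -> AVAudioPCMBuffer? {
    let frames = AVAudioFrameCount(data.count / MemoryLayout<Int16>.size)
    guard let src = AVAudioPCMBuffer(pcmFormat: int16Format, frameCapacity: frames),
          let dst = AVAudioPCMBuffer(pcmFormat: floatFormat, frameCapacity: frames) else {
        return nil
    }
    src.frameLength = frames
    data.withUnsafeBytes { raw in
        let samples = raw.bindMemory(to: Int16.self)
        for i in 0..<Int(frames) { src.int16ChannelData![0][i] = samples[i] }
    }
    // Both formats share a sample rate, so the one-shot convert(to:from:) is enough.
    try? converter.convert(to: dst, from: src)
    return dst
}

// The converted buffer can then be scheduled on the Float32-connected node:
// audioPlayerNode.scheduleBuffer(makeFloatBuffer(from: data)!, completionHandler: nil)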

What I have tried:

  1. Tried both Int16 and Float32 sample formats.
  2. Tried both interleaved and non-interleaved settings.
  3. Verified the data by writing it to a file and playing it back (it plays perfectly).
  4. To verify further, I played the stream from the same websocket with a Python script using pyaudio, and it plays fine. Here is the code:
async def _audio_player(self):
    """Asynchronously plays audio from the data received from ws."""
    self.stream_out = self.audio.open(
        format=8,  # 8 == pyaudio.paInt16, i.e. 16-bit signed PCM
        channels=1,
        rate=24000,
        output=True,
    )
    while True:
        audio_bytes = await self.listen_to_audio_output()
        self.stream_out.write(audio_bytes)
1 Answer

I ended up creating a circular buffer to store the frames and switched from AVFoundation to AudioToolbox. I saw a lot of questions about this but no good solutions, so I created a library that can play PCM streams on iOS. I plan to add support for Android as well as other formats.

I hope it helps: https://www.npmjs.com/package/react-native-realtime-audio-player
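
A minimal sketch of that idea, assuming 16-bit mono PCM at 24 kHz: an AudioToolbox AudioQueue drains a ring buffer that the WebSocket handler fills. RingBuffer and PCMQueuePlayer are illustrative names for this sketch, not the package's actual API.

import AudioToolbox
import Foundation

// Illustrative ring buffer: the WebSocket handler writes PCM bytes, the audio
// callback reads fixed-size chunks and zero-pads when the stream runs dry.
final class RingBuffer {
    private var storage = Data()
    private let lock = NSLock()

    func write(_ data: Data) {
        lock.lock(); defer { lock.unlock() }
        storage.append(data)
    }

    func read(_ count: Int) -> Data {
        lock.lock(); defer { lock.unlock() }
        var out = Data(storage.prefix(count))
        storage.removeFirst(out.count)
        if out.count < count { out.append(Data(count: count - out.count)) } // pad with silence
        return out
    }
}

final class PCMQueuePlayer {
    private var queue: AudioQueueRef?
    private let ring = RingBuffer()

    init() {
        // 16-bit signed little-endian PCM, mono, 24 kHz: matches the stream above.
        var format = AudioStreamBasicDescription(
            mSampleRate: 24000,
            mFormatID: kAudioFormatLinearPCM,
            mFormatFlags: kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked,
            mBytesPerPacket: 2, mFramesPerPacket: 1, mBytesPerFrame: 2,
            mChannelsPerFrame: 1, mBitsPerChannel: 16, mReserved: 0)

        // Output callback: refill the finished buffer from the ring and re-enqueue it.
        let callback: AudioQueueOutputCallback = { userData, queue, buffer in
            let player = Unmanaged<PCMQueuePlayer>.fromOpaque(userData!).takeUnretainedValue()
            let capacity = Int(buffer.pointee.mAudioDataBytesCapacity)
            player.ring.read(capacity).copyBytes(
                to: buffer.pointee.mAudioData.assumingMemoryBound(to: UInt8.self),
                count: capacity)
            buffer.pointee.mAudioDataByteSize = UInt32(capacity)
            AudioQueueEnqueueBuffer(queue, buffer, 0, nil)
        }

        AudioQueueNewOutput(&format, callback,
                            Unmanaged.passUnretained(self).toOpaque(),
                            nil, nil, 0, &queue)

        // Prime three 100 ms buffers of silence so the callback keeps firing.
        for _ in 0..<3 {
            var buf: AudioQueueBufferRef?
            AudioQueueAllocateBuffer(queue!, 4800, &buf) // 2400 frames * 2 bytes
            memset(buf!.pointee.mAudioData, 0, 4800)
            buf!.pointee.mAudioDataByteSize = 4800
            AudioQueueEnqueueBuffer(queue!, buf!, 0, nil)
        }
        AudioQueueStart(queue!, nil)
    }

    // Called with each decoded base64 chunk from the bridge.
    func play(_ pcm: Data) { ring.write(pcm) }

    deinit { if let q = queue { AudioQueueStop(q, true); AudioQueueDispose(q, true) } }
}

Pulling from the ring buffer inside the output callback decouples network jitter from playback: the queue always has something to play, padding with silence whenever the stream falls behind.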
