Changing the playout delay of a WebRTC stream

Problem description

I'm trying to send a live media stream (eventually coming from a camera) from peer A to peer B. I want peer B to receive the live stream in real time and then replay it with an added delay. Unfortunately, it isn't possible to simply pause the stream and resume playback, because that jumps straight back to the live moment.

So I found that I can re-watch the live stream with MediaRecorder + SourceBuffer: record the stream, append the recorded chunks to MSE (a SourceBuffer), and start playback 5 seconds later. This works with the local device (stream). However, when I try to use the MediaRecorder on the receiver's MediaStream (the one from pc.onaddstream), it looks like it gets some data and is able to append the buffers to the sourceBuffer, but it never replays. Sometimes I get just one frame.

const [pc1, pc2] = localPeerConnectionLoop()
const canvasStream = canvas.captureStream(200)

videoA.srcObject = canvasStream
videoA.play()

// Note: using two MediaRecorders at the same time seems problematic
// But this one works
// stream2mediaSorce(canvasStream, videoB)
// setTimeout(videoB.play.bind(videoB), 5000)

pc1.addTransceiver(canvasStream.getTracks()[0], {
  streams: [ canvasStream ]
})

pc2.onaddstream = (evt) => {
  videoC.srcObject = evt.stream
  videoC.play()

  // Note: using two MediaRecorders at the same time seems problematic
  // THIS DOES NOT WORK
  stream2mediaSorce(evt.stream, videoD)
  setTimeout(() => videoD.play(), 2000)
}

/**
 * Turn a MediaStream into a SourceBuffer
 * 
 * @param  {MediaStream}      stream   Live Stream to record
 * @param  {HTMLVideoElement} videoElm Video element to play the recorded video in
 * @return {undefined}
 */
function stream2mediaSorce (stream, videoElm) {
  const RECORDER_MIME_TYPE = 'video/webm;codecs=vp9'
  const recorder = new MediaRecorder(stream, { mimeType : RECORDER_MIME_TYPE })

  const mediaSource = new MediaSource()
  videoElm.src = URL.createObjectURL(mediaSource)
  mediaSource.onsourceopen = (e) => {
    const sourceBuffer = mediaSource.addSourceBuffer(RECORDER_MIME_TYPE)

    const fr = new FileReader()
    fr.onerror = console.log
    fr.onload = ({ target }) => {
      console.log(target.result)
      sourceBuffer.appendBuffer(target.result)
    }
    recorder.ondataavailable = ({ data }) => {
      console.log(data)
      fr.readAsArrayBuffer(data)
    }
    setInterval(recorder.requestData.bind(recorder), 1000)
  }

  console.log('Recorder created')
  recorder.start() 
}

Do you know why the video won't play?

I have created a fiddle with all the necessary code to try it out; the JavaScript tab contains the same code as above (the HTML is mostly irrelevant and does not need to be changed).

Some people try to reduce the latency, but I actually want to increase it to about 10 seconds, so you can re-watch what you did wrong in a golf swing or the like, and, if possible, avoid MediaRecorder altogether.

Edit: I found something called "playout-delay" in some RTC extensions.

It allows the sender to control the minimum and maximum delay from capture to render time.

How would I use it? Will it help me?

javascript google-chrome webrtc web-mediarecorder
2 Answers

6 votes

Update: there is a new feature that enables this, called playoutDelayHint.

We want to provide means for JavaScript applications to set their preferences on how fast they want to render audio or video data. As fast as possible might be beneficial for applications that concentrate on the real-time experience. For others, additional data buffering may provide a smoother experience in case of network issues.

References:
https://discourse.wicg.io/t/hint-attribute-in-webrtc-to-influence-underlying-audio-video-buffering/4038

https://bugs.chromium.org/p/webrtc/issues/detail?id=10287

Demo: https://jsfiddle.net/rvekxns5/ (I could only set a maximum of 10 seconds in my browser, but beyond that it is up to the UA vendor to do the best it can with the available resources.)

import('https://jimmy.warting.se/packages/dummycontent/canvas-clock.js')
.then(({AnalogClock}) => {
  const {canvas} = new AnalogClock(100)
  document.querySelector('canvas').replaceWith(canvas)
  
  const [pc1, pc2] = localPeerConnectionLoop()
  const canvasStream = canvas.captureStream(200)

  videoA.srcObject = canvasStream
  videoA.play()

  pc1.addTransceiver(canvasStream.getTracks()[0], {
    streams: [ canvasStream ]
  })

  pc2.onaddstream = (evt) => {
    videoC.srcObject = evt.stream
    videoC.play()
  }

  $dur.onchange = () => {
    pc2.getReceivers()[0].playoutDelayHint = $dur.valueAsNumber
  }
})
<!-- all the irrelevant parts that you don't need to know anything about -->
<h3 style="border-bottom: 1px solid">Original canvas</h3>
<canvas id="canvas" width="100" height="100"></canvas>
<script>
function localPeerConnectionLoop(cfg = {sdpSemantics: 'unified-plan'}) {
  const setD = (d, a, b) => Promise.all([a.setLocalDescription(d), b.setRemoteDescription(d)]);
  return [0, 1].map(() => new RTCPeerConnection(cfg)).map((pc, i, pcs) => Object.assign(pc, {
    onicecandidate: e => e.candidate && pcs[i ^ 1].addIceCandidate(e.candidate),
    onnegotiationneeded: async e => {
      try {
        await setD(await pc.createOffer(), pc, pcs[i ^ 1]);
        await setD(await pcs[i ^ 1].createAnswer(), pcs[i ^ 1], pc);
      } catch (e) {
        console.log(e);
      }
    }
  }));
}
</script>
<h3 style="border-bottom: 1px solid">Local peer (PC1)</h3>
<video id="videoA" muted width="100" height="100"></video>

<h3 style="border-bottom: 1px solid">Remote peer (PC2)</h3>
<video id="videoC" muted width="100" height="100"></video>
<label> Change playoutDelayHint
<input type="number" value="1" id="$dur">
</label>
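
If the connection carries more than one track (e.g. audio and video), the hint presumably has to be applied to every receiver, not just the first one as in the demo. A minimal sketch of that, assuming pc2 is the receiving RTCPeerConnection and the delay is given in seconds (setPlayoutDelay is a hypothetical helper; playoutDelayHint is a non-standard Chromium hint, so feature-detect it):

function setPlayoutDelay (pc, seconds) {
  for (const receiver of pc.getReceivers()) {
    // Only engines that expose the non-standard hint (Chromium) pass this check
    if ('playoutDelayHint' in receiver) {
      receiver.playoutDelayHint = seconds
    }
  }
}

// e.g. buffer roughly 10 seconds on the receiving peer
setPlayoutDelay(pc2, 10)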


0 votes

Endless's answer uses playoutDelayHint, which does not seem to work in Firefox. I struggled to find a way to delay the audio by a fixed amount, and even an attempt with a DelayNode (sketched below) failed.
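
For reference, a minimal sketch of such a DelayNode attempt (remoteStream is assumed to be the MediaStream taken from the remote track event; as noted above, this approach did not work reliably for me):

const audio_ctx = new AudioContext();
// maxDelayTime must be chosen up front; delayTime can then be adjusted at runtime
const delay_node = audio_ctx.createDelay(10);
delay_node.delayTime.value = 10;

// remoteStream is an assumption: the MediaStream received from the peer connection
const source = audio_ctx.createMediaStreamSource(remoteStream);
source.connect(delay_node).connect(audio_ctx.destination);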

My solution was to create my own AudioWorkletProcessor that pushes incoming audio into an array and pulls it back out once the buffer has filled up.

class DelayProcessor extends AudioWorkletProcessor {
    constructor() {
        super();
        this.buffer_size = 1000;
        this.buffer = [];
    }

    process(inputs, outputs, parameters) {
        this.set_buffer_size(parameters.delayTime[0]); // Unfortunately, I did not find a way to set the buffer size once. This method gets called every "frame"
        var input = this.delay(inputs); // Save current frame and grab an old one

        // Copying the information from one array into another.
        for(let y = 0; y < input.length; y++){
            for(let x = 0; x < input[y].length; x ++){
                for (let i = 0; i < input[y][x].length; i++) {
                    outputs[y][x][i] = input[y][x][i];
                }
            }
        }
        return true;
    }

    static get parameterDescriptors() { // Setting this up allows controlling the delay through the DOM
        return [
            {
                name: "delayTime",
                defaultValue: 0,
                minValue: 0,
                maxValue: 100000,
            },
        ];
    }
      

    set_buffer_size(new_size) { // Shrink the buffer if the new size is smaller than the current one
        if(new_size === this.buffer_size) {return;}
        if(this.buffer.length > new_size) {
            this.buffer = this.buffer.slice(this.buffer_size - new_size, -1);
        }
        this.buffer_size = new_size;
    }

    delay(data) {
        if(data[0].length == 0){ // Ignore if the inputs are empty. Empty is different from silence. 
            return []
        }
        var new_data = []; // Must copy the structures from `data`, as they are repurposed by the AudioWorklet
        for(let y = 0; y < data.length; y++){
            new_data.push([])
            for(let x = 0; x < data[y].length; x ++){
                new_data[y].push(new Float32Array(128))
                for (let i = 0; i < data[y][x].length; i++) {
                    new_data[y][x][i] = data[y][x][i];
                }
            }
        }
        this.buffer.push(new_data);
        if(this.buffer.length > this.buffer_size) {
            return this.buffer.shift(); // If the buffer is bigger than the target, return the first in the queue
        }
        return []; // Not enough buffer, keep buffering
    }
}

// Must call this function to register your custom Processor
registerProcessor("delay-processor", DelayProcessor);

To use this worklet you have to create the audio context in your main script, initialize the worklet in a different way than for a normal node, and then attach it to the audio stream. I will show as little code as possible.

const audio_ctx = new AudioContext(); // Init audio context
const gain = audio_ctx.createGain(); // Example of a native node
let delay_p; // Must be created within an async function

async function init(){
    await audio_ctx.audioWorklet.addModule("delay.js");
    delay_p = new AudioWorkletNode(
        audio_ctx,
        "delay-processor",
    );
}

function some_function(track) { // Receives the track event (note the use of track.streams[0]) and returns an HTMLElement to be attached anywhere
        const audio_element = document.createElement("audio");
        ....
        audio_element.srcObject = track.streams[0];

        const context_track = audio_ctx.createMediaStreamSource(track.streams[0]);

        // You must connect the delay node first, otherwise you will apply the delay to your user's input as well
        context_track.connect(delay_p).connect(gain).connect(audio_ctx.destination);
        ...
        return audio_element;

}
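
The delayTime parameter declared in parameterDescriptors is what actually drives the buffer size, so it still has to be set from the page. A minimal sketch of that (the #delaySeconds input is an assumption; each process() call handles one 128-sample render quantum, hence the conversion from seconds to quanta):

// init() from above must have resolved before this runs, so that delay_p exists
const delay_input = document.querySelector("#delaySeconds"); // assumed <input type="number">
delay_input.onchange = () => {
    const seconds = delay_input.valueAsNumber;
    // Convert seconds to 128-sample render quanta, the unit the processor buffers in
    const quanta = Math.round(seconds * audio_ctx.sampleRate / 128);
    delay_p.parameters.get("delayTime").value = quanta;
};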

This may have some unintended side effects, but as far as I have tested, I have not noticed any misbehavior.
