我正在使用AudioWorkletProcessor,我需要将所有音频流数据存储到一个文件中并在最后播放。
下面是我的 AudioWorkletProcessor 代码:
class RecorderProcessor extends AudioWorkletProcessor {
  // Number of samples to accumulate before posting a chunk to the main
  // thread (plays the role of ScriptProcessorNode's bufferSize argument).
  bufferSize = 256
  // Current fill level of _buffer, in samples (despite the name, not bytes).
  _bytesWritten = 0
  // Fixed-size accumulation buffer for one outgoing chunk.
  _buffer = new Float32Array(this.bufferSize)

  constructor() {
    super() // must run before `this` is touched
    this.initBuffer()
  }

  initBuffer() {
    this._bytesWritten = 0
  }

  isBufferEmpty() {
    return this._bytesWritten === 0
  }

  isBufferFull() {
    // `>=` instead of `===`: render quanta are 128 frames, so if bufferSize
    // is ever changed to a non-multiple of 128 the fill level steps past the
    // exact size; with `===` the check would never fire again and every
    // subsequent sample would be silently dropped (typed-array writes past
    // the end are no-ops).
    return this._bytesWritten >= this.bufferSize
  }

  /**
   * Called once per render quantum.
   * @param {Float32Array[][]} inputs - inputs[input][channel] sample data
   * @returns {boolean} true to keep the processor alive
   */
  process(inputs) {
    // Grab the 1st channel of the 1st input, like ScriptProcessorNode did.
    this.append(inputs[0][0])
    return true
  }

  /**
   * Append a quantum of samples, flushing whenever the buffer fills.
   * The fullness check is done per sample (the original checked only once
   * per quantum, which loses data when bufferSize is not an exact multiple
   * of the quantum length).
   * @param {Float32Array} channelData
   */
  append(channelData) {
    if (!channelData) return // input may be silent or disconnected
    for (let i = 0; i < channelData.length; i++) {
      if (this.isBufferFull()) {
        this.flush()
      }
      this._buffer[this._bytesWritten++] = channelData[i]
    }
  }

  /**
   * Post the buffered samples to the main thread and reset the buffer.
   * slice() hands the listener its own copy, so later writes into _buffer
   * can never alias the transferred data.
   */
  flush() {
    // trim the buffer if it was flushed before filling up completely
    this.port.postMessage(
      this._bytesWritten < this.bufferSize
        ? this._buffer.slice(0, this._bytesWritten)
        : this._buffer.slice()
    )
    this.initBuffer()
  }
}

registerProcessor("recorderWorkletProcessor", RecorderProcessor)
下面是我的javascript代码:
var recordingNode; // AudioWorkletNode that posts raw PCM chunks
var micSourceNode; // MediaStreamAudioSourceNode for the microphone
const chunks = []; // every Float32Array chunk posted by the worklet

try {
  if (navigator.mediaDevices && navigator.mediaDevices.getUserMedia) {
    // Modern promise-based API; the legacy callback form below has been
    // removed from current browsers.
    navigator.mediaDevices
      .getUserMedia({ audio: true, video: false })
      .then(onMicrophoneGranted, onMicrophoneDenied);
  } else {
    // Deprecated callback API, kept only as a fallback for old browsers.
    navigator.getUserMedia = navigator.getUserMedia ||
      navigator.webkitGetUserMedia ||
      navigator.mozGetUserMedia;
    navigator.getUserMedia({
      audio: true,
      video: false
    }, onMicrophoneGranted, onMicrophoneDenied);
  }
} catch (e) {
  alert(e);
}

function onMicrophoneDenied() {
  console.log('denied');
}
/**
 * Success callback for getUserMedia: routes the microphone stream through
 * the recorder worklet and collects every chunk the worklet posts back.
 * @param {MediaStream} stream - the granted microphone stream
 */
async function onMicrophoneGranted(stream) {
  context = new AudioContext({
    sampleRate: 48000
  });

  micSourceNode = context.createMediaStreamSource(stream);

  await context.audioWorklet.addModule('/app_resources/recorderWorkletProcessor.js');

  recordingNode = new AudioWorkletNode(context, "recorderWorkletProcessor");

  // Each message carries one Float32Array chunk of raw samples.
  recordingNode.port.onmessage = (e) => {
    chunks.push(e.data);
  };

  micSourceNode.connect(recordingNode).connect(context.destination);
}
// NOTE(review): this is the broken version from the question. `chunks`
// holds raw Float32Array PCM samples, not encoded audio, so wrapping them
// in a Blob labelled "audio/webm;codecs=opus" yields a file no decoder can
// play — the blob has a size, but it is not a valid container.
function stopstream() {
if (micSourceNode)
micSourceNode.disconnect(recordingNode);
// The MIME type only labels the blob; no encoding happens here.
var blob = new Blob(chunks, {
type: "audio/webm;codecs=opus"
});
console.log(blob.size)
const audioUrl = URL.createObjectURL(blob);
document.getElementById('song').innerHTML = '<audio id="audio-player" controls="controls" src="' + audioUrl + '" type="audio/mpeg">';
}
我无法将 Float32Array 数据转换为可播放的音频文件。我可以看到 Blob 的大小,但无法播放音频。请帮助我了解在这里该怎么做才能使其正常工作。
将所有音频流数据存储到一个文件中并在最后播放,步骤如下:
第1步:添加以缓冲区形式获取原始音频数据并将其转换为WAV文件的功能,包括必要的WAV头信息:
/**
 * Wrap raw PCM samples in a canonical 44-byte WAV header.
 *
 * Accepts either a flat Float32Array of samples, or an array of
 * Float32Array chunks exactly as collected from the worklet's port
 * messages; chunk arrays are flattened first. (The original handled only
 * the flat form, so passing the `chunks` array directly — as the answer's
 * stopstream does — produced a corrupt file.)
 *
 * @param {Float32Array|Float32Array[]} buffer - samples in [-1, 1]
 * @param {number} sampleRate - e.g. 48000
 * @returns {Blob} a playable "audio/wav" blob (mono, 16-bit PCM)
 */
function convertToWav(buffer, sampleRate) {
  // Flatten an array of chunks into one contiguous Float32Array.
  let samples = buffer;
  if (Array.isArray(buffer)) {
    let total = 0;
    for (const chunk of buffer) total += chunk.length;
    samples = new Float32Array(total);
    let offset = 0;
    for (const chunk of buffer) {
      samples.set(chunk, offset);
      offset += chunk.length;
    }
  }

  const numberOfChannels = 1; // mono
  const bytesPerSample = 2;   // 16-bit PCM
  const dataSize = samples.length * bytesPerSample;
  const bufferLength = samples.length;

  const newBuffer = new ArrayBuffer(44 + dataSize);
  const view = new DataView(newBuffer);

  // --- RIFF/WAVE header (all multi-byte fields little-endian) ---
  writeString(view, 0, "RIFF");
  view.setUint32(4, 36 + dataSize, true);       // RIFF chunk size = file size - 8
  writeString(view, 8, "WAVE");
  writeString(view, 12, "fmt ");
  view.setUint32(16, 16, true);                 // fmt sub-chunk size
  view.setUint16(20, 1, true);                  // AudioFormat: 1 (PCM)
  view.setUint16(22, numberOfChannels, true);
  view.setUint32(24, sampleRate, true);
  view.setUint32(28, sampleRate * numberOfChannels * bytesPerSample, true); // byte rate
  view.setUint16(32, numberOfChannels * bytesPerSample, true);              // block align
  view.setUint16(34, bytesPerSample * 8, true);                             // bits per sample
  writeString(view, 36, "data");
  view.setUint32(40, dataSize, true);

  // --- PCM payload: clamp to [-1, 1], then scale to signed 16-bit ---
  const data = new Int16Array(newBuffer, 44);
  for (let i = 0; i < bufferLength; i++) {
    const val = Math.max(-1, Math.min(1, samples[i]));
    data[i] = val < 0 ? val * 0x8000 : val * 0x7FFF;
  }

  return new Blob([view], { type: "audio/wav" });
}

/**
 * Write an ASCII string byte-by-byte into a DataView.
 * @param {DataView} view
 * @param {number} offset - byte offset of the first character
 * @param {string} string - ASCII tag such as "RIFF" or "data"
 */
function writeString(view, offset, string) {
  for (let i = 0; i < string.length; i++) {
    view.setUint8(offset + i, string.charCodeAt(i));
  }
}
第 2 步:更新您的 stopstream 函数:
/**
 * Stop recording: disconnect the microphone, merge all recorded chunks
 * into one flat Float32Array, wrap them in a WAV blob and play it.
 */
function stopstream() {
  // Disconnect first so no new chunks arrive while we export.
  if (micSourceNode) micSourceNode.disconnect(recordingNode);

  // `chunks` is an array of Float32Array buffers. convertToWav indexes its
  // argument as flat samples, so merge the chunks first — passing the array
  // of arrays directly serializes garbage into the WAV payload.
  let total = 0;
  for (const chunk of chunks) total += chunk.length;
  const merged = new Float32Array(total);
  let offset = 0;
  for (const chunk of chunks) {
    merged.set(chunk, offset);
    offset += chunk.length;
  }

  const wavBlob = convertToWav(merged, 48000);
  const url = URL.createObjectURL(wavBlob);
  const audio = new Audio(url);
  audio.play();
}
调用 stopstream 后将播放音频。