在lamejs中填充音频源

问题描述 投票:0回答:1

我有来自lamejs示例的代码:

// Encoder configuration: mono, 44.1 kHz (the standard MP3 sample rate), 128 kbps.
const channels = 1; // 1 = mono, 2 = stereo
const sampleRate = 44100;
const kbps = 128;
const mp3encoder = new lamejs.Mp3Encoder(channels, sampleRate, kbps);
let mp3Data; // collects the encoded MP3 frames produced below

********** this line is stumping me: **********
// One second of silence as placeholder input — substitute your real PCM here.
let samples = new Int16Array(44100);
// Feed the encoder in chunks; any size works, but a multiple of 576 samples
// (one MP3 granule) keeps the encoder's life easier.
let sampleBlockSize = 1152;
let mp3buf;

mp3Data = [];
for (let offset = 0; offset < samples.length; offset += sampleBlockSize) {
  const chunk = samples.subarray(offset, offset + sampleBlockSize);
  mp3buf = mp3encoder.encodeBuffer(chunk);
  if (mp3buf.length > 0) {
    mp3Data.push(mp3buf);
  }
}

// Drain whatever samples the encoder still holds internally.
mp3buf = mp3encoder.flush();

if (mp3buf.length > 0) {
  mp3Data.push(new Int8Array(mp3buf));
}

let audioBlob = new Blob(mp3Data, { type: "audio/mp3" });

我不知道如何添加我自己的源/样本:我正在使用浏览器媒体记录器:

mediaRecorder.ondataavailable = (e) => {
  chunks.push(e.data);
};

我尝试使用 chunks 作为源/样本，但我对音频的了解不够。有没有人可以帮助我了解应该如何格式化这些数据，以及背后的原理？我完全不知道从哪里开始。预先感谢。

javascript audio browser recording
1个回答
0
投票

这就是我让它工作的方法,以防其他人遇到同样的问题:

// Module-level accumulator for encoded MP3 frames (filled by getBlobMp3 below).
let mp3Data = [];
// AudioContext decodes the recorded webm into PCM; webkit prefix covers older Safari.
let audioContext = new (window.AudioContext || window.webkitAudioContext)();

// Pipeline: recorded chunks -> decoded AudioBuffer -> Int16 PCM -> MP3 blob.
// NOTE(review): top-level await — assumes this runs inside an ES module or an
// async function; confirm the surrounding context.
let audioBuffer = await getPCM();
let samples = getSamples(audioBuffer);
let audioBlob = getBlobMp3(samples);


// pcm: pulse-code modulation, used in audio
// Decode the MediaRecorder output into raw PCM.
// (PCM = pulse-code modulation, the uncompressed sample representation.)
async function getPCM() {
  // `chunks` holds the Blobs gathered by mediaRecorder.ondataavailable.
  const recording = new Blob(chunks, { type: "audio/webm" });
  const encodedBytes = await recording.arrayBuffer();
  const decoded = await audioContext.decodeAudioData(encodedBytes);
  return decoded;
}

// Pull the first channel (left, or the only one for mono) out of the decoded
// AudioBuffer and convert its float samples to 16-bit PCM for lamejs.
function getSamples(audioBuffer) {
  return float32To16BitPCM(audioBuffer.getChannelData(0));
}

/**
 * Convert normalized floating-point samples (range [-1, 1]) to signed
 * 16-bit PCM, as expected by lamejs' Mp3Encoder.
 *
 * @param {Float32Array} float32Array - decoded audio samples
 * @returns {Int16Array} samples scaled to the full 16-bit range
 */
function float32To16BitPCM(float32Array) {
  const int16Array = new Int16Array(float32Array.length);

  for (let i = 0; i < float32Array.length; i++) {
    // Clamp first so out-of-range floats can't wrap around on conversion.
    const s = Math.max(-1, Math.min(1, float32Array[i]));
    // Asymmetric scaling: int16 spans [-32768, 32767], so negative full
    // scale is -1 * 0x8000 and positive full scale is 1 * 0x7fff.
    // Round (rather than truncate toward zero) to avoid a systematic bias.
    int16Array[i] = Math.round(s < 0 ? s * 0x8000 : s * 0x7fff);
  }

  return int16Array;
}

/**
 * Encode 16-bit PCM samples to an MP3 Blob with lamejs.
 *
 * @param {Int16Array} samples - mono PCM samples
 * @param {number} [sampleRate=44100] - sample rate of `samples`; pass
 *   `audioBuffer.sampleRate` so the MP3 plays at the recorded speed
 * @param {number} [channels=1] - channel count of `samples`
 * @param {number} [kbps=128] - target MP3 bitrate
 * @returns {Blob} the encoded MP3
 */
function getBlobMp3(samples, sampleRate = 44100, channels = 1, kbps = 128) {
  // Accumulate frames locally: the original pushed into the module-level
  // `mp3Data` array, so a second call would prepend the previous
  // recording's frames to the new Blob.
  const mp3Parts = [];

  const mp3encoder = new lamejs.Mp3Encoder(channels, sampleRate, kbps);

  // Feed in chunks; a multiple of 576 samples (one MP3 granule) is easiest
  // on the encoder, and 1152 is exactly one MP3 frame.
  const sampleBlockSize = 1152;

  for (let i = 0; i < samples.length; i += sampleBlockSize) {
    const sampleChunk = samples.subarray(i, i + sampleBlockSize);
    const bufferMp3 = mp3encoder.encodeBuffer(sampleChunk);
    if (bufferMp3.length > 0) {
      mp3Parts.push(bufferMp3);
    }
  }

  // Flush whatever samples the encoder still buffers internally.
  const finalBufferMp3 = mp3encoder.flush();
  if (finalBufferMp3.length > 0) {
    mp3Parts.push(finalBufferMp3);
  }

  // "audio/mpeg" is the registered MIME type for MP3 ("audio/mp3" is a
  // common but nonstandard alias).
  return new Blob(mp3Parts, { type: "audio/mpeg" });
}

我必须:

  • 从由 mediarecorder 的音频块组成的 blob 中获取数组缓冲区
  • 使用 audioContext 从该数组缓冲区获取（解码出）音频缓冲区
  • 将该音频缓冲区从 32 位转换为 16 位
  • 将16位数组传递给lamejs以转换为mp3
  • 从 mp3 中制作另一个 blob
  • 将音频 blob 传递到服务器进行处理。

我不确定这是否是正确的答案,但它确实对我有用。如果有人知道更好的方法,请评论/回答。

© www.soinside.com 2019 - 2024. All rights reserved.