My current solution works fine in browsers such as Chrome, Firefox, and Opera, but in Safari I hear a robotic voice. When I remove the "echoCancellation" constraint in Safari, the recording sounds like a chipmunk instead. If possible, I would prefer not to use a package.
import {useEffect, useRef, useState} from "react";
import Button from "@components/Button.tsx";
import MicIcon from "@assets/icons/mic.svg"
import MicOffIcon from "@assets/icons/mic-off.svg"
import DarkMicIcon from "@assets/icons/dark-mic.svg"
import PauseIcon from "@assets/icons/pause.svg"
import PlayIcon from "@assets/icons/play.svg"
import StopIcon from "@assets/icons/stop.svg"
import EqualizerIcon from "@assets/icons/animated-equalizer.svg"
import DownloadIcon from "@assets/icons/download.svg"
import DeleteIcon from "@assets/icons/delete.svg"
import {addRecording} from "@lib/api.ts";
import {Recording} from "@lib/interfaces.ts";
import {useNavigate} from "react-router-dom";
import {formatSecondsToMMSS} from "@lib/functions.ts";
interface Props {
textId: string;
caseId: string;
}
export default function AudioRecorder({ textId, caseId }: Props) {
const navigate = useNavigate();
const numChannels = 2;
const constraints: MediaStreamConstraints = {
audio: {
sampleRate: 48000,
echoCancellation: false,
autoGainControl: false,
noiseSuppression: false,
channelCount: numChannels,
}
}
const [error, setError] = useState<string>('Mikrofon wird geladen.');
const [devices, setDevices] = useState<MediaDeviceInfo[]>();
const [isRecording, setIsRecording] = useState<boolean>(false);
const [isPaused, setIsPaused] = useState<boolean>(false);
const [audioUrl, setAudioUrl] = useState<string>('');
const [audioBlob, setAudioBlob] = useState<Blob>();
const [elapsedTime, setElapsedTime] = useState<number>(0);
// These live in refs rather than state: they never drive rendering, and the
// unmount cleanup below would otherwise only see the stale values captured on mount.
const intervalIdRef = useRef<number>();
const audioContextRef = useRef<AudioContext>();
const mediaStreamSourceRef = useRef<MediaStreamAudioSourceNode>();
const audioWorkletNodeRef = useRef<AudioWorkletNode>();
const recordedChunksRef = useRef<Float32Array[]>([]);
useEffect(() => {
loadMediaStream();
return () => {
audioWorkletNodeRef.current?.disconnect();
mediaStreamSourceRef.current?.disconnect();
audioContextRef.current?.close();
if (intervalIdRef.current) clearInterval(intervalIdRef.current);
}
}, []);
const loadMediaStream = async () => {
try {
if (!navigator.mediaDevices?.getUserMedia) {
setError("Der Browser unterstützt keinen Mikrofonzugriff.")
return;
}
const stream = await navigator.mediaDevices.getUserMedia(constraints)
await getAvailableAudioDevices();
const context = new AudioContext();
const source = context.createMediaStreamSource(stream);
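// Load the worklet processor from an inline Blob URL so no separate file has to be served.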
await context.audioWorklet.addModule(
URL.createObjectURL(new Blob([audioWorkletProcessorCode()], {type: "text/javascript"}))
);
const worklet = new AudioWorkletNode(context, 'recorder-worklet');
recordedChunksRef.current = []
worklet.port.onmessage = (event) => {
recordedChunksRef.current.push(event.data);
}
worklet.port.start();
source.connect(worklet);
// Keep the worklet pulled by the graph so process() keeps running, but mute
// it so the live microphone is not played back through the speakers.
const silence = context.createGain();
silence.gain.value = 0;
worklet.connect(silence);
silence.connect(context.destination);
audioContextRef.current = context;
mediaStreamSourceRef.current = source;
audioWorkletNodeRef.current = worklet;
setError('');
} catch (error) {
handleMediaStreamError(error as DOMException);
}
}
const handleMediaStreamError = (error: DOMException) => {
let errorMessage = 'Ein unbekannter Fehler ist aufgetreten.';
switch (error.name) {
case 'AbortError':
errorMessage = 'Abbruch: Die Anfrage wurde abgebrochen.';
break;
case 'InvalidStateError':
errorMessage = 'Ungültiger Zustand: Die Anfrage kann im aktuellen Zustand des Geräts nicht ausgeführt werden.';
break;
case 'NotAllowedError':
errorMessage = 'Zugriff verweigert: Sie haben den Zugriff auf das Mikrofon abgelehnt.';
break;
case 'NotFoundError':
errorMessage = 'Fehler: Es wurde kein geeignetes Mikrofon gefunden.';
break;
case 'NotReadableError':
errorMessage = 'Gerätefehler: Das Mikrofon kann nicht gelesen werden. Es könnte bereits in Verwendung sein.';
break;
case 'OverconstrainedError':
errorMessage = 'Einschränkungsfehler: Die angeforderten Mikrofoneinstellungen sind nicht verfügbar.';
break;
case 'SecurityError':
errorMessage = 'Sicherheitsfehler: Ein Sicherheitsproblem verhindert den Zugriff auf das Mikrofon.';
break;
}
setError(errorMessage)
}
const getAvailableAudioDevices = async () => {
try {
const mediaDevices = await navigator.mediaDevices.enumerateDevices()
const audioInputDevices = mediaDevices.filter((device) => device.kind === 'audioinput');
setDevices(audioInputDevices);
} catch (error) {
console.error('Error accessing audio devices:', error);
}
}
const startTimer = () => {
intervalIdRef.current = window.setInterval(() => setElapsedTime(prev => prev + 1), 1000);
}
const stopTimer = () => {
if (intervalIdRef.current) clearInterval(intervalIdRef.current);
intervalIdRef.current = undefined;
}
const startRecording = async () => {
if (!audioWorkletNodeRef.current) return;
recordedChunksRef.current = [];
audioWorkletNodeRef.current.port.postMessage("start");
setIsRecording(true);
setIsPaused(false);
setElapsedTime(0);
startTimer();
};
const stopRecording = () => {
const worklet = audioWorkletNodeRef.current;
const context = audioContextRef.current;
if (!worklet || !context) return;
worklet.port.postMessage("stop");
const blob = exportWAV(recordedChunksRef.current, context.sampleRate);
const url = URL.createObjectURL(blob);
setAudioBlob(blob);
setAudioUrl(url);
setIsRecording(false);
setIsPaused(false);
stopTimer();
};
const pauseRecording = () => {
const context = audioContextRef.current;
if (!context || context.state !== "running") return;
context.suspend().then(() => {
audioWorkletNodeRef.current?.port.postMessage("stop");
setIsPaused(true);
stopTimer();
});
}
const resumeRecording = () => {
const context = audioContextRef.current;
if (!context || context.state !== "suspended") return;
context.resume().then(() => {
audioWorkletNodeRef.current?.port.postMessage("start");
setIsPaused(false);
startTimer();
});
}
const saveRecording = () => {
if (!audioBlob) return;
const recording: Recording = {
id: `rec-${Date.now()}`,
audioBlob: audioBlob,
date: new Date().toISOString(),
textId: textId,
caseId: caseId
}
addRecording(recording).then(() => navigate(`/case/${caseId}/record-success`));
}
const removeRecording = () => {
setAudioBlob(undefined);
setAudioUrl('');
}
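// Concatenate the recorded Float32 chunks into one buffer and wrap it in a WAV container.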
const exportWAV = (buffers: Float32Array[], sampleRate: number) => {
const bufferLength = buffers.reduce((sum, b) => sum + b.length, 0);
const data = new Float32Array(bufferLength);
let offset = 0;
// Merge buffers
for (const buffer of buffers) {
data.set(buffer, offset);
offset += buffer.length;
}
return encodeWAV(data, sampleRate);
};
const writeString = (view: DataView, offset: number, text: string) => {
for (let i = 0; i < text.length; i++) view.setUint8(offset + i, text.charCodeAt(i));
};
const encodeWAV = (samples: Float32Array, sampleRate: number) => {
const buffer = new ArrayBuffer(44 + samples.length * 2);
const view = new DataView(buffer);
// RIFF header
writeString(view, 0, 'RIFF'); // chunk identifier
view.setUint32(4, 36 + samples.length * 2, true); // chunk length
writeString(view, 8, 'WAVE'); // RIFF type
// format chunk
writeString(view, 12, 'fmt '); // chunk identifier
view.setUint32(16, 16, true); // chunk length
view.setUint16(20, 1, true); // sample format (PCM)
// Note: this assumes the stream really delivers two channels. If the device
// only delivers mono, a stereo header makes the file play back at double pitch.
view.setUint16(22, numChannels, true); // channel count
view.setUint32(24, sampleRate, true); // sample rate
view.setUint32(28, sampleRate * numChannels * 2, true); // byte rate (sample rate * block align)
view.setUint16(32, numChannels * 2, true); // block align (channel count * bytes per sample)
view.setUint16(34, 16, true); // bits per sample
// data chunk
writeString(view, 36, 'data'); // chunk identifier
view.setUint32(40, samples.length * 2, true); // data chunk length
// write samples as little-endian 16-bit PCM, clamped to [-1, 1] so values
// slightly outside the range don't wrap around and distort
let offset = 44;
for (const s of samples) {
const clamped = Math.max(-1, Math.min(1, s));
view.setInt16(offset, clamped * 0x7FFF, true);
offset += 2;
}
return new Blob([view], { type: "audio/wav" });
};
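// The processor below runs on the audio rendering thread: while recording, each
// 128-frame block is posted back to the main thread (interleaved when the input is stereo).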
const audioWorkletProcessorCode = () => `
class RecorderWorkletProcessor extends AudioWorkletProcessor {
constructor() {
super();
this.recording = false;
this.port.onmessage = (event) => {
if (event.data === "start") this.recording = true;
if (event.data === "stop") this.recording = false;
};
}
process(inputs) {
const input = inputs[0];
if (this.recording && input && input.length > 0) {
if (input.length > 1) { // Stereo: Combine left and right channels
const left = input[0];
const right = input[1];
const stereo = new Float32Array(left.length + right.length);
for (let i = 0; i < left.length; i++) {
stereo[i * 2] = left[i];
stereo[i * 2 + 1] = right[i];
}
this.port.postMessage(stereo);
} else { // Mono: Use the first channel
this.port.postMessage(input[0]);
}
}
return true;
}
}
registerProcessor('recorder-worklet', RecorderWorkletProcessor);
`;
if (error) return (
<Button icon={MicOffIcon} label={error} onClick={loadMediaStream}/>
)
return (
<div className="text-right">
{!!devices?.length && <p className="mb-2">{devices[0].label}</p>}
{isRecording && (
<div className="flex gap-2 items-center">
<img src={DarkMicIcon} alt={"Mikrofon"} className="-mr-1 h-6" />
{!isPaused && <img src={EqualizerIcon} alt={"Aufnahme läuft"} className={"h-6"} />}
<div>{formatSecondsToMMSS(elapsedTime)}</div>
<Button icon={StopIcon} label="Stoppen" onClick={stopRecording} />
{!isPaused
? <Button icon={PauseIcon} label="Pause" onClick={pauseRecording} />
: <Button icon={PlayIcon} label="Fortsetzen" onClick={resumeRecording} />
}
</div>
)}
{!audioUrl && !isRecording && (
<Button icon={MicIcon} label="Aufnahme starten" onClick={startRecording} />
)}
{audioUrl && !isRecording && (
<div className="flex gap-2 items-center">
<Button icon={DownloadIcon} onClick={saveRecording}>
Aufnahme speichern
</Button>
<Button icon={DeleteIcon} onClick={removeRecording}>
Aufnahme verwerfen
</Button>
</div>
)}
</div>
)
}
Is there really no simpler way? I read that MediaRecorder, for example, doesn't support lossless recording.
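For comparison, the MediaRecorder route is far shorter — a minimal sketch, with the trade-off that the output is compressed (audio/mp4 in Safari, audio/webm in Chrome), not lossless PCM/WAV:
// Minimal MediaRecorder sketch (inside an async function): far less code,
// but the result is compressed audio, not PCM/WAV.
const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
const recorder = new MediaRecorder(stream);
const chunks: Blob[] = [];
recorder.ondataavailable = (event) => chunks.push(event.data);
recorder.onstop = () => {
const blob = new Blob(chunks, { type: recorder.mimeType });
setAudioUrl(URL.createObjectURL(blob)); // reusing the state setter from the component above
};
recorder.start();
// ... later, e.g. in a stop button handler:
// recorder.stop();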
Safari has known problems recording audio when echoCancellation is involved. To get rid of those chipmunk and robot voices, try setting the echoCancellation constraint to false and setting the sample rate explicitly, e.g. to 44100. If you are using an AudioContext, set the same sample rate there as well.
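A minimal sketch of that change against the code above — 44100 is only an example rate; what matters is that getUserMedia and the AudioContext agree, since exportWAV stamps audioContext.sampleRate into the WAV header:
// Use one explicit rate for both the stream and the context so Safari does
// not end up with a context rate that differs from what the WAV header says.
const SAMPLE_RATE = 44100; // example value
const constraints: MediaStreamConstraints = {
audio: {
sampleRate: SAMPLE_RATE,
echoCancellation: false,
autoGainControl: false,
noiseSuppression: false,
channelCount: numChannels,
}
}
const stream = await navigator.mediaDevices.getUserMedia(constraints);
const context = new AudioContext({ sampleRate: SAMPLE_RATE });
const source = context.createMediaStreamSource(stream);
With both rates pinned to the same value, the worklet's output, the context, and the WAV header all describe the same signal, which is exactly the mismatch that produces the pitch-shifted playback.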