I'm trying to build a Next.js app that uses the MediaRecorder API to record the screen, the screen's audio, and the user's microphone, but I'm running into an audio problem (the user's voice is not recorded clearly, and sometimes not recorded at all). I'm posting my code below and hoping someone can help. Thanks, everyone!
'use client'
import React, { useState, useRef, useCallback, useEffect } from 'react';
const ScreenRecorder: React.FC = () => {
const [recording, setRecording] = useState<boolean>(false);
const [mediaUrl, setMediaUrl] = useState<string>();
const [mediaRecorder, setMediaRecorder] = useState<MediaRecorder | null>(null);
const [uStream, setUStream] = useState<MediaStream>()
const [mStream, setMStream] = useState<MediaStream>()
const startRecording = useCallback(async () => {
const mediaParts: Blob[] = [];
try {
const stream = await navigator.mediaDevices.getDisplayMedia({
video: true,
audio: true,
});
setMStream(stream)
const userStream = await navigator.mediaDevices.getUserMedia({
audio: true,
});
setUStream(userStream)
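// MediaRecorder generally records only one audio track per stream, so mix the
// microphone and the screen audio into a single track with the Web Audio API.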
const audioContext = new window.AudioContext()
const micSource = audioContext.createMediaStreamSource(userStream)
const speakerSource = audioContext.createMediaStreamSource(stream);
const destination = audioContext.createMediaStreamDestination();
micSource.connect(destination);
speakerSource.connect(destination)
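// Combine the mixed audio track with the screen's video track for recording.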
const combinedStream = new MediaStream([
...destination.stream.getAudioTracks(),
...stream.getVideoTracks(),
]);
const recorder = new MediaRecorder(combinedStream);
setMediaRecorder(recorder);
recorder.start();
setRecording(true);
recorder.onstop = () => {
const blob = new Blob(mediaParts, { type: 'video/mp4' });
const url = URL.createObjectURL(blob);
setMediaUrl(url)
console.log('On Stop')
};
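// Collect the recorded chunks as they become available.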
recorder.addEventListener('dataavailable', (e) => {
const data = e.data
if (data && data.size > 0) {
mediaParts.push(data);
}
})
} catch (error) {
console.error('Error starting recording:', error);
}
}, []);
const stopRecording = useCallback(() => {
if (mediaRecorder) {
console.log('Click to Stop')
mediaRecorder.stop()
uStream!.getTracks().forEach(track => track.stop())
mStream!.getTracks().forEach(track => track.stop())
setRecording(false);
}
}, [mediaRecorder, uStream, mStream]);
return (
<div>
<h1>State: {mediaRecorder?.state}</h1>
{!recording && (
<button onClick={startRecording}>Start Recording</button>
)}
{recording && (
<button onClick={stopRecording}>Stop Recording</button>
)}
<video src={mediaUrl} controls autoPlay/>
</div>
);
};
export default ScreenRecorder;
The first time I didn't use an AudioContext and just combined the tracks directly, but then I only recorded the screen and the user's voice (the screen audio was missing).
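Roughly, that earlier attempt looked like this (a sketch from memory, not my exact code):

// Earlier attempt (sketch): put the mic track and the display tracks into one
// MediaStream without mixing them. With two separate audio tracks, only one
// (my voice) ended up in the recording, so the screen audio was missing.
async function startRecordingWithoutMixing() {
  const displayStream = await navigator.mediaDevices.getDisplayMedia({ video: true, audio: true });
  const userStream = await navigator.mediaDevices.getUserMedia({ audio: true });
  const combinedStream = new MediaStream([
    ...displayStream.getVideoTracks(),
    ...userStream.getAudioTracks(),
    ...displayStream.getAudioTracks(),
  ]);
  const recorder = new MediaRecorder(combinedStream);
  recorder.start();
}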
We're glad to have you on board! When you integrate the MediaRecorder API into a Next.js app, a common problem is that it fails to pick up the user's microphone while it is capturing everything on the display (screen). How well this works can vary by browser. Also, always check that the microphone and screen-capture permissions have actually been granted.
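For example, you can check the microphone permission state up front (a rough sketch; the checkMicPermission helper is just for illustration, and not every browser accepts 'microphone' as a permission name, so a failed query is treated as unknown):

// Sketch: query the microphone permission state before calling getUserMedia.
// Browsers that don't support 'microphone' as a PermissionName will throw,
// hence the try/catch and the 'unknown' fallback.
async function checkMicPermission(): Promise<PermissionState | 'unknown'> {
  try {
    const status = await navigator.permissions.query({ name: 'microphone' as PermissionName });
    return status.state; // 'granted' | 'denied' | 'prompt'
  } catch {
    return 'unknown';
  }
}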
With that in mind, here is a revised version of your component:
'use client'
import React, { useState, useRef, useCallback } from 'react';
const ScreenRecorder: React.FC = () => {
const [recording, setRecording] = useState<boolean>(false);
const [mediaUrl, setMediaUrl] = useState<string>();
const [mediaRecorder, setMediaRecorder] = useState<MediaRecorder | null>(null);
const [uStream, setUStream] = useState<MediaStream | null>(null);
const [mStream, setMStream] = useState<MediaStream | null>(null);
// Keep a single AudioContext in a ref and create it on first use inside the
// click handler, so it is never constructed during server-side rendering
// (where window is undefined).
const audioContextRef = useRef<AudioContext | null>(null);
const startRecording = useCallback(async () => {
const mediaParts: Blob[] = [];
try {
const displayStream = await navigator.mediaDevices.getDisplayMedia({
video: true,
audio: true,
});
setMStream(displayStream);
const userStream = await navigator.mediaDevices.getUserMedia({
audio: true,
});
setUStream(userStream);
if (!audioContextRef.current) audioContextRef.current = new AudioContext();
const audioContext = audioContextRef.current;
const micSource = audioContext.createMediaStreamSource(userStream);
const speakerSource = audioContext.createMediaStreamSource(displayStream);
const destination = audioContext.createMediaStreamDestination();
micSource.connect(destination);
speakerSource.connect(destination);
const combinedStream = new MediaStream([
...destination.stream.getAudioTracks(),
...displayStream.getVideoTracks(),
]);
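// No mimeType is specified, so the browser records in its default container
// (usually WebM in Chrome and Firefox).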
const recorder = new MediaRecorder(combinedStream);
setMediaRecorder(recorder);
recorder.ondataavailable = (event) => {
if (event.data && event.data.size > 0) {
mediaParts.push(event.data);
}
};
recorder.onstop = () => {
// Use the recorder's actual mimeType (usually video/webm in Chrome and
// Firefox) rather than hard-coding video/mp4, which mislabels the data.
const blob = new Blob(mediaParts, { type: recorder.mimeType || 'video/webm' });
const url = URL.createObjectURL(blob);
setMediaUrl(url);
};
recorder.start();
setRecording(true);
} catch (error) {
console.error('Error starting recording:', error);
}
}, []);
const stopRecording = useCallback(() => {
if (mediaRecorder) {
mediaRecorder.stop();
uStream?.getTracks().forEach(track => track.stop());
mStream?.getTracks().forEach(track => track.stop());
setRecording(false);
}
}, [mediaRecorder, uStream, mStream]);
return (
<div>
<h1>State: {mediaRecorder?.state}</h1>
{!recording && (
<button onClick={startRecording}>Start Recording</button>
)}
{recording && (
<button onClick={stopRecording}>Stop Recording</button>
)}
{mediaUrl && (
<video src={mediaUrl} controls autoPlay />
)}
</div>
);
};
export default ScreenRecorder;
If you have any other questions or need further help, feel free to reach out. Good luck with your project!