const command = ffmpeg();
const mp4Path = path.join(__dirname, '..', '..', 'temp', `q-video-${new Date().getTime()}.mp4`);
fs.writeFileSync(mp4Path, videoBuff);
console.log('mp4 file created at: ', mp4Path);
// Set the video stream as the input for ffmpeg
command.input(mp4Path);
const mp3Paths = [];
for (let i = 0; i < audios.length; i++) {
const audio = audios[i];
const mp3Path = path.join(__dirname, '..', '..', 'temp', `q-audio-${new Date().getTime()}-${i}.mp3`);
mp3Paths.push(mp3Path);
fs.writeFileSync(mp3Path, audio.questionBuf);
console.log('mp3 file created at: ', mp3Path);
// Set the audio stream as the input for ffmpeg
command.input(mp3Path);
}
// -------
// ChatGPT take 1
const audioTags = [];
const audioFilters = audios.map((audio, index) => {
const startTime = audio.start_at; // Replace with your logic to calculate start time
const endTime = audio.end_at; // Replace with your logic to calculate end time
audioTags.push(`[delayed${index}]`);
// Working
// return `[${index + 1}:a]atrim=start=0:end=${(endTime - startTime) / 1000},adelay=${startTime}[delayed${index}]`;
return `[${index + 1}:a]dynaudnorm=p=0.9:m=100:s=5,atrim=start=0:end=${(endTime - startTime) / 1000},adelay=${startTime}[delayed${index}]`;
});
// Concatenate the delayed audio streams
const concatFilter = audioFilters.join(';');
// Mix the concatenated audio streams
const mixFilter = `${concatFilter};[0:a]${audioTags.join('')}amix=inputs=${audios.length + 1}:duration=first:dropout_transition=2[out]`;
// Set the complex filter for ffmpeg
command.complexFilter([mixFilter]);
// Set the output size
if (!isScreen) {
command.videoFilter('scale=720:-1');
}
else {
command.videoFilter('scale=1920:-1');
}
// Set input options
command.inputOptions([
'-analyzeduration 20M',
'-probesize 100M'
]);
// Set output options
command.outputOptions([
'-c:v libx264', // Specify a video codec
'-c:a aac',
'-map 0:v', // Map the video stream from the first input
'-map [out]' // Map the audio stream from the complex filter
]);
// Set the output format
command.toFormat('mp4');
// Set the output file path
command.output(outputFilePath);
// Event handling
command
.on('start', commandLine => {
console.log('Spawned Ffmpeg with command: ' + commandLine);
})
.on('codecData', data => {
console.log('Input is ' + data.audio + ' audio ' +
'with ' + data.video + ' video');
})
.on('progress', progress => {
// console.log('progress: ', progress);
console.log(`Processing: ${
progress.percent ?
progress.percent.toFixed(2)
:
'0.00'
}% done`);
})
.on('stderr', stderrLine => {
console.log('Stderr output: ' + stderrLine);
})
.on('error', (err, stdout, stderr) => {
console.error('Error merging streams:', err);
console.error('ffmpeg stdout:', stdout);
console.error('ffmpeg stderr:', stderr);
reject(err);
})
.on('end', () => {
console.log('Merging finished successfully.');
const file = fs.readFileSync(outputFilePath);
console.log('File read successfully.');
setTimeout(() => {
fs.unlinkSync(outputFilePath);
console.log('Output file deleted successfully.');
fs.unlinkSync(mp4Path);
console.log('MP4 file deleted successfully.');
console.log('mp3Paths: ', mp3Paths);
for (let mp3Path of mp3Paths) {
fs.unlinkSync(mp3Path);
}
console.log('MP3 file deleted successfully.');
if (isScreen) {
for (let path of pathsScreen) {
fs.unlinkSync(path);
}
}
else {
for (let path of pathsCamera) {
fs.unlinkSync(path);
}
}
console.log('All temp files deleted successfully.');
}, 3000);
resolve(file);
});
// Run the command
command.run();
这就是我现在合并视频文件(这是一组 webm 文件)的方式。看来这个命令导致视频的音量从开始到结束逐渐增加(视频的早期部分的音量比视频的后期部分低得多)。我该如何解决这个问题?
到目前为止尝试和调查的事情:
我最终为问题中该命令生成的视频运行了另一个 ffmpeg 命令,它实际上有效并解决了我的问题。所以似乎没有办法在合并函数中运行它以使其按预期工作。