As I said, the more videos I record and append, the further the audio drifts out of sync. How can I fix this? Here is the code I use to append the videos:
public class ConcatenateVideos extends ExecutorAsyncTask<String, Void, Boolean> {

    private ArrayList<String> video_urls = null;
    private final String TAG = ConcatenateVideos.class.getSimpleName();

    public void setUris(ArrayList<String> videos) {
        LogService.log(TAG, "set uris");
        if (videos != null) {
            video_urls = videos;
            this.execute();
        }
    }

    @Override
    protected Boolean doInBackground(String... params) {
        boolean success = false;
        FileInputStream[] videos = new FileInputStream[video_urls.size()];
        try {
            for (int i = 0; i < video_urls.size(); i++) {
                videos[i] = new FileInputStream(video_urls.get(i));
            }
            success = concatenateVideos(videos);
        } catch (Exception e) {
            success = false;
            LogService.err(TAG, e.getMessage(), e);
        }
        return success;
    }

    private boolean concatenateVideos(InputStream[] video_streams) {
        boolean success = false;
        Movie[] inMovies = new Movie[video_streams.length];
        FileChannel fc = null;
        Movie result = new Movie();
        IsoFile out = null;
        try {
            for (int i = 0; i < inMovies.length; i++) {
                if (video_streams[i] != null) {
                    inMovies[i] = MovieCreator.build(Channels.newChannel(video_streams[i]));
                }
            }
            // collect the audio and video tracks of all source movies
            List<Track> videoTracks = new LinkedList<Track>();
            List<Track> audioTracks = new LinkedList<Track>();
            for (Movie m : inMovies) {
                for (Track t : m.getTracks()) {
                    if (t.getHandler().equals("soun")) {
                        audioTracks.add(t);
                    }
                    if (t.getHandler().equals("vide")) {
                        videoTracks.add(t);
                    }
                }
            }
            if (audioTracks.size() > 0) {
                result.addTrack(new AppendTrack(audioTracks.toArray(new Track[audioTracks.size()])));
            }
            if (videoTracks.size() > 0) {
                result.addTrack(new AppendTrack(videoTracks.toArray(new Track[videoTracks.size()])));
            }
            out = new DefaultMp4Builder().build(result);
            // the merged movie is written back over the first input file (in the finally block)
            fc = new RandomAccessFile(video_urls.get(0), "rw").getChannel();
            for (int i = 1; i < video_urls.size(); i++) {
                File f = new File(video_urls.get(i));
                LogService.log(TAG, "delete file : " + f.delete());
            }
            success = true;
        } catch (Exception e) {
            LogService.err(TAG, e.getMessage(), e);
            success = false;
        } finally {
            try {
                LogService.log(TAG, "==========finally");
                if (fc != null) {
                    fc.position(0);
                    out.getBox(fc);
                    fc.close();
                }
            } catch (Exception e) {
                LogService.err(TAG, e.getMessage(), e);
            }
        }
        return success;
    }
}
And this is the service I use to invoke the ConcatenateVideos task:
private final String TAG = ConcatenateVideosService.class.getSimpleName();
final Messenger myMessenger = new Messenger(new IncomingHandler());

class IncomingHandler extends Handler {
    private Messenger client = null;

    @Override
    public void handleMessage(Message msg) {
        // init messenger
        if (client == null) {
            client = msg.replyTo;
        }
        // get the message
        Bundle data = msg.getData();
        byte dataString = data.getByte("message");
        switch (dataString) {
            case Constants.INIT_CMD_SERVICE:
                LogService.log(TAG, "INIT_CMD_SERVICE:");
                break;
            case Constants.CONCATE_CMD_SERVICE:
                LogService.log(TAG, "CONCATE_CMD_SERVICE:");
                ArrayList<String> videos = data.getStringArrayList(Constants.SERVICE_VIDEO_URLS);
                ConcatenateVideos concatenateVideos = new ConcatenateVideos() {
                    @Override
                    protected void onPostExecute(Boolean result) {
                        LogService.log(TAG, "onPostExecute() , result : " + result);
                        super.onPostExecute(result);
                        // set up the answer
                        Message answer = Message.obtain();
                        Bundle bundle = new Bundle();
                        bundle.putBoolean("result", result);
                        answer.setData(bundle);
                        // send the answer
                        try {
                            client.send(answer);
                        } catch (RemoteException e) {
                            LogService.err(TAG, e.getMessage(), e);
                        }
                    }
                };
                concatenateVideos.setUris(videos);
                break;
        }
    }
}

@Override
public boolean onUnbind(Intent intent) {
    stopSelf();
    return super.onUnbind(intent);
}

@Override
public IBinder onBind(Intent intent) {
    return myMessenger.getBinder();
}

@Override
public void onDestroy() {
    super.onDestroy();
}
I record my videos with the following settings: video bitrate 800000, audio bitrate 64000, audio sampling rate 44100, MPEG_4/H.264 container with AAC audio, at 30 fps. Now, in a test where I record 4 videos, each clip has a video timescale of 90000 and an audio timescale of 44100. But after appending the videos, the audio timescale is still 44100, while the video timescale has become 900. Why does the video timescale change but not the audio one?
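To see where the change happens, it can help to dump every track's timescale and duration before and after merging. Below is a minimal Kotlin sketch using the same mp4parser calls as the code in this question and the answer further down; it assumes the com.googlecode.mp4parser 1.1.x artifact (package paths differ in the newer org.mp4parser releases), and the helper name inspectTracks is made up for illustration:

import com.googlecode.mp4parser.authoring.container.mp4.MovieCreator

// Print each track's handler ("vide"/"soun"), timescale and duration for one file.
// Run it on a source clip and on the merged output to compare the numbers.
fun inspectTracks(path: String) {
    val movie = MovieCreator.build(path)
    movie.tracks.forEach { track ->
        val timescale = track.trackMetaData.timescale
        // duration in seconds = sum of sample durations / track timescale
        val seconds = track.sampleDurations.toList()
            .map { it.toDouble() / timescale }.sum()
        println("${track.handler}: timescale=$timescale, duration=${"%.3f".format(seconds)} s")
    }
}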
In many cases the two recordings (audio/video) are not the same length. Say the audio recording is always 10.0 s and the video is always 10.1 s. If you play a single movie, the audio may simply end before the video; the player just stays silent for the rest.
But if you append two of these videos, the first audio track starts at 0 s and the second at 10.0 s. Unfortunately, the second video track starts at 10.1 s, and you have a sync problem.
You will need to compensate for the differing track lengths by adding silence, or even by dropping some frames!
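To make the arithmetic concrete: with the example numbers above (10.0 s of audio and 10.1 s of video per clip, purely illustrative), the offset grows linearly with every clip you append:

fun main() {
    val audioPerClip = 10.0  // seconds of audio per clip (example value)
    val videoPerClip = 10.1  // seconds of video per clip (example value)
    for (clips in 1..4) {
        // each clip contributes the same mismatch, so the error accumulates
        val offset = clips * (videoPerClip - audioPerClip)
        println("total audio/video mismatch after $clips clip(s): %.1f s".format(offset))
    }
}

That is exactly the "more videos, more drift" behavior described in the question.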
I know this question is old, but I ran into the same problem and found no clear solution anywhere, so I put together a couple of functions from bits and pieces to fix it.
@Throws(Exception::class)
fun appendVideos(videoPathList: List<String>, targetFilePath: String) {
    val movies = videoPathList.map { file -> MovieCreator.build(file) }
    val finalMovie = Movie()
    val videoTracksTotal = mutableListOf<Track>()
    val audioTracksTotal = mutableListOf<Track>()
    var audioDuration = 0.0
    var videoDuration = 0.0
    movies.forEach { movie ->
        val videoTracks = mutableListOf<Track>()
        val audioTracks = mutableListOf<Track>()
        movie.tracks.forEach { track ->
            val trackDuration = track.sampleDurations.toList()
                .map { t -> t.toDouble() / track.trackMetaData.timescale }.sum()
            if (track.handler == "vide") {
                videoDuration += trackDuration
                videoTracks.add(track)
            } else if (track.handler == "soun") {
                audioDuration += trackDuration
                audioTracks.add(track)
            }
        }
        // Adjusting Durations
        adjustDurations(videoTracks, audioTracks, videoDuration, audioDuration).let {
            audioDuration = it.audioDuration
            videoDuration = it.videoDuration
        }
        videoTracksTotal.addAll(videoTracks)
        audioTracksTotal.addAll(audioTracks)
    }
    if (videoTracksTotal.isNotEmpty() && audioTracksTotal.isNotEmpty()) {
        finalMovie.addTrack(AppendTrack(*videoTracksTotal.toTypedArray()))
        finalMovie.addTrack(AppendTrack(*audioTracksTotal.toTypedArray()))
    }
    val container = DefaultMp4Builder().build(finalMovie)
    val fos = FileOutputStream(targetFilePath)
    val bb = Channels.newChannel(fos)
    container.writeContainer(bb)
    fos.close()
}
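Calling it is then a single try/catch; the paths below are placeholders:

// Hypothetical usage: merge three recorded segments into a new output file.
// appendVideos() is the function above; the file paths are only examples.
try {
    appendVideos(
        listOf("/sdcard/clip1.mp4", "/sdcard/clip2.mp4", "/sdcard/clip3.mp4"),
        "/sdcard/merged.mp4"
    )
} catch (e: Exception) {
    e.printStackTrace() // appendVideos is declared @Throws(Exception::class)
}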
class Durations(val audioDuration: Double, val videoDuration: Double)

private fun adjustDurations(
    videoTracks: MutableList<Track>,
    audioTracks: MutableList<Track>,
    videoDuration: Double,
    audioDuration: Double
): Durations {
    var diff = audioDuration - videoDuration
    val tracks: MutableList<Track>
    var durationOperator: Double
    val isAudioProblem: Boolean
    when {
        // audio and video match, no operations to perform
        diff == 0.0 -> {
            return Durations(audioDuration, videoDuration)
        }
        // audio tracks are longer than video
        diff > 0 -> {
            tracks = audioTracks
            durationOperator = audioDuration
            isAudioProblem = true
        }
        // video tracks are longer than audio
        else -> {
            tracks = videoTracks
            durationOperator = videoDuration
            diff *= -1.0
            isAudioProblem = false
        }
    }
    // Getting the last track in order to operate on it
    var track: Track = tracks.last()
    var counter: Long = 0
    // Walking the sample-duration list in reverse, from the end of the track
    track.sampleDurations.toList().asReversed().forEach { sampleDuration ->
        // Counting how many trailing samples fit inside the surplus duration
        if (sampleDuration.toDouble() / track.trackMetaData.timescale > diff) {
            return@forEach
        }
        diff -= sampleDuration.toDouble() / track.trackMetaData.timescale
        durationOperator -= sampleDuration.toDouble() / track.trackMetaData.timescale
        counter++
    }
    if (counter != 0L) {
        // Cropping the counted samples off the end of the track
        track = CroppedTrack(track, 0, track.samples.size - counter)
        // update the original reference
        tracks.removeAt(tracks.lastIndex)
        tracks.add(track)
    }
    // Returning the adjusted durations
    return if (isAudioProblem) {
        Durations(durationOperator, videoDuration)
    } else {
        Durations(audioDuration, durationOperator)
    }
}
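Note the design choice here: instead of padding the shorter track with silence, adjustDurations uses CroppedTrack to drop whole samples from the end of whichever track runs long, so each clip's audio and video start together and the mismatch can no longer accumulate across appended clips. Since it can only trim whole samples, a residual offset smaller than one sample duration remains per clip, which is normally imperceptible.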