Make ffmpeg test output sync A/V properly

FFmpeg test output didn't make any attempt to sync audio and video data
before.  It should be much more accurate now.
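For illustration, here is a minimal standalone sketch of the syncing idea
(hypothetical names, not the exact code in this commit): audio that arrives
before the first video timestamp is either dropped outright or trimmed by
converting the leading timestamp gap into a sample count at the audio sample
rate.

/* Sketch only; function and parameter names are hypothetical.
 * All timestamps are in nanoseconds. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Decide whether an audio chunk overlaps the video start and, if it does,
 * how many leading samples to skip so the rest lines up with the video. */
static bool audio_sync_cutoff(uint64_t video_start_ts, uint64_t audio_ts,
		uint32_t frames, uint32_t samplerate, uint32_t *skip)
{
	*skip = 0;

	if (audio_ts >= video_start_ts)
		return true; /* audio already starts after video: keep all */

	/* duration of this chunk in nanoseconds */
	uint64_t duration = (uint64_t)frames * 1000000000ULL / samplerate;
	uint64_t end_ts   = audio_ts + duration;

	if (end_ts <= video_start_ts)
		return false; /* chunk ends before video starts: drop it */

	/* convert the leading gap back into a sample count */
	uint64_t gap = video_start_ts - audio_ts;
	*skip = (uint32_t)(gap * samplerate / 1000000000ULL);
	return true;
}

int main(void)
{
	uint32_t skip;
	/* e.g. audio starts 20 ms before video, 48 kHz, 1024-frame chunk */
	if (audio_sync_cutoff(20000000, 0, 1024, 48000, &skip))
		printf("keep chunk, skip %u samples\n", skip); /* skip == 960 */
	return 0;
}

The prepare_audio() helper added in the ffmpeg output below applies the same
cutoff directly to obs audio_data buffers, advancing each plane pointer by the
cutoff and reducing the frame count.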

Also added a restart message to the audio settings that is shown if base
audio settings are changed.
jp9000 2014-02-24 01:48:14 -07:00
parent 268e4e7811
commit 6c2d067e05
3 changed files with 60 additions and 8 deletions


@@ -288,6 +288,19 @@
         </property>
        </widget>
       </item>
+      <item row="8" column="1">
+       <widget class="QLabel" name="audioMsg">
+        <property name="styleSheet">
+         <string notr="true">color: rgb(255, 0, 4);</string>
+        </property>
+        <property name="text">
+         <string notr="true"/>
+        </property>
+        <property name="wordWrap">
+         <bool>true</bool>
+        </property>
+       </widget>
+      </item>
      </layout>
     </widget>
     <widget class="QWidget" name="videoPage">


@@ -493,7 +493,7 @@ void OBSBasicSettings::on_sampleRate_currentIndexChanged(int index)
 {
 	if (!loading) {
 		audioChanged = true;
-		ui->videoMsg->setText(QTStr("Settings.ProgramRestart"));
+		ui->audioMsg->setText(QTStr("Settings.ProgramRestart"));
 	}
 
 	UNUSED_PARAMETER(index);
@@ -503,7 +503,7 @@ void OBSBasicSettings::on_channelSetup_currentIndexChanged(int index)
 {
 	if (!loading) {
 		audioChanged = true;
-		ui->videoMsg->setText(QTStr("Settings.ProgramRestart"));
+		ui->audioMsg->setText(QTStr("Settings.ProgramRestart"));
 	}
 
 	UNUSED_PARAMETER(index);
@@ -513,7 +513,7 @@ void OBSBasicSettings::on_audioBufferingTime_valueChanged(int value)
 {
 	if (!loading) {
 		audioChanged = true;
-		ui->videoMsg->setText(QTStr("Settings.ProgramRestart"));
+		ui->audioMsg->setText(QTStr("Settings.ProgramRestart"));
 	}
 
 	UNUSED_PARAMETER(value);


@@ -37,6 +37,9 @@ struct ffmpeg_data {
 	int frame_size;
 	int total_frames;
 
+	uint64_t start_timestamp;
+
+	uint32_t audio_samplerate;
 	enum audio_format audio_format;
 	size_t audio_planes;
 	size_t audio_size;
@@ -255,6 +258,7 @@ static bool create_audio_stream(struct ffmpeg_data *data)
 	context->sample_fmt = data->acodec->sample_fmts ?
 		data->acodec->sample_fmts[0] : AV_SAMPLE_FMT_FLTP;
 
+	data->audio_samplerate = aoi.samples_per_sec;
 	data->audio_format = convert_ffmpeg_sample_format(context->sample_fmt);
 	data->audio_planes = get_audio_planes(data->audio_format, aoi.speakers);
 	data->audio_size = get_audio_size(data->audio_format, aoi.speakers, 1);
@@ -388,8 +392,7 @@ static void ffmpeg_log_callback(void *param, int bla, const char *format,
 	UNUSED_PARAMETER(bla);
 }
 
-static void *ffmpeg_output_create(obs_data_t settings,
-		obs_output_t output)
+static void *ffmpeg_output_create(obs_data_t settings, obs_output_t output)
 {
 	struct ffmpeg_output *data = bzalloc(sizeof(struct ffmpeg_output));
 	data->output = output;
@@ -452,6 +455,9 @@ static void receive_video(void *param, const struct video_data *frame)
 	av_init_packet(&packet);
 
+	if (!data->start_timestamp)
+		data->start_timestamp = frame->timestamp;
+
 	if (context->pix_fmt != AV_PIX_FMT_YUV420P)
 		sws_scale(data->swscale, frame->data,
 				(const int*)frame->linesize,
@@ -548,18 +554,51 @@ static inline void encode_audio(struct ffmpeg_data *output,
 			av_err2str(ret));
 }
 
+static bool prepare_audio(struct ffmpeg_data *data,
+		const struct audio_data *frame, struct audio_data *output)
+{
+	*output = *frame;
+
+	if (frame->timestamp < data->start_timestamp) {
+		uint64_t duration = (uint64_t)frame->frames * 1000000000 /
+			(uint64_t)data->audio_samplerate;
+		uint64_t end_ts = (frame->timestamp + duration);
+		uint64_t cutoff;
+
+		if (end_ts <= data->start_timestamp)
+			return false;
+
+		cutoff = data->start_timestamp - frame->timestamp;
+		cutoff = cutoff * (uint64_t)data->audio_samplerate /
+			1000000000;
+
+		for (size_t i = 0; i < data->audio_planes; i++)
+			output->data[i] += data->audio_size * (uint32_t)cutoff;
+		output->frames -= (uint32_t)cutoff;
+	}
+
+	return true;
+}
+
 static void receive_audio(void *param, const struct audio_data *frame)
 {
 	struct ffmpeg_output *output = param;
 	struct ffmpeg_data *data = &output->ff_data;
+	size_t frame_size_bytes;
+	struct audio_data in;
 
 	AVCodecContext *context = data->audio->codec;
-	size_t frame_size_bytes = (size_t)data->frame_size * data->audio_size;
+
+	if (!data->start_timestamp)
+		return;
+	if (!prepare_audio(data, frame, &in))
+		return;
+
+	frame_size_bytes = (size_t)data->frame_size * data->audio_size;
 
 	for (size_t i = 0; i < data->audio_planes; i++)
-		circlebuf_push_back(&data->excess_frames[i], frame->data[0],
-				frame->frames * data->audio_size);
+		circlebuf_push_back(&data->excess_frames[i], in.data[0],
+				in.frames * data->audio_size);
 
 	while (data->excess_frames[0].size >= frame_size_bytes) {
 		for (size_t i = 0; i < data->audio_planes; i++)