- Add some temporary streaming code using FFmpeg.  FFmpeg itself is not
  ideal for streaming: the lack of direct control over the sockets and
  the absence of frame-drop handling mean it is not something you want
  to use without wrapper code.  I'd prefer writing my own network
  framework in this particular case, just because you give away so much
  control of the network interface otherwise.  I wasted an entire day
  digging through FFmpeg issues.  FFmpeg really shouldn't be used for
  real streaming (at least not without being patched, or without
  submitting some sort of patch upstream, though I'm feeling "meh" on
  that idea).  I ended up having to write multiple threads just to
  handle connecting and writing, because av_interleaved_write_frame
  blocks on every call, stalling the main encoder thread and, with it,
  the draw signals.

- Add a temporary user interface for streaming settings.  This is just
  a placeholder for the time being; it lives in the outputs section of
  the basic-mode settings.

- Make it so that dynamic arrays no longer free their data when their
  size happens to be reduced to 0.  This prevents constant reallocation
  when an array keeps going from 1 item to 0 items, and depending on
  that implicit behavior was a bad idea anyway.  You must now always
  explicitly call "free" to release the data, which is how it should
  be: implicit functionality can lead to confusion and maintainability
  issues.  (A sketch of the new usage follows this list.)
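
A minimal sketch of the new dynamic array contract, using the da_*
macros from util/darray.h (the variable names are just for
illustration):

    int value = 1;
    DARRAY(int) values;

    da_init(values);
    da_push_back(values, &value); /* allocates; values.num == 1 */
    da_erase(values, 0);          /* values.num == 0, capacity is kept */
    da_push_back(values, &value); /* reuses the existing allocation */
    da_free(values);              /* memory is only released here */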

/******************************************************************************
    Copyright (C) 2014 by Hugh Bailey <obs.jim@gmail.com>

    This program is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program.  If not, see <http://www.gnu.org/licenses/>.
******************************************************************************/

#include <obs.h>
#include <util/circlebuf.h>
#include <util/threading.h>
#include <util/dstr.h>
#include <util/darray.h>
#include <util/platform.h>

#include <libavutil/opt.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>

/* NOTE: much of this is test code that was more or less copied from
 * the muxing.c FFmpeg example */

struct ffmpeg_data {
	AVStream *video;
	AVStream *audio;
	AVCodec *acodec;
	AVCodec *vcodec;
	AVFormatContext *output;
	struct SwsContext *swscale;

	int video_bitrate;
	AVPicture dst_picture;
	AVFrame *vframe;
	int frame_size;
	int total_frames;

	uint64_t start_timestamp;

	int audio_bitrate;
	uint32_t audio_samplerate;
	enum audio_format audio_format;
	size_t audio_planes;
	size_t audio_size;
	struct circlebuf excess_frames[MAX_AV_PLANES];
	uint8_t *samples[MAX_AV_PLANES];
	AVFrame *aframe;
	int total_samples;

	const char *filename_test;

	bool initialized;
};

struct ffmpeg_output {
	obs_output_t output;
	volatile bool active;
	struct ffmpeg_data ff_data;

	bool connecting;
	pthread_t start_thread;

	bool write_thread_active;
	pthread_mutex_t write_mutex;
	pthread_t write_thread;
	sem_t write_sem;
	event_t stop_event;

	DARRAY(AVPacket) packets;
};

/* ------------------------------------------------------------------------- */

static inline enum AVPixelFormat obs_to_ffmpeg_video_format(
		enum video_format format)
{
	switch (format) {
	case VIDEO_FORMAT_NONE: return AV_PIX_FMT_NONE;
	case VIDEO_FORMAT_I420: return AV_PIX_FMT_YUV420P;
	case VIDEO_FORMAT_NV12: return AV_PIX_FMT_NV12;
	case VIDEO_FORMAT_YVYU: return AV_PIX_FMT_NONE;
	case VIDEO_FORMAT_YUY2: return AV_PIX_FMT_YUYV422;
	case VIDEO_FORMAT_UYVY: return AV_PIX_FMT_UYVY422;
	case VIDEO_FORMAT_RGBA: return AV_PIX_FMT_RGBA;
	case VIDEO_FORMAT_BGRA: return AV_PIX_FMT_BGRA;
	case VIDEO_FORMAT_BGRX: return AV_PIX_FMT_BGRA;
	}

	return AV_PIX_FMT_NONE;
}

static inline enum audio_format convert_ffmpeg_sample_format(
		enum AVSampleFormat format)
{
	switch ((uint32_t)format) {
	case AV_SAMPLE_FMT_U8:   return AUDIO_FORMAT_U8BIT;
	case AV_SAMPLE_FMT_S16:  return AUDIO_FORMAT_16BIT;
	case AV_SAMPLE_FMT_S32:  return AUDIO_FORMAT_32BIT;
	case AV_SAMPLE_FMT_FLT:  return AUDIO_FORMAT_FLOAT;
	case AV_SAMPLE_FMT_U8P:  return AUDIO_FORMAT_U8BIT_PLANAR;
	case AV_SAMPLE_FMT_S16P: return AUDIO_FORMAT_16BIT_PLANAR;
	case AV_SAMPLE_FMT_S32P: return AUDIO_FORMAT_32BIT_PLANAR;
	case AV_SAMPLE_FMT_FLTP: return AUDIO_FORMAT_FLOAT_PLANAR;
	}

	/* shouldn't get here */
	return AUDIO_FORMAT_16BIT;
}
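
/* Looks up the encoder for the given codec ID and adds a corresponding
 * stream to the muxer context. */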
static bool new_stream(struct ffmpeg_data *data, AVStream **stream,
		AVCodec **codec, enum AVCodecID id)
{
	*codec = avcodec_find_encoder(id);
	if (!*codec) {
		blog(LOG_WARNING, "Couldn't find encoder '%s'",
				avcodec_get_name(id));
		return false;
	}

	*stream = avformat_new_stream(data->output, *codec);
	if (!*stream) {
		blog(LOG_WARNING, "Couldn't create stream for encoder '%s'",
				avcodec_get_name(id));
		return false;
	}

	(*stream)->id = data->output->nb_streams-1;
	return true;
}

static bool open_video_codec(struct ffmpeg_data *data)
{
	AVCodecContext *context = data->video->codec;
	int ret;

	if (data->vcodec->id == AV_CODEC_ID_H264) {
		av_opt_set(context->priv_data, "preset", "veryfast", 0);
		av_opt_set(context->priv_data, "x264-params", "nal-hrd=cbr", 0);
	}

	ret = avcodec_open2(context, data->vcodec, NULL);
	if (ret < 0) {
		blog(LOG_WARNING, "Failed to open video codec: %s",
				av_err2str(ret));
		return false;
	}

	data->vframe = av_frame_alloc();
	if (!data->vframe) {
		blog(LOG_WARNING, "Failed to allocate video frame");
		return false;
	}

	data->vframe->format = context->pix_fmt;
	data->vframe->width = context->width;
	data->vframe->height = context->height;

	ret = avpicture_alloc(&data->dst_picture, context->pix_fmt,
			context->width, context->height);
	if (ret < 0) {
		blog(LOG_WARNING, "Failed to allocate dst_picture: %s",
				av_err2str(ret));
		return false;
	}

	/* point the frame's data/linesize at the allocated picture buffer */
	*((AVPicture*)data->vframe) = data->dst_picture;
	return true;
}

static bool init_swscale(struct ffmpeg_data *data, AVCodecContext *context)
{
	data->swscale = sws_getContext(
			context->width, context->height, AV_PIX_FMT_YUV420P,
			context->width, context->height, context->pix_fmt,
			SWS_BICUBIC, NULL, NULL, NULL);

	if (!data->swscale) {
		blog(LOG_WARNING, "Could not initialize swscale");
		return false;
	}

	return true;
}

static bool create_video_stream(struct ffmpeg_data *data)
{
	AVCodecContext *context;
	struct obs_video_info ovi;

	if (!obs_get_video_info(&ovi)) {
		blog(LOG_WARNING, "No active video");
		return false;
	}

	if (!new_stream(data, &data->video, &data->vcodec,
				data->output->oformat->video_codec))
		return false;

	context = data->video->codec;
	context->codec_id = data->output->oformat->video_codec;
	context->bit_rate = data->video_bitrate * 1000;
	context->rc_buffer_size = data->video_bitrate * 1000;
	context->rc_max_rate = data->video_bitrate * 1000;
	context->width = ovi.output_width;
	context->height = ovi.output_height;
	context->time_base.num = ovi.fps_den;
	context->time_base.den = ovi.fps_num;
	context->gop_size = 120;
	context->pix_fmt = AV_PIX_FMT_YUV420P;

	if (data->output->oformat->flags & AVFMT_GLOBALHEADER)
		context->flags |= CODEC_FLAG_GLOBAL_HEADER;

	if (!open_video_codec(data))
		return false;

	if (context->pix_fmt != AV_PIX_FMT_YUV420P)
		if (!init_swscale(data, context))
			return false;

	return true;
}

static bool open_audio_codec(struct ffmpeg_data *data)
{
	AVCodecContext *context = data->audio->codec;
	int ret;

	data->aframe = av_frame_alloc();
	if (!data->aframe) {
		blog(LOG_WARNING, "Failed to allocate audio frame");
		return false;
	}

	/* allow experimental encoders (e.g. FFmpeg's native AAC encoder) */
	context->strict_std_compliance = -2;

	ret = avcodec_open2(context, data->acodec, NULL);
	if (ret < 0) {
		blog(LOG_WARNING, "Failed to open audio codec: %s",
				av_err2str(ret));
		return false;
	}

	data->frame_size = context->frame_size ? context->frame_size : 1024;

	ret = av_samples_alloc(data->samples, NULL, context->channels,
			data->frame_size, context->sample_fmt, 0);
	if (ret < 0) {
		blog(LOG_WARNING, "Failed to create audio buffer: %s",
				av_err2str(ret));
		return false;
	}

	return true;
}

static bool create_audio_stream(struct ffmpeg_data *data)
{
	AVCodecContext *context;
	struct audio_output_info aoi;

	if (!obs_get_audio_info(&aoi)) {
		blog(LOG_WARNING, "No active audio");
		return false;
	}

	if (!new_stream(data, &data->audio, &data->acodec,
				data->output->oformat->audio_codec))
		return false;

	context = data->audio->codec;
	context->bit_rate = data->audio_bitrate * 1000;
	context->channels = get_audio_channels(aoi.speakers);
	context->sample_rate = aoi.samples_per_sec;
	context->sample_fmt = data->acodec->sample_fmts ?
		data->acodec->sample_fmts[0] : AV_SAMPLE_FMT_FLTP;

	data->audio_samplerate = aoi.samples_per_sec;
	data->audio_format = convert_ffmpeg_sample_format(context->sample_fmt);
	data->audio_planes = get_audio_planes(data->audio_format, aoi.speakers);
	data->audio_size = get_audio_size(data->audio_format, aoi.speakers, 1);

	if (data->output->oformat->flags & AVFMT_GLOBALHEADER)
		context->flags |= CODEC_FLAG_GLOBAL_HEADER;

	return open_audio_codec(data);
}

static inline bool init_streams(struct ffmpeg_data *data)
{
	AVOutputFormat *format = data->output->oformat;

	if (format->video_codec != AV_CODEC_ID_NONE)
		if (!create_video_stream(data))
			return false;

	if (format->audio_codec != AV_CODEC_ID_NONE)
		if (!create_audio_stream(data))
			return false;

	return true;
}
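
/* Opens the output file (or network URL such as rtmp://) via avio_open()
 * and writes the container header.  Formats flagged AVFMT_NOFILE do
 * their own I/O, so no explicit open is needed for them. */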
static inline bool open_output_file(struct ffmpeg_data *data)
{
	AVOutputFormat *format = data->output->oformat;
	int ret;

	if ((format->flags & AVFMT_NOFILE) == 0) {
		ret = avio_open(&data->output->pb, data->filename_test,
				AVIO_FLAG_WRITE);
		if (ret < 0) {
			blog(LOG_WARNING, "Couldn't open file '%s', %s",
					data->filename_test, av_err2str(ret));
			return false;
		}
	}

	ret = avformat_write_header(data->output, NULL);
	if (ret < 0) {
		blog(LOG_WARNING, "Error opening file '%s': %s",
				data->filename_test, av_err2str(ret));
		return false;
	}

	return true;
}

static void close_video(struct ffmpeg_data *data)
{
	avcodec_close(data->video->codec);
	avpicture_free(&data->dst_picture);
	av_frame_free(&data->vframe);
}

static void close_audio(struct ffmpeg_data *data)
{
	for (size_t i = 0; i < MAX_AV_PLANES; i++)
		circlebuf_free(&data->excess_frames[i]);

	av_freep(&data->samples[0]);
	avcodec_close(data->audio->codec);
	av_frame_free(&data->aframe);
}

static void ffmpeg_data_free(struct ffmpeg_data *data)
{
	if (data->initialized)
		av_write_trailer(data->output);

	if (data->video)
		close_video(data);
	if (data->audio)
		close_audio(data);

	/* data->output can be NULL if allocating the context failed */
	if (data->output) {
		if ((data->output->oformat->flags & AVFMT_NOFILE) == 0)
			avio_close(data->output->pb);

		avformat_free_context(data->output);
	}

	memset(data, 0, sizeof(struct ffmpeg_data));
}

static bool ffmpeg_data_init(struct ffmpeg_data *data, const char *filename)
{
	bool is_rtmp = false;

	memset(data, 0, sizeof(struct ffmpeg_data));
	data->filename_test = filename;

	if (!filename || !*filename)
		return false;

	av_register_all();
	avformat_network_init();

	is_rtmp = (astrcmp_n(filename, "rtmp://", 7) == 0);

	/* TODO: settings */
	avformat_alloc_output_context2(&data->output, NULL,
			is_rtmp ? "flv" : NULL, data->filename_test);
	if (!data->output) {
		blog(LOG_WARNING, "Couldn't create avformat context");
		goto fail;
	}

	data->output->oformat->video_codec = AV_CODEC_ID_H264;
	data->output->oformat->audio_codec = AV_CODEC_ID_AAC;

	if (!init_streams(data))
		goto fail;
	if (!open_output_file(data))
		goto fail;

	av_dump_format(data->output, 0, NULL, 1);

	data->initialized = true;
	return true;

fail:
	blog(LOG_WARNING, "ffmpeg_data_init failed");
	ffmpeg_data_free(data);
	return false;
}

/* ------------------------------------------------------------------------- */

static const char *ffmpeg_output_getname(const char *locale)
{
	UNUSED_PARAMETER(locale);
	return "FFmpeg file output";
}

static void ffmpeg_log_callback(void *param, int level, const char *format,
		va_list args)
{
	if (level <= AV_LOG_INFO)
		blogva(LOG_DEBUG, format, args);

	UNUSED_PARAMETER(param);
}

static void *ffmpeg_output_create(obs_data_t settings, obs_output_t output)
{
	struct ffmpeg_output *data = bzalloc(sizeof(struct ffmpeg_output));
	pthread_mutex_init_value(&data->write_mutex);
	data->output = output;

	if (pthread_mutex_init(&data->write_mutex, NULL) != 0)
		goto fail;
	if (event_init(&data->stop_event, EVENT_TYPE_AUTO) != 0)
		goto fail;
	if (sem_init(&data->write_sem, 0, 0) != 0)
		goto fail;

	signal_handler_add(obs_output_signalhandler(output),
			"void connect(ptr output, bool success)");

	av_log_set_callback(ffmpeg_log_callback);

	UNUSED_PARAMETER(settings);
	return data;

fail:
	pthread_mutex_destroy(&data->write_mutex);
	event_destroy(data->stop_event);
	bfree(data);
	return NULL;
}

static void ffmpeg_output_stop(void *data);

static void ffmpeg_output_destroy(void *data)
{
	struct ffmpeg_output *output = data;

	if (output) {
		if (output->connecting)
			pthread_join(output->start_thread, NULL);

		ffmpeg_output_stop(output);

		pthread_mutex_destroy(&output->write_mutex);
		sem_destroy(&output->write_sem);
		event_destroy(output->stop_event);
		bfree(data);
	}
}
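
/* Rescales a packet timestamp from the codec time base to the time base
 * of the output stream chosen by the muxer. */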
static inline int64_t rescale_ts(int64_t val, AVCodecContext *context,
		AVStream *stream)
{
	return av_rescale_q_rnd(val, context->time_base, stream->time_base,
			AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX);
}

#define YUV420_PLANES 3
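
/* Copies a raw YUV420P frame into the destination picture row by row,
 * because the source and destination line sizes (strides) can differ. */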
static inline void copy_data(AVPicture *pic, const struct video_data *frame,
		int height)
{
	for (int plane = 0; plane < YUV420_PLANES; plane++) {
		int frame_rowsize = (int)frame->linesize[plane];
		int pic_rowsize = pic->linesize[plane];
		int bytes = frame_rowsize < pic_rowsize ?
			frame_rowsize : pic_rowsize;
		int plane_height = plane == 0 ? height : height/2;

		for (int y = 0; y < plane_height; y++) {
			int pos_frame = y * frame_rowsize;
			int pos_pic = y * pic_rowsize;

			memcpy(pic->data[plane] + pos_pic,
					frame->data[plane] + pos_frame,
					bytes);
		}
	}
}

static void receive_video(void *param, const struct video_data *frame)
{
	struct ffmpeg_output *output = param;
	struct ffmpeg_data *data = &output->ff_data;
	AVCodecContext *context = data->video->codec;
	AVPacket packet = {0};
	int ret = 0, got_packet;

	av_init_packet(&packet);

	if (!data->start_timestamp)
		data->start_timestamp = frame->timestamp;

	if (context->pix_fmt != AV_PIX_FMT_YUV420P)
		sws_scale(data->swscale,
				(const uint8_t *const *)frame->data,
				(const int*)frame->linesize,
				0, context->height, data->dst_picture.data,
				data->dst_picture.linesize);
	else
		copy_data(&data->dst_picture, frame, context->height);

	if (data->output->oformat->flags & AVFMT_RAWPICTURE) {
		packet.flags |= AV_PKT_FLAG_KEY;
		packet.stream_index = data->video->index;
		packet.data = data->dst_picture.data[0];
		packet.size = sizeof(AVPicture);

		pthread_mutex_lock(&output->write_mutex);
		da_push_back(output->packets, &packet);
		pthread_mutex_unlock(&output->write_mutex);
		sem_post(&output->write_sem);

	} else {
		data->vframe->pts = data->total_frames;
		ret = avcodec_encode_video2(context, &packet, data->vframe,
				&got_packet);
		if (ret < 0) {
			blog(LOG_WARNING, "receive_video: Error encoding "
					"video: %s", av_err2str(ret));
			return;
		}

		if (!ret && got_packet && packet.size) {
			packet.pts = rescale_ts(packet.pts, context,
					data->video);
			packet.dts = rescale_ts(packet.dts, context,
					data->video);
			packet.duration = (int)av_rescale_q(packet.duration,
					context->time_base,
					data->video->time_base);

			pthread_mutex_lock(&output->write_mutex);
			da_push_back(output->packets, &packet);
			pthread_mutex_unlock(&output->write_mutex);
			sem_post(&output->write_sem);
		} else {
			ret = 0;
		}
	}

	if (ret != 0) {
		blog(LOG_WARNING, "receive_video: Error writing video: %s",
				av_err2str(ret));
	}

	data->total_frames++;
}
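
/* Encodes one frame_size-sample chunk of audio, deriving the pts from
 * the running sample count, and queues the resulting packet for the
 * write thread instead of writing it here. */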
static inline void encode_audio(struct ffmpeg_output *output,
		struct AVCodecContext *context, size_t block_size)
{
	struct ffmpeg_data *data = &output->ff_data;

	AVPacket packet = {0};
	int ret, got_packet;
	size_t total_size = data->frame_size * block_size * context->channels;

	data->aframe->nb_samples = data->frame_size;
	data->aframe->pts = av_rescale_q(data->total_samples,
			(AVRational){1, context->sample_rate},
			context->time_base);

	ret = avcodec_fill_audio_frame(data->aframe, context->channels,
			context->sample_fmt, data->samples[0],
			(int)total_size, 1);
	if (ret < 0) {
		blog(LOG_WARNING, "encode_audio: avcodec_fill_audio_frame "
				"failed: %s", av_err2str(ret));
		return;
	}

	data->total_samples += data->frame_size;

	ret = avcodec_encode_audio2(context, &packet, data->aframe,
			&got_packet);
	if (ret < 0) {
		blog(LOG_WARNING, "encode_audio: Error encoding audio: %s",
				av_err2str(ret));
		return;
	}

	if (!got_packet)
		return;

	packet.pts = rescale_ts(packet.pts, context, data->audio);
	packet.dts = rescale_ts(packet.dts, context, data->audio);
	packet.duration = (int)av_rescale_q(packet.duration,
			context->time_base, data->audio->time_base);
	packet.stream_index = data->audio->index;

	pthread_mutex_lock(&output->write_mutex);
	da_push_back(output->packets, &packet);
	pthread_mutex_unlock(&output->write_mutex);
	sem_post(&output->write_sem);
}
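
/* Trims off any audio that precedes the first video frame so that audio
 * and video start at the same point in the output. */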
static bool prepare_audio(struct ffmpeg_data *data,
		const struct audio_data *frame, struct audio_data *output)
{
	*output = *frame;

	if (frame->timestamp < data->start_timestamp) {
		uint64_t duration = (uint64_t)frame->frames * 1000000000 /
			(uint64_t)data->audio_samplerate;
		uint64_t end_ts = (frame->timestamp + duration);
		uint64_t cutoff;

		if (end_ts <= data->start_timestamp)
			return false;

		cutoff = data->start_timestamp - frame->timestamp;
		cutoff = cutoff * (uint64_t)data->audio_samplerate /
			1000000000;

		for (size_t i = 0; i < data->audio_planes; i++)
			output->data[i] += data->audio_size * (uint32_t)cutoff;
		output->frames -= (uint32_t)cutoff;
	}

	return true;
}
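
/* Buffers incoming audio in circular buffers and feeds it to the encoder
 * in fixed frame_size chunks, since the encoder's frame size may not
 * match the block size delivered by the audio subsystem. */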
static void receive_audio(void *param, const struct audio_data *frame)
{
	struct ffmpeg_output *output = param;
	struct ffmpeg_data *data = &output->ff_data;
	size_t frame_size_bytes;
	struct audio_data in;

	AVCodecContext *context = data->audio->codec;

	if (!data->start_timestamp)
		return;
	if (!prepare_audio(data, frame, &in))
		return;

	frame_size_bytes = (size_t)data->frame_size * data->audio_size;

	for (size_t i = 0; i < data->audio_planes; i++)
		circlebuf_push_back(&data->excess_frames[i], in.data[i],
				in.frames * data->audio_size);

	while (data->excess_frames[0].size >= frame_size_bytes) {
		for (size_t i = 0; i < data->audio_planes; i++)
			circlebuf_pop_front(&data->excess_frames[i],
					data->samples[i], frame_size_bytes);

		encode_audio(output, context, data->audio_size);
	}
}
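
/* Pops one queued packet and hands it to the muxer.  This runs on the
 * write thread because av_interleaved_write_frame() blocks on every
 * call, which would otherwise stall the encoder thread (see the commit
 * notes above). */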
static bool process_packet(struct ffmpeg_output *output)
{
	AVPacket packet;
	bool new_packet = false;
	uint64_t time1, time2;
	int ret;

	pthread_mutex_lock(&output->write_mutex);
	if (output->packets.num) {
		packet = output->packets.array[0];
		da_erase(output->packets, 0);
		new_packet = true;
	}
	pthread_mutex_unlock(&output->write_mutex);

	if (!new_packet)
		return true;

	time1 = os_gettime_ns();

	ret = av_interleaved_write_frame(output->ff_data.output, &packet);
	if (ret < 0) {
		blog(LOG_WARNING, "process_packet: Error writing packet: %s",
				av_err2str(ret));

		/* detach and mark the write thread inactive so that
		 * ffmpeg_output_stop(), called from this same thread,
		 * does not try to join it */
		pthread_detach(output->write_thread);
		output->write_thread_active = false;
		return false;
	}

	time2 = os_gettime_ns();
	/*blog(LOG_DEBUG, "%llu, size = %d, flags = %lX, stream = %d, "
			"packets queued: %lu: time1; %llu",
			time2-time1, packet.size, packet.flags,
			packet.stream_index, output->packets.num, time1);*/

	return true;
}
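
/* Write thread: sleeps on the semaphore until a packet is queued (or the
 * stop event fires), then writes packets out via process_packet(). */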
static void *write_thread(void *data)
{
	struct ffmpeg_output *output = data;

	while (sem_wait(&output->write_sem) == 0) {
		/* check to see if shutting down */
		if (event_try(output->stop_event) == 0)
			break;

		if (!process_packet(output)) {
			ffmpeg_output_stop(output);
			break;
		}
	}

	output->active = false;
	return NULL;
}

static bool try_connect(struct ffmpeg_output *output)
{
	video_t video = obs_video();
	audio_t audio = obs_audio();
	const char *filename_test;
	obs_data_t settings;
	int audio_bitrate, video_bitrate;
	int ret;

	if (!video || !audio) {
		blog(LOG_WARNING, "ffmpeg_output_start: audio and video must "
				"both be active (as of this writing)");
		return false;
	}

	settings = obs_output_get_settings(output->output);
	filename_test = obs_data_getstring(settings, "filename");
	video_bitrate = (int)obs_data_getint(settings, "video_bitrate");
	audio_bitrate = (int)obs_data_getint(settings, "audio_bitrate");
	obs_data_release(settings);

	if (!filename_test || !*filename_test)
		return false;

	output->ff_data.video_bitrate = video_bitrate;
	output->ff_data.audio_bitrate = audio_bitrate;

	if (!ffmpeg_data_init(&output->ff_data, filename_test))
		return false;

	struct audio_convert_info aci = {
		.format = output->ff_data.audio_format
	};

	struct video_scale_info vsi = {
		.format = VIDEO_FORMAT_I420
	};

	output->active = true;

	ret = pthread_create(&output->write_thread, NULL, write_thread,
			output);
	if (ret != 0) {
		blog(LOG_WARNING, "ffmpeg_output_start: failed to create "
				"write thread.");
		ffmpeg_output_stop(output);
		return false;
	}

	video_output_connect(video, &vsi, receive_video, output);
	audio_output_connect(audio, &aci, receive_audio, output);
	output->write_thread_active = true;
	return true;
}
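
/* Connecting can block (e.g. avio_open() on a network URL), so it runs
 * on its own thread; the result is reported through the "connect"
 * signal. */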
static void *start_thread(void *data)
{
	struct ffmpeg_output *output = data;
	struct calldata params = {0};

	bool success = try_connect(output);

	output->connecting = false;

	calldata_setbool(&params, "success", success);
	calldata_setptr(&params, "output", output->output);
	signal_handler_signal(obs_output_signalhandler(output->output),
			"connect", &params);
	calldata_free(&params);

	return NULL;
}

static bool ffmpeg_output_start(void *data)
{
	struct ffmpeg_output *output = data;
	int ret;

	if (output->connecting)
		return false;

	ret = pthread_create(&output->start_thread, NULL, start_thread,
			output);
	return (output->connecting = (ret == 0));
}
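
/* Stops the output: disconnects from the audio/video subsystems, shuts
 * down the write thread, then frees any still-queued packets.  Note the
 * explicit da_free(); emptying a dynamic array no longer releases its
 * memory. */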
static void ffmpeg_output_stop(void *data)
{
	struct ffmpeg_output *output = data;

	if (output->active) {
		video_output_disconnect(obs_video(), receive_video, data);
		audio_output_disconnect(obs_audio(), receive_audio, data);

		if (output->write_thread_active) {
			event_signal(output->stop_event);
			sem_post(&output->write_sem);
			pthread_join(output->write_thread, NULL);
			output->write_thread_active = false;
		}

		for (size_t i = 0; i < output->packets.num; i++)
			av_free_packet(output->packets.array+i);

		da_free(output->packets);
		ffmpeg_data_free(&output->ff_data);
	}
}

static bool ffmpeg_output_active(void *data)
{
	struct ffmpeg_output *output = data;
	return output->active;
}

struct obs_output_info ffmpeg_output = {
	.id      = "ffmpeg_output",
	.getname = ffmpeg_output_getname,
	.create  = ffmpeg_output_create,
	.destroy = ffmpeg_output_destroy,
	.start   = ffmpeg_output_start,
	.stop    = ffmpeg_output_stop,
	.active  = ffmpeg_output_active
};