clang-format: Apply formatting

Code submissions have long suffered from formatting inconsistencies that
repeatedly have to be addressed in review.  Using clang-format makes the
code style consistent and lets formatting be automated, so maintainers
can focus on the code itself rather than on how it is formatted.
Author: jp9000
Date:   2019-06-22 22:13:45 -07:00
parent  53615ee10f
commit  f53df7da64
567 changed files with 34068 additions and 32903 deletions
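
The style visible in the hunks below (tab indentation, an 80-column limit, continuation lines aligned to the opening parenthesis, no alignment of consecutive assignments or designated initializers) is driven by a .clang-format file at the repository root. As a rough sketch only — the exact options shipped with this commit may differ — a configuration along these lines reproduces that wrapping:

    # .clang-format (illustrative sketch, not necessarily the committed file)
    BasedOnStyle: LLVM
    IndentWidth: 8
    TabWidth: 8
    UseTab: ForContinuationAndIndentation
    ColumnLimit: 80
    AlignAfterOpenBracket: Align
    AlignConsecutiveAssignments: false

With the file in place, reformatting the tree is a single command that can also be wired into CI or a pre-commit hook (the pathspecs here are illustrative and should be adjusted to the project layout):

    # reformat tracked C/C++ sources in place
    clang-format -i $(git ls-files '*.c' '*.h' '*.cpp' '*.hpp')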


@@ -1,82 +1,42 @@
#pragma once
static const enum AVPixelFormat i420_formats[] = {
AV_PIX_FMT_YUV420P,
AV_PIX_FMT_NV12,
AV_PIX_FMT_NV21,
AV_PIX_FMT_YUYV422,
AV_PIX_FMT_UYVY422,
AV_PIX_FMT_YUV422P,
AV_PIX_FMT_YUV444P,
AV_PIX_FMT_NONE
};
AV_PIX_FMT_YUV420P, AV_PIX_FMT_NV12, AV_PIX_FMT_NV21,
AV_PIX_FMT_YUYV422, AV_PIX_FMT_UYVY422, AV_PIX_FMT_YUV422P,
AV_PIX_FMT_YUV444P, AV_PIX_FMT_NONE};
static const enum AVPixelFormat nv12_formats[] = {
AV_PIX_FMT_NV12,
AV_PIX_FMT_NV21,
AV_PIX_FMT_YUV420P,
AV_PIX_FMT_YUYV422,
AV_PIX_FMT_UYVY422,
AV_PIX_FMT_YUV444P,
AV_PIX_FMT_NONE
};
AV_PIX_FMT_NV12, AV_PIX_FMT_NV21, AV_PIX_FMT_YUV420P,
AV_PIX_FMT_YUYV422, AV_PIX_FMT_UYVY422, AV_PIX_FMT_YUV444P,
AV_PIX_FMT_NONE};
static const enum AVPixelFormat i444_formats[] = {
AV_PIX_FMT_YUV444P,
AV_PIX_FMT_RGBA,
AV_PIX_FMT_BGRA,
AV_PIX_FMT_YUYV422,
AV_PIX_FMT_UYVY422,
AV_PIX_FMT_NV12,
AV_PIX_FMT_NV21,
AV_PIX_FMT_NONE
};
AV_PIX_FMT_YUV444P, AV_PIX_FMT_RGBA, AV_PIX_FMT_BGRA,
AV_PIX_FMT_YUYV422, AV_PIX_FMT_UYVY422, AV_PIX_FMT_NV12,
AV_PIX_FMT_NV21, AV_PIX_FMT_NONE};
static const enum AVPixelFormat yuy2_formats[] = {
AV_PIX_FMT_YUYV422,
AV_PIX_FMT_UYVY422,
AV_PIX_FMT_NV12,
AV_PIX_FMT_NV21,
AV_PIX_FMT_YUV420P,
AV_PIX_FMT_YUV444P,
AV_PIX_FMT_NONE
};
AV_PIX_FMT_YUYV422, AV_PIX_FMT_UYVY422, AV_PIX_FMT_NV12,
AV_PIX_FMT_NV21, AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV444P,
AV_PIX_FMT_NONE};
static const enum AVPixelFormat uyvy_formats[] = {
AV_PIX_FMT_UYVY422,
AV_PIX_FMT_YUYV422,
AV_PIX_FMT_NV12,
AV_PIX_FMT_NV21,
AV_PIX_FMT_YUV420P,
AV_PIX_FMT_YUV444P,
AV_PIX_FMT_NONE
};
AV_PIX_FMT_UYVY422, AV_PIX_FMT_YUYV422, AV_PIX_FMT_NV12,
AV_PIX_FMT_NV21, AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV444P,
AV_PIX_FMT_NONE};
static const enum AVPixelFormat rgba_formats[] = {
AV_PIX_FMT_RGBA,
AV_PIX_FMT_BGRA,
AV_PIX_FMT_YUV444P,
AV_PIX_FMT_YUYV422,
AV_PIX_FMT_UYVY422,
AV_PIX_FMT_NV12,
AV_PIX_FMT_NV21,
AV_PIX_FMT_NONE
};
AV_PIX_FMT_RGBA, AV_PIX_FMT_BGRA, AV_PIX_FMT_YUV444P,
AV_PIX_FMT_YUYV422, AV_PIX_FMT_UYVY422, AV_PIX_FMT_NV12,
AV_PIX_FMT_NV21, AV_PIX_FMT_NONE};
static const enum AVPixelFormat bgra_formats[] = {
AV_PIX_FMT_BGRA,
AV_PIX_FMT_RGBA,
AV_PIX_FMT_YUV444P,
AV_PIX_FMT_YUYV422,
AV_PIX_FMT_UYVY422,
AV_PIX_FMT_NV12,
AV_PIX_FMT_NV21,
AV_PIX_FMT_NONE
};
AV_PIX_FMT_BGRA, AV_PIX_FMT_RGBA, AV_PIX_FMT_YUV444P,
AV_PIX_FMT_YUYV422, AV_PIX_FMT_UYVY422, AV_PIX_FMT_NV12,
AV_PIX_FMT_NV21, AV_PIX_FMT_NONE};
static enum AVPixelFormat get_best_format(
const enum AVPixelFormat *best,
const enum AVPixelFormat *formats)
static enum AVPixelFormat get_best_format(const enum AVPixelFormat *best,
const enum AVPixelFormat *formats)
{
while (*best != AV_PIX_FMT_NONE) {
enum AVPixelFormat best_format = *best;
@@ -97,9 +57,8 @@ static enum AVPixelFormat get_best_format(
return AV_PIX_FMT_NONE;
}
static inline enum AVPixelFormat get_closest_format(
enum AVPixelFormat format,
const enum AVPixelFormat *formats)
static inline enum AVPixelFormat
get_closest_format(enum AVPixelFormat format, const enum AVPixelFormat *formats)
{
enum AVPixelFormat best_format = AV_PIX_FMT_NONE;


@@ -39,8 +39,8 @@
struct resize_buf {
uint8_t *buf;
size_t size;
size_t capacity;
size_t size;
size_t capacity;
};
static inline void resize_buf_resize(struct resize_buf *rb, size_t size)
@@ -96,15 +96,15 @@ struct header {
};
struct ffmpeg_mux {
AVFormatContext *output;
AVStream *video_stream;
AVStream **audio_streams;
struct main_params params;
struct audio_params *audio;
struct header video_header;
struct header *audio_header;
int num_audio_streams;
bool initialized;
AVFormatContext *output;
AVStream *video_stream;
AVStream **audio_streams;
struct main_params params;
struct audio_params *audio;
struct header video_header;
struct header *audio_header;
int num_audio_streams;
bool initialized;
char error[4096];
};
@@ -158,7 +158,7 @@ static void ffmpeg_mux_free(struct ffmpeg_mux *ffm)
}
static bool get_opt_str(int *p_argc, char ***p_argv, char **str,
const char *opt)
const char *opt)
{
int argc = *p_argc;
char **argv = *p_argv;
@@ -187,7 +187,7 @@ static bool get_opt_int(int *p_argc, char ***p_argv, int *i, const char *opt)
}
static bool get_audio_params(struct audio_params *audio, int *argc,
char ***argv)
char ***argv)
{
if (!get_opt_str(argc, argv, &audio->name, "audio track name"))
return false;
@@ -201,7 +201,7 @@ static bool get_audio_params(struct audio_params *audio, int *argc,
}
static bool init_params(int *argc, char ***argv, struct main_params *params,
struct audio_params **p_audio)
struct audio_params **p_audio)
{
struct audio_params *audio = NULL;
@@ -228,7 +228,8 @@ static bool init_params(int *argc, char ***argv, struct main_params *params,
if (params->has_video) {
if (!get_opt_str(argc, argv, &params->vcodec, "video codec"))
return false;
if (!get_opt_int(argc, argv, &params->vbitrate,"video bitrate"))
if (!get_opt_int(argc, argv, &params->vbitrate,
"video bitrate"))
return false;
if (!get_opt_int(argc, argv, &params->width, "video width"))
return false;
@@ -262,7 +263,7 @@ static bool init_params(int *argc, char ***argv, struct main_params *params,
}
static bool new_stream(struct ffmpeg_mux *ffm, AVStream **stream,
const char *name, enum AVCodecID *id)
const char *name, enum AVCodecID *id)
{
const AVCodecDescriptor *desc = avcodec_descriptor_get_by_name(name);
AVCodec *codec;
@@ -282,11 +283,12 @@ static bool new_stream(struct ffmpeg_mux *ffm, AVStream **stream,
*stream = avformat_new_stream(ffm->output, codec);
if (!*stream) {
fprintf(stderr, "Couldn't create stream for encoder '%s'\n", name);
fprintf(stderr, "Couldn't create stream for encoder '%s'\n",
name);
return false;
}
(*stream)->id = ffm->output->nb_streams-1;
(*stream)->id = ffm->output->nb_streams - 1;
return true;
}
@@ -296,21 +298,21 @@ static void create_video_stream(struct ffmpeg_mux *ffm)
void *extradata = NULL;
if (!new_stream(ffm, &ffm->video_stream, ffm->params.vcodec,
&ffm->output->oformat->video_codec))
&ffm->output->oformat->video_codec))
return;
if (ffm->video_header.size) {
extradata = av_memdup(ffm->video_header.data,
ffm->video_header.size);
ffm->video_header.size);
}
context = ffm->video_stream->codec;
context->bit_rate = ffm->params.vbitrate * 1000;
context->width = ffm->params.width;
context->height = ffm->params.height;
context->coded_width = ffm->params.width;
context->coded_height = ffm->params.height;
context->extradata = extradata;
context = ffm->video_stream->codec;
context->bit_rate = ffm->params.vbitrate * 1000;
context->width = ffm->params.width;
context->height = ffm->params.height;
context->coded_width = ffm->params.width;
context->coded_height = ffm->params.height;
context->extradata = extradata;
context->extradata_size = ffm->video_header.size;
context->time_base =
(AVRational){ffm->params.fps_den, ffm->params.fps_num};
@@ -329,7 +331,7 @@ static void create_audio_stream(struct ffmpeg_mux *ffm, int idx)
void *extradata = NULL;
if (!new_stream(ffm, &stream, ffm->params.acodec,
&ffm->output->oformat->audio_codec))
&ffm->output->oformat->audio_codec))
return;
ffm->audio_streams[idx] = stream;
@@ -340,19 +342,19 @@ static void create_audio_stream(struct ffmpeg_mux *ffm, int idx)
if (ffm->audio_header[idx].size) {
extradata = av_memdup(ffm->audio_header[idx].data,
ffm->audio_header[idx].size);
ffm->audio_header[idx].size);
}
context = stream->codec;
context->bit_rate = ffm->audio[idx].abitrate * 1000;
context->channels = ffm->audio[idx].channels;
context->sample_rate = ffm->audio[idx].sample_rate;
context->sample_fmt = AV_SAMPLE_FMT_S16;
context->time_base = stream->time_base;
context->extradata = extradata;
context = stream->codec;
context->bit_rate = ffm->audio[idx].abitrate * 1000;
context->channels = ffm->audio[idx].channels;
context->sample_rate = ffm->audio[idx].sample_rate;
context->sample_fmt = AV_SAMPLE_FMT_S16;
context->time_base = stream->time_base;
context->extradata = extradata;
context->extradata_size = ffm->audio_header[idx].size;
context->channel_layout =
av_get_default_channel_layout(context->channels);
av_get_default_channel_layout(context->channels);
//AVlib default channel layout for 4 channels is 4.0 ; fix for quad
if (context->channels == 4)
context->channel_layout = av_get_channel_layout("quad");
@@ -372,7 +374,7 @@ static bool init_streams(struct ffmpeg_mux *ffm)
if (ffm->params.tracks) {
ffm->audio_streams =
calloc(1, ffm->params.tracks * sizeof(void*));
calloc(1, ffm->params.tracks * sizeof(void *));
for (int i = 0; i < ffm->params.tracks; i++)
create_audio_stream(ffm, i);
@@ -392,20 +394,20 @@ static void set_header(struct header *header, uint8_t *data, size_t size)
}
static void ffmpeg_mux_header(struct ffmpeg_mux *ffm, uint8_t *data,
struct ffm_packet_info *info)
struct ffm_packet_info *info)
{
if (info->type == FFM_PACKET_VIDEO) {
set_header(&ffm->video_header, data, (size_t)info->size);
} else {
set_header(&ffm->audio_header[info->index], data,
(size_t)info->size);
(size_t)info->size);
}
}
static size_t safe_read(void *vdata, size_t size)
{
uint8_t *data = vdata;
size_t total = size;
size_t total = size;
while (size > 0) {
size_t in_size = fread(data, 1, size, stdin);
@@ -470,20 +472,20 @@ static inline int open_output_file(struct ffmpeg_mux *ffm)
AVIO_FLAG_WRITE);
if (ret < 0) {
fprintf(stderr, "Couldn't open '%s', %s",
ffm->params.file, av_err2str(ret));
ffm->params.file, av_err2str(ret));
return FFM_ERROR;
}
}
strncpy(ffm->output->filename, ffm->params.file,
sizeof(ffm->output->filename));
sizeof(ffm->output->filename));
ffm->output->filename[sizeof(ffm->output->filename) - 1] = 0;
AVDictionary *dict = NULL;
if ((ret = av_dict_parse_string(&dict, ffm->params.muxer_settings,
"=", " ", 0))) {
if ((ret = av_dict_parse_string(&dict, ffm->params.muxer_settings, "=",
" ", 0))) {
fprintf(stderr, "Failed to parse muxer settings: %s\n%s",
av_err2str(ret), ffm->params.muxer_settings);
av_err2str(ret), ffm->params.muxer_settings);
av_dict_free(&dict);
}
@@ -493,7 +495,7 @@ static inline int open_output_file(struct ffmpeg_mux *ffm)
AVDictionaryEntry *entry = NULL;
while ((entry = av_dict_get(dict, "", entry,
AV_DICT_IGNORE_SUFFIX)))
AV_DICT_IGNORE_SUFFIX)))
printf("\n\t%s=%s", entry->key, entry->value);
printf("\n");
@@ -501,8 +503,8 @@ static inline int open_output_file(struct ffmpeg_mux *ffm)
ret = avformat_write_header(ffm->output, &dict);
if (ret < 0) {
fprintf(stderr, "Error opening '%s': %s",
ffm->params.file, av_err2str(ret));
fprintf(stderr, "Error opening '%s': %s", ffm->params.file,
av_err2str(ret));
av_dict_free(&dict);
@@ -522,15 +524,15 @@ static int ffmpeg_mux_init_context(struct ffmpeg_mux *ffm)
output_format = av_guess_format(NULL, ffm->params.file, NULL);
if (output_format == NULL) {
fprintf(stderr, "Couldn't find an appropriate muxer for '%s'\n",
ffm->params.file);
ffm->params.file);
return FFM_ERROR;
}
ret = avformat_alloc_output_context2(&ffm->output, output_format,
NULL, NULL);
ret = avformat_alloc_output_context2(&ffm->output, output_format, NULL,
NULL);
if (ret < 0) {
fprintf(stderr, "Couldn't initialize output context: %s\n",
av_err2str(ret));
av_err2str(ret));
return FFM_ERROR;
}
@@ -552,7 +554,7 @@ static int ffmpeg_mux_init_context(struct ffmpeg_mux *ffm)
}
static int ffmpeg_mux_init_internal(struct ffmpeg_mux *ffm, int argc,
char *argv[])
char *argv[])
{
argc--;
argv++;
@@ -587,7 +589,7 @@ static int ffmpeg_mux_init(struct ffmpeg_mux *ffm, int argc, char *argv[])
}
static inline int get_index(struct ffmpeg_mux *ffm,
struct ffm_packet_info *info)
struct ffm_packet_info *info)
{
if (info->type == FFM_PACKET_VIDEO) {
if (ffm->video_stream) {
@@ -612,12 +614,12 @@ static inline int64_t rescale_ts(struct ffmpeg_mux *ffm, int64_t val, int idx)
AVStream *stream = get_stream(ffm, idx);
return av_rescale_q_rnd(val / stream->codec->time_base.num,
stream->codec->time_base, stream->time_base,
AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX);
stream->codec->time_base, stream->time_base,
AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX);
}
static inline bool ffmpeg_mux_packet(struct ffmpeg_mux *ffm, uint8_t *buf,
struct ffm_packet_info *info)
struct ffm_packet_info *info)
{
int idx = get_index(ffm, info);
AVPacket packet = {0};
@@ -627,7 +629,7 @@ static inline bool ffmpeg_mux_packet(struct ffmpeg_mux *ffm, uint8_t *buf,
return true;
}
av_init_packet(&packet);
av_init_packet(&packet);
packet.data = buf;
packet.size = (int)info->size;
@@ -660,16 +662,16 @@ int main(int argc, char *argv[])
SetErrorMode(SEM_FAILCRITICALERRORS);
argv = malloc(argc * sizeof(char*));
argv = malloc(argc * sizeof(char *));
for (int i = 0; i < argc; i++) {
size_t len = wcslen(argv_w[i]);
int size;
size = WideCharToMultiByte(CP_UTF8, 0, argv_w[i], (int)len,
NULL, 0, NULL, NULL);
NULL, 0, NULL, NULL);
argv[i] = malloc(size + 1);
WideCharToMultiByte(CP_UTF8, 0, argv_w[i], (int)len, argv[i],
size + 1, NULL, NULL);
size + 1, NULL, NULL);
argv[i][size] = 0;
}


@@ -20,18 +20,18 @@
enum ffm_packet_type {
FFM_PACKET_VIDEO,
FFM_PACKET_AUDIO
FFM_PACKET_AUDIO,
};
#define FFM_SUCCESS 0
#define FFM_ERROR -1
#define FFM_SUCCESS 0
#define FFM_ERROR -1
#define FFM_UNSUPPORTED -2
struct ffm_packet_info {
int64_t pts;
int64_t dts;
uint32_t size;
uint32_t index;
int64_t pts;
int64_t dts;
uint32_t size;
uint32_t index;
enum ffm_packet_type type;
bool keyframe;
bool keyframe;
};


@@ -7,16 +7,16 @@ static pthread_mutex_t init_mutex = PTHREAD_MUTEX_INITIALIZER;
NV_ENCODE_API_FUNCTION_LIST nv = {NV_ENCODE_API_FUNCTION_LIST_VER};
NV_CREATE_INSTANCE_FUNC nv_create_instance = NULL;
#define error(format, ...) \
blog(LOG_ERROR, "[jim-nvenc] " format, ##__VA_ARGS__)
#define error(format, ...) blog(LOG_ERROR, "[jim-nvenc] " format, ##__VA_ARGS__)
static inline bool nv_failed(NVENCSTATUS err, const char *func, const char *call)
static inline bool nv_failed(NVENCSTATUS err, const char *func,
const char *call)
{
if (err == NV_ENC_SUCCESS)
return false;
error("%s: %s failed: %d (%s)", func, call, (int)err,
nv_error_name(err));
nv_error_name(err));
return true;
}
@@ -24,7 +24,7 @@ static inline bool nv_failed(NVENCSTATUS err, const char *func, const char *call
bool load_nvenc_lib(void)
{
if (sizeof(void*) == 8) {
if (sizeof(void *) == 8) {
nvenc_lib = os_dlopen("nvEncodeAPI64.dll");
} else {
nvenc_lib = os_dlopen("nvEncodeAPI.dll");
@@ -42,40 +42,41 @@ static void *load_nv_func(const char *func)
return func_ptr;
}
typedef NVENCSTATUS (NVENCAPI *NV_MAX_VER_FUNC)(uint32_t*);
typedef NVENCSTATUS(NVENCAPI *NV_MAX_VER_FUNC)(uint32_t *);
const char *nv_error_name(NVENCSTATUS err)
{
#define RETURN_CASE(x) \
case x: return #x
case x: \
return #x
switch (err) {
RETURN_CASE(NV_ENC_SUCCESS);
RETURN_CASE(NV_ENC_ERR_NO_ENCODE_DEVICE);
RETURN_CASE(NV_ENC_ERR_UNSUPPORTED_DEVICE);
RETURN_CASE(NV_ENC_ERR_INVALID_ENCODERDEVICE);
RETURN_CASE(NV_ENC_ERR_INVALID_DEVICE);
RETURN_CASE(NV_ENC_ERR_DEVICE_NOT_EXIST);
RETURN_CASE(NV_ENC_ERR_INVALID_PTR);
RETURN_CASE(NV_ENC_ERR_INVALID_EVENT);
RETURN_CASE(NV_ENC_ERR_INVALID_PARAM);
RETURN_CASE(NV_ENC_ERR_INVALID_CALL);
RETURN_CASE(NV_ENC_ERR_OUT_OF_MEMORY);
RETURN_CASE(NV_ENC_ERR_ENCODER_NOT_INITIALIZED);
RETURN_CASE(NV_ENC_ERR_UNSUPPORTED_PARAM);
RETURN_CASE(NV_ENC_ERR_LOCK_BUSY);
RETURN_CASE(NV_ENC_ERR_NOT_ENOUGH_BUFFER);
RETURN_CASE(NV_ENC_ERR_INVALID_VERSION);
RETURN_CASE(NV_ENC_ERR_MAP_FAILED);
RETURN_CASE(NV_ENC_ERR_NEED_MORE_INPUT);
RETURN_CASE(NV_ENC_ERR_ENCODER_BUSY);
RETURN_CASE(NV_ENC_ERR_EVENT_NOT_REGISTERD);
RETURN_CASE(NV_ENC_ERR_GENERIC);
RETURN_CASE(NV_ENC_ERR_INCOMPATIBLE_CLIENT_KEY);
RETURN_CASE(NV_ENC_ERR_UNIMPLEMENTED);
RETURN_CASE(NV_ENC_ERR_RESOURCE_REGISTER_FAILED);
RETURN_CASE(NV_ENC_ERR_RESOURCE_NOT_REGISTERED);
RETURN_CASE(NV_ENC_ERR_RESOURCE_NOT_MAPPED);
RETURN_CASE(NV_ENC_SUCCESS);
RETURN_CASE(NV_ENC_ERR_NO_ENCODE_DEVICE);
RETURN_CASE(NV_ENC_ERR_UNSUPPORTED_DEVICE);
RETURN_CASE(NV_ENC_ERR_INVALID_ENCODERDEVICE);
RETURN_CASE(NV_ENC_ERR_INVALID_DEVICE);
RETURN_CASE(NV_ENC_ERR_DEVICE_NOT_EXIST);
RETURN_CASE(NV_ENC_ERR_INVALID_PTR);
RETURN_CASE(NV_ENC_ERR_INVALID_EVENT);
RETURN_CASE(NV_ENC_ERR_INVALID_PARAM);
RETURN_CASE(NV_ENC_ERR_INVALID_CALL);
RETURN_CASE(NV_ENC_ERR_OUT_OF_MEMORY);
RETURN_CASE(NV_ENC_ERR_ENCODER_NOT_INITIALIZED);
RETURN_CASE(NV_ENC_ERR_UNSUPPORTED_PARAM);
RETURN_CASE(NV_ENC_ERR_LOCK_BUSY);
RETURN_CASE(NV_ENC_ERR_NOT_ENOUGH_BUFFER);
RETURN_CASE(NV_ENC_ERR_INVALID_VERSION);
RETURN_CASE(NV_ENC_ERR_MAP_FAILED);
RETURN_CASE(NV_ENC_ERR_NEED_MORE_INPUT);
RETURN_CASE(NV_ENC_ERR_ENCODER_BUSY);
RETURN_CASE(NV_ENC_ERR_EVENT_NOT_REGISTERD);
RETURN_CASE(NV_ENC_ERR_GENERIC);
RETURN_CASE(NV_ENC_ERR_INCOMPATIBLE_CLIENT_KEY);
RETURN_CASE(NV_ENC_ERR_UNIMPLEMENTED);
RETURN_CASE(NV_ENC_ERR_RESOURCE_REGISTER_FAILED);
RETURN_CASE(NV_ENC_ERR_RESOURCE_NOT_REGISTERED);
RETURN_CASE(NV_ENC_ERR_RESOURCE_NOT_MAPPED);
}
#undef RETURN_CASE
@@ -91,8 +92,8 @@ static inline bool init_nvenc_internal(void)
return success;
initialized = true;
NV_MAX_VER_FUNC nv_max_ver = (NV_MAX_VER_FUNC)
load_nv_func("NvEncodeAPIGetMaxSupportedVersion");
NV_MAX_VER_FUNC nv_max_ver = (NV_MAX_VER_FUNC)load_nv_func(
"NvEncodeAPIGetMaxSupportedVersion");
if (!nv_max_ver) {
return false;
}
@@ -102,16 +103,16 @@ static inline bool init_nvenc_internal(void)
return false;
}
uint32_t cur_ver =
(NVENCAPI_MAJOR_VERSION << 4) | NVENCAPI_MINOR_VERSION;
uint32_t cur_ver = (NVENCAPI_MAJOR_VERSION << 4) |
NVENCAPI_MINOR_VERSION;
if (cur_ver > ver) {
error("Current driver version does not support this NVENC "
"version, please upgrade your driver");
"version, please upgrade your driver");
return false;
}
nv_create_instance = (NV_CREATE_INSTANCE_FUNC)
load_nv_func("NvEncodeAPICreateInstance");
nv_create_instance = (NV_CREATE_INSTANCE_FUNC)load_nv_func(
"NvEncodeAPICreateInstance");
if (!nv_create_instance) {
return false;
}


@@ -12,17 +12,16 @@
#define EXTRA_BUFFERS 5
#define do_log(level, format, ...) \
#define do_log(level, format, ...) \
blog(level, "[jim-nvenc: '%s'] " format, \
obs_encoder_get_name(enc->encoder), ##__VA_ARGS__)
obs_encoder_get_name(enc->encoder), ##__VA_ARGS__)
#define error(format, ...) do_log(LOG_ERROR, format, ##__VA_ARGS__)
#define warn(format, ...) do_log(LOG_WARNING, format, ##__VA_ARGS__)
#define info(format, ...) do_log(LOG_INFO, format, ##__VA_ARGS__)
#define debug(format, ...) do_log(LOG_DEBUG, format, ##__VA_ARGS__)
#define error(format, ...) do_log(LOG_ERROR, format, ##__VA_ARGS__)
#define warn(format, ...) do_log(LOG_WARNING, format, ##__VA_ARGS__)
#define info(format, ...) do_log(LOG_INFO, format, ##__VA_ARGS__)
#define debug(format, ...) do_log(LOG_DEBUG, format, ##__VA_ARGS__)
#define error_hr(msg) \
error("%s: %s: 0x%08lX", __FUNCTION__, msg, (uint32_t)hr);
#define error_hr(msg) error("%s: %s: 0x%08lX", __FUNCTION__, msg, (uint32_t)hr);
struct nv_bitstream;
struct nv_texture;
@@ -39,57 +38,57 @@ struct handle_tex {
struct nvenc_data {
obs_encoder_t *encoder;
void *session;
void *session;
NV_ENC_INITIALIZE_PARAMS params;
NV_ENC_CONFIG config;
size_t buf_count;
size_t output_delay;
size_t buffers_queued;
size_t next_bitstream;
size_t cur_bitstream;
bool encode_started;
bool first_packet;
bool can_change_bitrate;
bool bframes;
NV_ENC_CONFIG config;
size_t buf_count;
size_t output_delay;
size_t buffers_queued;
size_t next_bitstream;
size_t cur_bitstream;
bool encode_started;
bool first_packet;
bool can_change_bitrate;
bool bframes;
DARRAY(struct nv_bitstream) bitstreams;
DARRAY(struct nv_texture) textures;
DARRAY(struct handle_tex) input_textures;
struct circlebuf dts_list;
DARRAY(struct nv_texture) textures;
DARRAY(struct handle_tex) input_textures;
struct circlebuf dts_list;
DARRAY(uint8_t) packet_data;
int64_t packet_pts;
bool packet_keyframe;
int64_t packet_pts;
bool packet_keyframe;
ID3D11Device *device;
ID3D11Device *device;
ID3D11DeviceContext *context;
uint32_t cx;
uint32_t cy;
uint8_t *header;
size_t header_size;
size_t header_size;
uint8_t *sei;
size_t sei_size;
size_t sei_size;
};
/* ------------------------------------------------------------------------- */
/* Bitstream Buffer */
struct nv_bitstream {
void *ptr;
void *ptr;
HANDLE event;
};
static inline bool nv_failed(struct nvenc_data *enc, NVENCSTATUS err,
const char *func, const char *call)
const char *func, const char *call)
{
if (err == NV_ENC_SUCCESS)
return false;
error("%s: %s failed: %d (%s)", func, call, (int)err,
nv_error_name(err));
nv_error_name(err));
return true;
}
@@ -97,7 +96,8 @@ static inline bool nv_failed(struct nvenc_data *enc, NVENCSTATUS err,
static bool nv_bitstream_init(struct nvenc_data *enc, struct nv_bitstream *bs)
{
NV_ENC_CREATE_BITSTREAM_BUFFER buf = {NV_ENC_CREATE_BITSTREAM_BUFFER_VER};
NV_ENC_CREATE_BITSTREAM_BUFFER buf = {
NV_ENC_CREATE_BITSTREAM_BUFFER_VER};
NV_ENC_EVENT_PARAMS params = {NV_ENC_EVENT_PARAMS_VER};
HANDLE event = NULL;
@@ -126,7 +126,7 @@ fail:
}
if (buf.bitstreamBuffer) {
nv.nvEncDestroyBitstreamBuffer(enc->session,
buf.bitstreamBuffer);
buf.bitstreamBuffer);
}
return false;
}
@@ -147,9 +147,9 @@ static void nv_bitstream_free(struct nvenc_data *enc, struct nv_bitstream *bs)
/* Texture Resource */
struct nv_texture {
void *res;
void *res;
ID3D11Texture2D *tex;
void *mapped_res;
void *mapped_res;
};
static bool nv_texture_init(struct nvenc_data *enc, struct nv_texture *nvtex)
@@ -159,13 +159,13 @@ static bool nv_texture_init(struct nvenc_data *enc, struct nv_texture *nvtex)
HRESULT hr;
D3D11_TEXTURE2D_DESC desc = {0};
desc.Width = enc->cx;
desc.Height = enc->cy;
desc.MipLevels = 1;
desc.ArraySize = 1;
desc.Format = DXGI_FORMAT_NV12;
desc.SampleDesc.Count = 1;
desc.BindFlags = D3D11_BIND_RENDER_TARGET;
desc.Width = enc->cx;
desc.Height = enc->cy;
desc.MipLevels = 1;
desc.ArraySize = 1;
desc.Format = DXGI_FORMAT_NV12;
desc.SampleDesc.Count = 1;
desc.BindFlags = D3D11_BIND_RENDER_TARGET;
hr = device->lpVtbl->CreateTexture2D(device, &desc, NULL, &tex);
if (FAILED(hr)) {
@@ -176,11 +176,11 @@ static bool nv_texture_init(struct nvenc_data *enc, struct nv_texture *nvtex)
tex->lpVtbl->SetEvictionPriority(tex, DXGI_RESOURCE_PRIORITY_MAXIMUM);
NV_ENC_REGISTER_RESOURCE res = {NV_ENC_REGISTER_RESOURCE_VER};
res.resourceType = NV_ENC_INPUT_RESOURCE_TYPE_DIRECTX;
res.resourceToRegister = tex;
res.width = enc->cx;
res.height = enc->cy;
res.bufferFormat = NV_ENC_BUFFER_FORMAT_NV12;
res.resourceType = NV_ENC_INPUT_RESOURCE_TYPE_DIRECTX;
res.resourceToRegister = tex;
res.width = enc->cx;
res.height = enc->cy;
res.bufferFormat = NV_ENC_BUFFER_FORMAT_NV12;
if (NV_FAILED(nv.nvEncRegisterResource(enc->session, &res))) {
tex->lpVtbl->Release(tex);
@@ -197,7 +197,7 @@ static void nv_texture_free(struct nvenc_data *enc, struct nv_texture *nvtex)
if (nvtex->res) {
if (nvtex->mapped_res) {
nv.nvEncUnmapInputResource(enc->session,
nvtex->mapped_res);
nvtex->mapped_res);
}
nv.nvEncUnregisterResource(enc->session, nvtex->res);
nvtex->tex->lpVtbl->Release(nvtex->tex);
@@ -235,11 +235,11 @@ static bool nvenc_update(void *data, obs_data_t *settings)
int bitrate = (int)obs_data_get_int(settings, "bitrate");
enc->config.rcParams.averageBitRate = bitrate * 1000;
enc->config.rcParams.maxBitRate = bitrate * 1000;
enc->config.rcParams.maxBitRate = bitrate * 1000;
NV_ENC_RECONFIGURE_PARAMS params = {0};
params.version = NV_ENC_RECONFIGURE_PARAMS_VER;
params.reInitEncodeParams = enc->params;
params.version = NV_ENC_RECONFIGURE_PARAMS_VER;
params.reInitEncodeParams = enc->params;
if (FAILED(nv.nvEncReconfigureEncoder(enc->session, &params))) {
return false;
@@ -261,28 +261,28 @@ static HANDLE get_lib(struct nvenc_data *enc, const char *lib)
return mod;
}
typedef HRESULT (WINAPI *CREATEDXGIFACTORY1PROC)(REFIID, void **);
typedef HRESULT(WINAPI *CREATEDXGIFACTORY1PROC)(REFIID, void **);
static bool init_d3d11(struct nvenc_data *enc, obs_data_t *settings)
{
HMODULE dxgi = get_lib(enc, "DXGI.dll");
HMODULE d3d11 = get_lib(enc, "D3D11.dll");
CREATEDXGIFACTORY1PROC create_dxgi;
HMODULE dxgi = get_lib(enc, "DXGI.dll");
HMODULE d3d11 = get_lib(enc, "D3D11.dll");
CREATEDXGIFACTORY1PROC create_dxgi;
PFN_D3D11_CREATE_DEVICE create_device;
IDXGIFactory1 *factory;
IDXGIAdapter *adapter;
ID3D11Device *device;
ID3D11DeviceContext *context;
HRESULT hr;
IDXGIFactory1 *factory;
IDXGIAdapter *adapter;
ID3D11Device *device;
ID3D11DeviceContext *context;
HRESULT hr;
if (!dxgi || !d3d11) {
return false;
}
create_dxgi = (CREATEDXGIFACTORY1PROC)GetProcAddress(dxgi,
"CreateDXGIFactory1");
create_device = (PFN_D3D11_CREATE_DEVICE)GetProcAddress(d3d11,
"D3D11CreateDevice");
create_dxgi = (CREATEDXGIFACTORY1PROC)GetProcAddress(
dxgi, "CreateDXGIFactory1");
create_device = (PFN_D3D11_CREATE_DEVICE)GetProcAddress(
d3d11, "D3D11CreateDevice");
if (!create_dxgi || !create_device) {
error("Failed to load D3D11/DXGI procedures");
@@ -302,8 +302,8 @@ static bool init_d3d11(struct nvenc_data *enc, obs_data_t *settings)
return false;
}
hr = create_device(adapter, D3D_DRIVER_TYPE_UNKNOWN, NULL, 0,
NULL, 0, D3D11_SDK_VERSION, &device, NULL, &context);
hr = create_device(adapter, D3D_DRIVER_TYPE_UNKNOWN, NULL, 0, NULL, 0,
D3D11_SDK_VERSION, &device, NULL, &context);
adapter->lpVtbl->Release(adapter);
if (FAILED(hr)) {
error_hr("D3D11CreateDevice failed");
@@ -317,8 +317,8 @@ static bool init_d3d11(struct nvenc_data *enc, obs_data_t *settings)
static bool init_session(struct nvenc_data *enc)
{
NV_ENC_OPEN_ENCODE_SESSION_EX_PARAMS params =
{NV_ENC_OPEN_ENCODE_SESSION_EX_PARAMS_VER};
NV_ENC_OPEN_ENCODE_SESSION_EX_PARAMS params = {
NV_ENC_OPEN_ENCODE_SESSION_EX_PARAMS_VER};
params.device = enc->device;
params.deviceType = NV_ENC_DEVICE_TYPE_DIRECTX;
params.apiVersion = NVENCAPI_VERSION;
@@ -384,19 +384,19 @@ static bool init_encoder(struct nvenc_data *enc, obs_data_t *settings)
}
if (astrcmpi(rc, "lossless") == 0) {
nv_preset = hp
? NV_ENC_PRESET_LOSSLESS_HP_GUID
: NV_ENC_PRESET_LOSSLESS_DEFAULT_GUID;
nv_preset = hp ? NV_ENC_PRESET_LOSSLESS_HP_GUID
: NV_ENC_PRESET_LOSSLESS_DEFAULT_GUID;
}
/* -------------------------- */
/* get preset default config */
NV_ENC_PRESET_CONFIG preset_config =
{NV_ENC_PRESET_CONFIG_VER, {NV_ENC_CONFIG_VER}};
NV_ENC_PRESET_CONFIG preset_config = {NV_ENC_PRESET_CONFIG_VER,
{NV_ENC_CONFIG_VER}};
err = nv.nvEncGetEncodePresetConfig(enc->session,
NV_ENC_CODEC_H264_GUID, nv_preset, &preset_config);
NV_ENC_CODEC_H264_GUID, nv_preset,
&preset_config);
if (nv_failed(enc, err, __FUNCTION__, "nvEncGetEncodePresetConfig")) {
return false;
}
@@ -406,9 +406,8 @@ static bool init_encoder(struct nvenc_data *enc, obs_data_t *settings)
enc->config = preset_config.presetCfg;
uint32_t gop_size = (keyint_sec)
? keyint_sec * voi->fps_num / voi->fps_den
: 250;
uint32_t gop_size =
(keyint_sec) ? keyint_sec * voi->fps_num / voi->fps_den : 250;
NV_ENC_INITIALIZE_PARAMS *params = &enc->params;
NV_ENC_CONFIG *config = &enc->config;
@@ -461,9 +460,8 @@ static bool init_encoder(struct nvenc_data *enc, obs_data_t *settings)
enc->can_change_bitrate =
nv_get_cap(enc, NV_ENC_CAPS_SUPPORT_DYN_BITRATE_CHANGE);
config->rcParams.rateControlMode = twopass
? NV_ENC_PARAMS_RC_VBR_HQ
: NV_ENC_PARAMS_RC_VBR;
config->rcParams.rateControlMode = twopass ? NV_ENC_PARAMS_RC_VBR_HQ
: NV_ENC_PARAMS_RC_VBR;
if (astrcmpi(rc, "cqp") == 0 || astrcmpi(rc, "lossless") == 0) {
if (astrcmpi(rc, "lossless") == 0)
@@ -480,9 +478,9 @@ static bool init_encoder(struct nvenc_data *enc, obs_data_t *settings)
} else if (astrcmpi(rc, "vbr") != 0) { /* CBR by default */
h264_config->outputBufferingPeriodSEI = 1;
config->rcParams.rateControlMode = twopass
? NV_ENC_PARAMS_RC_2_PASS_QUALITY
: NV_ENC_PARAMS_RC_CBR;
config->rcParams.rateControlMode =
twopass ? NV_ENC_PARAMS_RC_2_PASS_QUALITY
: NV_ENC_PARAMS_RC_CBR;
}
h264_config->outputPictureTimingSEI = 1;
@@ -508,7 +506,7 @@ static bool init_encoder(struct nvenc_data *enc, obs_data_t *settings)
}
enc->buf_count = config->frameIntervalP +
config->rcParams.lookaheadDepth + EXTRA_BUFFERS;
config->rcParams.lookaheadDepth + EXTRA_BUFFERS;
enc->output_delay = enc->buf_count - 1;
info("settings:\n"
@@ -524,12 +522,8 @@ static bool init_encoder(struct nvenc_data *enc, obs_data_t *settings)
"\tb-frames: %d\n"
"\tlookahead: %s\n"
"\tpsycho_aq: %s\n",
rc, bitrate, cqp, gop_size,
preset, profile,
enc->cx, enc->cy,
twopass ? "true" : "false",
bf,
lookahead ? "true" : "false",
rc, bitrate, cqp, gop_size, preset, profile, enc->cx, enc->cy,
twopass ? "true" : "false", bf, lookahead ? "true" : "false",
psycho_aq ? "true" : "false");
return true;
@@ -662,12 +656,13 @@ static void nvenc_destroy(void *data)
}
static ID3D11Texture2D *get_tex_from_handle(struct nvenc_data *enc,
uint32_t handle, IDXGIKeyedMutex **km_out)
uint32_t handle,
IDXGIKeyedMutex **km_out)
{
ID3D11Device *device = enc->device;
ID3D11Device *device = enc->device;
IDXGIKeyedMutex *km;
ID3D11Texture2D *input_tex;
HRESULT hr;
HRESULT hr;
for (size_t i = 0; i < enc->input_textures.num; i++) {
struct handle_tex *ht = &enc->input_textures.array[i];
@@ -678,15 +673,16 @@ static ID3D11Texture2D *get_tex_from_handle(struct nvenc_data *enc,
}
hr = device->lpVtbl->OpenSharedResource(device,
(HANDLE)(uintptr_t)handle,
&IID_ID3D11Texture2D, &input_tex);
(HANDLE)(uintptr_t)handle,
&IID_ID3D11Texture2D,
&input_tex);
if (FAILED(hr)) {
error_hr("OpenSharedResource failed");
return NULL;
}
hr = input_tex->lpVtbl->QueryInterface(input_tex, &IID_IDXGIKeyedMutex,
&km);
&km);
if (FAILED(hr)) {
error_hr("QueryInterface(IDXGIKeyedMutex) failed");
input_tex->lpVtbl->Release(input_tex);
@@ -694,7 +690,7 @@ static ID3D11Texture2D *get_tex_from_handle(struct nvenc_data *enc,
}
input_tex->lpVtbl->SetEvictionPriority(input_tex,
DXGI_RESOURCE_PRIORITY_MAXIMUM);
DXGI_RESOURCE_PRIORITY_MAXIMUM);
*km_out = km;
@@ -717,15 +713,15 @@ static bool get_encoded_packet(struct nvenc_data *enc, bool finalize)
size_t count = finalize ? enc->buffers_queued : 1;
for (size_t i = 0; i < count; i++) {
size_t cur_bs_idx = enc->cur_bitstream;
struct nv_bitstream *bs = &enc->bitstreams.array[cur_bs_idx];
struct nv_texture *nvtex = &enc->textures.array[cur_bs_idx];
size_t cur_bs_idx = enc->cur_bitstream;
struct nv_bitstream *bs = &enc->bitstreams.array[cur_bs_idx];
struct nv_texture *nvtex = &enc->textures.array[cur_bs_idx];
/* ---------------- */
NV_ENC_LOCK_BITSTREAM lock = {NV_ENC_LOCK_BITSTREAM_VER};
lock.outputBitstream = bs->ptr;
lock.doNotWait = false;
lock.outputBitstream = bs->ptr;
lock.doNotWait = false;
if (NV_FAILED(nv.nvEncLockBitstream(s, &lock))) {
return false;
@@ -736,19 +732,17 @@ static bool get_encoded_packet(struct nvenc_data *enc, bool finalize)
size_t size;
enc->first_packet = false;
obs_extract_avc_headers(
lock.bitstreamBufferPtr,
lock.bitstreamSizeInBytes,
&new_packet, &size,
&enc->header, &enc->header_size,
&enc->sei, &enc->sei_size);
obs_extract_avc_headers(lock.bitstreamBufferPtr,
lock.bitstreamSizeInBytes,
&new_packet, &size,
&enc->header, &enc->header_size,
&enc->sei, &enc->sei_size);
da_copy_array(enc->packet_data, new_packet, size);
bfree(new_packet);
} else {
da_copy_array(enc->packet_data,
lock.bitstreamBufferPtr,
lock.bitstreamSizeInBytes);
da_copy_array(enc->packet_data, lock.bitstreamBufferPtr,
lock.bitstreamSizeInBytes);
}
enc->packet_pts = (int64_t)lock.outputTimeStamp;
@@ -781,18 +775,19 @@ static bool get_encoded_packet(struct nvenc_data *enc, bool finalize)
}
static bool nvenc_encode_tex(void *data, uint32_t handle, int64_t pts,
uint64_t lock_key, uint64_t *next_key,
struct encoder_packet *packet, bool *received_packet)
uint64_t lock_key, uint64_t *next_key,
struct encoder_packet *packet,
bool *received_packet)
{
struct nvenc_data *enc = data;
ID3D11Device *device = enc->device;
struct nvenc_data *enc = data;
ID3D11Device *device = enc->device;
ID3D11DeviceContext *context = enc->context;
ID3D11Texture2D *input_tex;
ID3D11Texture2D *output_tex;
IDXGIKeyedMutex *km;
struct nv_texture *nvtex;
ID3D11Texture2D *input_tex;
ID3D11Texture2D *output_tex;
IDXGIKeyedMutex *km;
struct nv_texture *nvtex;
struct nv_bitstream *bs;
NVENCSTATUS err;
NVENCSTATUS err;
if (handle == GS_INVALID_HANDLE) {
error("Encode failed: bad texture handle");
@@ -800,10 +795,10 @@ static bool nvenc_encode_tex(void *data, uint32_t handle, int64_t pts,
return false;
}
bs = &enc->bitstreams.array[enc->next_bitstream];
bs = &enc->bitstreams.array[enc->next_bitstream];
nvtex = &enc->textures.array[enc->next_bitstream];
input_tex = get_tex_from_handle(enc, handle, &km);
input_tex = get_tex_from_handle(enc, handle, &km);
output_tex = nvtex->tex;
if (!input_tex) {
@@ -823,9 +818,8 @@ static bool nvenc_encode_tex(void *data, uint32_t handle, int64_t pts,
km->lpVtbl->AcquireSync(km, lock_key, INFINITE);
context->lpVtbl->CopyResource(context,
(ID3D11Resource *)output_tex,
(ID3D11Resource *)input_tex);
context->lpVtbl->CopyResource(context, (ID3D11Resource *)output_tex,
(ID3D11Resource *)input_tex);
km->lpVtbl->ReleaseSync(km, *next_key);
@@ -833,7 +827,7 @@ static bool nvenc_encode_tex(void *data, uint32_t handle, int64_t pts,
/* map output tex so nvenc can use it */
NV_ENC_MAP_INPUT_RESOURCE map = {NV_ENC_MAP_INPUT_RESOURCE_VER};
map.registeredResource = nvtex->res;
map.registeredResource = nvtex->res;
if (NV_FAILED(nv.nvEncMapInputResource(enc->session, &map))) {
return false;
}
@@ -844,15 +838,15 @@ static bool nvenc_encode_tex(void *data, uint32_t handle, int64_t pts,
/* do actual encode call */
NV_ENC_PIC_PARAMS params = {0};
params.version = NV_ENC_PIC_PARAMS_VER;
params.pictureStruct = NV_ENC_PIC_STRUCT_FRAME;
params.inputBuffer = nvtex->mapped_res;
params.bufferFmt = NV_ENC_BUFFER_FORMAT_NV12;
params.inputTimeStamp = (uint64_t)pts;
params.inputWidth = enc->cx;
params.inputHeight = enc->cy;
params.outputBitstream = bs->ptr;
params.completionEvent = bs->event;
params.version = NV_ENC_PIC_PARAMS_VER;
params.pictureStruct = NV_ENC_PIC_STRUCT_FRAME;
params.inputBuffer = nvtex->mapped_res;
params.bufferFmt = NV_ENC_BUFFER_FORMAT_NV12;
params.inputTimeStamp = (uint64_t)pts;
params.inputWidth = enc->cx;
params.inputHeight = enc->cy;
params.outputBitstream = bs->ptr;
params.completionEvent = bs->event;
err = nv.nvEncEncodePicture(enc->session, &params);
if (err != NV_ENC_SUCCESS && err != NV_ENC_ERR_NEED_MORE_INPUT) {
@@ -886,11 +880,11 @@ static bool nvenc_encode_tex(void *data, uint32_t handle, int64_t pts,
dts -= packet->timebase_num;
*received_packet = true;
packet->data = enc->packet_data.array;
packet->size = enc->packet_data.num;
packet->type = OBS_ENCODER_VIDEO;
packet->pts = enc->packet_pts;
packet->dts = dts;
packet->data = enc->packet_data.array;
packet->size = enc->packet_data.num;
packet->type = OBS_ENCODER_VIDEO;
packet->pts = enc->packet_pts;
packet->dts = dts;
packet->keyframe = enc->packet_keyframe;
} else {
*received_packet = false;
@@ -911,7 +905,7 @@ static bool nvenc_extra_data(void *data, uint8_t **header, size_t *size)
}
*header = enc->header;
*size = enc->header_size;
*size = enc->header_size;
return true;
}
@@ -923,23 +917,23 @@ static bool nvenc_sei_data(void *data, uint8_t **sei, size_t *size)
return false;
}
*sei = enc->sei;
*sei = enc->sei;
*size = enc->sei_size;
return true;
}
struct obs_encoder_info nvenc_info = {
.id = "jim_nvenc",
.codec = "h264",
.type = OBS_ENCODER_VIDEO,
.caps = OBS_ENCODER_CAP_PASS_TEXTURE,
.get_name = nvenc_get_name,
.create = nvenc_create,
.destroy = nvenc_destroy,
.update = nvenc_update,
.encode_texture = nvenc_encode_tex,
.get_defaults = nvenc_defaults,
.get_properties = nvenc_properties,
.get_extra_data = nvenc_extra_data,
.get_sei_data = nvenc_sei_data,
.id = "jim_nvenc",
.codec = "h264",
.type = OBS_ENCODER_VIDEO,
.caps = OBS_ENCODER_CAP_PASS_TEXTURE,
.get_name = nvenc_get_name,
.create = nvenc_create,
.destroy = nvenc_destroy,
.update = nvenc_update,
.encode_texture = nvenc_encode_tex,
.get_defaults = nvenc_defaults,
.get_properties = nvenc_properties,
.get_extra_data = nvenc_extra_data,
.get_sei_data = nvenc_sei_data,
};


@@ -6,7 +6,8 @@
#include <obs-module.h>
#include "nvEncodeAPI.h"
typedef NVENCSTATUS (NVENCAPI *NV_CREATE_INSTANCE_FUNC)(NV_ENCODE_API_FUNCTION_LIST*);
typedef NVENCSTATUS(NVENCAPI *NV_CREATE_INSTANCE_FUNC)(
NV_ENCODE_API_FUNCTION_LIST *);
extern const char *nv_error_name(NVENCSTATUS err);
extern NV_ENCODE_API_FUNCTION_LIST nv;


@@ -26,68 +26,82 @@
#include "obs-ffmpeg-formats.h"
#include "obs-ffmpeg-compat.h"
#define do_log(level, format, ...) \
blog(level, "[FFmpeg %s encoder: '%s'] " format, \
enc->type, \
obs_encoder_get_name(enc->encoder), \
##__VA_ARGS__)
#define do_log(level, format, ...) \
blog(level, "[FFmpeg %s encoder: '%s'] " format, enc->type, \
obs_encoder_get_name(enc->encoder), ##__VA_ARGS__)
#define warn(format, ...) do_log(LOG_WARNING, format, ##__VA_ARGS__)
#define info(format, ...) do_log(LOG_INFO, format, ##__VA_ARGS__)
#define debug(format, ...) do_log(LOG_DEBUG, format, ##__VA_ARGS__)
#define warn(format, ...) do_log(LOG_WARNING, format, ##__VA_ARGS__)
#define info(format, ...) do_log(LOG_INFO, format, ##__VA_ARGS__)
#define debug(format, ...) do_log(LOG_DEBUG, format, ##__VA_ARGS__)
struct enc_encoder {
obs_encoder_t *encoder;
obs_encoder_t *encoder;
const char *type;
const char *type;
AVCodec *codec;
AVCodecContext *context;
AVCodec *codec;
AVCodecContext *context;
uint8_t *samples[MAX_AV_PLANES];
AVFrame *aframe;
int64_t total_samples;
uint8_t *samples[MAX_AV_PLANES];
AVFrame *aframe;
int64_t total_samples;
DARRAY(uint8_t) packet_buffer;
DARRAY(uint8_t) packet_buffer;
size_t audio_planes;
size_t audio_size;
size_t audio_planes;
size_t audio_size;
int frame_size; /* pretty much always 1024 for AAC */
int frame_size_bytes;
int frame_size; /* pretty much always 1024 for AAC */
int frame_size_bytes;
};
static inline uint64_t convert_speaker_layout(enum speaker_layout layout)
{
switch (layout) {
case SPEAKERS_UNKNOWN: return 0;
case SPEAKERS_MONO: return AV_CH_LAYOUT_MONO;
case SPEAKERS_STEREO: return AV_CH_LAYOUT_STEREO;
case SPEAKERS_2POINT1: return AV_CH_LAYOUT_SURROUND;
case SPEAKERS_4POINT0: return AV_CH_LAYOUT_4POINT0;
case SPEAKERS_4POINT1: return AV_CH_LAYOUT_4POINT1;
case SPEAKERS_5POINT1: return AV_CH_LAYOUT_5POINT1_BACK;
case SPEAKERS_7POINT1: return AV_CH_LAYOUT_7POINT1;
case SPEAKERS_UNKNOWN:
return 0;
case SPEAKERS_MONO:
return AV_CH_LAYOUT_MONO;
case SPEAKERS_STEREO:
return AV_CH_LAYOUT_STEREO;
case SPEAKERS_2POINT1:
return AV_CH_LAYOUT_SURROUND;
case SPEAKERS_4POINT0:
return AV_CH_LAYOUT_4POINT0;
case SPEAKERS_4POINT1:
return AV_CH_LAYOUT_4POINT1;
case SPEAKERS_5POINT1:
return AV_CH_LAYOUT_5POINT1_BACK;
case SPEAKERS_7POINT1:
return AV_CH_LAYOUT_7POINT1;
}
/* shouldn't get here */
return 0;
}
static inline enum speaker_layout convert_ff_channel_layout(uint64_t channel_layout)
static inline enum speaker_layout
convert_ff_channel_layout(uint64_t channel_layout)
{
switch (channel_layout) {
case AV_CH_LAYOUT_MONO: return SPEAKERS_MONO;
case AV_CH_LAYOUT_STEREO: return SPEAKERS_STEREO;
case AV_CH_LAYOUT_SURROUND: return SPEAKERS_2POINT1;
case AV_CH_LAYOUT_4POINT0: return SPEAKERS_4POINT0;
case AV_CH_LAYOUT_4POINT1: return SPEAKERS_4POINT1;
case AV_CH_LAYOUT_5POINT1_BACK: return SPEAKERS_5POINT1;
case AV_CH_LAYOUT_7POINT1: return SPEAKERS_7POINT1;
case AV_CH_LAYOUT_MONO:
return SPEAKERS_MONO;
case AV_CH_LAYOUT_STEREO:
return SPEAKERS_STEREO;
case AV_CH_LAYOUT_SURROUND:
return SPEAKERS_2POINT1;
case AV_CH_LAYOUT_4POINT0:
return SPEAKERS_4POINT0;
case AV_CH_LAYOUT_4POINT1:
return SPEAKERS_4POINT1;
case AV_CH_LAYOUT_5POINT1_BACK:
return SPEAKERS_5POINT1;
case AV_CH_LAYOUT_7POINT1:
return SPEAKERS_7POINT1;
}
/* shouldn't get here */
return SPEAKERS_UNKNOWN;
return SPEAKERS_UNKNOWN;
}
static const char *aac_getname(void *unused)
@@ -121,7 +135,7 @@ static bool initialize_codec(struct enc_encoder *enc)
{
int ret;
enc->aframe = av_frame_alloc();
enc->aframe = av_frame_alloc();
if (!enc->aframe) {
warn("Failed to allocate audio frame");
return false;
@@ -144,7 +158,7 @@ static bool initialize_codec(struct enc_encoder *enc)
enc->frame_size_bytes = enc->frame_size * (int)enc->audio_size;
ret = av_samples_alloc(enc->samples, NULL, enc->context->channels,
enc->frame_size, enc->context->sample_fmt, 0);
enc->frame_size, enc->context->sample_fmt, 0);
if (ret < 0) {
warn("Failed to create audio buffer: %s", av_err2str(ret));
return false;
@@ -158,11 +172,11 @@ static void init_sizes(struct enc_encoder *enc, audio_t *audio)
const struct audio_output_info *aoi;
enum audio_format format;
aoi = audio_output_get_info(audio);
aoi = audio_output_get_info(audio);
format = convert_ffmpeg_sample_format(enc->context->sample_fmt);
enc->audio_planes = get_audio_planes(format, aoi->speakers);
enc->audio_size = get_audio_size(format, aoi->speakers, 1);
enc->audio_size = get_audio_size(format, aoi->speakers, 1);
}
#ifndef MIN
@@ -170,22 +184,22 @@ static void init_sizes(struct enc_encoder *enc, audio_t *audio)
#endif
static void *enc_create(obs_data_t *settings, obs_encoder_t *encoder,
const char *type, const char *alt)
const char *type, const char *alt)
{
struct enc_encoder *enc;
int bitrate = (int)obs_data_get_int(settings, "bitrate");
audio_t *audio = obs_encoder_audio(encoder);
int bitrate = (int)obs_data_get_int(settings, "bitrate");
audio_t *audio = obs_encoder_audio(encoder);
avcodec_register_all();
enc = bzalloc(sizeof(struct enc_encoder));
enc = bzalloc(sizeof(struct enc_encoder));
enc->encoder = encoder;
enc->codec = avcodec_find_encoder_by_name(type);
enc->type = type;
enc->codec = avcodec_find_encoder_by_name(type);
enc->type = type;
if (!enc->codec && alt) {
enc->codec = avcodec_find_encoder_by_name(alt);
enc->type = alt;
enc->type = alt;
}
blog(LOG_INFO, "---------------------------------");
@@ -206,14 +220,15 @@ static void *enc_create(obs_data_t *settings, obs_encoder_t *encoder,
goto fail;
}
enc->context->bit_rate = bitrate * 1000;
enc->context->bit_rate = bitrate * 1000;
const struct audio_output_info *aoi;
aoi = audio_output_get_info(audio);
enc->context->channels = (int)audio_output_get_channels(audio);
enc->context->channels = (int)audio_output_get_channels(audio);
enc->context->channel_layout = convert_speaker_layout(aoi->speakers);
enc->context->sample_rate = audio_output_get_sample_rate(audio);
enc->context->sample_fmt = enc->codec->sample_fmts ?
enc->codec->sample_fmts[0] : AV_SAMPLE_FMT_FLTP;
enc->context->sample_fmt = enc->codec->sample_fmts
? enc->codec->sample_fmts[0]
: AV_SAMPLE_FMT_FLTP;
/* check to make sure sample rate is supported */
if (enc->codec->supported_samplerates) {
@@ -239,9 +254,9 @@ static void *enc_create(obs_data_t *settings, obs_encoder_t *encoder,
}
info("bitrate: %" PRId64 ", channels: %d, channel_layout: %x\n",
(int64_t)enc->context->bit_rate / 1000,
(int)enc->context->channels,
(unsigned int)enc->context->channel_layout);
(int64_t)enc->context->bit_rate / 1000,
(int)enc->context->channels,
(unsigned int)enc->context->channel_layout);
init_sizes(enc, audio);
@@ -268,22 +283,23 @@ static void *opus_create(obs_data_t *settings, obs_encoder_t *encoder)
return enc_create(settings, encoder, "libopus", "opus");
}
static bool do_encode(struct enc_encoder *enc,
struct encoder_packet *packet, bool *received_packet)
static bool do_encode(struct enc_encoder *enc, struct encoder_packet *packet,
bool *received_packet)
{
AVRational time_base = {1, enc->context->sample_rate};
AVPacket avpacket = {0};
int got_packet;
int ret;
AVPacket avpacket = {0};
int got_packet;
int ret;
enc->aframe->nb_samples = enc->frame_size;
enc->aframe->pts = av_rescale_q(enc->total_samples,
(AVRational){1, enc->context->sample_rate},
enc->context->time_base);
enc->aframe->pts = av_rescale_q(
enc->total_samples, (AVRational){1, enc->context->sample_rate},
enc->context->time_base);
ret = avcodec_fill_audio_frame(enc->aframe, enc->context->channels,
enc->context->sample_fmt, enc->samples[0],
enc->frame_size_bytes * enc->context->channels, 1);
ret = avcodec_fill_audio_frame(
enc->aframe, enc->context->channels, enc->context->sample_fmt,
enc->samples[0], enc->frame_size_bytes * enc->context->channels,
1);
if (ret < 0) {
warn("avcodec_fill_audio_frame failed: %s", av_err2str(ret));
return false;
@@ -302,7 +318,7 @@ static bool do_encode(struct enc_encoder *enc,
ret = 0;
#else
ret = avcodec_encode_audio2(enc->context, &avpacket, enc->aframe,
&got_packet);
&got_packet);
#endif
if (ret < 0) {
warn("avcodec_encode_audio2 failed: %s", av_err2str(ret));
@@ -316,8 +332,8 @@ static bool do_encode(struct enc_encoder *enc,
da_resize(enc->packet_buffer, 0);
da_push_back_array(enc->packet_buffer, avpacket.data, avpacket.size);
packet->pts = rescale_ts(avpacket.pts, enc->context, time_base);
packet->dts = rescale_ts(avpacket.dts, enc->context, time_base);
packet->pts = rescale_ts(avpacket.pts, enc->context, time_base);
packet->dts = rescale_ts(avpacket.dts, enc->context, time_base);
packet->data = enc->packet_buffer.array;
packet->size = avpacket.size;
packet->type = OBS_ENCODER_AUDIO;
@@ -328,7 +344,7 @@ static bool do_encode(struct enc_encoder *enc,
}
static bool enc_encode(void *data, struct encoder_frame *frame,
struct encoder_packet *packet, bool *received_packet)
struct encoder_packet *packet, bool *received_packet)
{
struct enc_encoder *enc = data;
@@ -348,8 +364,8 @@ static obs_properties_t *enc_properties(void *unused)
UNUSED_PARAMETER(unused);
obs_properties_t *props = obs_properties_create();
obs_properties_add_int(props, "bitrate",
obs_module_text("Bitrate"), 64, 1024, 32);
obs_properties_add_int(props, "bitrate", obs_module_text("Bitrate"), 64,
1024, 32);
return props;
}
@@ -358,7 +374,7 @@ static bool enc_extra_data(void *data, uint8_t **extra_data, size_t *size)
struct enc_encoder *enc = data;
*extra_data = enc->context->extradata;
*size = enc->context->extradata_size;
*size = enc->context->extradata_size;
return true;
}
@@ -367,41 +383,42 @@ static void enc_audio_info(void *data, struct audio_convert_info *info)
struct enc_encoder *enc = data;
info->format = convert_ffmpeg_sample_format(enc->context->sample_fmt);
info->samples_per_sec = (uint32_t)enc->context->sample_rate;
info->speakers = convert_ff_channel_layout(enc->context->channel_layout);
info->speakers =
convert_ff_channel_layout(enc->context->channel_layout);
}
static size_t enc_frame_size(void *data)
{
struct enc_encoder *enc =data;
struct enc_encoder *enc = data;
return enc->frame_size;
}
struct obs_encoder_info aac_encoder_info = {
.id = "ffmpeg_aac",
.type = OBS_ENCODER_AUDIO,
.codec = "AAC",
.get_name = aac_getname,
.create = aac_create,
.destroy = enc_destroy,
.encode = enc_encode,
.id = "ffmpeg_aac",
.type = OBS_ENCODER_AUDIO,
.codec = "AAC",
.get_name = aac_getname,
.create = aac_create,
.destroy = enc_destroy,
.encode = enc_encode,
.get_frame_size = enc_frame_size,
.get_defaults = enc_defaults,
.get_defaults = enc_defaults,
.get_properties = enc_properties,
.get_extra_data = enc_extra_data,
.get_audio_info = enc_audio_info
.get_audio_info = enc_audio_info,
};
struct obs_encoder_info opus_encoder_info = {
.id = "ffmpeg_opus",
.type = OBS_ENCODER_AUDIO,
.codec = "opus",
.get_name = opus_getname,
.create = opus_create,
.destroy = enc_destroy,
.encode = enc_encode,
.id = "ffmpeg_opus",
.type = OBS_ENCODER_AUDIO,
.codec = "opus",
.get_name = opus_getname,
.create = opus_create,
.destroy = enc_destroy,
.encode = enc_encode,
.get_frame_size = enc_frame_size,
.get_defaults = enc_defaults,
.get_defaults = enc_defaults,
.get_properties = enc_properties,
.get_extra_data = enc_extra_data,
.get_audio_info = enc_audio_info
.get_audio_info = enc_audio_info,
};


@@ -6,18 +6,20 @@
* a is the major version
* b and c the minor and micro versions of libav
* d and e the minor and micro versions of FFmpeg */
#define LIBAVCODEC_VERSION_CHECK( a, b, c, d, e ) \
( (LIBAVCODEC_VERSION_MICRO < 100 && LIBAVCODEC_VERSION_INT >= AV_VERSION_INT( a, b, c ) ) || \
(LIBAVCODEC_VERSION_MICRO >= 100 && LIBAVCODEC_VERSION_INT >= AV_VERSION_INT( a, d, e ) ) )
#define LIBAVCODEC_VERSION_CHECK(a, b, c, d, e) \
((LIBAVCODEC_VERSION_MICRO < 100 && \
LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(a, b, c)) || \
(LIBAVCODEC_VERSION_MICRO >= 100 && \
LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(a, d, e)))
#if !LIBAVCODEC_VERSION_CHECK(54, 28, 0, 59, 100)
# define avcodec_free_frame av_freep
#define avcodec_free_frame av_freep
#endif
#if LIBAVCODEC_VERSION_INT < 0x371c01
# define av_frame_alloc avcodec_alloc_frame
# define av_frame_unref avcodec_get_frame_defaults
# define av_frame_free avcodec_free_frame
#define av_frame_alloc avcodec_alloc_frame
#define av_frame_unref avcodec_get_frame_defaults
#define av_frame_free avcodec_free_frame
#endif
#if LIBAVCODEC_VERSION_MAJOR >= 57


@@ -1,63 +1,93 @@
#pragma once
static inline int64_t rescale_ts(int64_t val, AVCodecContext *context,
AVRational new_base)
AVRational new_base)
{
return av_rescale_q_rnd(val, context->time_base, new_base,
AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX);
AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX);
}
static inline enum AVPixelFormat obs_to_ffmpeg_video_format(
enum video_format format)
static inline enum AVPixelFormat
obs_to_ffmpeg_video_format(enum video_format format)
{
switch (format) {
case VIDEO_FORMAT_NONE: return AV_PIX_FMT_NONE;
case VIDEO_FORMAT_I444: return AV_PIX_FMT_YUV444P;
case VIDEO_FORMAT_I420: return AV_PIX_FMT_YUV420P;
case VIDEO_FORMAT_NV12: return AV_PIX_FMT_NV12;
case VIDEO_FORMAT_YVYU: return AV_PIX_FMT_NONE;
case VIDEO_FORMAT_YUY2: return AV_PIX_FMT_YUYV422;
case VIDEO_FORMAT_UYVY: return AV_PIX_FMT_UYVY422;
case VIDEO_FORMAT_RGBA: return AV_PIX_FMT_RGBA;
case VIDEO_FORMAT_BGRA: return AV_PIX_FMT_BGRA;
case VIDEO_FORMAT_BGRX: return AV_PIX_FMT_BGRA;
case VIDEO_FORMAT_Y800: return AV_PIX_FMT_GRAY8;
case VIDEO_FORMAT_BGR3: return AV_PIX_FMT_BGR24;
case VIDEO_FORMAT_NONE:
return AV_PIX_FMT_NONE;
case VIDEO_FORMAT_I444:
return AV_PIX_FMT_YUV444P;
case VIDEO_FORMAT_I420:
return AV_PIX_FMT_YUV420P;
case VIDEO_FORMAT_NV12:
return AV_PIX_FMT_NV12;
case VIDEO_FORMAT_YVYU:
return AV_PIX_FMT_NONE;
case VIDEO_FORMAT_YUY2:
return AV_PIX_FMT_YUYV422;
case VIDEO_FORMAT_UYVY:
return AV_PIX_FMT_UYVY422;
case VIDEO_FORMAT_RGBA:
return AV_PIX_FMT_RGBA;
case VIDEO_FORMAT_BGRA:
return AV_PIX_FMT_BGRA;
case VIDEO_FORMAT_BGRX:
return AV_PIX_FMT_BGRA;
case VIDEO_FORMAT_Y800:
return AV_PIX_FMT_GRAY8;
case VIDEO_FORMAT_BGR3:
return AV_PIX_FMT_BGR24;
}
return AV_PIX_FMT_NONE;
}
static inline enum video_format ffmpeg_to_obs_video_format(
enum AVPixelFormat format)
static inline enum video_format
ffmpeg_to_obs_video_format(enum AVPixelFormat format)
{
switch (format) {
case AV_PIX_FMT_YUV444P: return VIDEO_FORMAT_I444;
case AV_PIX_FMT_YUV420P: return VIDEO_FORMAT_I420;
case AV_PIX_FMT_NV12: return VIDEO_FORMAT_NV12;
case AV_PIX_FMT_YUYV422: return VIDEO_FORMAT_YUY2;
case AV_PIX_FMT_UYVY422: return VIDEO_FORMAT_UYVY;
case AV_PIX_FMT_RGBA: return VIDEO_FORMAT_RGBA;
case AV_PIX_FMT_BGRA: return VIDEO_FORMAT_BGRA;
case AV_PIX_FMT_GRAY8: return VIDEO_FORMAT_Y800;
case AV_PIX_FMT_BGR24: return VIDEO_FORMAT_BGR3;
case AV_PIX_FMT_YUV444P:
return VIDEO_FORMAT_I444;
case AV_PIX_FMT_YUV420P:
return VIDEO_FORMAT_I420;
case AV_PIX_FMT_NV12:
return VIDEO_FORMAT_NV12;
case AV_PIX_FMT_YUYV422:
return VIDEO_FORMAT_YUY2;
case AV_PIX_FMT_UYVY422:
return VIDEO_FORMAT_UYVY;
case AV_PIX_FMT_RGBA:
return VIDEO_FORMAT_RGBA;
case AV_PIX_FMT_BGRA:
return VIDEO_FORMAT_BGRA;
case AV_PIX_FMT_GRAY8:
return VIDEO_FORMAT_Y800;
case AV_PIX_FMT_BGR24:
return VIDEO_FORMAT_BGR3;
case AV_PIX_FMT_NONE:
default: return VIDEO_FORMAT_NONE;
default:
return VIDEO_FORMAT_NONE;
}
}
static inline enum audio_format convert_ffmpeg_sample_format(
enum AVSampleFormat format)
static inline enum audio_format
convert_ffmpeg_sample_format(enum AVSampleFormat format)
{
switch ((uint32_t)format) {
case AV_SAMPLE_FMT_U8: return AUDIO_FORMAT_U8BIT;
case AV_SAMPLE_FMT_S16: return AUDIO_FORMAT_16BIT;
case AV_SAMPLE_FMT_S32: return AUDIO_FORMAT_32BIT;
case AV_SAMPLE_FMT_FLT: return AUDIO_FORMAT_FLOAT;
case AV_SAMPLE_FMT_U8P: return AUDIO_FORMAT_U8BIT_PLANAR;
case AV_SAMPLE_FMT_S16P: return AUDIO_FORMAT_16BIT_PLANAR;
case AV_SAMPLE_FMT_S32P: return AUDIO_FORMAT_32BIT_PLANAR;
case AV_SAMPLE_FMT_FLTP: return AUDIO_FORMAT_FLOAT_PLANAR;
case AV_SAMPLE_FMT_U8:
return AUDIO_FORMAT_U8BIT;
case AV_SAMPLE_FMT_S16:
return AUDIO_FORMAT_16BIT;
case AV_SAMPLE_FMT_S32:
return AUDIO_FORMAT_32BIT;
case AV_SAMPLE_FMT_FLT:
return AUDIO_FORMAT_FLOAT;
case AV_SAMPLE_FMT_U8P:
return AUDIO_FORMAT_U8BIT_PLANAR;
case AV_SAMPLE_FMT_S16P:
return AUDIO_FORMAT_16BIT_PLANAR;
case AV_SAMPLE_FMT_S32P:
return AUDIO_FORMAT_32BIT_PLANAR;
case AV_SAMPLE_FMT_FLTP:
return AUDIO_FORMAT_FLOAT_PLANAR;
}
/* shouldn't get here */


@@ -32,38 +32,38 @@
#include <libavformat/avformat.h>
#define do_log(level, format, ...) \
#define do_log(level, format, ...) \
blog(level, "[ffmpeg muxer: '%s'] " format, \
obs_output_get_name(stream->output), ##__VA_ARGS__)
obs_output_get_name(stream->output), ##__VA_ARGS__)
#define warn(format, ...) do_log(LOG_WARNING, format, ##__VA_ARGS__)
#define info(format, ...) do_log(LOG_INFO, format, ##__VA_ARGS__)
#define warn(format, ...) do_log(LOG_WARNING, format, ##__VA_ARGS__)
#define info(format, ...) do_log(LOG_INFO, format, ##__VA_ARGS__)
struct ffmpeg_muxer {
obs_output_t *output;
obs_output_t *output;
os_process_pipe_t *pipe;
int64_t stop_ts;
uint64_t total_bytes;
struct dstr path;
bool sent_headers;
volatile bool active;
volatile bool stopping;
volatile bool capturing;
int64_t stop_ts;
uint64_t total_bytes;
struct dstr path;
bool sent_headers;
volatile bool active;
volatile bool stopping;
volatile bool capturing;
/* replay buffer */
struct circlebuf packets;
int64_t cur_size;
int64_t cur_time;
int64_t max_size;
int64_t max_time;
int64_t save_ts;
int keyframes;
obs_hotkey_id hotkey;
struct circlebuf packets;
int64_t cur_size;
int64_t cur_time;
int64_t max_size;
int64_t max_time;
int64_t save_ts;
int keyframes;
obs_hotkey_id hotkey;
DARRAY(struct encoder_packet) mux_packets;
pthread_t mux_thread;
bool mux_thread_joinable;
volatile bool muxing;
pthread_t mux_thread;
bool mux_thread_joinable;
volatile bool muxing;
};
static const char *ffmpeg_mux_getname(void *type)
@@ -136,7 +136,7 @@ static inline bool active(struct ffmpeg_muxer *stream)
/* TODO: allow codecs other than h264 whenever we start using them */
static void add_video_encoder_params(struct ffmpeg_muxer *stream,
struct dstr *cmd, obs_encoder_t *vencoder)
struct dstr *cmd, obs_encoder_t *vencoder)
{
obs_data_t *settings = obs_encoder_get_settings(vencoder);
int bitrate = (int)obs_data_get_int(settings, "bitrate");
@@ -145,13 +145,10 @@ static void add_video_encoder_params(struct ffmpeg_muxer *stream,
obs_data_release(settings);
dstr_catf(cmd, "%s %d %d %d %d %d ",
obs_encoder_get_codec(vencoder),
bitrate,
obs_output_get_width(stream->output),
obs_output_get_height(stream->output),
(int)info->fps_num,
(int)info->fps_den);
dstr_catf(cmd, "%s %d %d %d %d %d ", obs_encoder_get_codec(vencoder),
bitrate, obs_output_get_width(stream->output),
obs_output_get_height(stream->output), (int)info->fps_num,
(int)info->fps_den);
}
static void add_audio_encoder_params(struct dstr *cmd, obs_encoder_t *aencoder)
@@ -166,11 +163,9 @@ static void add_audio_encoder_params(struct dstr *cmd, obs_encoder_t *aencoder)
dstr_copy(&name, obs_encoder_get_name(aencoder));
dstr_replace(&name, "\"", "\"\"");
dstr_catf(cmd, "\"%s\" %d %d %d ",
name.array,
bitrate,
(int)obs_encoder_get_sample_rate(aencoder),
(int)audio_output_get_channels(audio));
dstr_catf(cmd, "\"%s\" %d %d %d ", name.array, bitrate,
(int)obs_encoder_get_sample_rate(aencoder),
(int)audio_output_get_channels(audio));
dstr_free(&name);
}
@@ -181,8 +176,8 @@ static void log_muxer_params(struct ffmpeg_muxer *stream, const char *settings)
AVDictionary *dict = NULL;
if ((ret = av_dict_parse_string(&dict, settings, "=", " ", 0))) {
warn("Failed to parse muxer settings: %s\n%s",
av_err2str(ret), settings);
warn("Failed to parse muxer settings: %s\n%s", av_err2str(ret),
settings);
av_dict_free(&dict);
return;
@@ -193,7 +188,7 @@ static void log_muxer_params(struct ffmpeg_muxer *stream, const char *settings)
AVDictionaryEntry *entry = NULL;
while ((entry = av_dict_get(dict, "", entry,
AV_DICT_IGNORE_SUFFIX)))
AV_DICT_IGNORE_SUFFIX)))
dstr_catf(&str, "\n\t%s=%s", entry->key, entry->value);
info("Using muxer settings:%s", str.array);
@@ -221,7 +216,7 @@ static void add_muxer_params(struct dstr *cmd, struct ffmpeg_muxer *stream)
}
static void build_command_line(struct ffmpeg_muxer *stream, struct dstr *cmd,
const char *path)
const char *path)
{
obs_encoder_t *vencoder = obs_output_get_video_encoder(stream->output);
obs_encoder_t *aencoders[MAX_AUDIO_MIXES];
@@ -229,7 +224,7 @@ static void build_command_line(struct ffmpeg_muxer *stream, struct dstr *cmd,
for (;;) {
obs_encoder_t *aencoder = obs_output_get_audio_encoder(
stream->output, num_tracks);
stream->output, num_tracks);
if (!aencoder)
break;
@@ -289,7 +284,7 @@ static bool ffmpeg_mux_start(void *data)
if (!test_file) {
struct dstr error_message;
dstr_init_copy(&error_message,
obs_module_text("UnableToWritePath"));
obs_module_text("UnableToWritePath"));
#ifdef _WIN32
// special warning for Windows 10 users about Defender
struct win_version_info ver;
@@ -297,12 +292,11 @@ static bool ffmpeg_mux_start(void *data)
if (ver.major >= 10) {
dstr_cat(&error_message, "\n\n");
dstr_cat(&error_message,
obs_module_text("WarnWindowsDefender"));
obs_module_text("WarnWindowsDefender"));
}
#endif
dstr_replace(&error_message, "%1", path);
obs_output_set_last_error(stream->output,
error_message.array);
obs_output_set_last_error(stream->output, error_message.array);
dstr_free(&error_message);
obs_data_release(settings);
return false;
@@ -315,8 +309,8 @@ static bool ffmpeg_mux_start(void *data)
obs_data_release(settings);
if (!stream->pipe) {
obs_output_set_last_error(stream->output,
obs_module_text("HelperProcessFailed"));
obs_output_set_last_error(
stream->output, obs_module_text("HelperProcessFailed"));
warn("Failed to create process pipe");
return false;
}
@@ -375,19 +369,22 @@ static void signal_failure(struct ffmpeg_muxer *stream)
size_t len;
len = os_process_pipe_read_err(stream->pipe, (uint8_t *)error,
sizeof(error) - 1);
sizeof(error) - 1);
if (len > 0) {
error[len] = 0;
warn ("ffmpeg-mux: %s", error);
obs_output_set_last_error (stream->output, error);
warn("ffmpeg-mux: %s", error);
obs_output_set_last_error(stream->output, error);
}
ret = deactivate(stream, 0);
switch (ret) {
case FFM_UNSUPPORTED: code = OBS_OUTPUT_UNSUPPORTED; break;
default: code = OBS_OUTPUT_ERROR;
case FFM_UNSUPPORTED:
code = OBS_OUTPUT_UNSUPPORTED;
break;
default:
code = OBS_OUTPUT_ERROR;
}
obs_output_signal_stop(stream->output, code);
@@ -395,22 +392,21 @@ static void signal_failure(struct ffmpeg_muxer *stream)
}
static bool write_packet(struct ffmpeg_muxer *stream,
struct encoder_packet *packet)
struct encoder_packet *packet)
{
bool is_video = packet->type == OBS_ENCODER_VIDEO;
size_t ret;
struct ffm_packet_info info = {
.pts = packet->pts,
.dts = packet->dts,
.size = (uint32_t)packet->size,
.index = (int)packet->track_idx,
.type = is_video ? FFM_PACKET_VIDEO : FFM_PACKET_AUDIO,
.keyframe = packet->keyframe
};
struct ffm_packet_info info = {.pts = packet->pts,
.dts = packet->dts,
.size = (uint32_t)packet->size,
.index = (int)packet->track_idx,
.type = is_video ? FFM_PACKET_VIDEO
: FFM_PACKET_AUDIO,
.keyframe = packet->keyframe};
ret = os_process_pipe_write(stream->pipe, (const uint8_t*)&info,
sizeof(info));
ret = os_process_pipe_write(stream->pipe, (const uint8_t *)&info,
sizeof(info));
if (ret != sizeof(info)) {
warn("os_process_pipe_write for info structure failed");
signal_failure(stream);
@@ -429,13 +425,10 @@ static bool write_packet(struct ffmpeg_muxer *stream,
}
static bool send_audio_headers(struct ffmpeg_muxer *stream,
obs_encoder_t *aencoder, size_t idx)
obs_encoder_t *aencoder, size_t idx)
{
struct encoder_packet packet = {
.type = OBS_ENCODER_AUDIO,
.timebase_den = 1,
.track_idx = idx
};
.type = OBS_ENCODER_AUDIO, .timebase_den = 1, .track_idx = idx};
obs_encoder_get_extra_data(aencoder, &packet.data, &packet.size);
return write_packet(stream, &packet);
@@ -445,10 +438,8 @@ static bool send_video_headers(struct ffmpeg_muxer *stream)
{
obs_encoder_t *vencoder = obs_output_get_video_encoder(stream->output);
struct encoder_packet packet = {
.type = OBS_ENCODER_VIDEO,
.timebase_den = 1
};
struct encoder_packet packet = {.type = OBS_ENCODER_VIDEO,
.timebase_den = 1};
obs_encoder_get_extra_data(vencoder, &packet.data, &packet.size);
return write_packet(stream, &packet);
@@ -511,9 +502,8 @@ static obs_properties_t *ffmpeg_mux_properties(void *unused)
obs_properties_t *props = obs_properties_create();
obs_properties_add_text(props, "path",
obs_module_text("FilePath"),
OBS_TEXT_DEFAULT);
obs_properties_add_text(props, "path", obs_module_text("FilePath"),
OBS_TEXT_DEFAULT);
return props;
}
@@ -524,18 +514,16 @@ static uint64_t ffmpeg_mux_total_bytes(void *data)
}
struct obs_output_info ffmpeg_muxer = {
.id = "ffmpeg_muxer",
.flags = OBS_OUTPUT_AV |
OBS_OUTPUT_ENCODED |
OBS_OUTPUT_MULTI_TRACK,
.get_name = ffmpeg_mux_getname,
.create = ffmpeg_mux_create,
.destroy = ffmpeg_mux_destroy,
.start = ffmpeg_mux_start,
.stop = ffmpeg_mux_stop,
.id = "ffmpeg_muxer",
.flags = OBS_OUTPUT_AV | OBS_OUTPUT_ENCODED | OBS_OUTPUT_MULTI_TRACK,
.get_name = ffmpeg_mux_getname,
.create = ffmpeg_mux_create,
.destroy = ffmpeg_mux_destroy,
.start = ffmpeg_mux_start,
.stop = ffmpeg_mux_stop,
.encoded_packet = ffmpeg_mux_data,
.get_total_bytes= ffmpeg_mux_total_bytes,
.get_properties = ffmpeg_mux_properties
.get_total_bytes = ffmpeg_mux_total_bytes,
.get_properties = ffmpeg_mux_properties,
};
/* ------------------------------------------------------------------------ */
@@ -547,7 +535,7 @@ static const char *replay_buffer_getname(void *type)
}
static void replay_buffer_hotkey(void *data, obs_hotkey_id id,
obs_hotkey_t *hotkey, bool pressed)
obs_hotkey_t *hotkey, bool pressed)
{
UNUSED_PARAMETER(id);
UNUSED_PARAMETER(hotkey);
@@ -577,15 +565,15 @@ static void *replay_buffer_create(obs_data_t *settings, obs_output_t *output)
struct ffmpeg_muxer *stream = bzalloc(sizeof(*stream));
stream->output = output;
stream->hotkey = obs_hotkey_register_output(output,
"ReplayBuffer.Save",
obs_module_text("ReplayBuffer.Save"),
replay_buffer_hotkey, stream);
stream->hotkey =
obs_hotkey_register_output(output, "ReplayBuffer.Save",
obs_module_text("ReplayBuffer.Save"),
replay_buffer_hotkey, stream);
proc_handler_t *ph = obs_output_get_proc_handler(output);
proc_handler_add(ph, "void save()", save_replay_proc, stream);
proc_handler_add(ph, "void get_last_replay(out string path)",
get_last_replay, stream);
get_last_replay, stream);
return stream;
}
@@ -653,7 +641,7 @@ static inline void purge(struct ffmpeg_muxer *stream)
for (;;) {
circlebuf_peek_front(&stream->packets, &pkt,
sizeof(pkt));
sizeof(pkt));
if (pkt.type == OBS_ENCODER_VIDEO && pkt.keyframe)
return;
@@ -663,14 +651,14 @@ static inline void purge(struct ffmpeg_muxer *stream)
}
static inline void replay_buffer_purge(struct ffmpeg_muxer *stream,
struct encoder_packet *pkt)
struct encoder_packet *pkt)
{
if (stream->max_size) {
if (!stream->packets.size || stream->keyframes <= 2)
return;
while ((stream->cur_size + (int64_t)pkt->size) >
stream->max_size)
stream->max_size)
purge(stream);
}
@@ -682,8 +670,8 @@ static inline void replay_buffer_purge(struct ffmpeg_muxer *stream,
}
static void insert_packet(struct darray *array, struct encoder_packet *packet,
int64_t video_offset, int64_t *audio_offsets,
int64_t video_dts_offset, int64_t *audio_dts_offsets)
int64_t video_offset, int64_t *audio_offsets,
int64_t video_dts_offset, int64_t *audio_dts_offsets)
{
struct encoder_packet pkt;
DARRAY(struct encoder_packet) packets;
@@ -725,7 +713,7 @@ static void *replay_buffer_mux_thread(void *data)
if (!send_headers(stream)) {
warn("Could not write headers for file '%s'",
stream->path.array);
stream->path.array);
goto error;
}
@@ -780,9 +768,9 @@ static void replay_buffer_save(struct ffmpeg_muxer *stream)
}
}
insert_packet(&stream->mux_packets.da, pkt,
video_offset, audio_offsets,
video_dts_offset, audio_dts_offsets);
insert_packet(&stream->mux_packets.da, pkt, video_offset,
audio_offsets, video_dts_offset,
audio_dts_offsets);
}
/* ---------------------------- */
@@ -809,7 +797,8 @@ static void replay_buffer_save(struct ffmpeg_muxer *stream)
os_atomic_set_bool(&stream->muxing, true);
stream->mux_thread_joinable = pthread_create(&stream->mux_thread, NULL,
replay_buffer_mux_thread, stream) == 0;
replay_buffer_mux_thread,
stream) == 0;
}
static void deactivate_replay_buffer(struct ffmpeg_muxer *stream, int code)
@@ -883,16 +872,14 @@ static void replay_buffer_defaults(obs_data_t *s)
}
struct obs_output_info replay_buffer = {
.id = "replay_buffer",
.flags = OBS_OUTPUT_AV |
OBS_OUTPUT_ENCODED |
OBS_OUTPUT_MULTI_TRACK,
.get_name = replay_buffer_getname,
.create = replay_buffer_create,
.destroy = replay_buffer_destroy,
.start = replay_buffer_start,
.stop = ffmpeg_mux_stop,
.id = "replay_buffer",
.flags = OBS_OUTPUT_AV | OBS_OUTPUT_ENCODED | OBS_OUTPUT_MULTI_TRACK,
.get_name = replay_buffer_getname,
.create = replay_buffer_create,
.destroy = replay_buffer_destroy,
.start = replay_buffer_start,
.stop = ffmpeg_mux_stop,
.encoded_packet = replay_buffer_data,
.get_total_bytes= ffmpeg_mux_total_bytes,
.get_defaults = replay_buffer_defaults
.get_total_bytes = ffmpeg_mux_total_bytes,
.get_defaults = replay_buffer_defaults,
};

@@ -28,33 +28,33 @@
#include "obs-ffmpeg-formats.h"
#define do_log(level, format, ...) \
#define do_log(level, format, ...) \
blog(level, "[NVENC encoder: '%s'] " format, \
obs_encoder_get_name(enc->encoder), ##__VA_ARGS__)
obs_encoder_get_name(enc->encoder), ##__VA_ARGS__)
#define warn(format, ...) do_log(LOG_WARNING, format, ##__VA_ARGS__)
#define info(format, ...) do_log(LOG_INFO, format, ##__VA_ARGS__)
#define debug(format, ...) do_log(LOG_DEBUG, format, ##__VA_ARGS__)
#define warn(format, ...) do_log(LOG_WARNING, format, ##__VA_ARGS__)
#define info(format, ...) do_log(LOG_INFO, format, ##__VA_ARGS__)
#define debug(format, ...) do_log(LOG_DEBUG, format, ##__VA_ARGS__)
struct nvenc_encoder {
obs_encoder_t *encoder;
obs_encoder_t *encoder;
AVCodec *nvenc;
AVCodecContext *context;
AVCodec *nvenc;
AVCodecContext *context;
AVFrame *vframe;
AVFrame *vframe;
DARRAY(uint8_t) buffer;
DARRAY(uint8_t) buffer;
uint8_t *header;
size_t header_size;
uint8_t *header;
size_t header_size;
uint8_t *sei;
size_t sei_size;
uint8_t *sei;
size_t sei_size;
int height;
bool first_packet;
bool initialized;
int height;
bool first_packet;
bool initialized;
};
static const char *nvenc_getname(void *unused)
@@ -65,8 +65,7 @@ static const char *nvenc_getname(void *unused)
static inline bool valid_format(enum video_format format)
{
return format == VIDEO_FORMAT_I420 ||
format == VIDEO_FORMAT_NV12 ||
return format == VIDEO_FORMAT_I420 || format == VIDEO_FORMAT_NV12 ||
format == VIDEO_FORMAT_I444;
}
@@ -78,8 +77,8 @@ static void nvenc_video_info(void *data, struct video_scale_info *info)
pref_format = obs_encoder_get_preferred_video_format(enc->encoder);
if (!valid_format(pref_format)) {
pref_format = valid_format(info->format) ?
info->format : VIDEO_FORMAT_NV12;
pref_format = valid_format(info->format) ? info->format
: VIDEO_FORMAT_NV12;
}
info->format = pref_format;
@@ -117,12 +116,7 @@ static bool nvenc_init_codec(struct nvenc_encoder *enc)
return true;
}
enum RC_MODE {
RC_MODE_CBR,
RC_MODE_VBR,
RC_MODE_CQP,
RC_MODE_LOSSLESS
};
enum RC_MODE { RC_MODE_CBR, RC_MODE_VBR, RC_MODE_CQP, RC_MODE_LOSSLESS };
static bool nvenc_update(void *data, obs_data_t *settings)
{
@@ -177,10 +171,10 @@ static bool nvenc_update(void *data, obs_data_t *settings)
cqp = 0;
bool hp = (astrcmpi(preset, "hp") == 0 ||
astrcmpi(preset, "llhp") == 0);
astrcmpi(preset, "llhp") == 0);
av_opt_set(enc->context->priv_data, "preset",
hp ? "losslesshp" : "lossless", 0);
hp ? "losslesshp" : "lossless", 0);
} else if (astrcmpi(rc, "vbr") != 0) { /* CBR by default */
av_opt_set_int(enc->context->priv_data, "cbr", true, 0);
@@ -189,7 +183,6 @@ static bool nvenc_update(void *data, obs_data_t *settings)
cqp = 0;
}
av_opt_set(enc->context->priv_data, "level", "auto", 0);
av_opt_set_int(enc->context->priv_data, "2pass", twopass, 0);
av_opt_set_int(enc->context->priv_data, "gpu", gpu, 0);
@@ -200,15 +193,17 @@ static bool nvenc_update(void *data, obs_data_t *settings)
enc->context->height = obs_encoder_get_height(enc->encoder);
enc->context->time_base = (AVRational){voi->fps_den, voi->fps_num};
enc->context->pix_fmt = obs_to_ffmpeg_video_format(info.format);
enc->context->colorspace = info.colorspace == VIDEO_CS_709 ?
AVCOL_SPC_BT709 : AVCOL_SPC_BT470BG;
enc->context->color_range = info.range == VIDEO_RANGE_FULL ?
AVCOL_RANGE_JPEG : AVCOL_RANGE_MPEG;
enc->context->colorspace = info.colorspace == VIDEO_CS_709
? AVCOL_SPC_BT709
: AVCOL_SPC_BT470BG;
enc->context->color_range = info.range == VIDEO_RANGE_FULL
? AVCOL_RANGE_JPEG
: AVCOL_RANGE_MPEG;
enc->context->max_b_frames = bf;
if (keyint_sec)
enc->context->gop_size = keyint_sec * voi->fps_num /
voi->fps_den;
enc->context->gop_size =
keyint_sec * voi->fps_num / voi->fps_den;
else
enc->context->gop_size = 250;
@@ -226,12 +221,9 @@ static bool nvenc_update(void *data, obs_data_t *settings)
"\t2-pass: %s\n"
"\tb-frames: %d\n"
"\tGPU: %d\n",
rc, bitrate, cqp, enc->context->gop_size,
preset, profile,
rc, bitrate, cqp, enc->context->gop_size, preset, profile,
enc->context->width, enc->context->height,
twopass ? "true" : "false",
enc->context->max_b_frames,
gpu);
twopass ? "true" : "false", enc->context->max_b_frames, gpu);
return nvenc_init_codec(enc);
}
@@ -250,7 +242,7 @@ static void nvenc_destroy(void *data)
break;
#else
if (avcodec_encode_video2(enc->context, &pkt, NULL,
&r_pkt) < 0)
&r_pkt) < 0)
break;
#endif
@@ -306,33 +298,33 @@ fail:
}
static inline void copy_data(AVFrame *pic, const struct encoder_frame *frame,
int height, enum AVPixelFormat format)
int height, enum AVPixelFormat format)
{
int h_chroma_shift, v_chroma_shift;
av_pix_fmt_get_chroma_sub_sample(format, &h_chroma_shift, &v_chroma_shift);
av_pix_fmt_get_chroma_sub_sample(format, &h_chroma_shift,
&v_chroma_shift);
for (int plane = 0; plane < MAX_AV_PLANES; plane++) {
if (!frame->data[plane])
continue;
int frame_rowsize = (int)frame->linesize[plane];
int pic_rowsize = pic->linesize[plane];
int bytes = frame_rowsize < pic_rowsize ?
frame_rowsize : pic_rowsize;
int pic_rowsize = pic->linesize[plane];
int bytes = frame_rowsize < pic_rowsize ? frame_rowsize
: pic_rowsize;
int plane_height = height >> (plane ? v_chroma_shift : 0);
for (int y = 0; y < plane_height; y++) {
int pos_frame = y * frame_rowsize;
int pos_pic = y * pic_rowsize;
int pos_pic = y * pic_rowsize;
memcpy(pic->data[plane] + pos_pic,
frame->data[plane] + pos_frame,
bytes);
frame->data[plane] + pos_frame, bytes);
}
}
}
static bool nvenc_encode(void *data, struct encoder_frame *frame,
struct encoder_packet *packet, bool *received_packet)
struct encoder_packet *packet, bool *received_packet)
{
struct nvenc_encoder *enc = data;
AVPacket av_pkt = {0};
@@ -355,7 +347,7 @@ static bool nvenc_encode(void *data, struct encoder_frame *frame,
ret = 0;
#else
ret = avcodec_encode_video2(enc->context, &av_pkt, enc->vframe,
&got_packet);
&got_packet);
#endif
if (ret < 0) {
warn("nvenc_encode: Error encoding: %s", av_err2str(ret));
@@ -369,9 +361,9 @@ static bool nvenc_encode(void *data, struct encoder_frame *frame,
enc->first_packet = false;
obs_extract_avc_headers(av_pkt.data, av_pkt.size,
&new_packet, &size,
&enc->header, &enc->header_size,
&enc->sei, &enc->sei_size);
&new_packet, &size,
&enc->header, &enc->header_size,
&enc->sei, &enc->sei_size);
da_copy_array(enc->buffer, new_packet, size);
bfree(new_packet);
@@ -409,7 +401,7 @@ void nvenc_defaults(obs_data_t *settings)
}
static bool rate_control_modified(obs_properties_t *ppts, obs_property_t *p,
obs_data_t *settings)
obs_data_t *settings)
{
const char *rc = obs_data_get_string(settings, "rate_control");
bool cqp = astrcmpi(rc, "CQP") == 0;
@@ -441,35 +433,39 @@ obs_properties_t *nvenc_properties_internal(bool ffmpeg)
obs_property_t *p;
p = obs_properties_add_list(props, "rate_control",
obs_module_text("RateControl"),
OBS_COMBO_TYPE_LIST, OBS_COMBO_FORMAT_STRING);
obs_module_text("RateControl"),
OBS_COMBO_TYPE_LIST,
OBS_COMBO_FORMAT_STRING);
obs_property_list_add_string(p, "CBR", "CBR");
obs_property_list_add_string(p, "CQP", "CQP");
obs_property_list_add_string(p, "VBR", "VBR");
obs_property_list_add_string(p, obs_module_text("Lossless"),
"lossless");
"lossless");
obs_property_set_modified_callback(p, rate_control_modified);
p = obs_properties_add_int(props, "bitrate",
obs_module_text("Bitrate"), 50, 300000, 50);
p = obs_properties_add_int(props, "bitrate", obs_module_text("Bitrate"),
50, 300000, 50);
obs_property_int_set_suffix(p, " Kbps");
p = obs_properties_add_int(props, "max_bitrate",
obs_module_text("MaxBitrate"), 50, 300000, 50);
obs_module_text("MaxBitrate"), 50, 300000,
50);
obs_property_int_set_suffix(p, " Kbps");
obs_properties_add_int(props, "cqp", obs_module_text("NVENC.CQLevel"),
1, 30, 1);
1, 30, 1);
obs_properties_add_int(props, "keyint_sec",
obs_module_text("KeyframeIntervalSec"), 0, 10, 1);
obs_module_text("KeyframeIntervalSec"), 0, 10,
1);
p = obs_properties_add_list(props, "preset", obs_module_text("Preset"),
OBS_COMBO_TYPE_LIST, OBS_COMBO_FORMAT_STRING);
OBS_COMBO_TYPE_LIST,
OBS_COMBO_FORMAT_STRING);
#define add_preset(val) \
#define add_preset(val) \
obs_property_list_add_string(p, obs_module_text("NVENC.Preset." val), \
val)
val)
add_preset("mq");
add_preset("hq");
add_preset("default");
@@ -479,11 +475,12 @@ obs_properties_t *nvenc_properties_internal(bool ffmpeg)
add_preset("llhp");
#undef add_preset
p = obs_properties_add_list(props, "profile", obs_module_text("Profile"),
OBS_COMBO_TYPE_LIST, OBS_COMBO_FORMAT_STRING);
p = obs_properties_add_list(props, "profile",
obs_module_text("Profile"),
OBS_COMBO_TYPE_LIST,
OBS_COMBO_FORMAT_STRING);
#define add_profile(val) \
obs_property_list_add_string(p, val, val)
#define add_profile(val) obs_property_list_add_string(p, val, val)
add_profile("high");
add_profile("main");
add_profile("baseline");
@@ -491,20 +488,21 @@ obs_properties_t *nvenc_properties_internal(bool ffmpeg)
if (!ffmpeg) {
p = obs_properties_add_bool(props, "lookahead",
obs_module_text("NVENC.LookAhead"));
obs_property_set_long_description(p,
obs_module_text("NVENC.LookAhead.ToolTip"));
obs_module_text("NVENC.LookAhead"));
obs_property_set_long_description(
p, obs_module_text("NVENC.LookAhead.ToolTip"));
p = obs_properties_add_bool(props, "psycho_aq",
obs_module_text("NVENC.PsychoVisualTuning"));
obs_property_set_long_description(p,
obs_module_text("NVENC.PsychoVisualTuning.ToolTip"));
p = obs_properties_add_bool(
props, "psycho_aq",
obs_module_text("NVENC.PsychoVisualTuning"));
obs_property_set_long_description(
p, obs_module_text("NVENC.PsychoVisualTuning.ToolTip"));
}
obs_properties_add_int(props, "gpu", obs_module_text("GPU"), 0, 8, 1);
obs_properties_add_int(props, "bf", obs_module_text("BFrames"),
0, 4, 1);
obs_properties_add_int(props, "bf", obs_module_text("BFrames"), 0, 4,
1);
return props;
}
@@ -526,7 +524,7 @@ static bool nvenc_extra_data(void *data, uint8_t **extra_data, size_t *size)
struct nvenc_encoder *enc = data;
*extra_data = enc->header;
*size = enc->header_size;
*size = enc->header_size;
return true;
}
@@ -535,21 +533,21 @@ static bool nvenc_sei_data(void *data, uint8_t **extra_data, size_t *size)
struct nvenc_encoder *enc = data;
*extra_data = enc->sei;
*size = enc->sei_size;
*size = enc->sei_size;
return true;
}
struct obs_encoder_info nvenc_encoder_info = {
.id = "ffmpeg_nvenc",
.type = OBS_ENCODER_VIDEO,
.codec = "h264",
.get_name = nvenc_getname,
.create = nvenc_create,
.destroy = nvenc_destroy,
.encode = nvenc_encode,
.get_defaults = nvenc_defaults,
.id = "ffmpeg_nvenc",
.type = OBS_ENCODER_VIDEO,
.codec = "h264",
.get_name = nvenc_getname,
.create = nvenc_create,
.destroy = nvenc_destroy,
.encode = nvenc_encode,
.get_defaults = nvenc_defaults,
.get_properties = nvenc_properties_ffmpeg,
.get_extra_data = nvenc_extra_data,
.get_sei_data = nvenc_sei_data,
.get_video_info = nvenc_video_info
.get_sei_data = nvenc_sei_data,
.get_video_info = nvenc_video_info,
};

@@ -32,92 +32,92 @@
#include "obs-ffmpeg-compat.h"
struct ffmpeg_cfg {
const char *url;
const char *format_name;
const char *format_mime_type;
const char *muxer_settings;
int gop_size;
int video_bitrate;
int audio_bitrate;
const char *video_encoder;
int video_encoder_id;
const char *audio_encoder;
int audio_encoder_id;
const char *video_settings;
const char *audio_settings;
int audio_mix_count;
int audio_tracks;
const char *url;
const char *format_name;
const char *format_mime_type;
const char *muxer_settings;
int gop_size;
int video_bitrate;
int audio_bitrate;
const char *video_encoder;
int video_encoder_id;
const char *audio_encoder;
int audio_encoder_id;
const char *video_settings;
const char *audio_settings;
int audio_mix_count;
int audio_tracks;
enum AVPixelFormat format;
enum AVColorRange color_range;
enum AVColorSpace color_space;
int scale_width;
int scale_height;
int width;
int height;
enum AVColorRange color_range;
enum AVColorSpace color_space;
int scale_width;
int scale_height;
int width;
int height;
};
struct ffmpeg_data {
AVStream *video;
AVStream **audio_streams;
AVCodec *acodec;
AVCodec *vcodec;
AVFormatContext *output;
struct SwsContext *swscale;
AVStream *video;
AVStream **audio_streams;
AVCodec *acodec;
AVCodec *vcodec;
AVFormatContext *output;
struct SwsContext *swscale;
int64_t total_frames;
AVFrame *vframe;
int frame_size;
int64_t total_frames;
AVFrame *vframe;
int frame_size;
uint64_t start_timestamp;
uint64_t start_timestamp;
int64_t total_samples[MAX_AUDIO_MIXES];
uint32_t audio_samplerate;
enum audio_format audio_format;
size_t audio_planes;
size_t audio_size;
int num_audio_streams;
int64_t total_samples[MAX_AUDIO_MIXES];
uint32_t audio_samplerate;
enum audio_format audio_format;
size_t audio_planes;
size_t audio_size;
int num_audio_streams;
/* audio_tracks is a bitmask storing the indices of the mixes */
int audio_tracks;
struct circlebuf excess_frames[MAX_AUDIO_MIXES][MAX_AV_PLANES];
uint8_t *samples[MAX_AUDIO_MIXES][MAX_AV_PLANES];
AVFrame *aframe[MAX_AUDIO_MIXES];
int audio_tracks;
struct circlebuf excess_frames[MAX_AUDIO_MIXES][MAX_AV_PLANES];
uint8_t *samples[MAX_AUDIO_MIXES][MAX_AV_PLANES];
AVFrame *aframe[MAX_AUDIO_MIXES];
struct ffmpeg_cfg config;
struct ffmpeg_cfg config;
bool initialized;
bool initialized;
char *last_error;
char *last_error;
};
struct ffmpeg_output {
obs_output_t *output;
volatile bool active;
obs_output_t *output;
volatile bool active;
struct ffmpeg_data ff_data;
bool connecting;
pthread_t start_thread;
bool connecting;
pthread_t start_thread;
uint64_t total_bytes;
uint64_t total_bytes;
uint64_t audio_start_ts;
uint64_t video_start_ts;
uint64_t stop_ts;
volatile bool stopping;
uint64_t audio_start_ts;
uint64_t video_start_ts;
uint64_t stop_ts;
volatile bool stopping;
bool write_thread_active;
pthread_mutex_t write_mutex;
pthread_t write_thread;
os_sem_t *write_sem;
os_event_t *stop_event;
bool write_thread_active;
pthread_mutex_t write_mutex;
pthread_t write_thread;
os_sem_t *write_sem;
os_event_t *stop_event;
DARRAY(AVPacket) packets;
DARRAY(AVPacket) packets;
};
/* ------------------------------------------------------------------------- */
static void ffmpeg_output_set_last_error(struct ffmpeg_data *data,
const char *error)
const char *error)
{
if (data->last_error)
bfree(data->last_error);
@@ -126,7 +126,7 @@ static void ffmpeg_output_set_last_error(struct ffmpeg_data *data,
}
void ffmpeg_log_error(int log_level, struct ffmpeg_data *data,
const char *format, ...)
const char *format, ...)
{
va_list args;
char out[4096];
@@ -141,26 +141,27 @@ void ffmpeg_log_error(int log_level, struct ffmpeg_data *data,
}
static bool new_stream(struct ffmpeg_data *data, AVStream **stream,
AVCodec **codec, enum AVCodecID id, const char *name)
AVCodec **codec, enum AVCodecID id, const char *name)
{
*codec = (!!name && *name) ?
avcodec_find_encoder_by_name(name) :
avcodec_find_encoder(id);
*codec = (!!name && *name) ? avcodec_find_encoder_by_name(name)
: avcodec_find_encoder(id);
if (!*codec) {
ffmpeg_log_error(LOG_WARNING, data, "Couldn't find encoder '%s'",
avcodec_get_name(id));
ffmpeg_log_error(LOG_WARNING, data,
"Couldn't find encoder '%s'",
avcodec_get_name(id));
return false;
}
*stream = avformat_new_stream(data->output, *codec);
if (!*stream) {
ffmpeg_log_error(LOG_WARNING, data, "Couldn't create stream for encoder '%s'",
avcodec_get_name(id));
ffmpeg_log_error(LOG_WARNING, data,
"Couldn't create stream for encoder '%s'",
avcodec_get_name(id));
return false;
}
(*stream)->id = data->output->nb_streams-1;
(*stream)->id = data->output->nb_streams - 1;
return true;
}
@@ -180,10 +181,11 @@ static bool parse_params(AVCodecContext *context, char **opts)
char *value;
*assign = 0;
value = assign+1;
value = assign + 1;
if (av_opt_set(context->priv_data, name, value, 0)) {
blog(LOG_WARNING, "Failed to set %s=%s", name, value);
blog(LOG_WARNING, "Failed to set %s=%s", name,
value);
ret = false;
}
}
@@ -205,34 +207,39 @@ static bool open_video_codec(struct ffmpeg_data *data)
if (opts) {
// libav requires x264 parameters in a special format which may be non-obvious
if (!parse_params(context, opts) && strcmp(data->vcodec->name, "libx264") == 0)
blog(LOG_WARNING, "If you're trying to set x264 parameters, use x264-params=name=value:name=value");
if (!parse_params(context, opts) &&
strcmp(data->vcodec->name, "libx264") == 0)
blog(LOG_WARNING,
"If you're trying to set x264 parameters, use x264-params=name=value:name=value");
strlist_free(opts);
}
ret = avcodec_open2(context, data->vcodec, NULL);
if (ret < 0) {
ffmpeg_log_error(LOG_WARNING, data, "Failed to open video codec: %s",
av_err2str(ret));
ffmpeg_log_error(LOG_WARNING, data,
"Failed to open video codec: %s",
av_err2str(ret));
return false;
}
data->vframe = av_frame_alloc();
if (!data->vframe) {
ffmpeg_log_error(LOG_WARNING, data, "Failed to allocate video frame");
ffmpeg_log_error(LOG_WARNING, data,
"Failed to allocate video frame");
return false;
}
data->vframe->format = context->pix_fmt;
data->vframe->width = context->width;
data->vframe->width = context->width;
data->vframe->height = context->height;
data->vframe->colorspace = data->config.color_space;
data->vframe->color_range = data->config.color_range;
ret = av_frame_get_buffer(data->vframe, base_get_alignment());
if (ret < 0) {
ffmpeg_log_error(LOG_WARNING, data, "Failed to allocate vframe: %s",
av_err2str(ret));
ffmpeg_log_error(LOG_WARNING, data,
"Failed to allocate vframe: %s",
av_err2str(ret));
return false;
}
@@ -242,14 +249,13 @@ static bool open_video_codec(struct ffmpeg_data *data)
static bool init_swscale(struct ffmpeg_data *data, AVCodecContext *context)
{
data->swscale = sws_getContext(
data->config.width, data->config.height,
data->config.format,
data->config.scale_width, data->config.scale_height,
context->pix_fmt,
SWS_BICUBIC, NULL, NULL, NULL);
data->config.width, data->config.height, data->config.format,
data->config.scale_width, data->config.scale_height,
context->pix_fmt, SWS_BICUBIC, NULL, NULL, NULL);
if (!data->swscale) {
ffmpeg_log_error(LOG_WARNING, data, "Could not initialize swscale");
ffmpeg_log_error(LOG_WARNING, data,
"Could not initialize swscale");
return false;
}
@@ -268,23 +274,23 @@ static bool create_video_stream(struct ffmpeg_data *data)
}
if (!new_stream(data, &data->video, &data->vcodec,
data->output->oformat->video_codec,
data->config.video_encoder))
data->output->oformat->video_codec,
data->config.video_encoder))
return false;
closest_format = get_closest_format(data->config.format,
data->vcodec->pix_fmts);
closest_format =
get_closest_format(data->config.format, data->vcodec->pix_fmts);
context = data->video->codec;
context->bit_rate = data->config.video_bitrate * 1000;
context->width = data->config.scale_width;
context->height = data->config.scale_height;
context->time_base = (AVRational){ ovi.fps_den, ovi.fps_num };
context->gop_size = data->config.gop_size;
context->pix_fmt = closest_format;
context->colorspace = data->config.color_space;
context->color_range = data->config.color_range;
context->thread_count = 0;
context = data->video->codec;
context->bit_rate = data->config.video_bitrate * 1000;
context->width = data->config.scale_width;
context->height = data->config.scale_height;
context->time_base = (AVRational){ovi.fps_den, ovi.fps_num};
context->gop_size = data->config.gop_size;
context->pix_fmt = closest_format;
context->colorspace = data->config.color_space;
context->color_range = data->config.color_range;
context->thread_count = 0;
data->video->time_base = context->time_base;
@@ -294,8 +300,8 @@ static bool create_video_stream(struct ffmpeg_data *data)
if (!open_video_codec(data))
return false;
if (context->pix_fmt != data->config.format ||
data->config.width != data->config.scale_width ||
if (context->pix_fmt != data->config.format ||
data->config.width != data->config.scale_width ||
data->config.height != data->config.scale_height) {
if (!init_swscale(data, context))
@@ -318,7 +324,8 @@ static bool open_audio_codec(struct ffmpeg_data *data, int idx)
data->aframe[idx] = av_frame_alloc();
if (!data->aframe[idx]) {
ffmpeg_log_error(LOG_WARNING, data, "Failed to allocate audio frame");
ffmpeg_log_error(LOG_WARNING, data,
"Failed to allocate audio frame");
return false;
}
@@ -331,18 +338,20 @@ static bool open_audio_codec(struct ffmpeg_data *data, int idx)
ret = avcodec_open2(context, data->acodec, NULL);
if (ret < 0) {
ffmpeg_log_error(LOG_WARNING, data, "Failed to open audio codec: %s",
av_err2str(ret));
ffmpeg_log_error(LOG_WARNING, data,
"Failed to open audio codec: %s",
av_err2str(ret));
return false;
}
data->frame_size = context->frame_size ? context->frame_size : 1024;
ret = av_samples_alloc(data->samples[idx], NULL, context->channels,
data->frame_size, context->sample_fmt, 0);
data->frame_size, context->sample_fmt, 0);
if (ret < 0) {
ffmpeg_log_error(LOG_WARNING, data, "Failed to create audio buffer: %s",
av_err2str(ret));
ffmpeg_log_error(LOG_WARNING, data,
"Failed to create audio buffer: %s",
av_err2str(ret));
return false;
}
@@ -361,25 +370,26 @@ static bool create_audio_stream(struct ffmpeg_data *data, int idx)
}
if (!new_stream(data, &stream, &data->acodec,
data->output->oformat->audio_codec,
data->config.audio_encoder))
data->output->oformat->audio_codec,
data->config.audio_encoder))
return false;
data->audio_streams[idx] = stream;
context = data->audio_streams[idx]->codec;
context->bit_rate = data->config.audio_bitrate * 1000;
context->time_base = (AVRational){ 1, aoi.samples_per_sec };
context->channels = get_audio_channels(aoi.speakers);
context->sample_rate = aoi.samples_per_sec;
context->channel_layout =
av_get_default_channel_layout(context->channels);
context = data->audio_streams[idx]->codec;
context->bit_rate = data->config.audio_bitrate * 1000;
context->time_base = (AVRational){1, aoi.samples_per_sec};
context->channels = get_audio_channels(aoi.speakers);
context->sample_rate = aoi.samples_per_sec;
context->channel_layout =
av_get_default_channel_layout(context->channels);
//AVlib default channel layout for 5 channels is 5.0 ; fix for 4.1
if (aoi.speakers == SPEAKERS_4POINT1)
context->channel_layout = av_get_channel_layout("4.1");
context->sample_fmt = data->acodec->sample_fmts ?
data->acodec->sample_fmts[0] : AV_SAMPLE_FMT_FLTP;
context->sample_fmt = data->acodec->sample_fmts
? data->acodec->sample_fmts[0]
: AV_SAMPLE_FMT_FLTP;
data->audio_streams[idx]->time_base = context->time_base;
@@ -402,9 +412,10 @@ static inline bool init_streams(struct ffmpeg_data *data)
if (!create_video_stream(data))
return false;
if (format->audio_codec != AV_CODEC_ID_NONE && data->num_audio_streams) {
data->audio_streams = calloc(1,
data->num_audio_streams * sizeof(void*));
if (format->audio_codec != AV_CODEC_ID_NONE &&
data->num_audio_streams) {
data->audio_streams =
calloc(1, data->num_audio_streams * sizeof(void *));
for (int i = 0; i < data->num_audio_streams; i++) {
if (!create_audio_stream(data, i))
return false;
@@ -420,10 +431,11 @@ static inline bool open_output_file(struct ffmpeg_data *data)
int ret;
AVDictionary *dict = NULL;
if ((ret = av_dict_parse_string(&dict, data->config.muxer_settings,
"=", " ", 0))) {
ffmpeg_log_error(LOG_WARNING, data, "Failed to parse muxer settings: %s\n%s",
av_err2str(ret), data->config.muxer_settings);
if ((ret = av_dict_parse_string(&dict, data->config.muxer_settings, "=",
" ", 0))) {
ffmpeg_log_error(LOG_WARNING, data,
"Failed to parse muxer settings: %s\n%s",
av_err2str(ret), data->config.muxer_settings);
av_dict_free(&dict);
return false;
@@ -434,7 +446,7 @@ static inline bool open_output_file(struct ffmpeg_data *data)
AVDictionaryEntry *entry = NULL;
while ((entry = av_dict_get(dict, "", entry,
AV_DICT_IGNORE_SUFFIX)))
AV_DICT_IGNORE_SUFFIX)))
dstr_catf(&str, "\n\t%s=%s", entry->key, entry->value);
blog(LOG_INFO, "Using muxer settings: %s", str.array);
@@ -443,24 +455,24 @@ static inline bool open_output_file(struct ffmpeg_data *data)
if ((format->flags & AVFMT_NOFILE) == 0) {
ret = avio_open2(&data->output->pb, data->config.url,
AVIO_FLAG_WRITE, NULL, &dict);
AVIO_FLAG_WRITE, NULL, &dict);
if (ret < 0) {
ffmpeg_log_error(LOG_WARNING, data,
"Couldn't open '%s', %s", data->config.url,
av_err2str(ret));
"Couldn't open '%s', %s",
data->config.url, av_err2str(ret));
av_dict_free(&dict);
return false;
}
}
strncpy(data->output->filename, data->config.url,
sizeof(data->output->filename));
sizeof(data->output->filename));
data->output->filename[sizeof(data->output->filename) - 1] = 0;
ret = avformat_write_header(data->output, &dict);
if (ret < 0) {
ffmpeg_log_error(LOG_WARNING, data, "Error opening '%s': %s",
data->config.url, av_err2str(ret));
data->config.url, av_err2str(ret));
return false;
}
@@ -469,7 +481,7 @@ static inline bool open_output_file(struct ffmpeg_data *data)
AVDictionaryEntry *entry = NULL;
while ((entry = av_dict_get(dict, "", entry,
AV_DICT_IGNORE_SUFFIX)))
AV_DICT_IGNORE_SUFFIX)))
dstr_catf(&str, "\n\t%s=%s", entry->key, entry->value);
blog(LOG_INFO, "Invalid muxer settings: %s", str.array);
@@ -564,16 +576,14 @@ static enum AVCodecID get_codec_id(const char *name, int id)
static void set_encoder_ids(struct ffmpeg_data *data)
{
data->output->oformat->video_codec = get_codec_id(
data->config.video_encoder,
data->config.video_encoder_id);
data->config.video_encoder, data->config.video_encoder_id);
data->output->oformat->audio_codec = get_codec_id(
data->config.audio_encoder,
data->config.audio_encoder_id);
data->config.audio_encoder, data->config.audio_encoder_id);
}
static bool ffmpeg_data_init(struct ffmpeg_data *data,
struct ffmpeg_cfg *config)
struct ffmpeg_cfg *config)
{
bool is_rtmp = false;
@@ -590,29 +600,28 @@ static bool ffmpeg_data_init(struct ffmpeg_data *data,
is_rtmp = (astrcmpi_n(config->url, "rtmp://", 7) == 0);
AVOutputFormat *output_format = av_guess_format(
is_rtmp ? "flv" : data->config.format_name,
data->config.url,
is_rtmp ? NULL : data->config.format_mime_type);
is_rtmp ? "flv" : data->config.format_name, data->config.url,
is_rtmp ? NULL : data->config.format_mime_type);
if (output_format == NULL) {
ffmpeg_log_error(LOG_WARNING, data,
ffmpeg_log_error(
LOG_WARNING, data,
"Couldn't find matching output format with "
"parameters: name=%s, url=%s, mime=%s",
safe_str(is_rtmp ?
"flv" : data->config.format_name),
safe_str(is_rtmp ? "flv" : data->config.format_name),
safe_str(data->config.url),
safe_str(is_rtmp ?
NULL : data->config.format_mime_type));
safe_str(is_rtmp ? NULL
: data->config.format_mime_type));
goto fail;
}
avformat_alloc_output_context2(&data->output, output_format,
NULL, NULL);
avformat_alloc_output_context2(&data->output, output_format, NULL,
NULL);
if (!data->output) {
ffmpeg_log_error(LOG_WARNING, data,
"Couldn't create avformat context");
"Couldn't create avformat context");
goto fail;
}
@@ -653,7 +662,7 @@ static const char *ffmpeg_output_getname(void *unused)
}
static void ffmpeg_log_callback(void *param, int level, const char *format,
va_list args)
va_list args)
{
if (level <= AV_LOG_INFO)
blogva(LOG_DEBUG, format, args);
@@ -707,27 +716,27 @@ static void ffmpeg_output_destroy(void *data)
}
static inline void copy_data(AVFrame *pic, const struct video_data *frame,
int height, enum AVPixelFormat format)
int height, enum AVPixelFormat format)
{
int h_chroma_shift, v_chroma_shift;
av_pix_fmt_get_chroma_sub_sample(format, &h_chroma_shift, &v_chroma_shift);
av_pix_fmt_get_chroma_sub_sample(format, &h_chroma_shift,
&v_chroma_shift);
for (int plane = 0; plane < MAX_AV_PLANES; plane++) {
if (!frame->data[plane])
continue;
int frame_rowsize = (int)frame->linesize[plane];
int pic_rowsize = pic->linesize[plane];
int bytes = frame_rowsize < pic_rowsize ?
frame_rowsize : pic_rowsize;
int pic_rowsize = pic->linesize[plane];
int bytes = frame_rowsize < pic_rowsize ? frame_rowsize
: pic_rowsize;
int plane_height = height >> (plane ? v_chroma_shift : 0);
for (int y = 0; y < plane_height; y++) {
int pos_frame = y * frame_rowsize;
int pos_pic = y * pic_rowsize;
int pos_pic = y * pic_rowsize;
memcpy(pic->data[plane] + pos_pic,
frame->data[plane] + pos_frame,
bytes);
frame->data[plane] + pos_frame, bytes);
}
}
}
@@ -735,7 +744,7 @@ static inline void copy_data(AVFrame *pic, const struct video_data *frame,
static void receive_video(void *param, struct video_data *frame)
{
struct ffmpeg_output *output = param;
struct ffmpeg_data *data = &output->ff_data;
struct ffmpeg_data *data = &output->ff_data;
// codec doesn't support video or none configured
if (!data->video)
@@ -754,17 +763,17 @@ static void receive_video(void *param, struct video_data *frame)
if (!!data->swscale)
sws_scale(data->swscale, (const uint8_t *const *)frame->data,
(const int*)frame->linesize,
0, data->config.height, data->vframe->data,
data->vframe->linesize);
(const int *)frame->linesize, 0, data->config.height,
data->vframe->data, data->vframe->linesize);
else
copy_data(data->vframe, frame, context->height, context->pix_fmt);
copy_data(data->vframe, frame, context->height,
context->pix_fmt);
#if LIBAVFORMAT_VERSION_MAJOR < 58
if (data->output->flags & AVFMT_RAWPICTURE) {
packet.flags |= AV_PKT_FLAG_KEY;
packet.stream_index = data->video->index;
packet.data = data->vframe->data[0];
packet.size = sizeof(AVPicture);
packet.flags |= AV_PKT_FLAG_KEY;
packet.stream_index = data->video->index;
packet.data = data->vframe->data[0];
packet.size = sizeof(AVPicture);
pthread_mutex_lock(&output->write_mutex);
da_push_back(output->packets, &packet);
@@ -784,24 +793,26 @@ static void receive_video(void *param, struct video_data *frame)
if (ret == AVERROR_EOF || ret == AVERROR(EAGAIN))
ret = 0;
#else
ret = avcodec_encode_video2(context, &packet, data->vframe,
&got_packet);
ret = avcodec_encode_video2(context, &packet, data->vframe,
&got_packet);
#endif
if (ret < 0) {
blog(LOG_WARNING, "receive_video: Error encoding "
"video: %s", av_err2str(ret));
blog(LOG_WARNING,
"receive_video: Error encoding "
"video: %s",
av_err2str(ret));
//FIXME: stop the encode with an error
return;
}
if (!ret && got_packet && packet.size) {
packet.pts = rescale_ts(packet.pts, context,
data->video->time_base);
data->video->time_base);
packet.dts = rescale_ts(packet.dts, context,
data->video->time_base);
packet.duration = (int)av_rescale_q(packet.duration,
context->time_base,
data->video->time_base);
data->video->time_base);
packet.duration = (int)av_rescale_q(
packet.duration, context->time_base,
data->video->time_base);
pthread_mutex_lock(&output->write_mutex);
da_push_back(output->packets, &packet);
@@ -815,7 +826,7 @@ static void receive_video(void *param, struct video_data *frame)
#endif
if (ret != 0) {
blog(LOG_WARNING, "receive_video: Error writing video: %s",
av_err2str(ret));
av_err2str(ret));
//FIXME: stop the encode with an error
}
@@ -823,7 +834,7 @@ static void receive_video(void *param, struct video_data *frame)
}
static void encode_audio(struct ffmpeg_output *output, int idx,
struct AVCodecContext *context, size_t block_size)
struct AVCodecContext *context, size_t block_size)
{
struct ffmpeg_data *data = &output->ff_data;
@@ -832,16 +843,19 @@ static void encode_audio(struct ffmpeg_output *output, int idx,
size_t total_size = data->frame_size * block_size * context->channels;
data->aframe[idx]->nb_samples = data->frame_size;
data->aframe[idx]->pts = av_rescale_q(data->total_samples[idx],
(AVRational){1, context->sample_rate},
context->time_base);
data->aframe[idx]->pts = av_rescale_q(
data->total_samples[idx], (AVRational){1, context->sample_rate},
context->time_base);
ret = avcodec_fill_audio_frame(data->aframe[idx], context->channels,
context->sample_fmt, data->samples[idx][0],
(int)total_size, 1);
context->sample_fmt,
data->samples[idx][0], (int)total_size,
1);
if (ret < 0) {
blog(LOG_WARNING, "encode_audio: avcodec_fill_audio_frame "
"failed: %s", av_err2str(ret));
blog(LOG_WARNING,
"encode_audio: avcodec_fill_audio_frame "
"failed: %s",
av_err2str(ret));
//FIXME: stop the encode with an error
return;
}
@@ -859,11 +873,11 @@ static void encode_audio(struct ffmpeg_output *output, int idx,
ret = 0;
#else
ret = avcodec_encode_audio2(context, &packet, data->aframe[idx],
&got_packet);
&got_packet);
#endif
if (ret < 0) {
blog(LOG_WARNING, "encode_audio: Error encoding audio: %s",
av_err2str(ret));
av_err2str(ret));
//FIXME: stop the encode with an error
return;
}
@@ -872,11 +886,12 @@ static void encode_audio(struct ffmpeg_output *output, int idx,
return;
packet.pts = rescale_ts(packet.pts, context,
data->audio_streams[idx]->time_base);
data->audio_streams[idx]->time_base);
packet.dts = rescale_ts(packet.dts, context,
data->audio_streams[idx]->time_base);
packet.duration = (int)av_rescale_q(packet.duration, context->time_base,
data->audio_streams[idx]->time_base);
data->audio_streams[idx]->time_base);
packet.duration =
(int)av_rescale_q(packet.duration, context->time_base,
data->audio_streams[idx]->time_base);
packet.stream_index = data->audio_streams[idx]->index;
pthread_mutex_lock(&output->write_mutex);
@@ -886,13 +901,14 @@ static void encode_audio(struct ffmpeg_output *output, int idx,
}
static bool prepare_audio(struct ffmpeg_data *data,
const struct audio_data *frame, struct audio_data *output)
const struct audio_data *frame,
struct audio_data *output)
{
*output = *frame;
if (frame->timestamp < data->start_timestamp) {
uint64_t duration = (uint64_t)frame->frames * 1000000000 /
(uint64_t)data->audio_samplerate;
(uint64_t)data->audio_samplerate;
uint64_t end_ts = (frame->timestamp + duration);
uint64_t cutoff;
@@ -902,8 +918,7 @@ static bool prepare_audio(struct ffmpeg_data *data,
cutoff = data->start_timestamp - frame->timestamp;
output->timestamp += cutoff;
cutoff = cutoff * (uint64_t)data->audio_samplerate /
1000000000;
cutoff = cutoff * (uint64_t)data->audio_samplerate / 1000000000;
for (size_t i = 0; i < data->audio_planes; i++)
output->data[i] += data->audio_size * (uint32_t)cutoff;
@@ -928,7 +943,7 @@ static int get_track_order(int track_config, size_t mix_index)
static void receive_audio(void *param, size_t mix_idx, struct audio_data *frame)
{
struct ffmpeg_output *output = param;
struct ffmpeg_data *data = &output->ff_data;
struct ffmpeg_data *data = &output->ff_data;
size_t frame_size_bytes;
struct audio_data in;
int track_order;
@@ -958,20 +973,21 @@ static void receive_audio(void *param, size_t mix_idx, struct audio_data *frame)
for (size_t i = 0; i < data->audio_planes; i++)
circlebuf_push_back(&data->excess_frames[track_order][i],
in.data[i], in.frames * data->audio_size);
in.data[i], in.frames * data->audio_size);
while (data->excess_frames[track_order][0].size >= frame_size_bytes) {
for (size_t i = 0; i < data->audio_planes; i++)
circlebuf_pop_front(&data->excess_frames[track_order][i],
data->samples[track_order][i],
frame_size_bytes);
circlebuf_pop_front(
&data->excess_frames[track_order][i],
data->samples[track_order][i],
frame_size_bytes);
encode_audio(output, track_order, context, data->audio_size);
}
}
static uint64_t get_packet_sys_dts(struct ffmpeg_output *output,
AVPacket *packet)
AVPacket *packet)
{
struct ffmpeg_data *data = &output->ff_data;
uint64_t start_ts;
@@ -986,8 +1002,8 @@ static uint64_t get_packet_sys_dts(struct ffmpeg_output *output,
start_ts = output->audio_start_ts;
}
return start_ts + (uint64_t)av_rescale_q(packet->dts,
time_base, (AVRational){1, 1000000000});
return start_ts + (uint64_t)av_rescale_q(packet->dts, time_base,
(AVRational){1, 1000000000});
}
static int process_packet(struct ffmpeg_output *output)
@@ -1026,8 +1042,8 @@ static int process_packet(struct ffmpeg_output *output)
if (ret < 0) {
av_free_packet(&packet);
ffmpeg_log_error(LOG_WARNING, &output->ff_data,
"receive_audio: Error writing packet: %s",
av_err2str(ret));
"receive_audio: Error writing packet: %s",
av_err2str(ret));
return ret;
}
@@ -1064,7 +1080,7 @@ static void *write_thread(void *data)
}
static inline const char *get_string_or_null(obs_data_t *settings,
const char *name)
const char *name)
{
const char *value = obs_data_get_string(settings, name);
if (!value || !strlen(value))
@@ -1099,34 +1115,36 @@ static bool try_connect(struct ffmpeg_output *output)
config.url = obs_data_get_string(settings, "url");
config.format_name = get_string_or_null(settings, "format_name");
config.format_mime_type = get_string_or_null(settings,
"format_mime_type");
config.format_mime_type =
get_string_or_null(settings, "format_mime_type");
config.muxer_settings = obs_data_get_string(settings, "muxer_settings");
config.video_bitrate = (int)obs_data_get_int(settings, "video_bitrate");
config.audio_bitrate = (int)obs_data_get_int(settings, "audio_bitrate");
config.gop_size = (int)obs_data_get_int(settings, "gop_size");
config.video_encoder = get_string_or_null(settings, "video_encoder");
config.video_encoder_id = (int)obs_data_get_int(settings,
"video_encoder_id");
config.video_encoder_id =
(int)obs_data_get_int(settings, "video_encoder_id");
config.audio_encoder = get_string_or_null(settings, "audio_encoder");
config.audio_encoder_id = (int)obs_data_get_int(settings,
"audio_encoder_id");
config.audio_encoder_id =
(int)obs_data_get_int(settings, "audio_encoder_id");
config.video_settings = obs_data_get_string(settings, "video_settings");
config.audio_settings = obs_data_get_string(settings, "audio_settings");
config.scale_width = (int)obs_data_get_int(settings, "scale_width");
config.scale_height = (int)obs_data_get_int(settings, "scale_height");
config.width = (int)obs_output_get_width(output->output);
config.width = (int)obs_output_get_width(output->output);
config.height = (int)obs_output_get_height(output->output);
config.format = obs_to_ffmpeg_video_format(
video_output_get_format(video));
config.format =
obs_to_ffmpeg_video_format(video_output_get_format(video));
config.audio_tracks = (int)obs_output_get_mixers(output->output);
config.audio_mix_count = get_audio_mix_count(config.audio_tracks);
if (format_is_yuv(voi->format)) {
config.color_range = voi->range == VIDEO_RANGE_FULL ?
AVCOL_RANGE_JPEG : AVCOL_RANGE_MPEG;
config.color_space = voi->colorspace == VIDEO_CS_709 ?
AVCOL_SPC_BT709 : AVCOL_SPC_BT470BG;
config.color_range = voi->range == VIDEO_RANGE_FULL
? AVCOL_RANGE_JPEG
: AVCOL_RANGE_MPEG;
config.color_space = voi->colorspace == VIDEO_CS_709
? AVCOL_SPC_BT709
: AVCOL_SPC_BT470BG;
} else {
config.color_range = AVCOL_RANGE_UNSPECIFIED;
config.color_space = AVCOL_SPC_RGB;
@@ -1148,15 +1166,14 @@ static bool try_connect(struct ffmpeg_output *output)
if (!success) {
if (output->ff_data.last_error) {
obs_output_set_last_error(output->output,
output->ff_data.last_error);
output->ff_data.last_error);
}
ffmpeg_data_free(&output->ff_data);
return false;
}
struct audio_convert_info aci = {
.format = output->ff_data.audio_format
};
struct audio_convert_info aci = {.format =
output->ff_data.audio_format};
output->active = true;
@@ -1166,8 +1183,8 @@ static bool try_connect(struct ffmpeg_output *output)
ret = pthread_create(&output->write_thread, NULL, write_thread, output);
if (ret != 0) {
ffmpeg_log_error(LOG_WARNING, &output->ff_data,
"ffmpeg_output_start: failed to create write "
"thread.");
"ffmpeg_output_start: failed to create write "
"thread.");
ffmpeg_output_full_stop(output);
return false;
}
@@ -1185,7 +1202,7 @@ static void *start_thread(void *data)
if (!try_connect(output))
obs_output_signal_stop(output->output,
OBS_OUTPUT_CONNECT_FAILED);
OBS_OUTPUT_CONNECT_FAILED);
output->connecting = false;
return NULL;
@@ -1244,7 +1261,7 @@ static void ffmpeg_deactivate(struct ffmpeg_output *output)
pthread_mutex_lock(&output->write_mutex);
for (size_t i = 0; i < output->packets.num; i++)
av_free_packet(output->packets.array+i);
av_free_packet(output->packets.array + i);
da_free(output->packets);
pthread_mutex_unlock(&output->write_mutex);
@@ -1259,15 +1276,13 @@ static uint64_t ffmpeg_output_total_bytes(void *data)
}
struct obs_output_info ffmpeg_output = {
.id = "ffmpeg_output",
.flags = OBS_OUTPUT_AUDIO |
OBS_OUTPUT_VIDEO |
OBS_OUTPUT_MULTI_TRACK,
.get_name = ffmpeg_output_getname,
.create = ffmpeg_output_create,
.destroy = ffmpeg_output_destroy,
.start = ffmpeg_output_start,
.stop = ffmpeg_output_stop,
.id = "ffmpeg_output",
.flags = OBS_OUTPUT_AUDIO | OBS_OUTPUT_VIDEO | OBS_OUTPUT_MULTI_TRACK,
.get_name = ffmpeg_output_getname,
.create = ffmpeg_output_create,
.destroy = ffmpeg_output_destroy,
.start = ffmpeg_output_start,
.stop = ffmpeg_output_stop,
.raw_video = receive_video,
.raw_audio2 = receive_audio,
.get_total_bytes = ffmpeg_output_total_bytes,

@@ -25,9 +25,9 @@
#define FF_LOG(level, format, ...) \
blog(level, "[Media Source]: " format, ##__VA_ARGS__)
#define FF_LOG_S(source, level, format, ...) \
#define FF_LOG_S(source, level, format, ...) \
blog(level, "[Media Source '%s']: " format, \
obs_source_get_name(source), ##__VA_ARGS__)
obs_source_get_name(source), ##__VA_ARGS__)
#define FF_BLOG(level, format, ...) \
FF_LOG_S(s->source, level, format, ##__VA_ARGS__)
@@ -60,18 +60,19 @@ struct ffmpeg_source {
};
static bool is_local_file_modified(obs_properties_t *props,
obs_property_t *prop, obs_data_t *settings)
obs_property_t *prop, obs_data_t *settings)
{
UNUSED_PARAMETER(prop);
bool enabled = obs_data_get_bool(settings, "is_local_file");
obs_property_t *input = obs_properties_get(props, "input");
obs_property_t *input_format =obs_properties_get(props,
"input_format");
obs_property_t *input_format =
obs_properties_get(props, "input_format");
obs_property_t *local_file = obs_properties_get(props, "local_file");
obs_property_t *looping = obs_properties_get(props, "looping");
obs_property_t *buffering = obs_properties_get(props, "buffering_mb");
obs_property_t *close = obs_properties_get(props, "close_when_inactive");
obs_property_t *close =
obs_properties_get(props, "close_when_inactive");
obs_property_t *seekable = obs_properties_get(props, "seekable");
obs_property_t *speed = obs_properties_get(props, "speed_percent");
obs_property_set_visible(input, !enabled);
@@ -103,8 +104,7 @@ static const char *media_filter =
" (*.mp4 *.ts *.mov *.flv *.mkv *.avi *.mp3 *.ogg *.aac *.wav *.gif *.webm);;";
static const char *video_filter =
" (*.mp4 *.ts *.mov *.flv *.mkv *.avi *.gif *.webm);;";
static const char *audio_filter =
" (*.mp3 *.aac *.ogg *.wav);;";
static const char *audio_filter = " (*.mp3 *.aac *.ogg *.wav);;";
static obs_properties_t *ffmpeg_source_getproperties(void *data)
{
@@ -120,7 +120,7 @@ static obs_properties_t *ffmpeg_source_getproperties(void *data)
obs_property_t *prop;
// use this when obs allows non-readonly paths
prop = obs_properties_add_bool(props, "is_local_file",
obs_module_text("LocalFile"));
obs_module_text("LocalFile"));
obs_property_set_modified_callback(prop, is_local_file_modified);
@@ -144,53 +144,56 @@ static obs_properties_t *ffmpeg_source_getproperties(void *data)
}
obs_properties_add_path(props, "local_file",
obs_module_text("LocalFile"), OBS_PATH_FILE,
filter.array, path.array);
obs_module_text("LocalFile"), OBS_PATH_FILE,
filter.array, path.array);
dstr_free(&filter);
dstr_free(&path);
prop = obs_properties_add_bool(props, "looping",
obs_module_text("Looping"));
obs_module_text("Looping"));
obs_properties_add_bool(props, "restart_on_activate",
obs_module_text("RestartWhenActivated"));
obs_module_text("RestartWhenActivated"));
obs_properties_add_int_slider(props, "buffering_mb",
obs_module_text("BufferingMB"),
1, 16, 1);
obs_module_text("BufferingMB"), 1, 16, 1);
obs_properties_add_text(props, "input",
obs_module_text("Input"), OBS_TEXT_DEFAULT);
obs_properties_add_text(props, "input", obs_module_text("Input"),
OBS_TEXT_DEFAULT);
obs_properties_add_text(props, "input_format",
obs_module_text("InputFormat"), OBS_TEXT_DEFAULT);
obs_module_text("InputFormat"),
OBS_TEXT_DEFAULT);
#ifndef __APPLE__
obs_properties_add_bool(props, "hw_decode",
obs_module_text("HardwareDecode"));
obs_module_text("HardwareDecode"));
#endif
obs_properties_add_bool(props, "clear_on_media_end",
obs_module_text("ClearOnMediaEnd"));
obs_module_text("ClearOnMediaEnd"));
prop = obs_properties_add_bool(props, "close_when_inactive",
obs_module_text("CloseFileWhenInactive"));
prop = obs_properties_add_bool(
props, "close_when_inactive",
obs_module_text("CloseFileWhenInactive"));
obs_property_set_long_description(prop,
obs_module_text("CloseFileWhenInactive.ToolTip"));
obs_property_set_long_description(
prop, obs_module_text("CloseFileWhenInactive.ToolTip"));
obs_properties_add_int_slider(props, "speed_percent",
obs_module_text("SpeedPercentage"), 1, 200, 1);
obs_module_text("SpeedPercentage"), 1,
200, 1);
prop = obs_properties_add_list(props, "color_range",
obs_module_text("ColorRange"), OBS_COMBO_TYPE_LIST,
OBS_COMBO_FORMAT_INT);
obs_module_text("ColorRange"),
OBS_COMBO_TYPE_LIST,
OBS_COMBO_FORMAT_INT);
obs_property_list_add_int(prop, obs_module_text("ColorRange.Auto"),
VIDEO_RANGE_DEFAULT);
VIDEO_RANGE_DEFAULT);
obs_property_list_add_int(prop, obs_module_text("ColorRange.Partial"),
VIDEO_RANGE_PARTIAL);
VIDEO_RANGE_PARTIAL);
obs_property_list_add_int(prop, obs_module_text("ColorRange.Full"),
VIDEO_RANGE_FULL);
VIDEO_RANGE_FULL);
obs_properties_add_bool(props, "seekable", obs_module_text("Seekable"));
@@ -198,26 +201,24 @@ static obs_properties_t *ffmpeg_source_getproperties(void *data)
}
static void dump_source_info(struct ffmpeg_source *s, const char *input,
const char *input_format)
const char *input_format)
{
FF_BLOG(LOG_INFO,
"settings:\n"
"\tinput: %s\n"
"\tinput_format: %s\n"
"\tspeed: %d\n"
"\tis_looping: %s\n"
"\tis_hw_decoding: %s\n"
"\tis_clear_on_media_end: %s\n"
"\trestart_on_activate: %s\n"
"\tclose_when_inactive: %s",
input ? input : "(null)",
input_format ? input_format : "(null)",
s->speed_percent,
s->is_looping ? "yes" : "no",
s->is_hw_decoding ? "yes" : "no",
s->is_clear_on_media_end ? "yes" : "no",
s->restart_on_activate ? "yes" : "no",
s->close_when_inactive ? "yes" : "no");
"settings:\n"
"\tinput: %s\n"
"\tinput_format: %s\n"
"\tspeed: %d\n"
"\tis_looping: %s\n"
"\tis_hw_decoding: %s\n"
"\tis_clear_on_media_end: %s\n"
"\trestart_on_activate: %s\n"
"\tclose_when_inactive: %s",
input ? input : "(null)",
input_format ? input_format : "(null)", s->speed_percent,
s->is_looping ? "yes" : "no", s->is_hw_decoding ? "yes" : "no",
s->is_clear_on_media_end ? "yes" : "no",
s->restart_on_activate ? "yes" : "no",
s->close_when_inactive ? "yes" : "no");
}
static void get_frame(void *opaque, struct obs_source_frame *f)
@@ -267,8 +268,7 @@ static void ffmpeg_source_open(struct ffmpeg_source *s)
.speed = s->speed_percent,
.force_range = s->range,
.hardware_decoding = s->is_hw_decoding,
.is_local_file = s->is_local_file || s->seekable
};
.is_local_file = s->is_local_file || s->seekable};
s->media_valid = mp_media_init(&s->media, &info);
}
@@ -316,14 +316,14 @@ static void ffmpeg_source_update(void *data, obs_data_t *settings)
input = (char *)obs_data_get_string(settings, "local_file");
input_format = NULL;
s->is_looping = obs_data_get_bool(settings, "looping");
s->close_when_inactive = obs_data_get_bool(settings,
"close_when_inactive");
s->close_when_inactive =
obs_data_get_bool(settings, "close_when_inactive");
obs_source_set_async_unbuffered(s->source, true);
} else {
input = (char *)obs_data_get_string(settings, "input");
input_format = (char *)obs_data_get_string(settings,
"input_format");
input_format =
(char *)obs_data_get_string(settings, "input_format");
s->is_looping = false;
s->close_when_inactive = true;
@@ -335,12 +335,12 @@ static void ffmpeg_source_update(void *data, obs_data_t *settings)
#ifndef __APPLE__
s->is_hw_decoding = obs_data_get_bool(settings, "hw_decode");
#endif
s->is_clear_on_media_end = obs_data_get_bool(settings,
"clear_on_media_end");
s->restart_on_activate = obs_data_get_bool(settings,
"restart_on_activate");
s->is_clear_on_media_end =
obs_data_get_bool(settings, "clear_on_media_end");
s->restart_on_activate =
obs_data_get_bool(settings, "restart_on_activate");
s->range = (enum video_range_type)obs_data_get_int(settings,
"color_range");
"color_range");
s->buffering_mb = (int)obs_data_get_int(settings, "buffering_mb");
s->speed_percent = (int)obs_data_get_int(settings, "speed_percent");
s->is_local_file = is_local_file;
@@ -369,8 +369,8 @@ static const char *ffmpeg_source_getname(void *unused)
return obs_module_text("FFMpegSource");
}
static void restart_hotkey(void *data, obs_hotkey_id id,
obs_hotkey_t *hotkey, bool pressed)
static void restart_hotkey(void *data, obs_hotkey_id id, obs_hotkey_t *hotkey,
bool pressed)
{
UNUSED_PARAMETER(id);
UNUSED_PARAMETER(hotkey);
@@ -407,12 +407,12 @@ static void get_nb_frames(void *data, calldata_t *cd)
return;
}
int video_stream_index = av_find_best_stream(s->media.fmt,
AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);
int video_stream_index = av_find_best_stream(
s->media.fmt, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);
if (video_stream_index < 0) {
FF_BLOG(LOG_WARNING, "Getting number of frames failed: No "
"video stream in media file!");
"video stream in media file!");
calldata_set_int(cd, "num_frames", frames);
return;
}
@@ -423,12 +423,12 @@ static void get_nb_frames(void *data, calldata_t *cd)
frames = stream->nb_frames;
} else {
FF_BLOG(LOG_DEBUG, "nb_frames not set, estimating using frame "
"rate and duration");
"rate and duration");
AVRational avg_frame_rate = stream->avg_frame_rate;
frames = (int64_t)ceil((double)s->media.fmt->duration /
(double)AV_TIME_BASE *
(double)avg_frame_rate.num /
(double)avg_frame_rate.den);
(double)AV_TIME_BASE *
(double)avg_frame_rate.num /
(double)avg_frame_rate.den);
}
calldata_set_int(cd, "num_frames", frames);
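
A note on the estimate in the hunk above: when nb_frames is not set, the frame count is derived from the container duration and the stream's average frame rate. A minimal, standalone sketch of that arithmetic follows; the helper name is illustrative and not part of this file.

/* Illustrative only: mirrors the duration-based estimate used when
 * nb_frames is unset. A 10-second file at 30000/1001 fps gives
 * ceil(10.0 * 30000 / 1001) = 300 frames. */
#include <libavutil/avutil.h> /* AV_TIME_BASE, AVRational */
#include <math.h>
#include <stdint.h>

static int64_t estimate_frames(int64_t duration, AVRational avg_frame_rate)
{
	return (int64_t)ceil((double)duration / (double)AV_TIME_BASE *
			     (double)avg_frame_rate.num /
			     (double)avg_frame_rate.den);
}
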
@@ -441,17 +441,16 @@ static void *ffmpeg_source_create(obs_data_t *settings, obs_source_t *source)
struct ffmpeg_source *s = bzalloc(sizeof(struct ffmpeg_source));
s->source = source;
s->hotkey = obs_hotkey_register_source(source,
"MediaSource.Restart",
obs_module_text("RestartMedia"),
restart_hotkey, s);
s->hotkey = obs_hotkey_register_source(source, "MediaSource.Restart",
obs_module_text("RestartMedia"),
restart_hotkey, s);
proc_handler_t *ph = obs_source_get_proc_handler(source);
proc_handler_add(ph, "void restart()", restart_proc, s);
proc_handler_add(ph, "void get_duration(out int duration)",
get_duration, s);
get_duration, s);
proc_handler_add(ph, "void get_nb_frames(out int num_frames)",
get_nb_frames, s);
get_nb_frames, s);
ffmpeg_source_update(s, settings);
return s;
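
For context, the procedures registered in the hunk above ("restart", "get_duration", "get_nb_frames") are meant to be called through the source's proc handler. A hedged caller-side sketch, assuming only that "source" is a valid obs_source_t pointer; the libobs calls shown are existing API, the wrapper function is hypothetical.

/* Hypothetical caller-side usage of the "get_nb_frames" procedure
 * registered above. */
#include <obs.h>

static void log_media_frames(obs_source_t *source)
{
	proc_handler_t *ph = obs_source_get_proc_handler(source);
	calldata_t cd;

	calldata_init(&cd);
	if (proc_handler_call(ph, "get_nb_frames", &cd)) {
		long long frames = calldata_int(&cd, "num_frames");
		blog(LOG_INFO, "media source reports %lld frames", frames);
	}
	calldata_free(&cd);
}
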
@@ -497,17 +496,17 @@ static void ffmpeg_source_deactivate(void *data)
}
struct obs_source_info ffmpeg_source = {
.id = "ffmpeg_source",
.type = OBS_SOURCE_TYPE_INPUT,
.output_flags = OBS_SOURCE_ASYNC_VIDEO | OBS_SOURCE_AUDIO |
OBS_SOURCE_DO_NOT_DUPLICATE,
.get_name = ffmpeg_source_getname,
.create = ffmpeg_source_create,
.destroy = ffmpeg_source_destroy,
.get_defaults = ffmpeg_source_defaults,
.id = "ffmpeg_source",
.type = OBS_SOURCE_TYPE_INPUT,
.output_flags = OBS_SOURCE_ASYNC_VIDEO | OBS_SOURCE_AUDIO |
OBS_SOURCE_DO_NOT_DUPLICATE,
.get_name = ffmpeg_source_getname,
.create = ffmpeg_source_create,
.destroy = ffmpeg_source_destroy,
.get_defaults = ffmpeg_source_defaults,
.get_properties = ffmpeg_source_getproperties,
.activate = ffmpeg_source_activate,
.deactivate = ffmpeg_source_deactivate,
.video_tick = ffmpeg_source_tick,
.update = ffmpeg_source_update
.activate = ffmpeg_source_activate,
.deactivate = ffmpeg_source_deactivate,
.video_tick = ffmpeg_source_tick,
.update = ffmpeg_source_update,
};
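
The obs_source_info above is what the plugin hands to libobs at load time. A trimmed, hypothetical sketch of that registration follows; the real obs_module_load() in obs-ffmpeg.c (further below) also registers outputs and encoders.

/* Hypothetical, trimmed-down module entry point; the actual
 * obs_module_load() in obs-ffmpeg.c registers several more outputs and
 * encoders. */
#include <obs-module.h>

OBS_DECLARE_MODULE()

extern struct obs_source_info ffmpeg_source;

bool obs_module_load(void)
{
	obs_register_source(&ffmpeg_source);
	return true;
}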


@@ -37,9 +37,9 @@
#include "obs-ffmpeg-formats.h"
#define do_log(level, format, ...) \
#define do_log(level, format, ...) \
blog(level, "[FFMPEG VAAPI encoder: '%s'] " format, \
obs_encoder_get_name(enc->encoder), ##__VA_ARGS__)
obs_encoder_get_name(enc->encoder), ##__VA_ARGS__)
#define warn(format, ...) do_log(LOG_WARNING, format, ##__VA_ARGS__)
#define info(format, ...) do_log(LOG_INFO, format, ##__VA_ARGS__)
@@ -51,7 +51,7 @@ struct vaapi_encoder {
AVBufferRef *vadevice_ref;
AVBufferRef *vaframes_ref;
AVCodec * vaapi;
AVCodec *vaapi;
AVCodecContext *context;
AVFrame *vframe;
@@ -59,12 +59,12 @@ struct vaapi_encoder {
DARRAY(uint8_t) buffer;
uint8_t *header;
size_t header_size;
size_t header_size;
uint8_t *sei;
size_t sei_size;
size_t sei_size;
int height;
int height;
bool first_packet;
bool initialized;
};
@@ -83,7 +83,7 @@ static inline bool valid_format(enum video_format format)
static void vaapi_video_info(void *data, struct video_scale_info *info)
{
struct vaapi_encoder *enc = data;
enum video_format pref_format;
enum video_format pref_format;
pref_format = obs_encoder_get_preferred_video_format(enc->encoder);
@@ -100,10 +100,10 @@ static bool vaapi_init_codec(struct vaapi_encoder *enc, const char *path)
int ret;
ret = av_hwdevice_ctx_create(&enc->vadevice_ref, AV_HWDEVICE_TYPE_VAAPI,
path, NULL, 0);
path, NULL, 0);
if (ret < 0) {
warn("Failed to create VAAPI device context: %s",
av_err2str(ret));
av_err2str(ret));
return false;
}
@@ -114,11 +114,11 @@ static bool vaapi_init_codec(struct vaapi_encoder *enc, const char *path)
}
AVHWFramesContext *frames_ctx =
(AVHWFramesContext *)enc->vaframes_ref->data;
frames_ctx->format = AV_PIX_FMT_VAAPI;
frames_ctx->sw_format = AV_PIX_FMT_NV12;
frames_ctx->width = enc->context->width;
frames_ctx->height = enc->context->height;
(AVHWFramesContext *)enc->vaframes_ref->data;
frames_ctx->format = AV_PIX_FMT_VAAPI;
frames_ctx->sw_format = AV_PIX_FMT_NV12;
frames_ctx->width = enc->context->width;
frames_ctx->height = enc->context->height;
frames_ctx->initial_pool_size = 20;
ret = av_hwframe_ctx_init(enc->vaframes_ref);
@@ -135,9 +135,9 @@ static bool vaapi_init_codec(struct vaapi_encoder *enc, const char *path)
}
enc->vframe->format = enc->context->pix_fmt;
enc->vframe->width = enc->context->width;
enc->vframe->width = enc->context->width;
enc->vframe->height = enc->context->height;
enc->vframe->colorspace = enc->context->colorspace;
enc->vframe->colorspace = enc->context->colorspace;
enc->vframe->color_range = enc->context->color_range;
ret = av_frame_get_buffer(enc->vframe, base_get_alignment());
@@ -147,7 +147,7 @@ static bool vaapi_init_codec(struct vaapi_encoder *enc, const char *path)
}
/* 3. set up codec */
enc->context->pix_fmt = AV_PIX_FMT_VAAPI;
enc->context->pix_fmt = AV_PIX_FMT_VAAPI;
enc->context->hw_frames_ctx = av_buffer_ref(enc->vaframes_ref);
ret = avcodec_open2(enc->context, enc->vaapi, NULL);
@@ -167,49 +167,49 @@ static bool vaapi_update(void *data, obs_data_t *settings)
const char *device = obs_data_get_string(settings, "vaapi_device");
int profile = (int)obs_data_get_int(settings, "profile");
int bf = (int)obs_data_get_int(settings, "bf");
int bf = (int)obs_data_get_int(settings, "bf");
int level = (int)obs_data_get_int(settings, "level");
int bitrate = (int)obs_data_get_int(settings, "bitrate");
int level = (int)obs_data_get_int(settings, "level");
int bitrate = (int)obs_data_get_int(settings, "bitrate");
int keyint_sec = (int)obs_data_get_int(settings, "keyint_sec");
int qp = (int)obs_data_get_int(settings, "qp");
int qp = (int)obs_data_get_int(settings, "qp");
int quality = (int)obs_data_get_int(settings, "quality");
av_opt_set_int(enc->context->priv_data, "qp", qp, 0);
av_opt_set_int(enc->context->priv_data, "quality", quality, 0);
video_t * video = obs_encoder_video(enc->encoder);
const struct video_output_info *voi = video_output_get_info(video);
struct video_scale_info info;
video_t *video = obs_encoder_video(enc->encoder);
const struct video_output_info *voi = video_output_get_info(video);
struct video_scale_info info;
info.format = voi->format;
info.format = voi->format;
info.colorspace = voi->colorspace;
info.range = voi->range;
info.range = voi->range;
vaapi_video_info(enc, &info);
enc->context->profile = profile;
enc->context->profile = profile;
enc->context->max_b_frames = bf;
enc->context->level = level;
enc->context->bit_rate = bitrate * 1000;
enc->context->rc_max_rate = bitrate * 1000;
enc->context->level = level;
enc->context->bit_rate = bitrate * 1000;
enc->context->rc_max_rate = bitrate * 1000;
enc->context->width = obs_encoder_get_width(enc->encoder);
enc->context->width = obs_encoder_get_width(enc->encoder);
enc->context->height = obs_encoder_get_height(enc->encoder);
enc->context->time_base = (AVRational){voi->fps_den, voi->fps_num};
enc->context->pix_fmt = obs_to_ffmpeg_video_format(info.format);
enc->context->time_base = (AVRational){voi->fps_den, voi->fps_num};
enc->context->pix_fmt = obs_to_ffmpeg_video_format(info.format);
enc->context->colorspace = info.colorspace == VIDEO_CS_709
? AVCOL_SPC_BT709
: AVCOL_SPC_BT470BG;
? AVCOL_SPC_BT709
: AVCOL_SPC_BT470BG;
enc->context->color_range = info.range == VIDEO_RANGE_FULL
? AVCOL_RANGE_JPEG
: AVCOL_RANGE_MPEG;
? AVCOL_RANGE_JPEG
: AVCOL_RANGE_MPEG;
if (keyint_sec > 0) {
enc->context->gop_size =
keyint_sec * voi->fps_num / voi->fps_den;
keyint_sec * voi->fps_num / voi->fps_den;
} else {
enc->context->gop_size = 120;
}
@@ -227,9 +227,9 @@ static bool vaapi_update(void *data, obs_data_t *settings)
"\twidth: %d\n"
"\theight: %d\n"
"\tb-frames: %d\n",
device, qp, quality, profile, level, bitrate,
enc->context->gop_size, enc->context->width,
enc->context->height, enc->context->max_b_frames);
device, qp, quality, profile, level, bitrate,
enc->context->gop_size, enc->context->width, enc->context->height,
enc->context->max_b_frames);
return vaapi_init_codec(enc, device);
}
@@ -239,8 +239,8 @@ static void vaapi_destroy(void *data)
struct vaapi_encoder *enc = data;
if (enc->initialized) {
AVPacket pkt = {0};
int r_pkt = 1;
AVPacket pkt = {0};
int r_pkt = 1;
while (r_pkt) {
#if LIBAVFORMAT_VERSION_INT >= AV_VERSION_INT(57, 40, 101)
@@ -248,7 +248,7 @@ static void vaapi_destroy(void *data)
break;
#else
if (avcodec_encode_video2(enc->context, &pkt, NULL,
&r_pkt) < 0)
&r_pkt) < 0)
break;
#endif
@@ -274,7 +274,7 @@ static void *vaapi_create(obs_data_t *settings, obs_encoder_t *encoder)
struct vaapi_encoder *enc;
avcodec_register_all();
enc = bzalloc(sizeof(*enc));
enc = bzalloc(sizeof(*enc));
enc->encoder = encoder;
int vaapi_codec = (int)obs_data_get_int(settings, "vaapi_codec");
@@ -309,39 +309,39 @@ fail:
}
static inline void copy_data(AVFrame *pic, const struct encoder_frame *frame,
int height, enum AVPixelFormat format)
int height, enum AVPixelFormat format)
{
int h_chroma_shift, v_chroma_shift;
av_pix_fmt_get_chroma_sub_sample(
format, &h_chroma_shift, &v_chroma_shift);
av_pix_fmt_get_chroma_sub_sample(format, &h_chroma_shift,
&v_chroma_shift);
for (int plane = 0; plane < MAX_AV_PLANES; plane++) {
if (!frame->data[plane])
continue;
int frame_rowsize = (int)frame->linesize[plane];
int pic_rowsize = pic->linesize[plane];
int bytes = frame_rowsize < pic_rowsize ? frame_rowsize
int pic_rowsize = pic->linesize[plane];
int bytes = frame_rowsize < pic_rowsize ? frame_rowsize
: pic_rowsize;
int plane_height = height >> (plane ? v_chroma_shift : 0);
for (int y = 0; y < plane_height; y++) {
int pos_frame = y * frame_rowsize;
int pos_pic = y * pic_rowsize;
int pos_pic = y * pic_rowsize;
memcpy(pic->data[plane] + pos_pic,
frame->data[plane] + pos_frame, bytes);
frame->data[plane] + pos_frame, bytes);
}
}
}
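
On the chroma shift used by copy_data() above: av_pix_fmt_get_chroma_sub_sample() reports how many times each chroma plane is halved per axis, which is why every plane other than plane 0 copies height >> v_chroma_shift rows. A small standalone check, illustrative only and not part of the encoder:

/* Illustrative only: for a 4:2:0 layout such as NV12 the chroma plane is
 * half width and half height, so both shifts come back as 1. */
#include <libavutil/pixdesc.h>
#include <stdio.h>

int main(void)
{
	int h_shift, v_shift;

	av_pix_fmt_get_chroma_sub_sample(AV_PIX_FMT_NV12, &h_shift, &v_shift);
	printf("NV12 chroma shift: h=%d, v=%d\n", h_shift, v_shift); /* 1, 1 */
	return 0;
}
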
static bool vaapi_encode(void *data, struct encoder_frame *frame,
struct encoder_packet *packet, bool *received_packet)
struct encoder_packet *packet, bool *received_packet)
{
struct vaapi_encoder *enc = data;
AVFrame * hwframe = NULL;
AVPacket av_pkt;
int got_packet;
int ret;
struct vaapi_encoder *enc = data;
AVFrame *hwframe = NULL;
AVPacket av_pkt;
int got_packet;
int ret;
hwframe = av_frame_alloc();
if (!hwframe) {
@@ -352,28 +352,28 @@ static bool vaapi_encode(void *data, struct encoder_frame *frame,
ret = av_hwframe_get_buffer(enc->vaframes_ref, hwframe, 0);
if (ret < 0) {
warn("vaapi_encode: failed to get buffer for hw frame: %s",
av_err2str(ret));
av_err2str(ret));
goto fail;
}
copy_data(enc->vframe, frame, enc->height, enc->context->pix_fmt);
enc->vframe->pts = frame->pts;
hwframe->pts = frame->pts;
hwframe->width = enc->vframe->width;
hwframe->height = enc->vframe->height;
hwframe->pts = frame->pts;
hwframe->width = enc->vframe->width;
hwframe->height = enc->vframe->height;
ret = av_hwframe_transfer_data(hwframe, enc->vframe, 0);
if (ret < 0) {
warn("vaapi_encode: failed to upload hw frame: %s",
av_err2str(ret));
av_err2str(ret));
goto fail;
}
ret = av_frame_copy_props(hwframe, enc->vframe);
if (ret < 0) {
warn("vaapi_encode: failed to copy props to hw frame: %s",
av_err2str(ret));
av_err2str(ret));
goto fail;
}
@@ -389,8 +389,8 @@ static bool vaapi_encode(void *data, struct encoder_frame *frame,
if (ret == AVERROR_EOF || ret == AVERROR(EAGAIN))
ret = 0;
#else
ret = avcodec_encode_video2(
enc->context, &av_pkt, hwframe, &got_packet);
ret = avcodec_encode_video2(enc->context, &av_pkt, hwframe,
&got_packet);
#endif
if (ret < 0) {
warn("vaapi_encode: Error encoding: %s", av_err2str(ret));
@@ -400,13 +400,13 @@ static bool vaapi_encode(void *data, struct encoder_frame *frame,
if (got_packet && av_pkt.size) {
if (enc->first_packet) {
uint8_t *new_packet;
size_t size;
size_t size;
enc->first_packet = false;
obs_extract_avc_headers(av_pkt.data, av_pkt.size,
&new_packet, &size, &enc->header,
&enc->header_size, &enc->sei,
&enc->sei_size);
&new_packet, &size,
&enc->header, &enc->header_size,
&enc->sei, &enc->sei_size);
da_copy_array(enc->buffer, new_packet, size);
bfree(new_packet);
@@ -414,11 +414,11 @@ static bool vaapi_encode(void *data, struct encoder_frame *frame,
da_copy_array(enc->buffer, av_pkt.data, av_pkt.size);
}
packet->pts = av_pkt.pts;
packet->dts = av_pkt.dts;
packet->data = enc->buffer.array;
packet->size = enc->buffer.num;
packet->type = OBS_ENCODER_VIDEO;
packet->pts = av_pkt.pts;
packet->dts = av_pkt.dts;
packet->data = enc->buffer.array;
packet->size = enc->buffer.num;
packet->type = OBS_ENCODER_VIDEO;
packet->keyframe = obs_avc_keyframe(packet->data, packet->size);
*received_packet = true;
} else {
@@ -442,11 +442,11 @@ static void set_visible(obs_properties_t *ppts, const char *name, bool visible)
static void vaapi_defaults(obs_data_t *settings)
{
obs_data_set_default_string(
settings, "vaapi_device", "/dev/dri/renderD128");
obs_data_set_default_string(settings, "vaapi_device",
"/dev/dri/renderD128");
obs_data_set_default_int(settings, "vaapi_codec", AV_CODEC_ID_H264);
obs_data_set_default_int(settings, "profile",
FF_PROFILE_H264_CONSTRAINED_BASELINE);
FF_PROFILE_H264_CONSTRAINED_BASELINE);
obs_data_set_default_int(settings, "level", 40);
obs_data_set_default_int(settings, "bitrate", 2500);
obs_data_set_default_int(settings, "keyint_sec", 0);
@@ -461,10 +461,11 @@ static obs_properties_t *vaapi_properties(void *unused)
UNUSED_PARAMETER(unused);
obs_properties_t *props = obs_properties_create();
obs_property_t * list;
obs_property_t *list;
list = obs_properties_add_list(props, "vaapi_device", "VAAPI Device",
OBS_COMBO_TYPE_LIST, OBS_COMBO_FORMAT_STRING);
OBS_COMBO_TYPE_LIST,
OBS_COMBO_FORMAT_STRING);
char path[32] = "/dev/dri/renderD1";
for (int i = 28;; i++) {
sprintf(path, "/dev/dri/renderD1%d", i);
@@ -478,27 +479,29 @@ static obs_properties_t *vaapi_properties(void *unused)
}
list = obs_properties_add_list(props, "vaapi_codec", "VAAPI Codec",
OBS_COMBO_TYPE_LIST, OBS_COMBO_FORMAT_INT);
OBS_COMBO_TYPE_LIST,
OBS_COMBO_FORMAT_INT);
obs_property_list_add_int(list, "H.264 (default)", AV_CODEC_ID_H264);
list = obs_properties_add_list(props, "level", "Level",
OBS_COMBO_TYPE_LIST, OBS_COMBO_FORMAT_INT);
OBS_COMBO_TYPE_LIST,
OBS_COMBO_FORMAT_INT);
obs_property_list_add_int(list, "480p30 (3.0)", 30);
obs_property_list_add_int(list, "720p30/480p60 (3.1)", 31);
obs_property_list_add_int(
list, "Compatibility mode (4.0 default)", 40);
obs_property_list_add_int(list, "Compatibility mode (4.0 default)",
40);
obs_property_list_add_int(list, "720p60/1080p30 (4.1)", 41);
obs_property_list_add_int(list, "1080p60 (4.2)", 42);
obs_property_t *p;
p = obs_properties_add_int(props, "bitrate", obs_module_text("Bitrate"), 0,
300000, 50);
p = obs_properties_add_int(props, "bitrate", obs_module_text("Bitrate"),
0, 300000, 50);
obs_property_int_set_suffix(p, " Kbps");
obs_properties_add_int(props, "keyint_sec",
obs_module_text("Keyframe Interval (seconds)"), 0, 20,
1);
obs_module_text("Keyframe Interval (seconds)"),
0, 20, 1);
return props;
}
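
The device list above is filled by probing /dev/dri/renderD128 onward. How each candidate path is tested falls outside this hunk, so the access() check in the sketch below is an assumption rather than the actual implementation.

/* Hypothetical probe matching the loop in vaapi_properties(); the
 * access() test is an assumption, the hunk above does not show how the
 * real code validates each path. */
#include <stdio.h>
#include <unistd.h>

static void list_render_nodes(void)
{
	char path[32];

	for (int i = 28;; i++) {
		snprintf(path, sizeof(path), "/dev/dri/renderD1%d", i);
		if (access(path, F_OK) != 0)
			break;
		printf("candidate VAAPI device: %s\n", path);
	}
}
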
@@ -508,7 +511,7 @@ static bool vaapi_extra_data(void *data, uint8_t **extra_data, size_t *size)
struct vaapi_encoder *enc = data;
*extra_data = enc->header;
*size = enc->header_size;
*size = enc->header_size;
return true;
}
@@ -517,23 +520,23 @@ static bool vaapi_sei_data(void *data, uint8_t **extra_data, size_t *size)
struct vaapi_encoder *enc = data;
*extra_data = enc->sei;
*size = enc->sei_size;
*size = enc->sei_size;
return true;
}
struct obs_encoder_info vaapi_encoder_info = {
.id = "ffmpeg_vaapi",
.type = OBS_ENCODER_VIDEO,
.codec = "h264",
.get_name = vaapi_getname,
.create = vaapi_create,
.destroy = vaapi_destroy,
.encode = vaapi_encode,
.get_defaults = vaapi_defaults,
.id = "ffmpeg_vaapi",
.type = OBS_ENCODER_VIDEO,
.codec = "h264",
.get_name = vaapi_getname,
.create = vaapi_create,
.destroy = vaapi_destroy,
.encode = vaapi_encode,
.get_defaults = vaapi_defaults,
.get_properties = vaapi_properties,
.get_extra_data = vaapi_extra_data,
.get_sei_data = vaapi_sei_data,
.get_video_info = vaapi_video_info
.get_sei_data = vaapi_sei_data,
.get_video_info = vaapi_video_info,
};
#endif


@@ -20,10 +20,10 @@ MODULE_EXPORT const char *obs_module_description(void)
return "FFmpeg based sources/outputs/encoders";
}
extern struct obs_source_info ffmpeg_source;
extern struct obs_output_info ffmpeg_output;
extern struct obs_output_info ffmpeg_muxer;
extern struct obs_output_info replay_buffer;
extern struct obs_source_info ffmpeg_source;
extern struct obs_output_info ffmpeg_output;
extern struct obs_output_info ffmpeg_muxer;
extern struct obs_output_info replay_buffer;
extern struct obs_encoder_info aac_encoder_info;
extern struct obs_encoder_info opus_encoder_info;
extern struct obs_encoder_info nvenc_encoder_info;
@@ -84,8 +84,8 @@ static void destroy_log_context(struct log_context *log_context)
pthread_mutex_unlock(&log_contexts_mutex);
}
static void ffmpeg_log_callback(void* context, int level, const char* format,
va_list args)
static void ffmpeg_log_callback(void *context, int level, const char *format,
va_list args)
{
if (format == NULL)
return;
@@ -95,8 +95,8 @@ static void ffmpeg_log_callback(void* context, int level, const char* format,
char *str = log_context->str;
av_log_format_line(context, level, format, args, str + strlen(str),
(int)(sizeof(log_context->str) - strlen(str)),
&log_context->print_prefix);
(int)(sizeof(log_context->str) - strlen(str)),
&log_context->print_prefix);
int obs_level;
switch (level) {
@@ -121,7 +121,7 @@ static void ffmpeg_log_callback(void* context, int level, const char* format,
return;
char *str_end = str + strlen(str) - 1;
while(str < str_end) {
while (str < str_end) {
if (*str_end != '\n')
break;
*str_end-- = '\0';
@@ -142,28 +142,10 @@ static const char *nvenc_check_name = "nvenc_check";
#ifdef _WIN32
static const wchar_t *blacklisted_adapters[] = {
L"720M",
L"730M",
L"740M",
L"745M",
L"820M",
L"830M",
L"840M",
L"845M",
L"920M",
L"930M",
L"940M",
L"945M",
L"1030",
L"MX110",
L"MX130",
L"MX150",
L"MX230",
L"MX250",
L"M520",
L"M500",
L"P500",
L"K620M"
L"720M", L"730M", L"740M", L"745M", L"820M", L"830M",
L"840M", L"845M", L"920M", L"930M", L"940M", L"945M",
L"1030", L"MX110", L"MX130", L"MX150", L"MX230", L"MX250",
L"M520", L"M500", L"P500", L"K620M",
};
static const size_t num_blacklisted =
@@ -202,7 +184,7 @@ static bool is_blacklisted(const wchar_t *name)
return false;
}
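
For reference, the adapter blacklist above is applied by is_blacklisted() as a substring match against the adapter description reported by DXGI. The sketch below uses the standard wcsstr(); the real helper is not shown in this hunk and may differ (for example by matching case-insensitively), so treat it as a hypothetical restatement.

/* Hypothetical restatement of the blacklist test; blacklisted_adapters[]
 * and num_blacklisted are the definitions earlier in this file. */
#include <stdbool.h>
#include <wchar.h>

static bool adapter_is_blacklisted(const wchar_t *description)
{
	for (size_t i = 0; i < num_blacklisted; i++) {
		if (wcsstr(description, blacklisted_adapters[i]))
			return true;
	}
	return false;
}
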
typedef HRESULT (WINAPI *create_dxgi_proc)(const IID *, IDXGIFactory1 **);
typedef HRESULT(WINAPI *create_dxgi_proc)(const IID *, IDXGIFactory1 **);
static bool nvenc_device_available(void)
{
@@ -226,7 +208,7 @@ static bool nvenc_device_available(void)
if (!create) {
create = (create_dxgi_proc)GetProcAddress(dxgi,
"CreateDXGIFactory1");
"CreateDXGIFactory1");
if (!create) {
return true;
}